| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
import _dk_core as core
from . import view
from . import font
from . import control
from . import textinput
class TextField(textinput.TextInput, control.Control, view.View):
borderWidth = 1
caretBlinkInterval = 0.5
caretWidth = 1
caretColor = core.Color(0.0, 0.0, 0.0)
caretColorComposition = core.Color(0.0, 0.0, 0.75)
selectionColor = core.Color(0.5, 0.5, 1.0)
selectionColorInactivated = core.Color(0.6, 0.6, 1.0)
selectionColorDisabled = core.Color(1.0, 0.0, 0.0)
textColor = core.Color(0.0, 0.0, 0.0)
textColorInactivated = core.Color(0.3, 0.3, 0.3)
textColorDisabled = core.Color(0.4, 0.4, 0.4)
textColorSelected = core.Color(1.0, 1.0, 1.0)
textColorSelectedInactivated = core.Color(0.9, 0.9, 0.9)
textColorSelectedDisabled = core.Color(0.4, 0.4, 1.0)
textColorComposition = core.Color(0.0, 0.0, 0.5)
textColorCompositionUnderCaret = core.Color(1.0, 1.0, 1.0)
outlineColor = None
outlineColorInactivated = None
outlineColorDisabled = None
outlineColorSelected = None
outlineColorSelectedInactivated = None
outlineColorSelectedDisabled = None
outlineColorComposition = None
outlineColorCompositionUnderCaret = None
backgroundColor = core.Color(1.0, 1.0, 1.0)
backgroundColorDisabled = core.Color(0.6, 0.6, 0.6)
fontAttributes = font.attributes(14, kerning=False, file='SeoulNamsanM.ttf')
keyboardId = 0
acceptTab = False
tabSpace = 4
keyPressingDelay = 0.3
keyRepeatInterval = 0.04
def __init__(self, text='', *args, **kwargs):
super().__init__(*args, **kwargs)
self.__text = text
self.__caretPos = 0
self.__selectionBegin = -1 # selection: from __selectionBegin to __caretPos
self.__timer = core.Timer()
self.__editing = False
self.__caretVisible = False
self.__capturedMouseId = None
self.__unmodifiedText = ''
@property
def text(self):
return self.__text
@text.setter
def text(self, value):
self.__text = value
self.caretPos = self.__caretPos
@property
def caretPos(self):
return self.__caretPos
@caretPos.setter
def caretPos(self, value):
value = max(int(value), 0)
if self.__caretPos != value:
print('setCaretPos:', value)
self.__caretPos = value
tl = len(self.__text)
if self.__caretPos > tl:
self.__caretPos = tl
self.__selectionBegin = -1
if self.enabled:
self.__caretVisible = True
self.__timer.reset()
self.updateScroll()
self.redraw()
@property
def editing(self):
return self.__editing
@editing.setter
def editing(self, value):
b = bool(value)
if self.__editing != b:
self.__editing = b
if self.__editing:
self.captureKeyboard(self.keyboardId)
self.screen().window.setTextInputEnabled(self.keyboardId, True)
self.__caretVisible = True
self.__unmodifiedText = self.__text
print('textfield:{} capture keyboard:{}'.format(id(self), self.keyboardId))
else:
self.screen().window.setTextInputEnabled(self.keyboardId, False)
self.releaseKeyboard(self.keyboardId)
self.__caretVisible = False
self.__unmodifiedText = ''
print('textfield:{} release keyboard:{}'.format(id(self), self.keyboardId))
self.__timer.reset()
self.redraw()
def selectionRange(self):
if self.__selectionBegin >= 0 and self.__selectionBegin != self.__caretPos:
if self.__selectionBegin > self.__caretPos:
return self.__caretPos, self.__selectionBegin
else:
return self.__selectionBegin, self.__caretPos
return self.__caretPos, self.__caretPos
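# Illustration (not in the original source): with __selectionBegin == 7 and
# __caretPos == 3 this returns (3, 7); with no active selection it collapses
# to (caretPos, caretPos).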
def onLoaded(self):
super().onLoaded()
self.minimumViewHeight = self.font.lineHeight() / self.scaleFactor
self.updateScroll()
def onUnload(self):
super().onUnload()
def onResized(self):
super().onResized()
self.updateScroll()
def onUpdate(self, delta, tick, date):
super().onUpdate(delta, tick, date)
if self.__editing and self.enabled:
if self.__timer.elapsed() > self.caretBlinkInterval:
self.__caretVisible = not self.__caretVisible
self.__timer.reset()
self.redraw()
def characterIndexAtPosition(self, pos):
bounds = self.contentBounds()
x = pos.x - bounds.x
invScale = 1.0 / self.scaleFactor
lineWidth = lambda s: self.font.lineWidth(s) * invScale
if x <= 0:
return 0
if x >= lineWidth(self.text):
return len(self.text)
width = 0
width2 = 0
index = 0
while width < x:
width2 = width
index += 1
width = lineWidth(self.text[0:index])
if abs(x - width2) < abs(x - width):
return index-1
return index
def updateScroll(self):
bounds = self.contentBounds()
if self.font:
invScale = 1.0 / self.scaleFactor
charWidth = self.font.width * invScale
if bounds.width > charWidth * 2: # at least 2 characters should be displayed.
maxX = bounds.width * 0.9
minX = bounds.width * 0.1
text = self.__text + self.composingText
textLen = self.font.lineWidth(text) * invScale
if textLen > maxX:
text = self.__text[0:self.__caretPos] + self.composingText
textLen = self.font.lineWidth(text) * invScale
transform = core.AffineTransform2(core.Matrix3(self.contentTransform))
textLen += transform.translation[0]
indent = min(bounds.width * 0.1, charWidth)
offset = 0
while textLen < minX:
textLen += indent
offset += indent
while textLen > maxX:
textLen -= indent
offset -= indent
if offset != 0:
transform.translate(core.Vector2(offset, 0))
pos = transform.translation
if pos[0] > 0:
transform.translation = 0, pos[1]
self.contentTransform = transform.matrix3()
return
self.contentTransform = core.Matrix3()
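# Note on the policy above (added comment): the caret's x offset is nudged in
# `indent`-sized steps until it falls inside the [10%, 90%] band of the content
# bounds, so some context stays visible on both sides while typing near an edge.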
def onRender(self, renderer):
invScale = 1.0 / self.scaleFactor
bounds = self.contentBounds()
height = self.font.lineHeight() * invScale
offsetX = bounds.x
offsetY = int(bounds.y + (bounds.height - height) * 0.5)
lineWidth = lambda text: self.font.lineWidth(text) * invScale
drawText = lambda rect, text, tc, oc: \
font.drawText(renderer, rect, text, self.font, tc, oc, align=font.ALIGN_BOTTOM_LEFT, linebreak=font.LINE_BREAK_CLIPPING)
if self.enabled:
super().onRender(renderer)
if self.isKeyboardCapturedBySelf(self.keyboardId): # activated
textColor = self.textColor
outlineColor = self.outlineColor
textColorSelected = self.textColorSelected
outlineColorSelected = self.outlineColorSelected
selectionColor = self.selectionColor
else:
textColor = self.textColorInactivated
outlineColor = self.outlineColorInactivated
textColorSelected = self.textColorSelectedInactivated
outlineColorSelected = self.outlineColorSelectedInactivated
selectionColor = self.selectionColorInactivated
else: # disabled
tmp = self.backgroundColor
self.backgroundColor = self.backgroundColorDisabled
super().onRender(renderer)
self.backgroundColor = tmp
self.__caretVisible = False
textColor = self.textColorDisabled
outlineColor = self.outlineColorDisabled
textColorSelected = self.textColorSelectedDisabled
outlineColorSelected = self.outlineColorSelectedDisabled
selectionColor = self.selectionColorDisabled
selectionRange = self.selectionRange()
if selectionRange[0] != selectionRange[1]:
left = self.__text[0:selectionRange[0]]
right = self.__text[selectionRange[1]:]
else:
left = self.__text[0:self.__caretPos]
right = self.__text[self.__caretPos:]
width = lineWidth(left)
rc = core.Rect(offsetX, offsetY, width, height)
drawText(rc, left, textColor, outlineColor)
offsetX += width
if selectionRange[0] != selectionRange[1]:
text = self.__text[selectionRange[0]:selectionRange[1]]
width = lineWidth(text)
rc = core.Rect(offsetX, offsetY, width, height)
with renderer.contextForSolidRects(selectionColor) as r:
r.add(rc)
drawText(rc, text, textColorSelected, outlineColorSelected)
if self.__caretVisible:
if self.__caretPos > self.__selectionBegin:
rc = core.Rect(offsetX + width, offsetY, self.caretWidth, height)
else:
rc = core.Rect(offsetX, offsetY, self.caretWidth, height)
with renderer.contextForSolidRects(self.caretColor) as r:
r.add(rc)
offsetX += width
else:
if len(self.composingText) > 0:
width = lineWidth(self.composingText)
rc = core.Rect(offsetX, offsetY, width, height)
if self.__caretVisible:
with renderer.contextForSolidRects(self.caretColorComposition) as r:
r.add(rc)
drawText(rc, self.composingText, self.textColorCompositionUnderCaret, self.outlineColorCompositionUnderCaret)
else:
drawText(rc, self.composingText, self.textColorComposition, self.outlineColorComposition)
offsetX += width
else:
if self.__caretVisible:
rc = core.Rect(offsetX, offsetY, self.caretWidth, height)
with renderer.contextForSolidRects(self.caretColor) as r:
r.add(rc)
width = lineWidth(right)
rc = core.Rect(offsetX, offsetY, width, height)
drawText(rc, right, textColor, outlineColor)
def onMouseDown(self, deviceId, buttonId, pos):
editing = self.__editing
selectionRange = self.selectionRange()
self.editing = True
if self.__capturedMouseId is None and buttonId == 0:
if editing or selectionRange[0] == selectionRange[1]:
self.captureMouse(deviceId)
index = self.characterIndexAtPosition(pos)
self.__capturedMouseId = deviceId
self.caretPos = index
self.redraw()
return super().onMouseDown(deviceId, buttonId, pos)
def onMouseUp(self, deviceId, buttonId, pos):
if buttonId == 0 and self.__capturedMouseId == deviceId:
self.__capturedMouseId = None
if self.__selectionBegin == self.__caretPos:
self.__selectionBegin = -1
self.releaseMouse(deviceId)
return super().onMouseUp(deviceId, buttonId, pos)
def onMouseMove(self, deviceId, pos, delta):
if self.__capturedMouseId == deviceId:
index = self.characterIndexAtPosition(pos)
if self.__selectionBegin < 0:
self.__selectionBegin = self.__caretPos
self.__caretPos = index
self.updateScroll()
self.redraw()
return super().onMouseMove(deviceId, pos, delta)
def onMouseLost(self, deviceId):
self.__capturedMouseId = None
return super().onMouseLost(deviceId)
def onKeyboardLost(self, deviceId):
print('textfield:{}.onKeyboardLost:{}'.format(id(self), deviceId))
self.editing = False
return super().onKeyboardLost(deviceId)
def moveLeft(self):
if self.shiftDown:
if self.__selectionBegin < 0:
self.__selectionBegin = self.__caretPos
self.__caretPos = max(self.__caretPos-1, 0)
else:
range = self.selectionRange()
if range[0] == range[1]:
self.__caretPos = max(self.__caretPos-1, 0)
else:
self.__caretPos = range[0]
self.__selectionBegin = -1
self.__caretVisible = True
self.__timer.reset()
self.updateScroll()
self.redraw()
def moveToBeginningOfLine(self):
if self.shiftDown:
if self.__selectionBegin < 0:
self.__selectionBegin = self.__caretPos
self.__caretPos = 0
else:
self.__caretPos = 0
self.__selectionBegin = -1
self.__caretVisible = True
self.__timer.reset()
self.updateScroll()
self.redraw()
def moveRight(self):
if self.shiftDown:
if self.__selectionBegin < 0:
self.__selectionBegin = self.__caretPos
self.__caretPos = min(len(self.__text), self.__caretPos+1)
else:
range = self.selectionRange()
if range[0] == range[1]:
self.__caretPos = min(len(self.__text), self.__caretPos+1)
else:
self.__caretPos = range[1]
self.__selectionBegin = -1
self.__caretVisible = True
self.__timer.reset()
self.updateScroll()
self.redraw()
def moveToEndOfLine(self):
super().moveToEndOfLine()
if self.shiftDown:
if self.__selectionBegin < 0:
self.__selectionBegin = self.__caretPos
self.__caretPos = len(self.__text)
else:
self.__caretPos = len(self.__text)
self.__selectionBegin = -1
self.__caretVisible = True
self.__timer.reset()
self.updateScroll()
self.redraw()
def insertText(self, text):
range = self.selectionRange()
left = self.__text[0:range[0]]
right = self.__text[range[1]:]
if range[0] == range[1]:
self.__text = left + text + right
self.__caretPos += len(text)
else:
self.__text = left + text
self.__caretPos = len(self.text)
self.text += right
self.__selectionBegin = -1
self.__caretVisible = True
self.__timer.reset()
self.updateScroll()
self.redraw()
def setCompositionText(self, text):
range = self.selectionRange()
if range[0] != range[1]:
left = self.__text[0:range[0]]
right = self.__text[range[1]:]
self.__text = left + right
self.__selectionBegin = -1
self.__caretVisible = True
self.__timer.reset()
self.updateScroll()
self.redraw()
def deleteBackward(self):
range = self.selectionRange()
if range[0] == range[1]:
if range[0] > 0:
left = self.__text[0:range[0]-1]
right = self.__text[range[1]:]
self.__text = left + right
self.__caretPos -= 1
else:
left = self.__text[0:range[0]]
right = self.__text[range[1]:]
self.__text = left + right
self.__caretPos = range[0]
self.__selectionBegin = -1
self.__caretVisible = True
self.__timer.reset()
self.updateScroll()
self.redraw()
def deleteForward(self):
range = self.selectionRange()
if range[0] == range[1]:
if range[1] < len(self.__text):
left = self.__text[0:range[0]]
right = self.__text[range[1]+1:]
self.__text = left + right
else:
left = self.__text[0:range[0]]
right = self.__text[range[1]:]
self.__text = left + right
self.__caretPos = range[0]
self.__selectionBegin = -1
self.__caretVisible = True
self.__timer.reset()
self.updateScroll()
self.redraw()
def processEscape(self):
if self.editing:
self.__text = self.__unmodifiedText
self.__caretPos = 0
self.__selectionBegin = -1
self.editing = False
self.updateScroll()
# post notification
print('User cancelled editing. (post notification)')
def processCarriageReturn(self):
if self.editing:
self.__caretPos = 0
self.__selectionBegin = -1
self.editing = False
self.updateScroll()
# post notification!
print('User finished editing. (post notification)')
def processLineFeed(self):
self.processCarriageReturn()
def insertTab(self):
self.processCarriageReturn()
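# A minimal, framework-free sketch (not part of DKGL) of the caret/selection
# rules used by TextField above; `TextState` and everything in it are
# hypothetical stand-ins, since `_dk_core` and the dk.ui views are unavailable here.
class TextState:
    def __init__(self, text=''):
        self.text = text
        self.caret_pos = 0
        self.selection_begin = -1  # -1 means "no selection", as in TextField

    def set_caret(self, value):
        # Mirrors the TextField.caretPos setter: clamp to [0, len(text)],
        # then drop any selection.
        self.caret_pos = max(min(int(value), len(self.text)), 0)
        self.selection_begin = -1

    def selection_range(self):
        # Mirrors TextField.selectionRange(): always an ordered (begin, end).
        if self.selection_begin >= 0 and self.selection_begin != self.caret_pos:
            return (min(self.selection_begin, self.caret_pos),
                    max(self.selection_begin, self.caret_pos))
        return self.caret_pos, self.caret_pos

    def insert(self, text):
        # Mirrors TextField.insertText(): replace the selection (or splice at
        # the caret), then place the caret after the inserted text.
        begin, end = self.selection_range()
        self.text = self.text[:begin] + text + self.text[end:]
        self.caret_pos = begin + len(text)
        self.selection_begin = -1

if __name__ == '__main__':
    s = TextState('hello world')
    s.selection_begin, s.caret_pos = 6, 11  # select "world"
    s.insert('there')
    assert s.text == 'hello there' and s.caret_pos == 11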
| DKGL/DKGL | Scripts/dk/ui/textfield.py | Python | bsd-3-clause | 17,528 |
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implementer
from typing import Any, Dict, List, Optional, Tuple
irc.ERR_SERVICES = "955" # Custom numeric; 955 <TYPE> <SUBTYPE> <ERROR>
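# For illustration (hypothetical rendering, not from txircd itself): a client
# that is not logged in would receive something like
# ":server.example 955 alice ACCOUNT GROUP NOTLOGIN"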
@implementer(IPlugin, IModuleData)
class AccountGroup(ModuleData):
name = "AccountGroup"
def userCommands(self) -> List[Tuple[str, int, Command]]:
return [ ("ACCOUNTGROUP", 1, CommandGroup(self.ircd)),
("ACCOUNTUNGROUP", 1, CommandUngroup(self.ircd)) ]
@implementer(ICommand)
class CommandGroup(Command):
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, user: "IRCUser", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
return {}
def execute(self, user: "IRCUser", data: Dict[Any, Any]) -> bool:
if not user.metadataKeyExists("account"):
user.sendMessage(irc.ERR_SERVICES, "ACCOUNT", "GROUP", "NOTLOGIN")
user.sendMessage("NOTICE", "You're not logged in.")
return True
resultValue = self.ircd.runActionUntilValue("accountaddnick", user.metadataValue("account"), user.nick)
if not resultValue:
user.sendMessage(irc.ERR_SERVICES, "ACCOUNT", "GROUP", "NOACCOUNT")
user.sendMessage("NOTICE", "This server doesn't have accounts set up.")
return True
if resultValue[0]:
user.sendMessage("NOTICE", "{} was successfully linked to your account.".format(user.nick))
return True
user.sendMessage(irc.ERR_SERVICES, "ACCOUNT", "GROUP", resultValue[1])
user.sendMessage("NOTICE", "Couldn't group nick: {}".format(resultValue[2]))
return True
@implementer(ICommand)
class CommandUngroup(Command):
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, user: "IRCUser", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if not params:
user.sendSingleError("UngroupParams", irc.ERR_NEEDMOREPARAMS, "ACCOUNTUNGROUP", "Not enough parameters")
return None
return {
"removenick": params[0]
}
def execute(self, user: "IRCUser", data: Dict[Any, Any]) -> bool:
if not user.metadataKeyExists("account"):
user.sendMessage(irc.ERR_SERVICES, "ACCOUNT", "GROUP", "NOTLOGIN")
user.sendMessage("NOTICE", "You're not logged in.")
return True
removeNick = data["removenick"]
resultValue = self.ircd.runActionUntilValue("accountremovenick", user.metadataValue("account"), removeNick)
if not resultValue:
user.sendMessage(irc.ERR_SERVICES, "ACCOUNT", "GROUP", "NOACCOUNT")
user.sendMessage("NOTICE", "This server doesn't have accounts set up.")
return True
if resultValue[0]:
user.sendMessage("NOTICE", "{} was successfully removed from your account.".format(removeNick))
return True
user.sendMessage(irc.ERR_SERVICES, "ACCOUNT", "GROUP", resultValue[1])
user.sendMessage("NOTICE", "Couldn't ungroup nick: {}".format(resultValue[2]))
return True
groupCommand = AccountGroup()
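# Framework-free sketch (not txircd internals) of the dispatch contract the two
# commands above rely on: parseParams either returns a data dict or None (after
# sending its own error), and execute runs only when a dict comes back. All
# names below are illustrative stand-ins.
def dispatch(user, command_table, name, params):
    # command_table is shaped like AccountGroup.userCommands(): (name, rank, handler)
    for cmd_name, _rank, handler in command_table:
        if cmd_name == name:
            data = handler.parseParams(user, params, prefix='', tags={})
            if data is None:  # parse failure; the handler already sent an error
                return False
            return handler.execute(user, data)
    return False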
| Heufneutje/txircd | txircd/modules/extra/services/account_group.py | Python | bsd-3-clause | 2,997 |
import logging
import warnings
import inspect
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm import joinedload, aliased
from sqlalchemy.sql.expression import desc, ColumnElement
from sqlalchemy import Boolean, Table, func, or_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql.expression import cast
from sqlalchemy import Unicode
from flask import flash
from flask_admin._compat import string_types, text_type
from flask_admin.babel import gettext, ngettext, lazy_gettext
from flask_admin.model import BaseModelView
from flask_admin.model.form import wrap_fields_in_fieldlist
from flask_admin.model.fields import ListEditableFieldList
from flask_admin.actions import action
from flask_admin._backwards import ObsoleteAttr
from flask_admin.contrib.sqla import form, filters as sqla_filters, tools
from .typefmt import DEFAULT_FORMATTERS
from .tools import get_query_for_ids
from .ajax import create_ajax_loader
# Set up logger
log = logging.getLogger("flask-admin.sqla")
class ModelView(BaseModelView):
"""
SQLAlchemy model view
Usage sample::
admin = Admin()
admin.add_view(ModelView(User, db.session))
"""
column_auto_select_related = ObsoleteAttr('column_auto_select_related',
'auto_select_related',
True)
"""
Enable automatic detection of displayed foreign keys in this view
and perform automatic joined loading for related models to improve
query performance.
Please note that detection is not recursive: if the `__unicode__` method
of a related model uses another model to generate its string representation, it
will still make a separate database call.
"""
column_select_related_list = ObsoleteAttr('column_select_related',
'list_select_related',
None)
"""
List of parameters for SQLAlchemy `subqueryload`. Overrides `column_auto_select_related`
property.
For example::
class PostAdmin(ModelView):
column_select_related_list = ('user', 'city')
You can also use properties::
class PostAdmin(ModelView):
column_select_related_list = (Post.user, Post.city)
Please refer to the `subqueryload` documentation for the list of possible values.
"""
column_display_all_relations = ObsoleteAttr('column_display_all_relations',
'list_display_all_relations',
False)
"""
Controls if list view should display all relations, not only many-to-one.
"""
column_searchable_list = ObsoleteAttr('column_searchable_list',
'searchable_columns',
None)
"""
Collection of the searchable columns.
Example::
class MyModelView(ModelView):
column_searchable_list = ('name', 'email')
You can also pass columns::
class MyModelView(ModelView):
column_searchable_list = (User.name, User.email)
The following search rules apply:
- If you enter ``ZZZ`` in the UI search field, it will generate ``ILIKE '%ZZZ%'``
statement against searchable columns.
- If you enter multiple words, each word will be searched separately, but
only rows that contain all words will be displayed. For example, searching
for ``abc def`` will find all rows that contain ``abc`` and ``def`` in one or
more columns.
- If you prefix your search term with ``^``, it will find all rows
that start with the search term. So, if you entered ``^ZZZ``, then ``ILIKE 'ZZZ%'`` will be used.
- If you prefix your search term with ``=``, it will perform an exact match.
For example, if you entered ``=ZZZ``, the statement ``ILIKE 'ZZZ'`` will be used.
"""
column_filters = None
"""
Collection of the column filters.
Can contain either field names or instances of
:class:`flask_admin.contrib.sqla.filters.BaseSQLAFilter` classes.
Filters will be grouped by name when displayed in the drop-down.
For example::
class MyModelView(BaseModelView):
column_filters = ('user', 'email')
or::
from flask_admin.contrib.sqla.filters import BooleanEqualFilter
class MyModelView(BaseModelView):
column_filters = (BooleanEqualFilter(column=User.name, name='Name'),)
or::
from flask_admin.contrib.sqla.filters import BaseSQLAFilter
class FilterLastNameBrown(BaseSQLAFilter):
def apply(self, query, value, alias=None):
if value == '1':
return query.filter(self.column == "Brown")
else:
return query.filter(self.column != "Brown")
def operation(self):
return 'is Brown'
class MyModelView(BaseModelView):
column_filters = [
FilterLastNameBrown(
User.last_name, 'Last Name', options=(('1', 'Yes'), ('0', 'No'))
)
]
"""
model_form_converter = form.AdminModelConverter
"""
Model form conversion class. Use this to implement custom field conversion logic.
For example::
class MyModelConverter(AdminModelConverter):
pass
class MyAdminView(ModelView):
model_form_converter = MyModelConverter
"""
inline_model_form_converter = form.InlineModelConverter
"""
Inline model conversion class. If you need some kind of post-processing for inline
forms, you can customize behavior by doing something like this::
class MyInlineModelConverter(InlineModelConverter):
def post_process(self, form_class, info):
form_class.value = wtf.StringField('value')
return form_class
class MyAdminView(ModelView):
inline_model_form_converter = MyInlineModelConverter
"""
filter_converter = sqla_filters.FilterConverter()
"""
Field to filter converter.
Override this attribute to use non-default converter.
"""
fast_mass_delete = False
"""
If set to `False` and a user deletes more than one model using the built-in action,
all models will be read from the database and then deleted one by one,
giving SQLAlchemy a chance to manually clean up any dependencies (many-to-many
relationships, etc.).
If set to `True`, will run a ``DELETE`` statement which is somewhat faster,
but may leave corrupted data if you forget to configure ``DELETE
CASCADE`` for your model.
"""
inline_models = None
"""
Inline related-model editing for models with parent-child relations.
Accepts enumerable with one of the following possible values:
1. Child model class::
class MyModelView(ModelView):
inline_models = (Post,)
2. Child model class and additional options::
class MyModelView(ModelView):
inline_models = [(Post, dict(form_columns=['title']))]
3. Django-like ``InlineFormAdmin`` class instance::
class MyInlineModelForm(InlineFormAdmin):
form_columns = ('title', 'date')
class MyModelView(ModelView):
inline_models = (MyInlineModelForm(MyInlineModel),)
You can customize the generated field name by:
1. Using the `form_label` property as a key to the options dictionary::
class MyModelView(ModelView):
inline_models = ((Post, dict(form_label='Hello')),)
2. Using forward relation name and `column_labels` property::
class Model1(Base):
pass
class Model2(Base):
# ...
model1 = relation(Model1, backref='models')
class MyModel1View(Base):
inline_models = (Model2,)
column_labels = {'models': 'Hello'}
"""
column_type_formatters = DEFAULT_FORMATTERS
form_choices = None
"""
Map choices to form fields
Example::
class MyModelView(BaseModelView):
form_choices = {'my_form_field': [
('db_value', 'display_value'),
]}
"""
form_optional_types = (Boolean,)
"""
List of field types that should be optional if column is not nullable.
Example::
class MyModelView(BaseModelView):
form_optional_types = (Boolean, Unicode)
"""
def __init__(self, model, session,
name=None, category=None, endpoint=None, url=None, static_folder=None,
menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
"""
Constructor.
:param model:
Model class
:param session:
SQLAlchemy session
:param name:
View name. If not set, defaults to the model name
:param category:
Category name
:param endpoint:
Endpoint name. If not set, defaults to the model name
:param url:
Base URL. If not set, defaults to '/admin/' + endpoint
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
"""
self.session = session
self._search_fields = None
self._filter_joins = dict()
self._sortable_joins = dict()
if self.form_choices is None:
self.form_choices = {}
super(ModelView, self).__init__(model, name, category, endpoint, url, static_folder,
menu_class_name=menu_class_name,
menu_icon_type=menu_icon_type,
menu_icon_value=menu_icon_value)
# Primary key
self._primary_key = self.scaffold_pk()
if self._primary_key is None:
raise Exception('Model %s does not have primary key.' % self.model.__name__)
# Configuration
if not self.column_select_related_list:
self._auto_joins = self.scaffold_auto_joins()
else:
self._auto_joins = self.column_select_related_list
# Internal API
def _get_model_iterator(self, model=None):
"""
Return property iterator for the model
"""
if model is None:
model = self.model
return model._sa_class_manager.mapper.iterate_properties
def _get_columns_for_field(self, field):
if (not field or
not hasattr(field, 'property') or
not hasattr(field.property, 'columns') or
not field.property.columns):
raise Exception('Invalid field %s: does not contain any columns.' % field)
return field.property.columns
def _get_field_with_path(self, name):
"""
Resolve property by name and figure out its join path.
Join path might contain both properties and tables.
"""
path = []
model = self.model
# For strings, resolve path
if isinstance(name, string_types):
for attribute in name.split('.'):
value = getattr(model, attribute)
if (hasattr(value, 'property') and
hasattr(value.property, 'direction')):
model = value.property.mapper.class_
table = model.__table__
if self._need_join(table):
path.append(value)
attr = value
else:
attr = name
# Determine joins if table.column (relation object) is provided
if isinstance(attr, InstrumentedAttribute):
columns = self._get_columns_for_field(attr)
if len(columns) > 1:
raise Exception('Can only handle one column for %s' % name)
column = columns[0]
# TODO: Use SQLAlchemy "path-finder" to find exact join path to the target property
if self._need_join(column.table):
path.append(column.table)
return attr, path
def _need_join(self, table):
"""
Check if join to a table is necessary.
"""
return table not in self.model._sa_class_manager.mapper.tables
def _apply_path_joins(self, query, joins, path, inner_join=True):
"""
Apply join path to the query.
:param query:
Query to add joins to
:param joins:
List of current joins. Used to avoid joining on same relationship more than once
:param path:
Path to be joined
:param fn:
Join function
"""
last = None
if path:
for item in path:
key = (inner_join, item)
alias = joins.get(key)
if key not in joins:
if not isinstance(item, Table):
alias = aliased(item.property.mapper.class_)
fn = query.join if inner_join else query.outerjoin
if last is None:
query = fn(item) if alias is None else fn(alias, item)
else:
prop = getattr(last, item.key)
query = fn(prop) if alias is None else fn(alias, prop)
joins[key] = alias
last = alias
return query, joins, last
# Scaffolding
def scaffold_pk(self):
"""
Return the primary key name(s) from a model.
If the model has a single primary key, returns a string; otherwise returns a tuple.
"""
return tools.get_primary_key(self.model)
def get_pk_value(self, model):
"""
Return the primary key value from a model object.
If there are multiple primary keys, they're encoded into string representation.
"""
if isinstance(self._primary_key, tuple):
return tools.iterencode(getattr(model, attr) for attr in self._primary_key)
else:
return tools.escape(getattr(model, self._primary_key))
def scaffold_list_columns(self):
"""
Return a list of columns from the model.
"""
columns = []
for p in self._get_model_iterator():
if hasattr(p, 'direction'):
if self.column_display_all_relations or p.direction.name == 'MANYTOONE':
columns.append(p.key)
elif hasattr(p, 'columns'):
if len(p.columns) > 1:
filtered = tools.filter_foreign_columns(self.model.__table__, p.columns)
if len(filtered) > 1:
warnings.warn('Can not convert multiple-column properties (%s.%s)' % (self.model, p.key))
continue
column = filtered[0]
else:
column = p.columns[0]
if column.foreign_keys:
continue
if not self.column_display_pk and column.primary_key:
continue
columns.append(p.key)
return columns
def scaffold_sortable_columns(self):
"""
Return a dictionary of sortable columns.
Key is column name, value is sort column/field.
"""
columns = dict()
for p in self._get_model_iterator():
if hasattr(p, 'columns'):
# Sanity check
if len(p.columns) > 1:
# Multi-column properties are not supported
continue
column = p.columns[0]
# Can't sort on primary or foreign keys by default
if column.foreign_keys:
continue
if not self.column_display_pk and column.primary_key:
continue
columns[p.key] = column
return columns
def get_sortable_columns(self):
"""
Returns a dictionary of the sortable columns. Key is a model
field name and value is sort column (for example - attribute).
If `column_sortable_list` is set, will use it. Otherwise, will call
`scaffold_sortable_columns` to get them from the model.
"""
self._sortable_joins = dict()
if self.column_sortable_list is None:
return self.scaffold_sortable_columns()
else:
result = dict()
for c in self.column_sortable_list:
if isinstance(c, tuple):
column, path = self._get_field_with_path(c[1])
column_name = c[0]
elif isinstance(c, InstrumentedAttribute):
column, path = self._get_field_with_path(c)
column_name = str(c)
else:
column, path = self._get_field_with_path(c)
column_name = c
result[column_name] = column
if path:
self._sortable_joins[column_name] = path
return result
def init_search(self):
"""
Initialize search. Returns `True` if search is supported for this
view.
For SQLAlchemy, this will initialize internal fields: list of
column objects used for filtering, etc.
"""
if self.column_searchable_list:
self._search_fields = []
for p in self.column_searchable_list:
attr, joins = self._get_field_with_path(p)
if not attr:
raise Exception('Failed to find field for search field: %s' % p)
for column in self._get_columns_for_field(attr):
self._search_fields.append((column, joins))
return bool(self.column_searchable_list)
def scaffold_filters(self, name):
"""
Return list of enabled filters
"""
attr, joins = self._get_field_with_path(name)
if attr is None:
raise Exception('Failed to find field for filter: %s' % name)
# Figure out filters for related column, unless it's a hybrid_property
if isinstance(attr, ColumnElement):
warnings.warn(('Unable to scaffold the filter for %s, scaffolding '
'for hybrid_property is not supported yet.') % name)
elif hasattr(attr, 'property') and hasattr(attr.property, 'direction'):
filters = []
for p in self._get_model_iterator(attr.property.mapper.class_):
if hasattr(p, 'columns'):
# TODO: Check for multiple columns
column = p.columns[0]
if column.foreign_keys or column.primary_key:
continue
visible_name = '%s / %s' % (self.get_column_name(attr.prop.table.name),
self.get_column_name(p.key))
type_name = type(column.type).__name__
flt = self.filter_converter.convert(type_name,
column,
visible_name)
if flt:
table = column.table
if joins:
self._filter_joins[column] = joins
elif self._need_join(table):
self._filter_joins[column] = [table]
filters.extend(flt)
return filters
else:
columns = self._get_columns_for_field(attr)
if len(columns) > 1:
raise Exception('Cannot filter on more than one column for %s' % name)
column = columns[0]
if self._need_join(column.table) and name not in self.column_labels:
visible_name = '%s / %s' % (
self.get_column_name(column.table.name),
self.get_column_name(column.name)
)
else:
if not isinstance(name, string_types):
visible_name = self.get_column_name(name.property.key)
else:
visible_name = self.get_column_name(name)
type_name = type(column.type).__name__
flt = self.filter_converter.convert(
type_name,
column,
visible_name,
options=self.column_choices.get(name),
)
if joins:
self._filter_joins[column] = joins
elif self._need_join(column.table):
self._filter_joins[column] = [column.table]
return flt
def handle_filter(self, filter):
if isinstance(filter, sqla_filters.BaseSQLAFilter):
column = filter.column
# hybrid_property joins are not supported yet
if (isinstance(column, InstrumentedAttribute) and
self._need_join(column.table)):
self._filter_joins[column] = [column.table]
return filter
def scaffold_form(self):
"""
Create form from the model.
"""
converter = self.model_form_converter(self.session, self)
form_class = form.get_form(self.model, converter,
base_class=self.form_base_class,
only=self.form_columns,
exclude=self.form_excluded_columns,
field_args=self.form_args,
extra_fields=self.form_extra_fields)
if self.inline_models:
form_class = self.scaffold_inline_form_models(form_class)
return form_class
def scaffold_list_form(self, custom_fieldlist=ListEditableFieldList,
validators=None):
"""
Create form for the `index_view` using only the columns from
`self.column_editable_list`.
:param validators:
`form_args` dict with only validators
{'name': {'validators': [required()]}}
:param custom_fieldlist:
A WTForm FieldList class. By default, `ListEditableFieldList`.
"""
converter = self.model_form_converter(self.session, self)
form_class = form.get_form(self.model, converter,
base_class=self.form_base_class,
only=self.column_editable_list,
field_args=validators)
return wrap_fields_in_fieldlist(self.form_base_class,
form_class,
custom_fieldlist)
def scaffold_inline_form_models(self, form_class):
"""
Contribute inline models to the form
:param form_class:
Form class
"""
inline_converter = self.inline_model_form_converter(self.session,
self,
self.model_form_converter)
for m in self.inline_models:
form_class = inline_converter.contribute(self.model, form_class, m)
return form_class
def scaffold_auto_joins(self):
"""
Return a list of joined tables by going through the
displayed columns.
"""
if not self.column_auto_select_related:
return []
relations = set()
for p in self._get_model_iterator():
if hasattr(p, 'direction'):
# Check if it is pointing to same model
if p.mapper.class_ == self.model:
continue
if p.direction.name in ['MANYTOONE', 'MANYTOMANY']:
relations.add(p.key)
joined = []
for prop, name in self._list_columns:
if prop in relations:
joined.append(getattr(self.model, prop))
return joined
# AJAX foreignkey support
def _create_ajax_loader(self, name, options):
return create_ajax_loader(self.model, self.session, name, name, options)
# Database-related API
def get_query(self):
"""
Return a query for the model type.
If you override this method, don't forget to override `get_count_query` as well.
This method can be used to set a "persistent filter" on an index_view.
Example::
class MyView(ModelView):
def get_query(self):
return super(MyView, self).get_query().filter(User.username == current_user.username)
"""
return self.session.query(self.model)
def get_count_query(self):
"""
Return the count query for the model type.
A ``query(self.model).count()`` approach produces an excessive
subquery, so ``query(func.count('*'))`` should be used instead.
See commit ``#45a2723`` for details.
"""
return self.session.query(func.count('*')).select_from(self.model)
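# e.g. this emits roughly SELECT count(:param) FROM <table> (exact SQL varies
# by dialect) instead of wrapping the full list query in a subquery.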
def _order_by(self, query, joins, sort_joins, sort_field, sort_desc):
"""
Apply order_by to the query
:param query:
Query
:param joins:
Current joins
:param sort_joins:
Sort joins (properties or tables)
:param sort_field:
Sort field
:param sort_desc:
Ascending or descending
"""
if sort_field is not None:
# Handle joins
query, joins, alias = self._apply_path_joins(query, joins, sort_joins, inner_join=False)
column = sort_field if alias is None else getattr(alias, sort_field.key)
if sort_desc:
query = query.order_by(desc(column))
else:
query = query.order_by(column)
return query, joins
def _get_default_order(self):
order = super(ModelView, self)._get_default_order()
if order is not None:
field, direction = order
attr, joins = self._get_field_with_path(field)
return attr, joins, direction
return None
def _apply_sorting(self, query, joins, sort_column, sort_desc):
if sort_column is not None:
if sort_column in self._sortable_columns:
sort_field = self._sortable_columns[sort_column]
sort_joins = self._sortable_joins.get(sort_column)
query, joins = self._order_by(query, joins, sort_joins, sort_field, sort_desc)
else:
order = self._get_default_order()
if order:
sort_field, sort_joins, sort_desc = order
query, joins = self._order_by(query, joins, sort_joins, sort_field, sort_desc)
return query, joins
def _apply_search(self, query, count_query, joins, count_joins, search):
"""
Apply search to a query.
"""
terms = search.split(' ')
for term in terms:
if not term:
continue
stmt = tools.parse_like_term(term)
filter_stmt = []
count_filter_stmt = []
for field, path in self._search_fields:
query, joins, alias = self._apply_path_joins(query, joins, path, inner_join=False)
count_alias = None
if count_query is not None:
count_query, count_joins, count_alias = self._apply_path_joins(count_query,
count_joins,
path,
inner_join=False)
column = field if alias is None else getattr(alias, field.key)
filter_stmt.append(cast(column, Unicode).ilike(stmt))
if count_filter_stmt is not None:
column = field if count_alias is None else getattr(count_alias, field.key)
count_filter_stmt.append(cast(column, Unicode).ilike(stmt))
query = query.filter(or_(*filter_stmt))
if count_query is not None:
count_query = count_query.filter(or_(*count_filter_stmt))
return query, count_query, joins, count_joins
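# Net effect (added note): each whitespace-separated term becomes one or_()
# group across the searchable columns, and the successive .filter() calls AND
# those groups together, matching the "all words must match" rule documented
# on column_searchable_list.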
def _apply_filters(self, query, count_query, joins, count_joins, filters):
for idx, flt_name, value in filters:
flt = self._filters[idx]
alias = None
count_alias = None
# Figure out joins
if isinstance(flt, sqla_filters.BaseSQLAFilter):
path = self._filter_joins.get(flt.column, [])
query, joins, alias = self._apply_path_joins(query, joins, path, inner_join=False)
if count_query is not None:
count_query, count_joins, count_alias = self._apply_path_joins(
count_query,
count_joins,
path,
inner_join=False)
# Clean the value with flt.clean() and apply the filter
clean_value = flt.clean(value)
try:
query = flt.apply(query, clean_value, alias)
except TypeError:
spec = inspect.getargspec(flt.apply)
# A bound method's argspec includes `self`, so a legacy apply(self, query, value) has 3 args
if len(spec.args) == 3:
warnings.warn('Please update your custom filter %s to include additional `alias` parameter.' % repr(flt))
else:
raise
query = flt.apply(query, clean_value)
if count_query is not None:
try:
count_query = flt.apply(count_query, clean_value, count_alias)
except TypeError:
count_query = flt.apply(count_query, clean_value)
return query, count_query, joins, count_joins
def _apply_pagination(self, query, page, page_size):
if page_size is None:
page_size = self.page_size
if page_size:
query = query.limit(page_size)
if page and page_size:
query = query.offset(page * page_size)
return query
def get_list(self, page, sort_column, sort_desc, search, filters,
execute=True, page_size=None):
"""
Return records from the database.
:param page:
Page number
:param sort_column:
Sort column name
:param sort_desc:
Descending or ascending sort
:param search:
Search query
:param execute:
Execute query immediately? Default is `True`
:param filters:
List of filter tuples
:param page_size:
Number of results. Defaults to ModelView's page_size. Can be
overridden to change the page_size limit. Removing the page_size
limit requires setting page_size to 0 or False.
"""
# Will contain join paths with optional aliased object
joins = {}
count_joins = {}
query = self.get_query()
count_query = self.get_count_query() if not self.simple_list_pager else None
# Ignore eager-loaded relations (prevent unnecessary joins)
# TODO: Separate join detection for query and count query?
if hasattr(query, '_join_entities'):
for entity in query._join_entities:
for table in entity.tables:
joins[table] = None
# Apply search criteria
if self._search_supported and search:
query, count_query, joins, count_joins = self._apply_search(query,
count_query,
joins,
count_joins,
search)
# Apply filters
if filters and self._filters:
query, count_query, joins, count_joins = self._apply_filters(query,
count_query,
joins,
count_joins,
filters)
# Calculate number of rows if necessary
count = count_query.scalar() if count_query else None
# Auto join
for j in self._auto_joins:
query = query.options(joinedload(j))
# Sorting
query, joins = self._apply_sorting(query, joins, sort_column, sort_desc)
# Pagination
query = self._apply_pagination(query, page, page_size)
# Execute if needed
if execute:
query = query.all()
return count, query
def get_one(self, id):
"""
Return a single model by its id.
:param id:
Model id
"""
return self.session.query(self.model).get(tools.iterdecode(id))
# Error handler
def handle_view_exception(self, exc):
if isinstance(exc, IntegrityError):
flash(gettext('Integrity error. %(message)s', message=text_type(exc)), 'error')
return True
return super(ModelView, self).handle_view_exception(exc)
# Model handlers
def create_model(self, form):
"""
Create model from form.
:param form:
Form instance
"""
try:
model = self.model()
form.populate_obj(model)
self.session.add(model)
self._on_model_change(form, model, True)
self.session.commit()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to create record. %(error)s', error=str(ex)), 'error')
log.exception('Failed to create record.')
self.session.rollback()
return False
else:
self.after_model_change(form, model, True)
return model
def update_model(self, form, model):
"""
Update model from form.
:param form:
Form instance
:param model:
Model instance
"""
try:
form.populate_obj(model)
self._on_model_change(form, model, False)
self.session.commit()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to update record. %(error)s', error=str(ex)), 'error')
log.exception('Failed to update record.')
self.session.rollback()
return False
else:
self.after_model_change(form, model, False)
return True
def delete_model(self, model):
"""
Delete model.
:param model:
Model to delete
"""
try:
self.on_model_delete(model)
self.session.flush()
self.session.delete(model)
self.session.commit()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to delete record. %(error)s', error=str(ex)), 'error')
log.exception('Failed to delete record.')
self.session.rollback()
return False
else:
self.after_model_delete(model)
return True
# Default model actions
def is_action_allowed(self, name):
# Check delete action permission
if name == 'delete' and not self.can_delete:
return False
return super(ModelView, self).is_action_allowed(name)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected records?'))
def action_delete(self, ids):
try:
query = get_query_for_ids(self.get_query(), self.model, ids)
if self.fast_mass_delete:
count = query.delete(synchronize_session=False)
else:
count = 0
for m in query.all():
if self.delete_model(m):
count += 1
self.session.commit()
flash(ngettext('Record was successfully deleted.',
'%(count)s records were successfully deleted.',
count,
count=count))
except Exception as ex:
if not self.handle_view_exception(ex):
raise
flash(gettext('Failed to delete records. %(error)s', error=str(ex)), 'error')
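# A short usage sketch (not part of the module) tying together options
# documented above; `app`, `User`, and `db` are assumed application objects
# and the attribute values are illustrative, not recommendations.
#
#   from sqlalchemy import Boolean
#   from flask_admin import Admin
#   from flask_admin.contrib.sqla import ModelView
#
#   class UserAdmin(ModelView):
#       column_searchable_list = ('name', 'email')  # enables the ILIKE rules above
#       column_filters = ('name', 'email')
#       fast_mass_delete = False      # delete row by row so SQLAlchemy can clean up
#       form_optional_types = (Boolean,)
#
#   admin = Admin(app)
#   admin.add_view(UserAdmin(User, db.session))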
| sfpprxy/py-reminder | libs/contrib/sqla/view.py | Python | bsd-3-clause | 38,654 |
#!/bin/env python
''' Copyright (c) 2013 Potential Ventures Ltd
Copyright (c) 2013 SolarFlare Communications Inc
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Potential Ventures Ltd,
SolarFlare Communications Inc nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''
"""
Class defining the standard interface for a monitor within a testbench
The monitor is responsible for watching the pins of the DUT and recreating
the transactions
"""
import math
import cocotb
from cocotb.decorators import coroutine
from cocotb.triggers import Edge, Event, RisingEdge, ReadOnly, Timer
from cocotb.binary import BinaryValue
from cocotb.bus import Bus
from cocotb.log import SimLog
from cocotb.result import ReturnValue
class MonitorStatistics(object):
"""Wrapper class for storing Monitor statistics"""
def __init__(self):
self.received_transactions = 0
class Monitor(object):
def __init__(self, callback=None, event=None):
"""
Constructor for a monitor instance
callback will be called with each recovered transaction as the argument.
If the callback isn't used, received transactions will be placed on a
queue and the event used to notify any consumers.
"""
self._event = event
self._wait_event = None
self._recvQ = []
self._callbacks = []
self.stats = MonitorStatistics()
self._wait_event = Event()
# Subclasses may already set up logging
if not hasattr(self, "log"):
self.log = SimLog("cocotb.monitor.%s" % (self.__class__.__name__))
if callback is not None:
self.add_callback(callback)
# Create an independent coroutine which can receive stuff
self._thread = cocotb.scheduler.add(self._monitor_recv())
def kill(self):
if self._thread:
self._thread.kill()
self._thread = None
def __len__(self):
return len(self._recvQ)
def __getitem__(self, idx):
return self._recvQ[idx]
def add_callback(self, callback):
self.log.debug("Adding callback of function %s to monitor" %
(callback.__name__))
self._callbacks.append(callback)
@coroutine
def wait_for_recv(self, timeout=None):
if timeout:
t = Timer(timeout)
fired = yield [self._wait_event.wait(), t]
if fired is t:
raise ReturnValue(None)
else:
yield self._wait_event.wait()
pkt = self._wait_event.data
raise ReturnValue(pkt)
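# Usage sketch inside a cocotb test (hypothetical):
#   transaction = yield monitor.wait_for_recv(timeout=10000)
#   if transaction is None:
#       raise TestFailure("monitor timed out")  # TestFailure from cocotb.result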
@coroutine
def _monitor_recv(self):
"""
Actual implementation of the receiver.
Subclasses should override this method to implement the actual receive
routine and call self._recv() with the recovered transaction.
"""
raise NotImplementedError("Attempt to use base monitor class without "
"providing a _monitor_recv method")
def _recv(self, transaction):
"""Common handling of a received transaction."""
self.stats.received_transactions += 1
# either callback based consumer
for callback in self._callbacks:
callback(transaction)
# Or queued with a notification
if not self._callbacks:
self._recvQ.append(transaction)
if self._event is not None:
self._event.set()
# If anyone was waiting then let them know
if self._wait_event is not None:
self._wait_event.set(data=transaction)
self._wait_event.clear()
class BusMonitor(Monitor):
"""
Wrapper providing common functionality for monitoring busses
"""
_signals = []
_optional_signals = []
def __init__(self, entity, name, clock, reset=None, reset_n=None,
callback=None, event=None):
self.log = SimLog("cocotb.%s.%s" % (entity.name, name))
self.entity = entity
self.name = name
self.clock = clock
self.bus = Bus(self.entity, self.name, self._signals,
optional_signals=self._optional_signals)
self._reset = reset
self._reset_n = reset_n
Monitor.__init__(self, callback=callback, event=event)
@property
def in_reset(self):
if self._reset_n is not None:
return not bool(self._reset_n.value.integer)
if self._reset is not None:
return bool(self._reset.value.integer)
return False
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.name)
| mkreider/cocotb2 | cocotb/monitors/__init__.py | Python | bsd-3-clause | 5,969 |
from abc import ABCMeta, abstractmethod, abstractproperty
import re
import requests
from oauthlib.oauth2 import InvalidGrantError
from hs_restclient import HydroShareAuthOAuth2, HydroShareAuthBasic
from adapter import HydroShareAdapter
from . import HydroShareUtilityBaseClass, ImproperlyConfiguredError
from django.shortcuts import redirect
import logging as logger
OAUTH_ROPC = 'oauth-resource-owner-password-credentials'
OAUTH_AC = 'oauth-authorization-code'
BASIC_AUTH = 'basic'
SELF_SIGNED_CERTIFICATE = 'self-signed-certificate'
class AuthUtil(HydroShareUtilityBaseClass):
"""
Main authentication class. Use 'AuthUtilFactory' to create instances of this class.
"""
def __init__(self, implementation):
self.__implementation = implementation
@property
def auth_type(self):
return self.__implementation.auth_type
def get_client(self):
return self.__implementation.get_client()
def get_token(self):
return self.__implementation.get_token()
def refresh_token(self):
auth_type = self.auth_type
if auth_type == OAUTH_ROPC or auth_type == OAUTH_AC:
return self.__implementation.refresh_access_token()
@staticmethod
def authorize_client(request, response_type=None):
return OAuthUtil.authorize_client(request, response_type=response_type)
@staticmethod
def authorize_client_callback(request):
return OAuthUtil.authorize_client_callback(request)
@staticmethod
def authorize(scheme=None, username=None, password=None, token=None, hostname=None, port=None):
return AuthUtilFactory.create(scheme=scheme, username=username, password=password, token=token,
hostname=hostname, port=port)
class AuthUtilImplementor(HydroShareUtilityBaseClass):
"""Defines bridging interface for implementation classes of 'AuthUtil'"""
__metaclass__ = ABCMeta
@abstractproperty
def auth_type(self):
pass
@abstractmethod
def get_client(self):
pass
@abstractmethod
def get_token(self):
pass
class OAuthUtil(AuthUtilImplementor):
"""User authentication with OAuth 2.0 using the 'authorization_code' grant type"""
_required_token_fields = ['response_type', 'access_token', 'token_type', 'expires_in', 'refresh_token', 'scope']
_HS_BASE_URL_PROTO_WITH_PORT = '{scheme}://{hostname}:{port}/'
_HS_BASE_URL_PROTO = '{scheme}://{hostname}/'
_HS_API_URL_PROTO = '{base}hsapi/'
_HS_OAUTH_URL_PROTO = '{base}o/'
__client_id = None
__client_secret = None
__redirect_uri = None
@property
def auth_type(self):
return self.__authorization_grant_type
def __init__(self, use_https=True, hostname='www.hydroshare.org', client_id=None, client_secret=None,
redirect_uri=None, port=None, scope=None, access_token=None, refresh_token=None, expires_in=None,
token_type='Bearer', response_type='code', username=None, password=None):
if client_id:
self.__client_id = client_id
if client_secret:
self.__client_secret = client_secret
if redirect_uri:
self.__redirect_uri = redirect_uri
if not all([self.__client_id, self.__client_secret, self.__redirect_uri]):
raise ImproperlyConfiguredError()
self.token_type = token_type
self.response_type = response_type
self.access_token = access_token
self.refresh_token = refresh_token
self.expires_in = expires_in
self.scope = scope
self.username = username
self.password = password
if use_https:
self.scheme = 'https'
else:
self.scheme = 'http'
if port is None:
self.base_url = self._HS_BASE_URL_PROTO.format(scheme=self.scheme, hostname=hostname)
else:
self.base_url = self._HS_BASE_URL_PROTO_WITH_PORT.format(scheme=self.scheme, port=port, hostname=hostname)
self.api_url = self._HS_API_URL_PROTO.format(base=self.base_url)
self.oauth_url = self._HS_OAUTH_URL_PROTO.format(base=self.base_url)
if self.username and self.password:
self.__authorization_grant_type = OAUTH_ROPC
else:
self.__authorization_grant_type = OAUTH_AC
def get_token(self):
"""
Get the authorization token dict
:return: a dictionary representing the oauth token
"""
token = {
'access_token': self.access_token,
'token_type': self.token_type,
'expires_in': self.expires_in,
'refresh_token': self.refresh_token,
'scope': self.scope
}
for key, value in token.iteritems():
if value is None:
missing_attrs = [field for field in self._required_token_fields if
getattr(self, field) is None and not re.search(r'^__[a-zA-Z0-9]+__$', field)]
if len(missing_attrs) > 0:
raise AttributeError("missing attributes(s) for token: {attrs}".format(attrs=missing_attrs))
return token
def get_client(self): # type: () -> HydroShareAdapter
"""
Passes authentication details to underlying HydroShare object for authorization via OAuth 2.0.
"""
if self.auth_type == OAUTH_AC:
token = self.get_token()
auth = HydroShareAuthOAuth2(self.__client_id, self.__client_secret, token=token)
elif self.auth_type == OAUTH_ROPC:
auth = HydroShareAuthOAuth2(self.__client_id, self.__client_secret, username=self.username,
password=self.password)
else:
raise InvalidGrantError("Invalid authorization grant type.")
authorization_header = self.get_authorization_header()
return HydroShareAdapter(auth=auth, default_headers=authorization_header)
@staticmethod
def authorize_client(request, response_type=None):
"""
Redirects user from the client (data.envirodiy.org) to www.hydroshare.org/o/authorize/. After the user provides
their hydroshare account credentials and authorizes the requesting client, the user is redirected back to the
client website.
:param request: A Django HttpRequest object
:param response_type: (optional) a string representing the auth response type (default is 'code').
:return: None
"""
if response_type:
auth = OAuthUtil(response_type=response_type)
else:
auth = OAuthUtil()
url = auth._get_authorization_code_url(request)
return redirect(url)
@staticmethod
def authorize_client_callback(request, response_type=None): # type: (str, str) -> dict
"""
Callback handler after a user authorizes the client (data.envirodiy.org).
:param request: a Django HttpRequest
:param response_type: a string representing the oauth response_type
:return: a dictionary representing the token
"""
if response_type:
auth = OAuthUtil(response_type=response_type)
else:
auth = OAuthUtil()
token = auth._request_access_token(request)
return token
def get_authorization_header(self):
return {'Authorization': '{token_type} {access_token}'.format(token_type=self.token_type,
access_token=self.access_token)}
def _set_token(self, **token):
for key, value in token.iteritems():
if key in self.__dict__:
setattr(self, key, value)
else:
logger.warning("skipped setting attribute '{attr}' on '{clsname}".format(attr=key,
clsname=self.classname))
def refresh_access_token(self):
"""
Refresh oauth token using the refresh_token
:return: a dictionary representing the refreshed token
"""
params = {
'grant_type': 'refresh_token',
'client_id': self.__client_id,
'client_secret': self.__client_secret,
'refresh_token': self.refresh_token
}
url = self._build_oauth_url('token/', params)
response = requests.post(url)
if response.status_code == 200:
responseJSON = response.json()
self.access_token = responseJSON['access_token']
self.refresh_token = responseJSON['refresh_token']
self.expires_in = responseJSON['expires_in']
self.scope = responseJSON['scope']
else:
# TODO: better exception handling
raise Exception("failed to refresh access token", response.json())
return self.get_token()
def _build_oauth_url(self, path, params=None): # type: (str, dict) -> str
if params is None:
params = {}
url_params = []
for key, value in params.iteritems():
url_params.append('{0}={1}'.format(key, value))
return "{oauth_url}{path}?{params}".format(oauth_url=self.oauth_url, path=path, params="&".join(url_params))
def _get_authorization_code_url(self, request):
redirect_url = self._get_redirect_url(request.scheme, request.META['HTTP_HOST'])
params = {
'response_type': self.response_type,
'client_id': self.__client_id,
'redirect_uri': redirect_url
}
return self._build_oauth_url('authorize/', params)
def _get_access_token_url(self, request):
redirect_url = self._get_redirect_url(request.scheme, request.META['HTTP_HOST'])
params = {
'grant_type': 'authorization_code',
'client_id': self.__client_id,
'client_secret': self.__client_secret,
'redirect_uri': redirect_url,
'code': request.GET['code']
}
return self._build_oauth_url('token/', params)
def _get_redirect_url(self, scheme, host):
if scheme in self.__redirect_uri and host in self.__redirect_uri:
return self.__redirect_uri
redirect_uri = self.__redirect_uri
if '/' == redirect_uri[0]:
redirect_uri = redirect_uri[1:]
return '{scheme}://{host}/{uri}'.format(scheme=scheme, host=host, uri=redirect_uri)
def _request_access_token(self, request):
url = self._get_access_token_url(request)
response = requests.post(url)
if response.status_code == 200 and 'json' in dir(response):
return response.json()
else:
# TODO: Better exception handling...
raise Exception("failed to get access token")
class BasicAuthUtil(AuthUtilImplementor):
"""User authentication using 'Basic Auth' scheme."""
def __init__(self, username, password):
self.username = username
self.password = password
self.__auth_type = BASIC_AUTH
@property
def auth_type(self):
return self.__auth_type
def get_client(self):
auth = HydroShareAuthBasic(username=self.username, password=self.password)
return HydroShareAdapter(auth=auth)
def get_token(self):
return None
class SelfSignSecurityCertAuth(AuthUtilImplementor):
"""Used to connect to a development HydroShare server that uses a self-sign security certificate"""
def __init__(self, hostname, port=None): # type: (str, int) -> None
self.hostname = hostname
self.port = port
self.use_https = False
self.__auth_type = SELF_SIGNED_CERTIFICATE
@property
def auth_type(self):
return self.__auth_type
def get_client(self):
if self.port:
return HydroShareAdapter(hostname=self.hostname, port=self.port, use_https=self.use_https)
else:
return HydroShareAdapter(hostname=self.hostname, verify=False)
def get_token(self):
return None
class AuthUtilFactory(object):
"""
Factory class for creating instances of 'AuthUtil'.
Example: Creating a 'AuthUtil' object using the 'basic' authentication scheme:
hsauth = AuthUtilFactory.create(username='<your username>', password='<your password>')
Example of creating a 'AuthUtil' object using the 'oauth' authentication scheme and client credential grant type:
hsauth = AuthUtilFactory.create(scheme='oauth', username='<your_username>', password='<your_password>')
Example of creating an 'AuthUtil' object using the 'auth' authentication scheme and authorization code grant type:
token = get_token() # get_token is a stand for getting a token dictionary
hsauth = AuthUtilFactory.create(token=token)
"""
@staticmethod
def create(scheme=None, username=None, password=None, token=None, hostname=None, port=None, use_https=None):
# type: (AuthScheme, str, str, dict, str, int, bool) -> AuthUtil
"""
Factory method creates and returns an instance of AuthUtil. The chosen scheme ('basic' or 'oauth') determines
the background implementation. The following table shows which parameters are required for each type of
authentication scheme.
+----------------------------------------------------------------------------------------+
| scheme type | username | password | token | hostname | port |
+------------------------+---------------------------------------------------------------+
| Basic Auth | X | X | | | |
+------------------------+---------------------------------------------------------------+
| OAuth with credentials | X | X | | | |
+------------------------+---------------------------------------------------------------+
| OAuth with token | | | X | | |
+------------------------+---------------------------------------------------------------+
| Security Certificate | | | | X | optional |
+------------------------+---------------------------------------------------------------+
:param scheme: The authentication scheme, either 'basic' or 'oauth'
:param username: user's username
:param password: user's password
:param token: a dictionary containing values for 'access_token', 'token_type', 'refresh_token', 'expires_in',
and 'scope'
:param hostname: The hostname if using a self signed security certificate
:param port: The port to connect to if trying to connect to a hydroshare development server
:param use_https: True if using HTTPS is required, otherwise False, default is None
"""
if token:
# OAuth using authorization-code
implementation = OAuthUtil(**token)
elif scheme == 'oauth' and username and password:
# OAuth using resource-owner-password-credentials
implementation = OAuthUtil(username=username, password=password)
elif scheme == 'basic' and username and password:
# Basic auth - username and password
implementation = BasicAuthUtil(username, password)
elif scheme == 'self-signed-certificate' and hostname:
# Auth using a self signed security certificate
implementation = SelfSignSecurityCertAuth(hostname, port=port)
else:
raise ValueError("incorrect arguments supplied to 'AuthUtilFactory.create()' using authentication scheme \
'{scheme}'".format(scheme=scheme if scheme else "not specified"))
util = AuthUtil(implementation)
if use_https is not None:
util.use_https = use_https
return util
__all__ = ["AuthUtil", "OAuthUtil", "BasicAuthUtil", "SelfSignSecurityCertAuth", "AuthUtilFactory"]
|
ODM2/ODM2WebSDL
|
src/hydroshare_util/auth.py
|
Python
|
bsd-3-clause
| 16,163
|
from __future__ import absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement
from django.utils.translation import ugettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from .menu import DomecekMenu
from .urls import urlpatterns
class DomecekApp(CMSApp):
name = _('Domecek')
urls = [urlpatterns]
app_name = 'domecek'
menus = [DomecekMenu]
apphook_pool.register(DomecekApp)
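# Hedged usage note (editor's addition): once registered, the apphook can be
# attached to a django CMS page in the admin (conventionally under the page's
# Advanced settings -> Application; that admin path is an assumption, not
# something this file configures). Attaching it mounts 'urlpatterns' under the
# page's URL and exposes DomecekMenu in the page's menu tree.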
|
misli/django-domecek
|
domecek/cms_app.py
|
Python
|
bsd-3-clause
| 483
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
from recipe_engine import recipe_api
class FilterApi(recipe_api.RecipeApi):
def __init__(self, **kwargs):
super(FilterApi, self).__init__(**kwargs)
self._test_targets = []
self._compile_targets = []
self._paths = []
def __is_path_in_exclusion_list(self, path, exclusions):
"""Returns true if |path| matches any of the regular expressions in
|exclusions|."""
for regex in exclusions:
match = regex.match(path)
if match and match.end() == len(path):
return regex.pattern
return False
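  # Illustrative sketch (editor's addition): the `match.end() == len(path)`
  # guard makes this a full-match test, so an exclusion of 'docs/.*' matches
  # 'docs/README.md', while a bare 'docs' does not (its match stops before
  # the '/').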
@property
def test_targets(self):
"""Returns the set of targets passed to does_patch_require_compile() that
are affected by the set of files that have changed."""
return self._test_targets
@property
def compile_targets(self):
"""Returns the set of targets that need to be compiled based on the set of
files that have changed."""
return self._compile_targets
@property
def paths(self):
"""Returns the paths that have changed in this patch."""
return self._paths
def _load_analyze_config(self, file_name):
config_path = self.m.path.join('testing', 'buildbot', file_name)
step_result = self.m.json.read(
'read filter exclusion spec',
self.m.path['checkout'].join(config_path),
step_test_data=lambda: self.m.json.test_api.output({
'base': {
'exclusions': [],
},
'chromium': {
'exclusions': [],
},
'ios': {
'exclusions': [],
},
})
)
step_result.presentation.step_text = 'path: %r' % config_path
return step_result.json.output
def _load_exclusions(self, names, file_name):
file_contents = self._load_analyze_config(file_name)
exclusions = []
for name in names:
exclusions.extend(file_contents[name]['exclusions'])
return exclusions
def does_patch_require_compile(self,
affected_files,
test_targets=None,
additional_compile_targets=None,
additional_names=None,
config_file_name='trybot_analyze_config.json',
use_mb=False,
mb_mastername=None,
mb_buildername=None,
build_output_dir=None,
cros_board=None,
**kwargs):
"""Check to see if the affected files require a compile or tests.
Args:
affected_files: list of files affected by the current patch; paths
should only use forward slashes ("/") on all platforms
      test_targets: the possible set of executables that are desired to run.
          When done, test_targets() returns the subset of targets
          that are affected by the files that have changed.
additional_compile_targets: any targets to compile in addition to
the test_targets.
additional_names: additional top level keys to look up exclusions in,
see |config_file_name|.
      config_file_name: the config file to look up exclusions in.
mb_mastername: the mastername to pass over to run MB.
mb_buildername: the buildername to pass over to run MB.
Within the file we concatenate "base.exclusions" and
"|additional_names|.exclusions" (if |additional_names| is not none) to
get the full list of exclusions.
    The exclusions should be a list of Python regular expressions (as strings).
    If any of the files in the current patch match one of the exclusion
    regular expressions, we assume everything needs to be compiled and tested.
If an error occurs, an exception is raised. Otherwise, after the
call completes the results can be obtained from self.compile_targets()
and self.test_targets().
To run MB, we need to use the actual mastername and buildername we're
running on, and not those of the continuous builder the trybot may be
configured to match, because a trybot may be configured with different MB
settings.
However, recipes used by Findit for culprit finding may override the
defaults with `mb_mastername` and `mb_buildername` to exactly match a given
continuous builder.
"""
names = ['base']
if additional_names:
names.extend(additional_names)
exclusions = self._load_exclusions(names, config_file_name)
test_targets = test_targets or []
additional_compile_targets = additional_compile_targets or []
all_targets = sorted(set(test_targets) | set(additional_compile_targets))
self._test_targets = []
self._compile_targets = []
self._paths = affected_files
# Check the path of each file against the exclusion list. If found, no need
# to check dependencies.
exclusion_regexs = [re.compile(exclusion) for exclusion in exclusions]
for path in self.paths:
first_match = self.__is_path_in_exclusion_list(path, exclusion_regexs)
if first_match:
step_result = self.m.python.inline(
'analyze',
'import sys; sys.exit(0)',
add_python_log=False)
step_result.presentation.logs.setdefault('excluded_files', []).append(
'%s (regex = \'%s\')' % (path, first_match))
self._compile_targets = sorted(all_targets)
self._test_targets = sorted(test_targets)
return
analyze_input = {
'files': self.paths,
'test_targets': test_targets,
'additional_compile_targets': additional_compile_targets,
}
test_output = {
'status': 'No dependency',
'compile_targets': [],
'test_targets': [],
}
kwargs.setdefault('env', {})
# If building for CrOS, execute through the "chrome_sdk" wrapper. This will
# override GYP environment variables, so we'll refrain from defining them
# to avoid confusing output.
if cros_board:
kwargs['wrapper'] = self.m.chromium.get_cros_chrome_sdk_wrapper()
else:
kwargs['env'].update(self.m.chromium.c.gyp_env.as_jsonish())
if use_mb:
if 'env' in kwargs:
# Ensure that mb runs in a clean environment to avoid
# picking up any GYP_DEFINES accidentally.
del kwargs['env']
mb_mastername = mb_mastername or self.m.properties['mastername']
mb_buildername = mb_buildername or self.m.properties['buildername']
step_result = self.m.python(
'analyze',
self.m.path['checkout'].join('tools', 'mb', 'mb.py'),
args=['analyze',
'-m',
mb_mastername,
'-b',
mb_buildername,
'-v',
build_output_dir,
self.m.json.input(analyze_input),
self.m.json.output()],
step_test_data=lambda: self.m.json.test_api.output(
test_output),
**kwargs)
else:
step_result = self.m.python(
'analyze',
self.m.path['checkout'].join('build', 'gyp_chromium'),
args=['--analyzer',
self.m.json.input(analyze_input),
self.m.json.output()],
step_test_data=lambda: self.m.json.test_api.output(
test_output),
**kwargs)
if 'error' in step_result.json.output:
step_result.presentation.step_text = 'Error: ' + \
step_result.json.output['error']
raise self.m.step.StepFailure(
'Error: ' + step_result.json.output['error'])
if 'invalid_targets' in step_result.json.output:
raise self.m.step.StepFailure('Error, following targets were not ' + \
'found: ' + ', '.join(step_result.json.output['invalid_targets']))
if (step_result.json.output['status'] == 'Found dependency' or
step_result.json.output['status'] == 'Found dependency (all)'):
self._compile_targets = step_result.json.output['compile_targets']
self._test_targets = step_result.json.output['test_targets']
# TODO(dpranke) crbug.com/557505 - we need to not prune meta
# targets that are part of 'test_targets', because otherwise
# we might not actually build all of the binaries needed for
# a given test, even if they aren't affected by the patch.
# Until the GYP code is updated, we will merge the returned
# test_targets into compile_targets to be safe.
self._compile_targets = sorted(set(self._compile_targets +
self._test_targets))
else:
step_result.presentation.step_text = 'No compile necessary'
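# Hedged standalone sketch (editor's addition): the exclusion check performed
# in does_patch_require_compile() reduces to a full-match test over compiled
# patterns, roughly equivalent to:
if __name__ == '__main__':
  def _first_full_match(path, patterns):
    # Return the first pattern that matches |path| in full, else None.
    for pattern in patterns:
      m = re.compile(pattern).match(path)
      if m and m.end() == len(path):
        return pattern
    return None
  assert _first_full_match('docs/README.md', ['docs/.*']) == 'docs/.*'
  assert _first_full_match('src/main.cc', ['docs/.*']) is None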
|
eunchong/build
|
scripts/slave/recipe_modules/filter/api.py
|
Python
|
bsd-3-clause
| 8,905
|
# Copyright 2018 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for third_party.nucleus.io.sharded_file_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
if 'google' in sys.modules and 'google.protobuf' not in sys.modules:
del sys.modules['google']
from absl.testing import absltest
from absl.testing import parameterized
from third_party.nucleus.io import sharded_file_utils as io
from third_party.nucleus.testing import test_utils
class IOTest(parameterized.TestCase):
@parameterized.parameters(
# Unsharded outputs pass through as expected.
dict(task_id=0, filespecs=['foo.txt'], expected=[0, 'foo.txt']),
dict(
task_id=0,
filespecs=['foo.txt', 'bar.txt'],
expected=[0, 'foo.txt', 'bar.txt']),
dict(
task_id=0,
filespecs=['bar.txt', 'foo.txt'],
expected=[0, 'bar.txt', 'foo.txt']),
# It's ok to have False values for other bindings.
dict(
task_id=0, filespecs=['foo.txt', None], expected=[0, 'foo.txt',
None]),
dict(task_id=0, filespecs=['foo.txt', ''], expected=[0, 'foo.txt', '']),
dict(
task_id=0,
filespecs=['foo@10.txt', None],
expected=[10, 'foo-00000-of-00010.txt', None]),
dict(
task_id=0,
filespecs=['foo@10.txt', ''],
expected=[10, 'foo-00000-of-00010.txt', '']),
# Simple check that master behaves as expected.
dict(
task_id=0,
filespecs=['foo@10.txt', None],
expected=[10, 'foo-00000-of-00010.txt', None]),
dict(
task_id=0,
filespecs=['foo@10', None],
expected=[10, 'foo-00000-of-00010', None]),
dict(
task_id=1,
filespecs=['foo@10', None],
expected=[10, 'foo-00001-of-00010', None]),
dict(
task_id=9,
filespecs=['foo@10', None],
expected=[10, 'foo-00009-of-00010', None]),
# Make sure we handle sharding of multiple filespecs.
dict(
task_id=0,
filespecs=['foo@10', 'bar@10', 'baz@10'],
expected=[
10, 'foo-00000-of-00010', 'bar-00000-of-00010',
'baz-00000-of-00010'
]),
dict(
task_id=9,
filespecs=['foo@10', 'bar@10', 'baz@10'],
expected=[
10, 'foo-00009-of-00010', 'bar-00009-of-00010',
'baz-00009-of-00010'
]),
)
def test_resolve_filespecs(self, task_id, filespecs, expected):
self.assertEqual(io.resolve_filespecs(task_id, *filespecs), expected)
@parameterized.parameters(
# shard >= num_shards.
(10, ['foo@10']),
# shard > 0 but master isn't sharded.
(1, ['foo']),
# Inconsistent sharding.
(0, ['foo@10', 'bad@11']),
# master isn't sharded but bad is.
(0, ['foo', 'bad@11']),
)
def test_resolve_filespecs_raises_with_bad_inputs(self, task_id, outputs):
with self.assertRaises(ValueError):
io.resolve_filespecs(task_id, *outputs)
@parameterized.parameters(
# Unsharded files work.
('foo.txt', ['foo.txt']),
('foo-00000-of-00010.txt', ['foo-00000-of-00010.txt']),
# Sharded file patterns work.
('foo@3.txt', [
'foo-00000-of-00003.txt', 'foo-00001-of-00003.txt',
'foo-00002-of-00003.txt'
]),
('foo@3',
['foo-00000-of-00003', 'foo-00001-of-00003', 'foo-00002-of-00003']),
)
def test_maybe_generate_sharded_filenames(self, filespec, expected):
self.assertEqual(io.maybe_generate_sharded_filenames(filespec), expected)
class ShardsTest(parameterized.TestCase):
@parameterized.named_parameters(
('no_suffix', '/dir/foo/bar@3', '/dir/foo/bar', 3, ''),
('suffix-dot', '/dir/foo/bar@3.txt', '/dir/foo/bar', 3, '.txt'),
)
def testParseShardedFileSpec(self, spec, expected_basename,
expected_num_shards, expected_suffix):
basename, num_shards, suffix = io.parse_sharded_file_spec(spec)
self.assertEqual(basename, expected_basename)
self.assertEqual(num_shards, expected_num_shards)
self.assertEqual(suffix, expected_suffix)
def testParseShardedFileSpecInvalid(self):
self.assertRaises(io.ShardError,
io.parse_sharded_file_spec, '/dir/foo/bar@0')
@parameterized.named_parameters(
('no_suffix', '/dir/foo/bar@3', [
'/dir/foo/bar-00000-of-00003', '/dir/foo/bar-00001-of-00003',
'/dir/foo/bar-00002-of-00003'
]),
('suffix', '/dir/foo/bar@3.txt', [
'/dir/foo/bar-00000-of-00003.txt', '/dir/foo/bar-00001-of-00003.txt',
'/dir/foo/bar-00002-of-00003.txt'
]),
)
def testGenerateShardedFilenames(self, spec, expected):
names = io.generate_sharded_filenames(spec)
self.assertEqual(names, expected)
def testGenerateShardedFilenamesManyShards(self):
names = io.generate_sharded_filenames('/dir/foo/bar@100000')
self.assertEqual(len(names), 100000)
self.assertEqual(names[99999], '/dir/foo/bar-099999-of-100000')
@parameterized.named_parameters(
('no_spec', '/dir/foo/bar'),
('zero_shards', '/dir/foo/bar@0'),
)
def testGenerateShardedFilenamesError(self, spec):
self.assertRaises(io.ShardError, io.generate_sharded_filenames, spec)
@parameterized.named_parameters(
('basic', '/dir/foo/bar@3', True),
      ('suffix', '/dir/foo/bar@3.txt', True),
('many_shards', '/dir/foo/bar@123456', True),
('invalid_spec', '/dir/foo/bar@0', False),
('not_spec', '/dir/foo/bar', False),
)
def testIsShardedFileSpec(self, spec, expected):
actual = io.is_sharded_file_spec(spec)
    self.assertEqual(actual, expected,
                     'io.is_sharded_file_spec({0}) is {1} expected {2}'.format(
                         spec, actual, expected))
@parameterized.named_parameters(
('no_suffix', '/dir/foo/bar', 3, '', '/dir/foo/bar-?????-of-00003'),
('suffix', '/dir/foo/bar', 3, '.txt', '/dir/foo/bar-?????-of-00003.txt'),
('many', '/dir/foo/bar', 1234567, '.txt',
'/dir/foo/bar-???????-of-1234567.txt'),
)
def testGenerateShardedFilePattern(self, basename, num_shards, suffix,
expected):
self.assertEqual(io.generate_sharded_file_pattern(
basename, num_shards, suffix), expected)
@parameterized.named_parameters(
('no_spec', '/dir/foo/bar', '/dir/foo/bar'),
('suffix', '/dir/foo/bar@3.txt', '/dir/foo/bar-?????-of-00003.txt'),
('no_suffix', '/dir/foo/bar@3', '/dir/foo/bar-?????-of-00003'),
('1000', '/dir/foo/bar@1000', '/dir/foo/bar-?????-of-01000'),
('many', '/dir/foo/bar@12345678', '/dir/foo/bar-????????-of-12345678'),
)
def testNormalizeToShardedFilePattern(self, spec, expected):
self.assertEqual(expected, io.normalize_to_sharded_file_pattern(spec))
@parameterized.named_parameters(
('no_spec', 'no_spec', ['no_spec']),
('sharded', 'sharded@3', ['sharded-00000-of-00003',
'sharded-00001-of-00003',
'sharded-00002-of-00003']),
('wildcard1', '*.ext', ['cat.ext', 'dog.ext']),
('wildcard2', 'fo?bar', ['foobar']),
('comma_list', 'file1,file2,file3', ['file1', 'file2', 'file3']),
('mixed_list', 'mixed.*txt,mixed@1,mixed_file',
['mixed.1txt', 'mixed.2txt', 'mixed-00000-of-00001', 'mixed_file']),
('with_dups', 'with_dups*',
['with_dups.1txt', 'with_dups.2txt', 'with_dups-00000-of-00001',
'with_dups']),
)
def testGlobListShardedFilePatterns(self, specs, expected_files):
# First, create all expected_files so Glob will work later.
expected_full_files = [test_utils.test_tmpfile(f, '')
for f in expected_files]
# Create the full spec names. This one doesn't create the files.
full_specs = ','.join(
[test_utils.test_tmpfile(spec) for spec in specs.split(',')])
self.assertEqual(sorted(set(expected_full_files)),
io.glob_list_sharded_file_patterns(full_specs))
if __name__ == '__main__':
absltest.main()
|
google/deepvariant
|
third_party/nucleus/io/sharded_file_utils_test.py
|
Python
|
bsd-3-clause
| 9,738
|
from django.db import models
from taggit.managers import TaggableManager
from django.contrib.auth.models import User
### contains all site definitions
class Site(models.Model):
group = models.ForeignKey('group.Group', related_name="sites", on_delete=models.PROTECT)
hostname = models.CharField(max_length=256, unique=True)
last_change = models.DateTimeField(null=True)
tags = TaggableManager(blank=True)
def get_alert_users(self):
from alert.models import SiteAlert, TagAlert, GroupAlert
sitealertusers = User.objects.filter(id__in=SiteAlert.objects.filter(site=self).values('user'))
groupalertusers = User.objects.filter(id__in=GroupAlert.objects.filter(group=self.group).values('user'))
tagalertusers = User.objects.filter(id__in=TagAlert.objects.filter(tag__in=[tag.id for tag in self.tags.all()]).values('user'))
alertusers = sitealertusers | groupalertusers | tagalertusers
return alertusers.distinct()
def __str__(self):
return "%s (%s)" % (self.hostname, self.group)
|
tykling/tlsscout
|
src/tlssite/models.py
|
Python
|
bsd-3-clause
| 1,062
|
import base64
import json
from finance import app
from tests.fixtures import setup_user, delete_user
class BaseViewTestCase():
def get_headers(self, username, password):
return {
'Authorization': 'Basic ' + base64.b64encode(
username + ":" + password
)
}
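    # Illustrative sketch (editor's addition): for username 'user' and
    # password 'pass', base64.b64encode('user:pass') is 'dXNlcjpwYXNz', so the
    # header becomes {'Authorization': 'Basic dXNlcjpwYXNz'}. Note the string
    # concatenation above assumes Python 2; on Python 3 the credentials would
    # need to be encoded to bytes first.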
def open_with_auth(self, url, method, username, password, data=None):
return self.app.open(
url,
method=method,
headers=self.get_headers(username, password),
data=json.dumps(data),
follow_redirects=True,
content_type='application/json'
)
def login(self, username, password):
data = {
'username': username,
'password': password
}
return self.app.post(
"/login",
data=json.dumps(data),
follow_redirects=True,
content_type='application/json'
)
def logout(self, auth_token=None):
return self.app.open(
"/logout",
'GET',
headers={
'AuthToken': auth_token
},
follow_redirects=True
)
class TestGeneralView(BaseViewTestCase):
@classmethod
def setup_class(self):
self.user, self.username, self.password = setup_user()
self.app = app.test_client()
@classmethod
def teardown_class(self):
delete_user(self.user)
def test_version(self):
"""Test version number"""
rv = self.app.get("/")
assert 200 == rv.status_code
assert 'version' in json.loads(rv.data)
def test_login(self):
"""Test logging in """
rv = self.login(self.username, self.password)
assert 200 == rv.status_code
res = json.loads(rv.data)
assert "Success" in res.get('message')
assert 'auth_token' in res
rv = self.app.open(
"/accounts",
'GET',
headers={
'AuthToken': res.get('auth_token')
}
)
assert 200 == rv.status_code
rv = self.app.open(
"/accounts",
'GET',
headers={
'AuthToken': 'random'
}
)
assert 401 == rv.status_code
def test_login_fail(self):
"""Test logging in with invalid credentials"""
rv = self.login('boo', 'hoo')
assert 400 == rv.status_code
assert 'Invalid' in json.loads(rv.data).get('message')
def test_login_invalid(self):
"""Test logging in with invalid form post"""
rv = self.app.post(
"/login",
data=json.dumps({"username": "admin"}),
follow_redirects=True,
content_type='application/json'
)
assert 400 == rv.status_code
assert {
u'password': [u'This field is required.']
} == json.loads(rv.data)
def test_logout(self):
"""Test logging out"""
rv = self.login(self.username, self.password)
assert 200 == rv.status_code
auth_token = json.loads(rv.data).get('auth_token')
rv = self.logout(auth_token)
assert 200 == rv.status_code
assert 'Success' in json.loads(rv.data).get('message')
rv = self.app.open(
"/accounts",
'GET',
headers={
'AuthToken': auth_token
}
)
assert 401 == rv.status_code
|
reinbach/finance
|
api/tests/views/test_base.py
|
Python
|
bsd-3-clause
| 3,494
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'roadmap'."""
from primaires.interpreteur.commande.commande import Commande
from .creer import PrmCreer
from .editer import PrmEditer
from .supprimer import PrmSupprimer
class CmdRoadmap(Commande):
"""Commande 'roadmap'."""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "roadmap", "roadmap")
self.nom_categorie = "info"
self.aide_courte = "affiche la feuille de route"
self.aide_longue = \
"Cette commande permet d'afficher la feuille de route " \
"actuelle. Cette feuille de route affiche les améliorations " \
"sur lesquelles les immortels travaillent mais qui ne " \
"sont pas encore visibles par les joueurs. Cette feuille " \
"de route est mise à jour régulièrement et permet de " \
"suivre l'avancement du travail accompli par les " \
"bâtisseurs. Pour chaque élément de la feuille de route, " \
"vous le verrez précédé d'un astérisque (*) coloré en " \
"rouge pour vous indiquer que cette information a été " \
"mise à jour depuis la dernière fois que vous avez " \
"consulté cette feuille de route."
def ajouter_parametres(self):
"""Ajout des paramètres"""
self.ajouter_parametre(PrmCreer())
self.ajouter_parametre(PrmEditer())
self.ajouter_parametre(PrmSupprimer())
def erreur_validation(self, personnage, dic_masques):
"""Interprétation du paramètre"""
roadmaps = importeur.information.roadmaps
if roadmaps:
msg = "Feuille de route :"
for roadmap in roadmaps:
msg += "\n"
if personnage.est_immortel():
msg += " {:>2}".format(roadmap.no)
elif personnage in roadmap.joueurs_ayant_lu:
msg += " "
else:
msg += " |rg|*|ff| "
roadmap.joueurs_ayant_lu.append(personnage)
msg += " " + roadmap.titre.capitalize()
if roadmap.texte:
msg += " : " + roadmap.texte
else:
msg = "|att|La feuille de route actuelle est vide.|ff|"
return msg
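# Hedged behavior note (editor's addition): for a mortal player, the red
# asterisk marks roadmap entries updated since their last reading; displaying
# such an entry appends the player to roadmap.joueurs_ayant_lu, so the marker
# disappears the next time they consult the roadmap.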
|
stormi/tsunami
|
src/primaires/information/commandes/roadmap/__init__.py
|
Python
|
bsd-3-clause
| 3,883
|
from setuptools import setup
setup(
name='DSM-gui',
version='0.8',
author='M. Wysokinski',
description='Desktop GUI for the DSM project',
install_requires=[
"twisted == 13.2.0",
],
)
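# Hedged usage note (editor's addition): with setuptools available, the
# package would typically be installed from this directory with
#     pip install .
# which also pulls in the pinned twisted dependency above.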
|
wysekm/DistributedSystemMonitoring
|
DSM-desktop-gui/setup.py
|
Python
|
bsd-3-clause
| 219
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Code is originally adapted from MILK: Machine Learning Toolkit
# Copyright (C) 2008-2011, Luis Pedro Coelho <luis@luispedro.org>
# License: MIT. See COPYING.MIT file in the milk distribution
# Authors: Brian Holt, Peter Prettenhofer, Satrajit Ghosh, Gilles Louppe
# License: BSD3
from __future__ import division
import numpy as np
from abc import ABCMeta, abstractmethod
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..feature_selection.selector_mixin import SelectorMixin
from ..utils import array2d, check_random_state
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
DTYPE = _tree.DTYPE
CLASSIFICATION = {
"gini": _tree.Gini,
"entropy": _tree.Entropy,
}
REGRESSION = {
"mse": _tree.MSE,
}
def export_graphviz(decision_tree, out_file=None, feature_names=None):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to graphviz.
    out_file : file object or string, optional (default=None)
Handle or name of the output file.
feature_names : list of strings, optional (default=None)
Names of each of the features.
Returns
-------
out_file : file object
The file object to which the tree was exported. The user is
expected to `close()` this object when done with it.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> import tempfile
>>> out_file = tree.export_graphviz(clf, out_file=tempfile.TemporaryFile())
>>> out_file.close()
"""
def node_to_str(tree, node_id):
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X[%s]" % tree.feature[node_id]
value = tree.value[node_id]
if tree.n_outputs == 1:
value = value[0, :]
if tree.children[node_id, 0] == Tree.LEAF:
return "error = %.4f\\nsamples = %s\\nvalue = %s" \
% (tree.init_error[node_id],
tree.n_samples[node_id],
value)
else:
return "%s <= %.4f\\nerror = %s\\nsamples = %s\\nvalue = %s" \
% (feature,
tree.threshold[node_id],
tree.init_error[node_id],
tree.n_samples[node_id],
value)
def recurse(tree, node_id, parent=None):
if node_id == Tree.LEAF:
raise ValueError("Invalid node_id %s" % Tree.LEAF)
left_child, right_child = tree.children[node_id, :]
# add node with description
out_file.write('%d [label="%s", shape="box"] ;\n' %
(node_id, node_to_str(tree, node_id)))
if not parent is None:
# add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
if not (left_child == Tree.LEAF):
recurse(tree, left_child, node_id)
recurse(tree, right_child, node_id)
if out_file is None:
out_file = open("tree.dot", "w")
elif isinstance(out_file, basestring):
out_file = open(out_file, "w")
out_file.write("digraph Tree {\n")
recurse(decision_tree.tree_, 0)
out_file.write("}")
return out_file
class Tree(object):
"""Struct-of-arrays representation of a binary decision tree.
The binary tree is represented as a number of parallel arrays.
The i-th element of each array holds information about the
node `i`. You can find a detailed description of all arrays
below. NOTE: Some of the arrays only apply to either leaves or
split nodes, resp. In this case the values of nodes of the other
type are arbitrary!
Attributes
----------
node_count : int
Number of nodes (internal nodes + leaves) in the tree.
children : np.ndarray, shape=(node_count, 2), dtype=int32
`children[i, 0]` holds the node id of the left child of node `i`.
`children[i, 1]` holds the node id of the right child of node `i`.
For leaves `children[i, 0] == children[i, 1] == Tree.LEAF == -1`.
feature : np.ndarray of int32
The feature to split on (only for internal nodes).
threshold : np.ndarray of float64
The threshold of each node (only for internal nodes).
value : np.ndarray of float64, shape=(capacity, n_outputs, n_classes)
Contains the constant prediction value of each node.
    best_error : np.ndarray of float64
        The error of the (best) split.
        For leaves `init_error == best_error`.
    init_error : np.ndarray of float64
        The initial error of the node (before splitting).
        For leaves `init_error == best_error`.
n_samples : np.ndarray of np.int32
The number of samples at each node.
"""
LEAF = -1
UNDEFINED = -2
def __init__(self, n_classes, n_features, n_outputs=1, capacity=3):
self.n_classes = n_classes
self.n_features = n_features
self.n_outputs = n_outputs
self.node_count = 0
self.children = np.empty((capacity, 2), dtype=np.int32)
self.children.fill(Tree.UNDEFINED)
self.feature = np.empty((capacity,), dtype=np.int32)
self.feature.fill(Tree.UNDEFINED)
self.threshold = np.empty((capacity,), dtype=np.float64)
self.value = np.empty((capacity, n_outputs, np.max(n_classes)),
dtype=np.float64)
self.best_error = np.empty((capacity,), dtype=np.float32)
self.init_error = np.empty((capacity,), dtype=np.float32)
self.n_samples = np.empty((capacity,), dtype=np.int32)
def _resize(self, capacity=None):
"""Resize tree arrays to `capacity`, if `None` double capacity. """
if capacity is None:
capacity = int(self.children.shape[0] * 2.0)
if capacity == self.children.shape[0]:
return
self.children.resize((capacity, 2), refcheck=False)
self.feature.resize((capacity,), refcheck=False)
self.threshold.resize((capacity,), refcheck=False)
self.value.resize((capacity, self.value.shape[1], self.value.shape[2]),
refcheck=False)
self.best_error.resize((capacity,), refcheck=False)
self.init_error.resize((capacity,), refcheck=False)
self.n_samples.resize((capacity,), refcheck=False)
# if capacity smaller than node_count, adjust the counter
if capacity < self.node_count:
self.node_count = capacity
def _add_split_node(self, parent, is_left_child, feature, threshold,
best_error, init_error, n_samples, value):
"""Add a splitting node to the tree. The new node registers itself as
the child of its parent. """
node_id = self.node_count
if node_id >= self.children.shape[0]:
self._resize()
self.feature[node_id] = feature
self.threshold[node_id] = threshold
self.init_error[node_id] = init_error
self.best_error[node_id] = best_error
self.n_samples[node_id] = n_samples
self.value[node_id] = value
# set as left or right child of parent
if parent > Tree.LEAF:
if is_left_child:
self.children[parent, 0] = node_id
else:
self.children[parent, 1] = node_id
self.node_count += 1
return node_id
def _add_leaf(self, parent, is_left_child, value, error, n_samples):
"""Add a leaf to the tree. The new node registers itself as the
child of its parent. """
node_id = self.node_count
if node_id >= self.children.shape[0]:
self._resize()
self.value[node_id] = value
self.n_samples[node_id] = n_samples
self.init_error[node_id] = error
self.best_error[node_id] = error
if is_left_child:
self.children[parent, 0] = node_id
else:
self.children[parent, 1] = node_id
self.children[node_id, :] = Tree.LEAF
self.node_count += 1
return node_id
def _copy(self):
new_tree = Tree(self.n_classes, self.n_features, self.n_outputs)
new_tree.node_count = self.node_count
new_tree.children = self.children.copy()
new_tree.feature = self.feature.copy()
new_tree.threshold = self.threshold.copy()
new_tree.value = self.value.copy()
new_tree.best_error = self.best_error.copy()
new_tree.init_error = self.init_error.copy()
new_tree.n_samples = self.n_samples.copy()
return new_tree
@staticmethod
def _get_leaves(children):
"""Lists the leaves from the children array of a tree object"""
return np.where(np.all(children == Tree.LEAF, axis=1))[0]
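    # Illustrative sketch (editor's addition): for
    #     children = np.array([[1, 2], [-1, -1], [-1, -1]])
    # np.all(children == Tree.LEAF, axis=1) is [False, True, True], so the
    # leaf node ids returned are [1, 2].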
@property
def leaves(self):
return self._get_leaves(self.children)
def pruning_order(self, max_to_prune=None):
"""Compute the order for which the tree should be pruned.
The algorithm used is weakest link pruning. It removes first the nodes
that improve the tree the least.
Parameters
----------
max_to_prune : int, optional (default=all the nodes)
maximum number of nodes to prune
Returns
-------
nodes : numpy array
list of the nodes to remove to get to the optimal subtree.
References
----------
.. [1] J. Friedman and T. Hastie, "The elements of statistical
learning", 2001, section 9.2.1
"""
def _get_terminal_nodes(children):
"""Lists the nodes that only have leaves as children"""
leaves = self._get_leaves(children)
child_is_leaf = np.in1d(children, leaves).reshape(children.shape)
return np.where(np.all(child_is_leaf, axis=1))[0]
def _next_to_prune(tree, children=None):
"""Weakest link pruning for the subtree defined by children"""
if children is None:
children = tree.children
t_nodes = _get_terminal_nodes(children)
g_i = tree.init_error[t_nodes] - tree.best_error[t_nodes]
return t_nodes[np.argmin(g_i)]
if max_to_prune is None:
max_to_prune = self.node_count
children = self.children.copy()
nodes = list()
while True:
node = _next_to_prune(self, children)
nodes.append(node)
if (len(nodes) == max_to_prune) or (node == 0):
return np.array(nodes)
#Remove the subtree from the children array
children[children[node], :] = Tree.UNDEFINED
children[node, :] = Tree.LEAF
def prune(self, n_leaves):
"""Prunes the tree to obtain the optimal subtree with n_leaves leaves.
Parameters
----------
n_leaves : int
The final number of leaves the algorithm should bring
Returns
-------
tree : a Tree object
returns a new, pruned, tree
References
----------
.. [1] J. Friedman and T. Hastie, "The elements of statistical
learning", 2001, section 9.2.1
"""
to_remove_count = self.node_count - len(self.leaves) - n_leaves + 1
nodes_to_remove = self.pruning_order(to_remove_count)
out_tree = self._copy()
for node in nodes_to_remove:
#TODO: Add a Tree method to remove a branch of a tree
out_tree.children[out_tree.children[node], :] = Tree.UNDEFINED
out_tree.children[node, :] = Tree.LEAF
out_tree.node_count -= 2
return out_tree
def build(self, X, y, criterion, max_depth, min_samples_split,
min_samples_leaf, min_density, max_features, random_state,
find_split, sample_mask=None, X_argsorted=None):
# Recursive algorithm
def recursive_partition(X, X_argsorted, y, sample_mask, depth,
parent, is_left_child):
# Count samples
n_node_samples = sample_mask.sum()
if n_node_samples == 0:
raise ValueError("Attempting to find a split "
"with an empty sample_mask")
# Split samples
if depth < max_depth and n_node_samples >= min_samples_split \
and n_node_samples >= 2 * min_samples_leaf:
feature, threshold, best_error, init_error = find_split(
X, y, X_argsorted, sample_mask, n_node_samples,
min_samples_leaf, max_features, criterion, random_state)
else:
feature = -1
init_error = _tree._error_at_leaf(y, sample_mask, criterion,
n_node_samples)
value = criterion.init_value()
# Current node is leaf
if feature == -1:
self._add_leaf(parent, is_left_child, value,
init_error, n_node_samples)
# Current node is internal node (= split node)
else:
# Sample mask is too sparse?
if n_node_samples / X.shape[0] <= min_density:
X = X[sample_mask]
X_argsorted = np.asfortranarray(
np.argsort(X.T, axis=1).astype(np.int32).T)
y = y[sample_mask]
sample_mask = np.ones((X.shape[0],), dtype=np.bool)
                # Split and recurse
split = X[:, feature] <= threshold
node_id = self._add_split_node(parent, is_left_child, feature,
threshold, best_error,
init_error, n_node_samples,
value)
# left child recursion
recursive_partition(X, X_argsorted, y,
np.logical_and(split, sample_mask),
depth + 1, node_id, True)
# right child recursion
recursive_partition(X, X_argsorted, y,
np.logical_and(np.logical_not(split),
sample_mask),
depth + 1, node_id, False)
# Setup auxiliary data structures and check input before
# recursive partitioning
if X.dtype != DTYPE or not np.isfortran(X):
X = np.asarray(X, dtype=DTYPE, order="F")
if y.dtype != DTYPE or not y.flags.contiguous:
y = np.asarray(y, dtype=DTYPE, order="C")
n_samples = X.shape[0]
if sample_mask is None:
sample_mask = np.ones((n_samples, ), dtype=np.bool)
elif len(sample_mask) != n_samples:
raise ValueError(
"Length of sample_mask=%s does not match number of samples=%s"
% (len(sample_mask), n_samples))
if X_argsorted is None:
X_argsorted = np.asfortranarray(
np.argsort(X.T, axis=1).astype(np.int32).T)
elif len(X_argsorted) != n_samples:
raise ValueError(
"Length of X_argsorted=%s does not match number of samples=%s"
% (len(X_argsorted), n_samples))
# Pre-allocate some space
if max_depth <= 10:
# allocate space for complete binary tree
init_capacity = (2 ** (max_depth + 1)) - 1
else:
# allocate fixed size and dynamically resize later
init_capacity = 2047
self._resize(init_capacity)
# Build the tree by recursive partitioning
recursive_partition(X, X_argsorted, y, sample_mask, 0, -1, False)
# Compactify the tree data structure
self._resize(self.node_count)
return self
def predict(self, X):
out = np.empty((X.shape[0], self.value.shape[1], self.value.shape[2]),
dtype=np.float64)
_tree._predict_tree(X,
self.children,
self.feature,
self.threshold,
self.value,
out)
return out
def compute_feature_importances(self, method="gini"):
"""Computes the importance of each feature (aka variable).
        The following values of `method` are supported:
* "gini" : The difference of the initial error and the error of the
split times the number of samples that passed the node.
* "squared" : The empirical improvement in squared error.
Parameters
----------
method : str, optional (default="gini")
The method to estimate the importance of a feature. Either "gini"
or "squared".
"""
if method == "gini":
method = lambda node: (self.n_samples[node] * \
(self.init_error[node] -
self.best_error[node]))
elif method == "squared":
method = lambda node: (self.init_error[node] - \
self.best_error[node]) ** 2.0
        else:
            raise ValueError(
                'Invalid value for method. Allowed string '
                'values are "gini" or "squared".')
importances = np.zeros((self.n_features,), dtype=np.float64)
for node in range(self.node_count):
if (self.children[node, 0]
== self.children[node, 1]
== Tree.LEAF):
continue
else:
importances[self.feature[node]] += method(node)
normalizer = np.sum(importances)
if normalizer > 0.0:
# Avoid dividing by zero (e.g., when root is pure)
importances /= normalizer
return importances
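    # Illustrative sketch (editor's addition): with the default "gini" method,
    # a single split node over feature f contributes
    #     n_samples[node] * (init_error[node] - best_error[node])
    # to importances[f]; dividing by the sum normalizes the importances so
    # they sum to 1 whenever at least one split reduced the error.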
class BaseDecisionTree(BaseEstimator, SelectorMixin):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, criterion,
max_depth,
min_samples_split,
min_samples_leaf,
n_leaves,
min_density,
max_features,
compute_importances,
random_state):
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.n_leaves = n_leaves
self.min_density = min_density
self.max_features = max_features
self.compute_importances = compute_importances
self.random_state = check_random_state(random_state)
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.find_split_ = _tree._find_best_split
self.tree_ = None
self.feature_importances_ = None
def prune(self, n_leaves):
"""Prunes the decision tree
This method is necessary to avoid overfitting tree models. While broad
decision trees should be computed in the first place, pruning them
allows for smaller trees.
Parameters
----------
n_leaves : int
the number of leaves of the pruned tree
"""
self.tree_ = self.tree_.prune(n_leaves)
return self
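    # Hedged usage sketch (editor's addition):
    #     clf = DecisionTreeClassifier().fit(X, y)
    #     clf.prune(8)  # keep the 8-leaf subtree from weakest-link pruning
    # is equivalent to passing n_leaves=8 to the constructor, since fit()
    # calls prune() when n_leaves is not None.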
def fit(self, X, y, sample_mask=None, X_argsorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (integers that correspond to classes in
classification, real numbers in regression).
Returns
-------
self : object
Returns self.
"""
# set min_samples_split sensibly
self.min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Convert data
X = np.asarray(X, dtype=DTYPE, order="F")
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.copy(y)
y = np.atleast_1d(y)
if y.ndim == 1:
y = y[:, np.newaxis]
self.classes_ = []
self.n_classes_ = []
self.n_outputs_ = y.shape[1]
if is_classification:
for k in xrange(self.n_outputs_):
unique = np.unique(y[:, k])
self.classes_.append(unique)
self.n_classes_.append(unique.shape[0])
y[:, k] = np.searchsorted(unique, y[:, k])
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
y = np.asarray(y, dtype=DTYPE, order="C")
if is_classification:
criterion = CLASSIFICATION[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = REGRESSION[self.criterion](self.n_outputs_)
# Check parameters
max_depth = np.inf if self.max_depth is None else self.max_depth
if isinstance(self.max_features, basestring):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
else:
max_features = self.max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if self.min_density < 0.0 or self.min_density > 1.0:
raise ValueError("min_density must be in [0, 1]")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
# Build tree
self.tree_ = Tree(self.n_classes_, self.n_features_, self.n_outputs_)
self.tree_.build(X, y, criterion, max_depth,
self.min_samples_split, self.min_samples_leaf,
self.min_density, max_features, self.random_state,
self.find_split_, sample_mask=sample_mask,
X_argsorted=X_argsorted)
if self.compute_importances:
self.feature_importances_ = \
self.tree_.compute_feature_importances()
if self.n_leaves is not None:
self.prune(self.n_leaves)
return self
def predict(self, X):
"""Predict class or regression target for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = array2d(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
P = self.tree_.predict(X)
if isinstance(self, ClassifierMixin):
predictions = np.zeros((n_samples, self.n_outputs_))
for k in xrange(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(P[:, k],
axis=1),
axis=0)
else:
predictions = P[:, :, 0]
if self.n_outputs_ == 1:
predictions = predictions.reshape((n_samples, ))
return predictions
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
n_leaves : integer, optional (default=None)
The number of leaves of the post-pruned tree. If None, no post-pruning
will be run.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
max_features : int, string or None, optional (default=None)
The number of features to consider when looking for the best split.
If "auto", then `max_features=sqrt(n_features)` on classification
tasks and `max_features=n_features` on regression problems. If "sqrt",
then `max_features=sqrt(n_features)`. If "log2", then
`max_features=log2(n_features)`. If None, then
`max_features=n_features`.
compute_importances : boolean, optional (default=True)
Whether feature importances are computed and stored into the
``feature_importances_`` attribute when calling fit.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
`tree_` : Tree object
The underlying Tree object.
`feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
The importance I(f) of a feature f is computed as the (normalized)
total reduction of error brought by that feature. It is also known as
the Gini importance [4]_.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self, criterion="gini",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
n_leaves=None,
min_density=0.1,
max_features=None,
compute_importances=False,
random_state=None):
super(DecisionTreeClassifier, self).__init__(criterion,
max_depth,
min_samples_split,
min_samples_leaf,
n_leaves,
min_density,
max_features,
compute_importances,
random_state)
def predict_proba(self, X):
"""Predict class probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by arithmetical order.
"""
X = array2d(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first.")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = []
P = self.tree_.predict(X)
for k in xrange(self.n_outputs_):
P_k = P[:, k, :self.n_classes_[k]]
normalizer = P_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
P_k /= normalizer
proba.append(P_k)
if self.n_outputs_ == 1:
return proba[0]
else:
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. Classes are
ordered by arithmetical order.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in xrange(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A tree regressor.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
n_leaves : integer, optional (default=None)
The number of leaves of the post-pruned tree. If None, no post-pruning
will be run.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
max_features : int, string or None, optional (default=None)
The number of features to consider when looking for the best split.
If "auto", then `max_features=sqrt(n_features)` on classification
tasks and `max_features=n_features` on regression problems. If "sqrt",
then `max_features=sqrt(n_features)`. If "log2", then
`max_features=log2(n_features)`. If None, then
`max_features=n_features`.
compute_importances : boolean, optional (default=True)
Whether feature importances are computed and stored into the
``feature_importances_`` attribute when calling fit.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
`tree_` : Tree object
The underlying Tree object.
`feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
The importance I(f) of a feature f is computed as the (normalized)
total reduction of error brought by that feature. It is also known as
the Gini importance [4]_.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
    R2 scores (a.k.a. coefficient of determination) over 10-fold CV:
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self, criterion="mse",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
n_leaves=None,
min_density=0.1,
max_features=None,
compute_importances=False,
random_state=None):
super(DecisionTreeRegressor, self).__init__(criterion,
max_depth,
min_samples_split,
min_samples_leaf,
n_leaves,
min_density,
max_features,
compute_importances,
random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self, criterion="gini",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
n_leaves=None,
min_density=0.1,
max_features="auto",
compute_importances=False,
random_state=None):
super(ExtraTreeClassifier, self).__init__(criterion,
max_depth,
min_samples_split,
min_samples_leaf,
n_leaves,
min_density,
max_features,
compute_importances,
random_state)
self.find_split_ = _tree._find_best_random_split
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
    ExtraTreeClassifier : A classifier based on extremely randomized trees
sklearn.ensemble.ExtraTreesClassifier : An ensemble of extra-trees for
classification
sklearn.ensemble.ExtraTreesRegressor : An ensemble of extra-trees for
regression
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self, criterion="mse",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
n_leaves=None,
min_density=0.1,
max_features="auto",
compute_importances=False,
random_state=None):
super(ExtraTreeRegressor, self).__init__(criterion,
max_depth,
min_samples_split,
min_samples_leaf,
n_leaves,
min_density,
max_features,
compute_importances,
random_state)
self.find_split_ = _tree._find_best_random_split
def prune_path(clf, X, y, max_n_leaves=10, n_iterations=10,
test_size=0.1, random_state=None):
"""Cross validation of scores for different values of the decision tree.
This function allows to test what the optimal size of the post-pruned
decision tree should be. It computes cross validated scores for different
size of the tree.
Parameters
----------
    clf : decision tree estimator object
        The object to use to fit the data.
    X : array-like of shape at least 2D
        The data to fit.
    y : array-like
        The target variable to try to predict.
    max_n_leaves : int, optional (default=10)
        Maximum number of leaves for the pruned tree.
n_iterations : int, optional (default=10)
Number of re-shuffling & splitting iterations.
test_size : float (default=0.1) or int
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
scores : list of list of floats
        The scores of the computed cross validated trees grouped by tree size.
        scores[0] corresponds to the trees of size max_n_leaves and
        scores[-1] to the trees with just two leaves.
"""
from ..base import clone
from ..cross_validation import ShuffleSplit
scores = list()
kf = ShuffleSplit(len(y), n_iterations, test_size,
random_state=random_state)
for train, test in kf:
estimator = clone(clf)
fitted = estimator.fit(X[train], y[train])
loc_scores = list()
for i in range(max_n_leaves, 1, -1):
            # We loop from the larger sizes down to the smaller ones so the
            # full tree is computed once and then progressively pruned.
fitted.prune(n_leaves=i)
loc_scores.append(fitted.score(X[test], y[test]))
scores.append(loc_scores)
return zip(*scores)
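# A minimal usage sketch for prune_path (illustrative helper, not part of
# the public API): average the per-size cross-validated scores and map the
# best position back to a number of leaves. load_boston matches the
# regressor docstring example above; everything else is an assumption.
def _demo_pick_n_leaves(max_n_leaves=20):
    from sklearn.datasets import load_boston
    boston = load_boston()
    clf = DecisionTreeRegressor(random_state=0)
    scores = list(prune_path(clf, boston.data, boston.target,
                             max_n_leaves=max_n_leaves,
                             n_iterations=5, random_state=0))
    # scores[i] holds the cross-validated scores of trees with
    # (max_n_leaves - i) leaves, as documented above.
    means = [sum(s) / float(len(s)) for s in scores]
    return max_n_leaves - means.index(max(means))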
|
sgenoud/scikit-learn
|
sklearn/tree/tree.py
|
Python
|
bsd-3-clause
| 44,475
|
try:
from pandas.plotting._converter import DatetimeConverter
except ImportError:
from pandas.tseries.converter import DatetimeConverter
import pandas as pd
from pandas import to_datetime, date_range, Series, DataFrame, period_range
import datetime as dt
from pandas.tseries.frequencies import infer_freq
import numpy as np
if hasattr(Series, 'convert'):
Series.resample = Series.convert
class DatetimeIndex(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
self.rng2 = date_range(start='1/1/2000 9:30', periods=10000,
freq='S', tz='US/Eastern')
self.index_repeated = date_range(start='1/1/2000',
periods=1000, freq='T').repeat(10)
self.rng3 = date_range(start='1/1/2000', periods=1000, freq='H')
self.df = DataFrame(np.random.randn(len(self.rng3), 2), self.rng3)
self.rng4 = date_range(start='1/1/2000', periods=1000,
freq='H', tz='US/Eastern')
self.df2 = DataFrame(np.random.randn(len(self.rng4), 2),
index=self.rng4)
N = 100000
self.dti = pd.date_range('2011-01-01', freq='H', periods=N).repeat(5)
self.dti_tz = pd.date_range('2011-01-01', freq='H', periods=N,
tz='Asia/Tokyo').repeat(5)
self.rng5 = date_range(start='1/1/2000',
end='3/1/2000', tz='US/Eastern')
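        # Repeat the 01:00-01:59 hour so the index reproduces the ambiguous
        # wall-clock times of the US DST fall-back on 2000-10-29, feeding
        # the tz_localize(..., infer_dst=True) benchmark below.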
self.dst_rng = date_range(start='10/29/2000 1:00:00',
end='10/29/2000 1:59:59', freq='S')
self.index = date_range(start='10/29/2000',
end='10/29/2000 00:59:59', freq='S')
self.index = self.index.append(self.dst_rng)
self.index = self.index.append(self.dst_rng)
self.index = self.index.append(date_range(start='10/29/2000 2:00:00',
end='10/29/2000 3:00:00',
freq='S'))
self.N = 10000
self.rng6 = date_range(start='1/1/1', periods=self.N, freq='B')
self.rng7 = date_range(start='1/1/1700', freq='D', periods=100000)
self.no_freq = self.rng7[:50000].append(self.rng7[50002:])
self.d_freq = self.rng7[:50000].append(self.rng7[50000:])
self.rng8 = date_range(start='1/1/1700', freq='B', periods=75000)
self.b_freq = self.rng8[:50000].append(self.rng8[50000:])
def time_add_timedelta(self):
(self.rng + dt.timedelta(minutes=2))
def time_normalize(self):
self.rng2.normalize()
def time_unique(self):
self.index_repeated.unique()
def time_reset_index(self):
self.df.reset_index()
def time_reset_index_tz(self):
self.df2.reset_index()
def time_dti_factorize(self):
self.dti.factorize()
def time_dti_tz_factorize(self):
self.dti_tz.factorize()
def time_dti_time(self):
self.dst_rng.time
def time_timestamp_tzinfo_cons(self):
self.rng5[0]
def time_infer_dst(self):
self.index.tz_localize('US/Eastern', infer_dst=True)
def time_timeseries_is_month_start(self):
self.rng6.is_month_start
def time_infer_freq_none(self):
infer_freq(self.no_freq)
def time_infer_freq_daily(self):
infer_freq(self.d_freq)
def time_infer_freq_business(self):
infer_freq(self.b_freq)
def time_to_date(self):
self.rng.date
def time_to_pydatetime(self):
self.rng.to_pydatetime()
class TimeDatetimeConverter(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
def time_convert(self):
DatetimeConverter.convert(self.rng, None, None)
class Iteration(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.M = 10000
self.idx1 = date_range(start='20140101', freq='T', periods=self.N)
self.idx2 = period_range(start='20140101', freq='T', periods=self.N)
def iter_n(self, iterable, n=None):
self.i = 0
for _ in iterable:
self.i += 1
if ((n is not None) and (self.i > n)):
break
def time_iter_datetimeindex(self):
self.iter_n(self.idx1)
def time_iter_datetimeindex_preexit(self):
self.iter_n(self.idx1, self.M)
def time_iter_periodindex(self):
self.iter_n(self.idx2)
def time_iter_periodindex_preexit(self):
self.iter_n(self.idx2, self.M)
# ----------------------------------------------------------------------
# Resampling
class ResampleDataFrame(object):
goal_time = 0.2
def setup(self):
self.rng = date_range(start='20130101', periods=100000, freq='50L')
self.df = DataFrame(np.random.randn(100000, 2), index=self.rng)
def time_max_numpy(self):
self.df.resample('1s', how=np.max)
def time_max_string(self):
self.df.resample('1s', how='max')
def time_mean_numpy(self):
self.df.resample('1s', how=np.mean)
def time_mean_string(self):
self.df.resample('1s', how='mean')
def time_min_numpy(self):
self.df.resample('1s', how=np.min)
def time_min_string(self):
self.df.resample('1s', how='min')
class ResampleSeries(object):
goal_time = 0.2
def setup(self):
self.rng1 = period_range(start='1/1/2000', end='1/1/2001', freq='T')
self.ts1 = Series(np.random.randn(len(self.rng1)), index=self.rng1)
self.rng2 = date_range(start='1/1/2000', end='1/1/2001', freq='T')
self.ts2 = Series(np.random.randn(len(self.rng2)), index=self.rng2)
self.rng3 = date_range(start='2000-01-01 00:00:00',
end='2000-01-01 10:00:00', freq='555000U')
self.int_ts = Series(5, self.rng3, dtype='int64')
self.dt_ts = self.int_ts.astype('datetime64[ns]')
def time_period_downsample_mean(self):
self.ts1.resample('D', how='mean')
def time_timestamp_downsample_mean(self):
self.ts2.resample('D', how='mean')
def time_resample_datetime64(self):
# GH 7754
self.dt_ts.resample('1S', how='last')
def time_1min_5min_mean(self):
self.ts2[:10000].resample('5min', how='mean')
def time_1min_5min_ohlc(self):
self.ts2[:10000].resample('5min', how='ohlc')
class AsOf(object):
goal_time = 0.2
def setup(self):
self.N = 10000
self.rng = date_range(start='1/1/1990', periods=self.N, freq='53s')
self.ts = Series(np.random.randn(self.N), index=self.rng)
self.dates = date_range(start='1/1/1990',
periods=(self.N * 10), freq='5s')
self.ts2 = self.ts.copy()
self.ts2[250:5000] = np.nan
self.ts3 = self.ts.copy()
self.ts3[-5000:] = np.nan
# test speed of pre-computing NAs.
def time_asof(self):
self.ts.asof(self.dates)
# should be roughly the same as above.
def time_asof_nan(self):
self.ts2.asof(self.dates)
# test speed of the code path for a scalar index
# without *while* loop
def time_asof_single(self):
self.ts.asof(self.dates[0])
# test speed of the code path for a scalar index
# before the start. should be the same as above.
def time_asof_single_early(self):
self.ts.asof(self.dates[0] - dt.timedelta(10))
# test the speed of the code path for a scalar index
# with a long *while* loop. should still be much
# faster than pre-computing all the NAs.
def time_asof_nan_single(self):
self.ts3.asof(self.dates[-1])
class AsOfDataFrame(object):
goal_time = 0.2
def setup(self):
self.N = 10000
self.M = 100
self.rng = date_range(start='1/1/1990', periods=self.N, freq='53s')
self.dates = date_range(start='1/1/1990',
periods=(self.N * 10), freq='5s')
self.ts = DataFrame(np.random.randn(self.N, self.M), index=self.rng)
self.ts2 = self.ts.copy()
self.ts2.iloc[250:5000] = np.nan
self.ts3 = self.ts.copy()
self.ts3.iloc[-5000:] = np.nan
# test speed of pre-computing NAs.
def time_asof(self):
self.ts.asof(self.dates)
# should be roughly the same as above.
def time_asof_nan(self):
self.ts2.asof(self.dates)
# test speed of the code path for a scalar index
# with pre-computing all NAs.
def time_asof_single(self):
self.ts.asof(self.dates[0])
# should be roughly the same as above.
def time_asof_nan_single(self):
self.ts3.asof(self.dates[-1])
# test speed of the code path for a scalar index
# before the start. should be without the cost of
# pre-computing all the NAs.
def time_asof_single_early(self):
self.ts.asof(self.dates[0] - dt.timedelta(10))
class TimeSeries(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='s')
self.rng = self.rng.take(np.random.permutation(self.N))
self.ts = Series(np.random.randn(self.N), index=self.rng)
self.rng2 = date_range(start='1/1/2000', periods=self.N, freq='T')
self.ts2 = Series(np.random.randn(self.N), index=self.rng2)
self.lindex = np.random.permutation(self.N)[:(self.N // 2)]
self.rindex = np.random.permutation(self.N)[:(self.N // 2)]
self.left = Series(self.ts2.values.take(self.lindex),
index=self.ts2.index.take(self.lindex))
self.right = Series(self.ts2.values.take(self.rindex),
index=self.ts2.index.take(self.rindex))
self.rng3 = date_range(start='1/1/2000', periods=1500000, freq='S')
self.ts3 = Series(1, index=self.rng3)
def time_sort_index_monotonic(self):
self.ts2.sort_index()
def time_sort_index_non_monotonic(self):
self.ts.sort_index()
def time_timeseries_slice_minutely(self):
self.ts2[:10000]
def time_add_irregular(self):
(self.left + self.right)
def time_large_lookup_value(self):
self.ts3[self.ts3.index[(len(self.ts3) // 2)]]
self.ts3.index._cleanup()
class ToDatetime(object):
goal_time = 0.2
def setup(self):
self.rng = date_range(start='1/1/2000', periods=10000, freq='D')
self.stringsD = Series(self.rng.strftime('%Y%m%d'))
self.rng = date_range(start='1/1/2000', periods=20000, freq='H')
self.strings = self.rng.strftime('%Y-%m-%d %H:%M:%S').tolist()
self.strings_nosep = self.rng.strftime('%Y%m%d %H:%M:%S').tolist()
self.strings_tz_space = [x.strftime('%Y-%m-%d %H:%M:%S') + ' -0800'
for x in self.rng]
self.s = Series((['19MAY11', '19MAY11:00:00:00'] * 100000))
self.s2 = self.s.str.replace(':\\S+$', '')
self.unique_numeric_seconds = range(10000)
self.dup_numeric_seconds = [1000] * 10000
self.dup_string_dates = ['2000-02-11'] * 10000
self.dup_string_with_tz = ['2000-02-11 15:00:00-0800'] * 10000
def time_format_YYYYMMDD(self):
to_datetime(self.stringsD, format='%Y%m%d')
def time_iso8601(self):
to_datetime(self.strings)
def time_iso8601_nosep(self):
to_datetime(self.strings_nosep)
def time_iso8601_format(self):
to_datetime(self.strings, format='%Y-%m-%d %H:%M:%S')
def time_iso8601_format_no_sep(self):
to_datetime(self.strings_nosep, format='%Y%m%d %H:%M:%S')
def time_iso8601_tz_spaceformat(self):
to_datetime(self.strings_tz_space)
def time_format_exact(self):
to_datetime(self.s2, format='%d%b%y')
def time_format_no_exact(self):
to_datetime(self.s, format='%d%b%y', exact=False)
def time_cache_true_with_unique_seconds_and_unit(self):
to_datetime(self.unique_numeric_seconds, unit='s', cache=True)
def time_cache_false_with_unique_seconds_and_unit(self):
to_datetime(self.unique_numeric_seconds, unit='s', cache=False)
def time_cache_true_with_dup_seconds_and_unit(self):
to_datetime(self.dup_numeric_seconds, unit='s', cache=True)
def time_cache_false_with_dup_seconds_and_unit(self):
to_datetime(self.dup_numeric_seconds, unit='s', cache=False)
def time_cache_true_with_dup_string_dates(self):
to_datetime(self.dup_string_dates, cache=True)
def time_cache_false_with_dup_string_dates(self):
to_datetime(self.dup_string_dates, cache=False)
def time_cache_true_with_dup_string_dates_and_format(self):
to_datetime(self.dup_string_dates, format='%Y-%m-%d', cache=True)
def time_cache_false_with_dup_string_dates_and_format(self):
to_datetime(self.dup_string_dates, format='%Y-%m-%d', cache=False)
def time_cache_true_with_dup_string_tzoffset_dates(self):
to_datetime(self.dup_string_with_tz, cache=True)
def time_cache_false_with_dup_string_tzoffset_dates(self):
to_datetime(self.dup_string_with_tz, cache=False)
class DatetimeAccessor(object):
def setup(self):
self.N = 100000
self.series = pd.Series(
pd.date_range(start='1/1/2000', periods=self.N, freq='T')
)
def time_dt_accessor(self):
self.series.dt
def time_dt_accessor_normalize(self):
self.series.dt.normalize()
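# A hand-run sketch of the asv protocol (assumed here: asv instantiates a
# benchmark class, calls setup(), then times each time_* method); the
# timeit harness below only mirrors that for a quick local check.
if __name__ == '__main__':
    import timeit
    bench = DatetimeAccessor()
    bench.setup()
    per_call = timeit.timeit(bench.time_dt_accessor_normalize, number=10) / 10
    print("time_dt_accessor_normalize: %.6f s per call" % per_call)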
|
zfrenchee/pandas
|
asv_bench/benchmarks/timeseries.py
|
Python
|
bsd-3-clause
| 13,637
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#
# Coons Warp v1.0,
# released 23.01.2016
#
#
# Based on Paul S. Heckbert's "Bilinear Coons patch image warping",
# Graphics Gems IV, pages 438-446
# http://dl.acm.org/citation.cfm?id=180937
#
#
# Copyright (c) 2016, Mario Klingemann, mario@quasimondo.com
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#     * Neither the name of Mario Klingemann nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL MARIO KLINGEMANN BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from scipy.spatial import ConvexHull, KDTree, distance
import numpy as np
from scipy import interpolate
import math
def warpCloud( xyc, sourceGridPoints, targetGridPoints, warpQuality=9 ):
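    # Inverse-distance-weighted warp: each input point is mapped to the
    # weighted mean of the target positions of its warpQuality nearest
    # source-grid points; an exact hit on a grid point short-circuits the
    # blend (a 1/0 weight would blow up otherwise).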
sourceTree = KDTree(sourceGridPoints, leafsize=10)
warpedXYC = []
for c in xyc:
nearestEdge = sourceTree.query(c,k=warpQuality)
nx = 0.0
ny = 0.0
ws = 0.0
for i in range(warpQuality):
p = targetGridPoints[nearestEdge[1][i]]
w = nearestEdge[0][i]
if w == 0.0:
nx = p[0]
ny = p[1]
ws = 1.0
break
else:
w = 1.0 / w
nx += w * p[0]
ny += w * p[1]
ws += w
warpedXYC.append([nx/ws,ny/ws])
warpedXYC = np.array(warpedXYC)
return warpedXYC
def getCloudGrid( xyc,autoPerimeterOffset=True,autoPerimeterDensity=True,
width=64, height=64, perimeterSubdivisionSteps=4, paddingScale=1.05,
smoothing=0.001, warpQuality=9, perimeterOffset=None ):
bounds, densities = getCloudHull(xyc, width=width, height=height, perimeterSubdivisionSteps=perimeterSubdivisionSteps,
smoothing=smoothing,autoPerimeterOffset=autoPerimeterOffset,
perimeterOffset=perimeterOffset,autoPerimeterDensity=autoPerimeterDensity)
return getCoonsGrid(bounds, width=width, height=height,densities=densities,paddingScale=paddingScale)
def rectifyCloud(xyc,autoPerimeterOffset=True,autoPerimeterDensity=True,
width=64, height=64,
perimeterSubdivisionSteps=4, paddingScale=1.05,
smoothing=0.001, warpQuality=9, perimeterOffset=None ):
    sourceGridPoints = getCloudGrid( xyc,autoPerimeterOffset=autoPerimeterOffset,autoPerimeterDensity=autoPerimeterDensity,
                                     width=width, height=height,
perimeterSubdivisionSteps=perimeterSubdivisionSteps, paddingScale=paddingScale,
smoothing=smoothing, warpQuality=warpQuality, perimeterOffset=perimeterOffset)
targetGridPoints = []
for yi in range(height):
for xi in range(width):
targetGridPoints.append([xi,yi])
return warpCloud( xyc, sourceGridPoints, targetGridPoints, warpQuality=warpQuality )
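# A minimal usage sketch on synthetic data, using only functions from this
# module: scatter a Gaussian cloud and pull it onto a regular 64x64 raster.
def _demo_rectify(side=64):
    cloud = np.random.randn(side * side, 2)
    return rectifyCloud(cloud, width=side, height=side)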
def getCloudHull(xyc,width=64,height=64,perimeterSubdivisionSteps=4,smoothing=0.001,
autoPerimeterOffset=True, perimeterOffset=None, autoPerimeterDensity=True):
tree = KDTree(xyc, leafsize=10)
hull = ConvexHull(xyc)
hullPoints = []
hullIndices = {}
for i in range(len(hull.vertices)):
hullIndices[hull.vertices[i]] = True
hullPoints.append(xyc[hull.vertices[i]])
for j in range(perimeterSubdivisionSteps):
io = 0
for i in range(len(hullPoints)):
index = tree.query(lerp(hullPoints[i+io],hullPoints[(i+1+io)%len(hullPoints)],0.5))[1]
if not (index in hullIndices):
hullPoints.insert( i+io+1, xyc[index])
hullIndices[index] = True
io += 1
perimeterLength = 0
for i in range(len(hullPoints)):
perimeterLength += distance.euclidean(hullPoints[i],hullPoints[(i+1)%len(hullPoints)])
perimeterCount = 2 * (width + height) - 4
perimeterStep = perimeterLength / perimeterCount
perimeterPoints = []
perimeterDensity = np.zeros(perimeterCount)
for i in range(perimeterCount):
t = 1.0 * i / perimeterCount
poh = getPointOnHull(hullPoints,t,perimeterLength)
perimeterPoints.append(poh)
perimeterDensity[i] = np.mean(tree.query(poh,k=32)[0])
if autoPerimeterOffset:
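        # Scan candidate rotations of the perimeter and keep the offset that
        # minimises the summed mean neighbour distance at the four
        # prospective grid corners, i.e. the rotation whose corners sit
        # where the cloud is densest.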
bestDensity = perimeterDensity[0] + perimeterDensity[width-1] + perimeterDensity[width+height-2] + perimeterDensity[2*width+height-3]
perimeterOffset = 0
for i in range(1,width+height ):
density = perimeterDensity[i] + perimeterDensity[(i+width-1)%perimeterCount] + perimeterDensity[(i+width+height-2)%perimeterCount] + perimeterDensity[(i+2*width+height-3)%perimeterCount]
if density < bestDensity:
bestDensity = density
perimeterOffset = i
elif perimeterOffset is None:
perimeterOffset = 0
corner = [np.min(xyc[:,0]),np.min(xyc[:,1])]
d = corner-perimeterPoints[0]
        closestDistanceToCorner = np.hypot(d[0],d[1])
for i in range(1,perimeterCount):
d = corner-perimeterPoints[i]
distanceToCorner = np.hypot(d[0],d[1])
            if distanceToCorner < closestDistanceToCorner:
                closestDistanceToCorner = distanceToCorner
                perimeterOffset = i
perimeterPoints = np.array(perimeterPoints)
if perimeterOffset > 0:
perimeterPoints[:,0] = np.roll(perimeterPoints[:,0], - perimeterOffset)
perimeterPoints[:,1] = np.roll(perimeterPoints[:,1], - perimeterOffset)
perimeterPoints = np.append(perimeterPoints,[perimeterPoints[0]],axis=0)
bounds = {'top':perimeterPoints[0:width],
'right':perimeterPoints[width-1:width+height-1],
'bottom':perimeterPoints[width+height-2:2*width+height-2],
'left':perimeterPoints[2*width+height-3:]}
bounds['s_top'],u = interpolate.splprep([bounds['top'][:,0], bounds['top'][:,1]],s=smoothing)
bounds['s_right'],u = interpolate.splprep([bounds['right'][:,0],bounds['right'][:,1]],s=smoothing)
bounds['s_bottom'],u = interpolate.splprep([bounds['bottom'][:,0],bounds['bottom'][:,1]],s=smoothing)
bounds['s_left'],u = interpolate.splprep([bounds['left'][:,0],bounds['left'][:,1]],s=smoothing)
densities = None
if autoPerimeterDensity:
densities = {}
density_top = np.zeros(len(bounds['top']))
for i in range(len(density_top)):
t = 1.0 * i / len(density_top)
density_top[i] = np.mean(tree.query( np.array(interpolate.splev( t,bounds['s_top'])).flatten(),k=64)[0])
density_top /= np.sum(density_top)
density_right = np.zeros(len(bounds['right']))
for i in range(len(density_right)):
t = 1.0 * i / len(density_right)
density_right[i] = np.mean(tree.query( np.array(interpolate.splev( t,bounds['s_right'])).flatten(),k=64)[0])
density_right /= np.sum(density_right)
density_bottom = np.zeros(len(bounds['bottom']))
for i in range(len(density_bottom)):
t = 1.0 * i / len(density_bottom)
density_bottom[i] = np.mean(tree.query( np.array(interpolate.splev( t,bounds['s_bottom'])).flatten(),k=64)[0])
density_bottom /= np.sum(density_bottom)
density_left = np.zeros(len(bounds['left']))
for i in range(len(density_left)):
t = 1.0 * i / len(density_left)
density_left[i] = np.mean(tree.query( np.array(interpolate.splev( t,bounds['s_left'])).flatten(),k=64)[0])
density_left /= np.sum(density_left)
densities = {'top':density_top,'right':density_right,'bottom':density_bottom,'left':density_left}
return bounds, densities
def getCircularGrid( fitCloud=None, width=64, height=64, paddingScale=1.0):
return getCoonsGrid(getCircularBounds(fitCloud=fitCloud,width=width,height=height),width=width,height=height, paddingScale=paddingScale)
def getCircularBounds(fitCloud=None,width=64,height=64,smoothing=0.01):
circumference = 2*(width+height)
    if fitCloud is not None:
cx = np.mean(fitCloud[:,0])
cy = np.mean(fitCloud[:,1])
r = 0.5* max( np.max(fitCloud[:,0])- np.min(fitCloud[:,0]),np.max(fitCloud[:,1])- np.min(fitCloud[:,1]))
else:
r = circumference /(2.0*math.pi)
cx = cy = r
perimeterPoints = np.zeros((circumference,2),dtype=float)
for i in range(circumference):
angle = (2.0*math.pi)*float(i) / circumference - math.pi * 0.5
perimeterPoints[i][0] = cx + r * math.cos(angle)
perimeterPoints[i][1] = cy + r * math.sin(angle)
bounds = {'top':perimeterPoints[0:width],
'right':perimeterPoints[width-1:width+height-1],
'bottom':perimeterPoints[width+height-2:2*width+height-2],
'left':perimeterPoints[2*width+height-3:]}
bounds['s_top'],u = interpolate.splprep([bounds['top'][:,0], bounds['top'][:,1]],s=smoothing)
bounds['s_right'],u = interpolate.splprep([bounds['right'][:,0],bounds['right'][:,1]],s=smoothing)
bounds['s_bottom'],u = interpolate.splprep([bounds['bottom'][:,0],bounds['bottom'][:,1]],s=smoothing)
bounds['s_left'],u = interpolate.splprep([bounds['left'][:,0],bounds['left'][:,1]],s=smoothing)
return bounds
def getCoonsGrid( bounds, width=64, height=64, densities=None, paddingScale=1.0):
targets = []
for yi in range(height):
for xi in range(width):
targets.append(getCoonsPatchPointBez(bounds,xi,yi,width,height,densities=densities))
targets = np.array(targets)
tmean = [np.mean(targets[:,0]),np.mean(targets[:,1])]
targets -= tmean
targets *= paddingScale
targets += tmean
return targets
def getCoonsPatchPointBez(bounds,x,y,width,height, densities = None):
p00 = np.array(interpolate.splev( 0.0,bounds['s_top'])).flatten()
p10 = np.array(interpolate.splev( 1.0,bounds['s_top'])).flatten()
p11 = np.array(interpolate.splev( 0.0,bounds['s_bottom'])).flatten()
p01 = np.array(interpolate.splev( 1.0,bounds['s_bottom'])).flatten()
u = 1.0 * x / (width-1)
v = 1.0 * y / (height-1)
iu = 1.0 - u
iv = 1.0 - v
if densities is None:
pu0 = np.array(interpolate.splev( u,bounds['s_top'])).flatten()
pu1 = np.array(interpolate.splev(iu,bounds['s_bottom'])).flatten()
pv0 = np.array(interpolate.splev(iv,bounds['s_left'])).flatten()
pv1 = np.array(interpolate.splev( v,bounds['s_right'])).flatten()
else:
ut = 0.0
ub = 0.0
for i in range(x):
ut+=densities['top'][i]
ub+=densities['bottom'][i]
vl = 0.0
vr = 0.0
for i in range(y):
vl+=densities['left'][i]
vr+=densities['right'][i]
pu0 = np.array(interpolate.splev( ut,bounds['s_top'])).flatten()
pu1 = np.array(interpolate.splev(1.0-ub,bounds['s_bottom'])).flatten()
        pv0 = np.array(interpolate.splev(1.0-vl,bounds['s_left'])).flatten()
pv1 = np.array(interpolate.splev( vr,bounds['s_right'])).flatten()
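    # Bilinearly blended Coons patch: boundary contributions weighted by the
    # opposite parameter, minus the bilinear corner correction terms.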
return iv * pu0 + v * pu1 + iu * pv0 + u * pv1 - iu * iv * p00 - u * iv * p10 - iu * v * p01 - u * v * p11
def lerp( p1, p2, t):
return (1.0-t)*p1+t*p2
def leftOrRight(p,l1,l2):
return np.sign((l2[0] - l1[0]) * (p[1] - l1[1]) - (l2[1] - l1[1]) * (p[0] - l1[0]))
def getPointOnHull( hullPoints,t, totalLength ):
lh = len(hullPoints)
for j in range(lh+1):
sideLength = distance.euclidean(hullPoints[j%lh],hullPoints[(j+1)%lh])
        t_sub = sideLength / totalLength
        if t > t_sub:
            t -= t_sub
        else:
            return lerp(hullPoints[j%lh], hullPoints[(j+1)%lh], t / t_sub)
|
Quasimondo/RasterFairy
|
rasterfairy/coonswarp.py
|
Python
|
bsd-3-clause
| 13,205
|
#!/usr/bin/python
#Copyright (c) 2012, Carnegie Mellon University.
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions
#are met:
#1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#wrapper to run SPIRIT
#@author ishafer
import os, json, sys, optparse
#if importing wx on the fly is desirable
from numpy import *
from spirit import *
from pylab import *
sys.path.append(os.path.abspath("../"))
from preprocess import *
from analysis import *
#run an experiment with spirit, logging enabled
#minitial: initial number of principal components
def run_spirit(dm, minitial):
sp = Spirit(dm.nattrs(), minitial)
data = dm.get_data()
print "Data shape sending to SPIRIT:", data.shape
sp.run(data, True)
reclog = sp.getreclog()
return (data, reclog, sp)
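#hedged sketch on synthetic data: only the Spirit calls exercised above
#(constructor, run, getreclog) are assumed; the mixed sinusoids and the
#helper name are illustrative, recon_error_all comes from the analysis import
def demo_spirit_synthetic(nattrs=4, nticks=200, minitial=2):
    t = linspace(0, 8 * pi, nticks)
    latent = vstack([sin(t), cos(0.5 * t)])  #two hidden variables
    data = dot(randn(nattrs, 2), latent).T   #mix them into nattrs streams
    sp = Spirit(nattrs, minitial)
    sp.run(data, True)
    return recon_error_all(data, sp.getreclog())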
#plot error as a function of number of initial principal components
#loc: output file
def error_vs_minitial(loc,dm,mmax):
(pcs, errs, nworses) = vary_pcs(dm,mmax)
MARKERSIZE = 15
figure()
plot(pcs, errs, 'b.-', markersize=MARKERSIZE)
ylim(ymin=0)
xlabel("Number of Principal Components")
ylabel("R^2")
twinx()
plot(pcs, nworses, 'r.-', markersize=MARKERSIZE)
ylim(ymin=0)
ylabel("# Mean Superior")
savefig(loc,format='png')
show()
#vary the number of initial principal components for SPIRIT IPCA
def vary_pcs(dm,mmax):
pcs = zeros((mmax))
errs = zeros((mmax))
nworses = zeros((mmax))
for nrm in xrange(1,mmax+1):
(data, reclog, sp) = run_spirit(dm, nrm)
(err, worse) = recon_error_all(data, reclog)
pcs[nrm-1] = nrm
errs[nrm-1] = err
nworses[nrm-1] = len(worse)
print "It would have been better to store the mean for..."
for wdx in worse:
print "%4s %s" % (wdx, dm.attrname(wdx))
return (pcs, errs, nworses)
#test effect of normalization
#loc: output file
#dm: datamatrix
def normalization_effect(loc,dm):
NPCS = 25
dm.print_stats()
(pcs, errs, nworses) = vary_pcs(dm,NPCS)
dm.transform_all()
dm.print_stats()
(pcs2, errs2, nworses2) = vary_pcs(dm,NPCS)
MARKERSIZE = 15
figure()
plot(pcs, errs, 'b.-', markersize=MARKERSIZE)
ylim(ymin=0,ymax=1)
xlabel("Number of principal components")
ylabel("R^2")
plot(pcs, errs2, 'g.-', markersize=MARKERSIZE)
legend(['unnormalized','normalized'],'lower right')
show()
def plot_recon_error_all(data, reclog, dm, maxplots, wastransformed):
matplotlib.rc("lines", linewidth=5)
for i in xrange(min(maxplots, len(data[0]))):
loc = os.path.join(tmpdir, "recon" + str(i) + ".png")
xs = data[:,i]
ys = reclog[:,i]
if wastransformed:
xs = dm.untransform(xs, i)
ys = dm.untransform(ys, i)
#to save data rather than plot it
#numpy.savez(loc, xs=xs, ys=ys)
#numpy.savetxt(loc,(xs,ys))
plot_recon(loc, xs, ys, thetitle=dm.attrname(i), showlegend=False)
print "Reconstruction error was: ", recon_error_all(data,reclog)
def plot_hvs_all(ylog, dm, maxplots):
matplotlib.rc("lines", linewidth=5)
for i in xrange(min(maxplots, ylog.shape[1])):
loc = os.path.join(tmpdir, "hv" + str(i) + ".png")
hv = ylog[:,i]
figure()
plot(hv, 'r-')
title("Hidden variable " + str(i), fontsize="large")
xlabel("Time Tick")
ylabel("Value")
savefig(loc,format='png')
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('-o', action="store", help="Plot generation option")
opts, args = parser.parse_args(sys.argv)
cfg = getconfig()
tmpdir = cfg["tmpdir"]
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
def load_data(tick, dotransform):
dm = DataMatrix(cfg["externaldata"])
dm.load()
#interpolate and flatten to matrix
# e.g. 300s = 5 minute ticks
dm.flatten(tick)
##dm.print_metrics()
#apply forward transformation to 0..1
if dotransform:
dm.transform_all()
dm.print_stats()
return dm
gen = opts.o
if gen == "sample-data":
target = gettarget(cfg["datadir"], 0)
data = readdat(target)
elif gen == "recon-error-all":
transformit = True
dm = load_data(30, transformit)
hiddenvars = 10
        #dm.removeattrs([1,5,9,13,14,17,21])
(data, reclog, sp) = run_spirit(dm, hiddenvars)
plot_recon_error_all(data, reclog, dm, 30, transformit)
hvlog = sp.gethvlog()
plot_hvs_all(hvlog, dm, hiddenvars)
elif gen == "normalization-effect":
dm = load_data(300, False)
normalization_effect(os.path.join(tmpdir,"normalizationeffect.png"),dm)
elif gen == "error-vs-minitial":
error_vs_minitial(os.path.join(tmpdir,"rcerr.png"),dm,8)
|
mrcaps/rainmon
|
code/spirit/runspirit.py
|
Python
|
bsd-3-clause
| 5,444
|
# -*- coding: utf-8 -*-
"""
celery.worker.heartbeat
~~~~~~~~~~~~~~~~~~~~~~~
This is the internal thread that sends heartbeat events
at regular intervals.
"""
from __future__ import absolute_import
from .state import SOFTWARE_INFO, active_requests, total_count
class Heart(object):
"""Timer sending heartbeats at regular intervals.
:param timer: Timer instance.
:param eventer: Event dispatcher used to send the event.
    :keyword interval: Time in seconds between heartbeats.
                       Default is 5 seconds.
"""
def __init__(self, timer, eventer, interval=None):
self.timer = timer
self.eventer = eventer
self.interval = float(interval or 5.0)
self.tref = None
# Make event dispatcher start/stop us when it's
# enabled/disabled.
self.eventer.on_enabled.add(self.start)
self.eventer.on_disabled.add(self.stop)
def _send(self, event):
return self.eventer.send(event, freq=self.interval,
active=len(active_requests),
processed=sum(total_count.itervalues()),
**SOFTWARE_INFO)
def start(self):
if self.eventer.enabled:
self._send('worker-online')
self.tref = self.timer.apply_interval(
self.interval * 1000.0, self._send, ('worker-heartbeat', ),
)
def stop(self):
if self.tref is not None:
self.timer.cancel(self.tref)
self.tref = None
if self.eventer.enabled:
self._send('worker-offline')
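# A hedged sketch of the wiring Heart expects: `timer` must offer
# apply_interval/cancel and `eventer` must offer enabled, on_enabled,
# on_disabled and send(). The stubs below are stand-ins only; the real
# objects come from the worker's timer and event dispatcher.
def _demo_heart():
    class _Timer(object):
        def apply_interval(self, msecs, fun, args):
            return (msecs, fun, args)   # a real timer would schedule fun
        def cancel(self, tref):
            pass
    class _Eventer(object):
        enabled = True
        def __init__(self):
            self.on_enabled = set()
            self.on_disabled = set()
        def send(self, event, **fields):
            print('%s %r' % (event, sorted(fields)))
    heart = Heart(_Timer(), _Eventer(), interval=2.0)
    heart.start()   # sends 'worker-online' and schedules 'worker-heartbeat'
    heart.stop()    # cancels the timer and sends 'worker-offline'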
|
mozilla/firefox-flicks
|
vendor-local/lib/python/celery/worker/heartbeat.py
|
Python
|
bsd-3-clause
| 1,643
|
from pulsar.apps.test import test_timeout
from lux.utils import test
class TestSql(test.AppTestCase):
config_file = 'tests.odm'
config_params = {'DATASTORE': 'sqlite://'}
def test_odm(self):
odm = self.app.odm()
tables = odm.tables()
self.assertTrue(tables)
def test_simple_session(self):
app = self.app
odm = app.odm()
with odm.begin() as session:
self.assertEqual(session.app, app)
user = odm.user(first_name='Luca')
session.add(user)
self.assertTrue(user.id)
self.assertEqual(user.first_name, 'Luca')
self.assertFalse(user.is_superuser())
def test_get_tasks(self):
request = self.client.get('/tasks')
response = request.response
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type,
'application/json; charset=utf-8')
|
tazo90/lux
|
tests/odm/sqlite.py
|
Python
|
bsd-3-clause
| 944
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Typed, ForwardTyped, Bool, observe, set_default
from enaml.core.declarative import d_
from .control import Control, ProxyControl
#: Delay the import of matplotlib until needed. This removes the hard
#: dependency on matplotlib for the rest of the Enaml code base.
def Figure():
from matplotlib.figure import Figure
return Figure
class ProxyMPLCanvas(ProxyControl):
""" The abstract definition of a proxy MPLCanvas object.
"""
#: A reference to the MPLCanvas declaration.
declaration = ForwardTyped(lambda: MPLCanvas)
def set_figure(self, figure):
raise NotImplementedError
def set_toolbar_visible(self, visible):
raise NotImplementedError
class MPLCanvas(Control):
""" A control which can be used to embded a matplotlib figure.
"""
#: The matplotlib figure to display in the widget.
figure = d_(ForwardTyped(Figure))
#: Whether or not the matplotlib figure toolbar is visible.
toolbar_visible = d_(Bool(False))
#: Matplotlib figures expand freely in height and width by default.
hug_width = set_default('ignore')
hug_height = set_default('ignore')
#: A reference to the ProxyMPLCanvas object.
proxy = Typed(ProxyMPLCanvas)
#--------------------------------------------------------------------------
# Observers
#--------------------------------------------------------------------------
@observe(('figure', 'toolbar_visible'))
def _update_proxy(self, change):
""" An observer which sends state change to the proxy.
"""
# The superclass handler implementation is sufficient.
super(MPLCanvas, self)._update_proxy(change)
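# A hedged sketch of supplying the `figure` member declared above: build a
# matplotlib Figure without pyplot, mirroring the lazy Figure() import. In a
# real application this would be bound to an MPLCanvas in an .enaml view.
def _demo_figure():
    from matplotlib.figure import Figure
    fig = Figure(figsize=(4, 3))
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2], [0, 1, 4])
    return fig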
|
ContinuumIO/ashiba
|
enaml/enaml/widgets/mpl_canvas.py
|
Python
|
bsd-3-clause
| 2,050
|
"""
fs.watch
========
Change notification support for FS.
This module defines a standard interface for FS subclasses that support change
notification callbacks. It also offers some WrapFS subclasses that can
simulate such an ability on top of an ordinary FS object.
An FS object that wants to be "watchable" must provide the following methods:
* add_watcher(callback,path="/",events=None,recursive=True)
Request that the given callback be executed in response to changes
to the given path. A specific set of change events can be specified.
This method returns a Watcher object.
* del_watcher(watcher_or_callback)
Remove the given watcher object, or any watchers associated with
the given callback.
If you would prefer to read changes from a filesystem in a blocking fashion
rather than using callbacks, you can use the function 'iter_changes' to obtain
an iterator over the change events.
"""
import sys
import weakref
import threading
import Queue
import traceback
from fs.path import *
from fs.errors import *
from fs.wrapfs import WrapFS
from fs.base import FS
from fs.filelike import FileWrapper
class EVENT(object):
"""Base class for change notification events."""
def __init__(self,fs,path):
self.fs = fs
if path is not None:
path = abspath(normpath(path))
self.path = path
def __str__(self):
return unicode(self).encode("utf8")
def __unicode__(self):
return u"<fs.watch.%s object (path='%s') at %s>" % (self.__class__.__name__,self.path,hex(id(self)))
def clone(self,fs=None,path=None):
if fs is None:
fs = self.fs
if path is None:
path = self.path
return self.__class__(fs,path)
class ACCESSED(EVENT):
"""Event fired when a file's contents are accessed."""
pass
class CREATED(EVENT):
"""Event fired when a new file or directory is created."""
pass
class REMOVED(EVENT):
"""Event fired when a file or directory is removed."""
pass
class MODIFIED(EVENT):
"""Event fired when a file or directory is modified."""
def __init__(self,fs,path,data_changed=False):
super(MODIFIED,self).__init__(fs,path)
self.data_changed = data_changed
def clone(self,fs=None,path=None,data_changed=None):
evt = super(MODIFIED,self).clone(fs,path)
if data_changed is None:
data_changed = self.data_changed
        evt.data_changed = data_changed
return evt
class MOVED_DST(EVENT):
"""Event fired when a file or directory is the target of a move."""
def __init__(self,fs,path,source=None):
super(MOVED_DST,self).__init__(fs,path)
if source is not None:
source = abspath(normpath(source))
self.source = source
def __unicode__(self):
return u"<fs.watch.%s object (path=%r,src=%r) at %s>" % (self.__class__.__name__,self.path,self.source,hex(id(self)))
def clone(self,fs=None,path=None,source=None):
evt = super(MOVED_DST,self).clone(fs,path)
if source is None:
source = self.source
evt.source = source
return evt
class MOVED_SRC(EVENT):
"""Event fired when a file or directory is the source of a move."""
def __init__(self,fs,path,destination=None):
super(MOVED_SRC,self).__init__(fs,path)
if destination is not None:
destination = abspath(normpath(destination))
self.destination = destination
def __unicode__(self):
return u"<fs.watch.%s object (path=%r,dst=%r) at %s>" % (self.__class__.__name__,self.path,self.destination,hex(id(self)))
def clone(self,fs=None,path=None,destination=None):
evt = super(MOVED_SRC,self).clone(fs,path)
if destination is None:
destination = self.destination
evt.destination = destination
return evt
class CLOSED(EVENT):
"""Event fired when the filesystem is closed."""
pass
class ERROR(EVENT):
"""Event fired when some miscellaneous error occurs."""
pass
class OVERFLOW(ERROR):
"""Event fired when some events could not be processed."""
pass
class Watcher(object):
"""Object encapsulating filesystem watch info."""
def __init__(self,fs,callback,path="/",events=None,recursive=True):
if events is None:
events = (EVENT,)
else:
events = tuple(events)
# Since the FS probably holds a reference to the Watcher, keeping
# a reference back to the FS would create a cycle containing a
# __del__ method. Use a weakref to avoid this.
self._w_fs = weakref.ref(fs)
self.callback = callback
self.path = abspath(normpath(path))
self.events = events
self.recursive = recursive
@property
def fs(self):
return self._w_fs()
def delete(self):
fs = self.fs
if fs is not None:
fs.del_watcher(self)
def handle_event(self,event):
if not isinstance(event,self.events):
return
if event.path is not None:
if not isprefix(self.path,event.path):
return
if not self.recursive:
if event.path != self.path:
if dirname(event.path) != self.path:
return
try:
self.callback(event)
except Exception:
print >>sys.stderr, "error in FS watcher callback", self.callback
traceback.print_exc()
class WatchableFSMixin(FS):
"""Mixin class providing watcher management functions."""
def __init__(self,*args,**kwds):
self._watchers = PathMap()
super(WatchableFSMixin,self).__init__(*args,**kwds)
def __getstate__(self):
state = super(WatchableFSMixin,self).__getstate__()
state.pop("_watchers",None)
return state
def __setstate__(self,state):
super(WatchableFSMixin,self).__setstate__(state)
self._watchers = PathMap()
def add_watcher(self,callback,path="/",events=None,recursive=True):
"""Add a watcher callback to the FS."""
w = Watcher(self,callback,path,events,recursive=recursive)
self._watchers.setdefault(path,[]).append(w)
return w
def del_watcher(self,watcher_or_callback):
"""Delete a watcher callback from the FS."""
if isinstance(watcher_or_callback,Watcher):
self._watchers[watcher_or_callback.path].remove(watcher_or_callback)
else:
for watchers in self._watchers.itervalues():
for i,watcher in enumerate(watchers):
if watcher.callback is watcher_or_callback:
del watchers[i]
break
def _find_watchers(self,callback):
"""Find watchers registered with the given callback."""
for watchers in self._watchers.itervalues():
for watcher in watchers:
if watcher.callback is callback:
yield watcher
def notify_watchers(self,event_or_class,path=None,*args,**kwds):
"""Notify watchers of the given event data."""
if isinstance(event_or_class,EVENT):
event = event_or_class
else:
event = event_or_class(self,path,*args,**kwds)
if path is None:
path = event.path
if path is None:
for watchers in self._watchers.itervalues():
for watcher in watchers:
watcher.handle_event(event)
else:
for prefix in recursepath(path):
if prefix in self._watchers:
for watcher in self._watchers[prefix]:
watcher.handle_event(event)
class WatchedFile(FileWrapper):
"""File wrapper for use with WatchableFS.
This file wrapper provides access to a file opened from a WatchableFS
instance, and fires MODIFIED events when the file is modified.
"""
def __init__(self,file,fs,path,mode=None):
super(WatchedFile,self).__init__(file,mode)
self.fs = fs
self.path = path
self.was_modified = False
def _write(self,string,flushing=False):
self.was_modified = True
return super(WatchedFile,self)._write(string,flushing=flushing)
def _truncate(self,size):
self.was_modified = True
return super(WatchedFile,self)._truncate(size)
def flush(self):
super(WatchedFile,self).flush()
        # Don't bother if Python is being torn down
if Watcher is not None:
if self.was_modified:
self.fs.notify_watchers(MODIFIED,self.path,True)
def close(self):
super(WatchedFile,self).close()
        # Don't bother if Python is being torn down
if Watcher is not None:
if self.was_modified:
self.fs.notify_watchers(MODIFIED,self.path,True)
class WatchableFS(WatchableFSMixin,WrapFS):
"""FS wrapper simulating watcher callbacks.
This FS wrapper intercepts method calls that modify the underlying FS
and generates appropriate notification events. It thus allows watchers
to monitor changes made through the underlying FS object, but not changes
that might be made through other interfaces to the same filesystem.
"""
def __init__(self,*args,**kwds):
super(WatchableFS,self).__init__(*args,**kwds)
def close(self):
super(WatchableFS,self).close()
self.notify_watchers(CLOSED)
def open(self,path,mode="r",**kwargs):
existed = self.wrapped_fs.isfile(path)
f = super(WatchableFS,self).open(path,mode,**kwargs)
if not existed:
self.notify_watchers(CREATED,path)
self.notify_watchers(ACCESSED,path)
return WatchedFile(f,self,path,mode)
def setcontents(self, path, data='', chunk_size=64*1024):
existed = self.wrapped_fs.isfile(path)
ret = super(WatchableFS, self).setcontents(path, data, chunk_size=chunk_size)
if not existed:
self.notify_watchers(CREATED,path)
self.notify_watchers(ACCESSED,path)
if data:
self.notify_watchers(MODIFIED,path,True)
return ret
def createfile(self, path):
existed = self.wrapped_fs.isfile(path)
ret = super(WatchableFS, self).createfile(path)
if not existed:
self.notify_watchers(CREATED,path)
self.notify_watchers(ACCESSED,path)
        return ret
def makedir(self,path,recursive=False,allow_recreate=False):
existed = self.wrapped_fs.isdir(path)
try:
super(WatchableFS,self).makedir(path,allow_recreate=allow_recreate)
except ParentDirectoryMissingError:
if not recursive:
raise
parent = dirname(path)
if parent != path:
self.makedir(dirname(path),recursive=True,allow_recreate=True)
super(WatchableFS,self).makedir(path,allow_recreate=allow_recreate)
if not existed:
self.notify_watchers(CREATED,path)
def remove(self,path):
super(WatchableFS,self).remove(path)
self.notify_watchers(REMOVED,path)
def removedir(self,path,recursive=False,force=False):
if not force:
for nm in self.listdir(path):
raise DirectoryNotEmptyError(path)
else:
for nm in self.listdir(path,dirs_only=True):
try:
self.removedir(pathjoin(path,nm),force=True)
except ResourceNotFoundError:
pass
for nm in self.listdir(path,files_only=True):
try:
self.remove(pathjoin(path,nm))
except ResourceNotFoundError:
pass
super(WatchableFS,self).removedir(path)
self.notify_watchers(REMOVED,path)
if recursive:
parent = dirname(path)
while parent and not self.listdir(parent):
super(WatchableFS,self).removedir(parent)
self.notify_watchers(REMOVED,parent)
parent = dirname(parent)
def rename(self,src,dst):
d_existed = self.wrapped_fs.exists(dst)
super(WatchableFS,self).rename(src,dst)
if d_existed:
self.notify_watchers(REMOVED,dst)
self.notify_watchers(MOVED_DST,dst,src)
self.notify_watchers(MOVED_SRC,src,dst)
def copy(self,src,dst,**kwds):
d = self._pre_copy(src,dst)
super(WatchableFS,self).copy(src,dst,**kwds)
self._post_copy(src,dst,d)
def copydir(self,src,dst,**kwds):
d = self._pre_copy(src,dst)
super(WatchableFS,self).copydir(src,dst,**kwds)
self._post_copy(src,dst,d)
def move(self,src,dst,**kwds):
d = self._pre_copy(src,dst)
super(WatchableFS,self).move(src,dst,**kwds)
self._post_copy(src,dst,d)
self._post_move(src,dst,d)
def movedir(self,src,dst,**kwds):
d = self._pre_copy(src,dst)
super(WatchableFS,self).movedir(src,dst,**kwds)
self._post_copy(src,dst,d)
self._post_move(src,dst,d)
def _pre_copy(self,src,dst):
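        # Snapshot the tree under src and dst before the operation: map each
        # relative path to True for directories and False for files, so the
        # _post_copy/_post_move hooks can diff against the new state.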
dst_paths = {}
try:
for (dirnm,filenms) in self.wrapped_fs.walk(dst):
dirnm = dirnm[len(dst)+1:]
dst_paths[dirnm] = True
for filenm in filenms:
dst_paths[filenm] = False
except ResourceNotFoundError:
pass
except ResourceInvalidError:
dst_paths[""] = False
src_paths = {}
try:
for (dirnm,filenms) in self.wrapped_fs.walk(src):
dirnm = dirnm[len(src)+1:]
src_paths[dirnm] = True
for filenm in filenms:
src_paths[pathjoin(dirnm,filenm)] = False
except ResourceNotFoundError:
pass
except ResourceInvalidError:
src_paths[""] = False
return (src_paths,dst_paths)
def _post_copy(self,src,dst,data):
(src_paths,dst_paths) = data
for src_path,isdir in sorted(src_paths.items()):
path = pathjoin(dst,src_path)
if src_path in dst_paths:
self.notify_watchers(MODIFIED,path,not isdir)
else:
self.notify_watchers(CREATED,path)
for dst_path,isdir in sorted(dst_paths.items()):
path = pathjoin(dst,dst_path)
if not self.wrapped_fs.exists(path):
self.notify_watchers(REMOVED,path)
def _post_move(self,src,dst,data):
(src_paths,dst_paths) = data
for src_path,isdir in sorted(src_paths.items(),reverse=True):
path = pathjoin(src,src_path)
self.notify_watchers(REMOVED,path)
def setxattr(self,path,name,value):
super(WatchableFS,self).setxattr(path,name,value)
self.notify_watchers(MODIFIED,path,False)
def delxattr(self,path,name):
        super(WatchableFS,self).delxattr(path,name)
self.notify_watchers(MODIFIED,path,False)
class PollingWatchableFS(WatchableFS):
"""FS wrapper simulating watcher callbacks by periodic polling.
    This FS wrapper augments the functionality of WatchableFS by periodically
polling the underlying FS for changes. It is thus capable of detecting
changes made to the underlying FS via other interfaces, albeit with a
(configurable) delay to account for the polling interval.
"""
def __init__(self,wrapped_fs,poll_interval=60*5):
super(PollingWatchableFS,self).__init__(wrapped_fs)
self.poll_interval = poll_interval
self.add_watcher(self._on_path_modify,"/",(CREATED,MOVED_DST,))
self.add_watcher(self._on_path_modify,"/",(MODIFIED,ACCESSED,))
self.add_watcher(self._on_path_delete,"/",(REMOVED,MOVED_SRC,))
self._path_info = PathMap()
self._poll_thread = threading.Thread(target=self._poll_for_changes)
self._poll_cond = threading.Condition()
self._poll_close_event = threading.Event()
self._poll_thread.start()
def close(self):
self._poll_close_event.set()
self._poll_thread.join()
super(PollingWatchableFS,self).close()
def _on_path_modify(self,event):
path = event.path
try:
try:
self._path_info[path] = self.wrapped_fs.getinfo(path)
except ResourceNotFoundError:
self._path_info.clear(path)
except FSError:
pass
def _on_path_delete(self,event):
self._path_info.clear(event.path)
def _poll_for_changes(self):
try:
while not self._poll_close_event.isSet():
# Walk all directories looking for changes.
# Come back to any that give us an error.
error_paths = set()
for dirnm in self.wrapped_fs.walkdirs():
if self._poll_close_event.isSet():
break
try:
self._check_for_changes(dirnm)
except FSError:
error_paths.add(dirnm)
# Retry the directories that gave us an error, until
# we have successfully updated them all
while error_paths and not self._poll_close_event.isSet():
dirnm = error_paths.pop()
if self.wrapped_fs.isdir(dirnm):
try:
self._check_for_changes(dirnm)
except FSError:
error_paths.add(dirnm)
# Notify that we have completed a polling run
self._poll_cond.acquire()
self._poll_cond.notifyAll()
self._poll_cond.release()
# Sleep for the specified interval, or until closed.
self._poll_close_event.wait(timeout=self.poll_interval)
except FSError:
if not self.closed:
raise
def _check_for_changes(self,dirnm):
# Check the metadata for the directory itself.
new_info = self.wrapped_fs.getinfo(dirnm)
try:
old_info = self._path_info[dirnm]
except KeyError:
self.notify_watchers(CREATED,dirnm)
else:
if new_info != old_info:
self.notify_watchers(MODIFIED,dirnm,False)
# Check the metadata for each file in the directory.
# We assume that if the file's data changes, something in its
# metadata will also change; don't want to read through each file!
# Subdirectories will be handled by the outer polling loop.
for filenm in self.wrapped_fs.listdir(dirnm,files_only=True):
if self._poll_close_event.isSet():
return
fpath = pathjoin(dirnm,filenm)
new_info = self.wrapped_fs.getinfo(fpath)
try:
old_info = self._path_info[fpath]
except KeyError:
self.notify_watchers(CREATED,fpath)
else:
was_accessed = False
was_modified = False
for (k,v) in new_info.iteritems():
if k not in old_info:
was_modified = True
break
elif old_info[k] != v:
if k in ("accessed_time","st_atime",):
was_accessed = True
elif k:
was_modified = True
break
else:
for k in old_info:
if k not in new_info:
was_modified = True
break
if was_modified:
self.notify_watchers(MODIFIED,fpath,True)
elif was_accessed:
self.notify_watchers(ACCESSED,fpath)
# Check for deletion of cached child entries.
for childnm in self._path_info.iternames(dirnm):
if self._poll_close_event.isSet():
return
cpath = pathjoin(dirnm,childnm)
if not self.wrapped_fs.exists(cpath):
self.notify_watchers(REMOVED,cpath)
def ensure_watchable(fs,wrapper_class=PollingWatchableFS,*args,**kwds):
"""Ensure that the given fs supports watching, simulating it if necessary.
Given an FS object, this function returns an equivalent FS that has support
for watcher callbacks. This may be the original object if it supports them
natively, or a wrapper class if they must be simulated.
"""
if isinstance(fs,wrapper_class):
return fs
try:
w = fs.add_watcher(lambda e: None,"/",recursive=False)
except (AttributeError,FSError):
return wrapper_class(fs,*args,**kwds)
else:
fs.del_watcher(w)
return fs
class iter_changes(object):
"""Blocking iterator over the change events produced by an FS.
This class can be used to transform the callback-based watcher mechanism
into a blocking stream of events. It operates by having the callbacks
push events onto a queue as they come in, then reading them off one at a
time.
"""
def __init__(self,fs=None,path="/",events=None,**kwds):
self.closed = False
self._queue = Queue.Queue()
self._watching = set()
if fs is not None:
self.add_watcher(fs,path,events,**kwds)
def __iter__(self):
return self
def __del__(self):
self.close()
def next(self,timeout=None):
if not self._watching:
raise StopIteration
try:
event = self._queue.get(timeout=timeout)
except Queue.Empty:
raise StopIteration
if event is None:
raise StopIteration
if isinstance(event,CLOSED):
event.fs.del_watcher(self._enqueue)
self._watching.remove(event.fs)
return event
def close(self):
if not self.closed:
self.closed = True
for fs in self._watching:
fs.del_watcher(self._enqueue)
self._queue.put(None)
def add_watcher(self,fs,path="/",events=None,**kwds):
w = fs.add_watcher(self._enqueue,path,events,**kwds)
self._watching.add(fs)
return w
def _enqueue(self,event):
self._queue.put(event)
def del_watcher(self,watcher):
for fs in self._watching:
try:
fs.del_watcher(watcher)
break
except ValueError:
pass
else:
raise ValueError("watcher not found: %s" % (watcher,))
|
atty303/pyfilesystem
|
fs/watch.py
|
Python
|
bsd-3-clause
| 22,882
|
"""The suite of window functions."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import special, linalg
from scipy.fftpack import fft
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'hann', 'get_window']
def boxcar(M, sym=True):
"""Return a boxcar or rectangular window.
Included for completeness, this is equivalent to no window at all.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
Whether the window is symmetric. (Has no effect for boxcar.)
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.boxcar(51)
>>> plt.plot(window)
>>> plt.title("Boxcar window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the boxcar window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return np.ones(M, float)
def triang(M, sym=True):
"""Return a triangular window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.triang(51)
>>> plt.plot(window)
>>> plt.title("Triangular window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the triangular window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(1, (M + 1) // 2 + 1)
if M % 2 == 0:
w = (2 * n - 1.0) / M
w = np.r_[w, w[::-1]]
else:
w = 2 * n / (M + 1.0)
w = np.r_[w, w[-2::-1]]
if not sym and not odd:
w = w[:-1]
return w
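# Editor's note: triang (and every window below) builds its periodic form with
# the same trick: when sym=False and M is even, compute a symmetric window on
# M + 1 points and drop the last sample. A quick self-check, assuming only
# this module and NumPy:
#
#     assert np.allclose(triang(8, sym=False), triang(9, sym=True)[:-1])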
def parzen(M, sym=True):
"""Return a Parzen window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.parzen(51)
>>> plt.plot(window)
>>> plt.title("Parzen window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Parzen window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
na = np.extract(n < -(M - 1) / 4.0, n)
nb = np.extract(abs(n) <= (M - 1) / 4.0, n)
wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0
wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 +
6 * (np.abs(nb) / (M / 2.0)) ** 3.0)
w = np.r_[wa, wb, wa[::-1]]
if not sym and not odd:
w = w[:-1]
return w
def bohman(M, sym=True):
"""Return a Bohman window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bohman(51)
>>> plt.plot(window)
>>> plt.title("Bohman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bohman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
fac = np.abs(np.linspace(-1, 1, M)[1:-1])
w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac)
w = np.r_[0, w, 0]
if not sym and not odd:
w = w[:-1]
return w
def blackman(M, sym=True):
"""
Return a Blackman window.
    The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackman(51)
>>> plt.plot(window)
>>> plt.title("Blackman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's blackman function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) +
0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
if not sym and not odd:
w = w[:-1]
return w
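# Editor's note: blackman, nuttall, blackmanharris and flattop are all
# generalized cosine-sum windows w(n) = sum_k (-1)**k * a[k] * cos(k * fac).
# A hedged equivalent, ignoring the M < 2 and sym plumbing shared above (the
# helper name is hypothetical, not part of this module):
def _cosine_sum(M, a):
    n = np.arange(0, M)
    fac = n * 2 * np.pi / (M - 1.0)
    return sum((-1) ** k * ak * np.cos(k * fac) for k, ak in enumerate(a))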
def nuttall(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window according to Nuttall.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.nuttall(51)
>>> plt.plot(window)
>>> plt.title("Nuttall window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Nuttall window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.3635819, 0.4891775, 0.1365995, 0.0106411]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def blackmanharris(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackmanharris(51)
>>> plt.plot(window)
>>> plt.title("Blackman-Harris window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman-Harris window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.35875, 0.48829, 0.14128, 0.01168]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def flattop(M, sym=True):
"""Return a flat top window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.flattop(51)
>>> plt.plot(window)
>>> plt.title("Flat top window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the flat top window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.2156, 0.4160, 0.2781, 0.0836, 0.0069]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac) +
a[4] * np.cos(4 * fac))
if not sym and not odd:
w = w[:-1]
return w
def bartlett(M, sym=True):
"""
Return a Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The triangular window, with the maximum value normalized to 1
(though the value 1 does not appear if the number of samples is even
and sym is True), with the first and last samples equal to zero.
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means"removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bartlett(51)
>>> plt.plot(window)
>>> plt.title("Bartlett window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's bartlett function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = np.where(np.less_equal(n, (M - 1) / 2.0),
2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def hann(M, sym=True):
"""
Return a Hann window.
The Hann window is a taper formed by using a raised cosine or sine-squared
with ends that touch zero.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hann window is defined as
.. math:: w(n) = 0.5 - 0.5 \\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The window was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. It is sometimes erroneously referred to as
the "Hanning" window, from the use of "hann" as a verb in the original
paper and confusion with the very similar Hamming window.
Most references to the Hann window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hann(51)
>>> plt.plot(window)
>>> plt.title("Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hanning function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.5 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
hanning = hann
def barthann(M, sym=True):
"""Return a modified Bartlett-Hann window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.barthann(51)
>>> plt.plot(window)
>>> plt.title("Bartlett-Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett-Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
fac = np.abs(n / (M - 1.0) - 0.5)
w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac)
if not sym and not odd:
w = w[:-1]
return w
def hamming(M, sym=True):
r"""Return a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hamming window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hamming function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
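# Editor's note: hann and hamming are the alpha = 0.5 and alpha = 0.54 members
# of the raised-cosine family w(n) = alpha - (1 - alpha)*cos(2*pi*n/(M - 1));
# hann's endpoints are exactly zero, hamming's are 0.08, trading endpoint
# continuity for a lower first side lobe. Hypothetical helper, for
# illustration only (ignores the M < 2 and sym plumbing shared above):
def _raised_cosine(M, alpha):
    n = np.arange(0, M)
    return alpha - (1 - alpha) * np.cos(2.0 * np.pi * n / (M - 1))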
def kaiser(M, beta, sym=True):
r"""Return a Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
beta : float
Shape parameter, determines trade-off between main-lobe width and
side lobe level. As beta gets large, the window narrows.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
\right)/I_0(\beta)
with
.. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hann
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.kaiser(51, beta=14)
>>> plt.plot(window)
>>> plt.title(r"Kaiser window ($\beta$=14)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's kaiser function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
alpha = (M - 1) / 2.0
w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) /
special.i0(beta))
if not sym and not odd:
w = w[:-1]
return w
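# Editor's sketch: a common way to choose `beta` is Kaiser's empirical formula
# for a target side-lobe attenuation A in dB (scipy exposes this elsewhere as
# `kaiser_beta`; reproduced here only as an illustration):
def _kaiser_beta(A):
    if A > 50:
        return 0.1102 * (A - 8.7)
    elif A > 21:
        return 0.5842 * (A - 21) ** 0.4 + 0.07886 * (A - 21)
    return 0.0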
def gaussian(M, std, sym=True):
r"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
if not sym and not odd:
w = w[:-1]
return w
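# Editor's note: `std` above is measured in samples and is independent of M,
# so gaussian(51, std=7) and gaussian(101, std=7) have the same absolute
# width; only how much of the tail gets truncated changes with M.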
def general_gaussian(M, p, sig, sym=True):
r"""Return a window with a generalized Gaussian shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
p : float
Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
the same shape as the Laplace distribution.
sig : float
The standard deviation, sigma.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Notes
-----
The generalized Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }
the half-power point is at
.. math:: (2 \log(2))^{1/(2 p)} \sigma
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.general_gaussian(51, p=1.5, sig=7)
>>> plt.plot(window)
>>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Freq. resp. of the gen. Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p))
if not sym and not odd:
w = w[:-1]
return w
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
"""Return a Dolph-Chebyshev window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
at : float
Attenuation (in dB).
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.chebwin(51, at=100)
>>> plt.plot(window)
>>> plt.title("Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# compute the parameter beta
order = M - 1.0
beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
k = np.r_[0:M] * 1.0
x = beta * np.cos(np.pi * k / M)
# Find the window's DFT coefficients
# Use analytic definition of Chebyshev polynomial instead of expansion
# from scipy.special. Using the expansion in scipy.special leads to errors.
p = np.zeros(x.shape)
p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
# Appropriate IDFT and filling up
# depending on even/odd M
if M % 2:
w = np.real(fft(p))
n = (M + 1) // 2
w = w[:n] / w[0]
w = np.concatenate((w[n - 1:0:-1], w))
else:
p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
w = np.real(fft(p))
n = M // 2 + 1
w = w / w[1]
w = np.concatenate((w[n - 1:0:-1], w[1:n]))
if not sym and not odd:
w = w[:-1]
return w
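# Editor's note: `at` is passed through np.abs() when beta is computed above,
# so chebwin(51, at=100) and chebwin(51, at=-100) yield the same window; the
# attenuation is always treated as a magnitude in dB.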
def slepian(M, width, sym=True):
"""Return a digital Slepian (DPSS) window.
Used to maximize the energy concentration in the main lobe. Also called
the digital prolate spheroidal sequence (DPSS).
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
width : float
Bandwidth
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.slepian(51, width=0.3)
>>> plt.plot(window)
>>> plt.title("Slepian (DPSS) window (BW=0.3)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Slepian window (BW=0.3)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if (M * width > 27.38):
raise ValueError("Cannot reliably obtain slepian sequences for"
" M*width > 27.38.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
twoF = width / 2.0
alpha = (M - 1) / 2.0
m = np.arange(0, M) - alpha
n = m[:, np.newaxis]
k = m[np.newaxis, :]
AF = twoF * special.sinc(twoF * (n - k))
[lam, vec] = linalg.eig(AF)
ind = np.argmax(abs(lam), axis=-1)
w = np.abs(vec[:, ind])
w = w / max(w)
if not sym and not odd:
w = w[:-1]
return w
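# Editor's note: the M * width > 27.38 guard above reflects the numerical
# limits of this dense-eigenproblem construction; presumably the dominant
# eigenvalue becomes indistinguishable from 1 in double precision for larger
# time-bandwidth products, leaving the maximizing eigenvector ill-determined.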
def get_window(window, Nx, fftbins=True):
"""
Return a window.
Parameters
----------
window : string, float, or tuple
The type of window to create. See below for more details.
Nx : int
The number of samples in the window.
fftbins : bool, optional
If True, create a "periodic" window ready to use with ifftshift
and be multiplied by the result of an fft (SEE ALSO fftfreq).
Returns
-------
get_window : ndarray
Returns a window of length `Nx` and type `window`
Notes
-----
Window types:
boxcar, triang, blackman, hamming, hann, bartlett, flattop,
parzen, bohman, blackmanharris, nuttall, barthann,
kaiser (needs beta), gaussian (needs std),
general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation)
If the window requires no parameters, then `window` can be a string.
If the window requires parameters, then `window` must be a tuple
with the first argument the string name of the window, and the next
arguments the needed parameters.
If `window` is a floating point number, it is interpreted as the beta
parameter of the kaiser window.
Each of the window types listed above is also the name of
a function that can be called directly to create a window of
that type.
Examples
--------
>>> from scipy import signal
>>> signal.get_window('triang', 7)
array([ 0.25, 0.5 , 0.75, 1. , 0.75, 0.5 , 0.25])
>>> signal.get_window(('kaiser', 4.0), 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
>>> signal.get_window(4.0, 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
"""
sym = not fftbins
try:
beta = float(window)
except (TypeError, ValueError):
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, str):
            if window in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss',
                          'general gaussian', 'general_gaussian',
                          'general gauss', 'general_gauss', 'ggs',
                          'slepian', 'optimal', 'slep', 'dss', 'dpss',
                          'chebwin', 'cheb']:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.")
else:
winstr = window
if winstr in ['blackman', 'black', 'blk']:
winfunc = blackman
elif winstr in ['triangle', 'triang', 'tri']:
winfunc = triang
elif winstr in ['hamming', 'hamm', 'ham']:
winfunc = hamming
elif winstr in ['bartlett', 'bart', 'brt']:
winfunc = bartlett
elif winstr in ['hanning', 'hann', 'han']:
winfunc = hann
elif winstr in ['blackmanharris', 'blackharr', 'bkh']:
winfunc = blackmanharris
elif winstr in ['parzen', 'parz', 'par']:
winfunc = parzen
elif winstr in ['bohman', 'bman', 'bmn']:
winfunc = bohman
elif winstr in ['nuttall', 'nutl', 'nut']:
winfunc = nuttall
elif winstr in ['barthann', 'brthan', 'bth']:
winfunc = barthann
elif winstr in ['flattop', 'flat', 'flt']:
winfunc = flattop
elif winstr in ['kaiser', 'ksr']:
winfunc = kaiser
elif winstr in ['gaussian', 'gauss', 'gss']:
winfunc = gaussian
elif winstr in ['general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs']:
winfunc = general_gaussian
elif winstr in ['boxcar', 'box', 'ones', 'rect', 'rectangular']:
winfunc = boxcar
elif winstr in ['slepian', 'slep', 'optimal', 'dpss', 'dss']:
winfunc = slepian
elif winstr in ['chebwin', 'cheb']:
winfunc = chebwin
else:
raise ValueError("Unknown window type.")
params = (Nx,) + args + (sym,)
else:
winfunc = kaiser
params = (Nx, beta, sym)
return winfunc(*params)
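# Editor's note: get_window defaults to fftbins=True, i.e. the periodic form
# meant for spectral analysis; pass fftbins=False for the symmetric
# filter-design form. A quick self-check, assuming only this module:
#
#     assert np.allclose(get_window('hann', 8), hann(9, sym=True)[:-1])
#     assert np.allclose(get_window('hann', 8, fftbins=False), hann(8))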
| sargas/scipy | scipy/signal/windows.py | Python | bsd-3-clause | 44,546 |
#! /usr/bin/python
#
# See README for usage instructions.
import sys
import os
import subprocess
# We must use setuptools, not distutils, because we need to use the
# namespace_packages option for the "google" package.
try:
from setuptools import setup, Extension
except ImportError:
try:
sys.stderr.write("running ez_setup\n")
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, Extension
except ImportError:
sys.stderr.write(
"Could not import setuptools; make sure you have setuptools or "
"ez_setup installed.\n")
raise
from distutils.command.clean import clean as _clean
from distutils.command.build_py import build_py as _build_py
from distutils.spawn import find_executable
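# Editor's note: the exec below runs utils.py rather than importing it, so
# this setup script can use its helpers (e.g. the print_ function used later)
# before the package itself is installed.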
exec(open('google/protobuf/internal/utils.py').read())
maintainer_email = "protobuf@googlegroups.com"
# Find the Protocol Compiler.
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
protoc = os.environ['PROTOC']
elif os.path.exists("../src/protoc"):
protoc = "../src/protoc"
elif os.path.exists("../src/protoc.exe"):
protoc = "../src/protoc.exe"
elif os.path.exists("../vsprojects/Debug/protoc.exe"):
protoc = "../vsprojects/Debug/protoc.exe"
elif os.path.exists("../vsprojects/Release/protoc.exe"):
protoc = "../vsprojects/Release/protoc.exe"
elif os.path.exists("../../msvc2012/lib/64/Debug/protoc.exe"):
protoc = "../../msvc2012/lib/64/Debug/protoc.exe"
elif os.path.exists("../../msvc2012/lib/64/Release/protoc.exe"):
protoc = "../../msvc2012/lib/64/Release/protoc.exe"
else:
protoc = find_executable("protoc")
def generate_proto(source):
"""Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input."""
output = source.replace(".proto", "_pb2.py").replace("../src/", "")
if (not os.path.exists(output) or
(os.path.exists(source) and
os.path.getmtime(source) > os.path.getmtime(output))):
print_("Generating %s..." % output)
if not os.path.exists(source):
sys.stderr.write("Can't find required file: %s\n" % source)
sys.exit(-1)
    if protoc is None:
sys.stderr.write(
"protoc is not installed nor found in ../src. Please compile it "
"or install the binary package.\n")
sys.exit(-1)
protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ]
if subprocess.call(protoc_command) != 0:
sys.exit(-1)
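# Editor's note: generate_proto() behaves like a make rule (skip when the
# _pb2.py output is newer than its .proto input); for unittest.proto it
# shells out to, e.g.:
#
#     protoc -I../src -I. --python_out=. ../src/google/protobuf/unittest.proto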
def GenerateUnittestProtos():
generate_proto("../src/google/protobuf/unittest.proto")
generate_proto("../src/google/protobuf/unittest_custom_options.proto")
generate_proto("../src/google/protobuf/unittest_import.proto")
generate_proto("../src/google/protobuf/unittest_import_public.proto")
generate_proto("../src/google/protobuf/unittest_mset.proto")
generate_proto("../src/google/protobuf/unittest_no_generic_services.proto")
generate_proto("google/protobuf/internal/test_bad_identifiers.proto")
generate_proto("google/protobuf/internal/more_extensions.proto")
generate_proto("google/protobuf/internal/more_extensions_dynamic.proto")
generate_proto("google/protobuf/internal/more_messages.proto")
generate_proto("google/protobuf/internal/factory_test1.proto")
generate_proto("google/protobuf/internal/factory_test2.proto")
def MakeTestSuite():
# This is apparently needed on some systems to make sure that the tests
# work even if a previous version is already installed.
if 'google' in sys.modules:
del sys.modules['google']
GenerateUnittestProtos()
import unittest
import google.protobuf.internal.generator_test as generator_test
import google.protobuf.internal.descriptor_test as descriptor_test
import google.protobuf.internal.reflection_test as reflection_test
import google.protobuf.internal.service_reflection_test \
as service_reflection_test
import google.protobuf.internal.text_format_test as text_format_test
import google.protobuf.internal.wire_format_test as wire_format_test
import google.protobuf.internal.unknown_fields_test as unknown_fields_test
import google.protobuf.internal.descriptor_database_test \
as descriptor_database_test
import google.protobuf.internal.descriptor_pool_test as descriptor_pool_test
import google.protobuf.internal.message_factory_test as message_factory_test
import google.protobuf.internal.message_cpp_test as message_cpp_test
import google.protobuf.internal.reflection_cpp_generated_test \
as reflection_cpp_generated_test
loader = unittest.defaultTestLoader
suite = unittest.TestSuite()
for test in [ generator_test,
descriptor_test,
reflection_test,
service_reflection_test,
text_format_test,
wire_format_test,
unknown_fields_test,
descriptor_database_test,
descriptor_pool_test,
message_factory_test ]:
suite.addTest(loader.loadTestsFromModule(test))
return suite
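# Editor's note: MakeTestSuite is referenced below via
# test_suite = 'setup.MakeTestSuite', so `python setup.py test` first
# regenerates the *_pb2 modules and then loads these test modules.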
class clean(_clean):
def run(self):
# Delete generated files in the code tree.
for (dirpath, dirnames, filenames) in os.walk("."):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
if filepath.endswith("_pb2.py") or filepath.endswith(".pyc") or \
filepath.endswith(".so") or filepath.endswith(".o") or \
filepath.endswith('google/protobuf/compiler/__init__.py'):
os.remove(filepath)
# _clean is an old-style class, so super() doesn't work.
_clean.run(self)
class build_py(_build_py):
def run(self):
# Generate necessary .proto file if it doesn't exist.
generate_proto("../src/google/protobuf/descriptor.proto")
generate_proto("../src/google/protobuf/compiler/plugin.proto")
# Make sure google.protobuf.compiler is a valid package.
open('google/protobuf/compiler/__init__.py', 'a').close()
# _build_py is an old-style class, so super() doesn't work.
_build_py.run(self)
if __name__ == '__main__':
ext_module_list = []
# C++ implementation extension
if os.getenv("PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION", "python") == "cpp":
print_("Using EXPERIMENTAL C++ Implmenetation.")
ext_module_list.append(Extension(
"google.protobuf.internal._net_proto2___python",
[ "google/protobuf/pyext/python_descriptor.cc",
"google/protobuf/pyext/python_protobuf.cc",
"google/protobuf/pyext/python-proto2.cc" ],
include_dirs = [ "." ],
libraries = [ "protobuf" ]))
setup(name = 'protobuf-py3',
version = '2.5.1',
packages = [ 'google' ],
namespace_packages = [ 'google' ],
test_suite = 'setup.MakeTestSuite',
# Must list modules explicitly so that we don't install tests.
py_modules = [
'google.protobuf.internal.api_implementation',
'google.protobuf.internal.containers',
'google.protobuf.internal.cpp_message',
'google.protobuf.internal.decoder',
'google.protobuf.internal.encoder',
'google.protobuf.internal.enum_type_wrapper',
'google.protobuf.internal.message_listener',
'google.protobuf.internal.python_message',
'google.protobuf.internal.type_checkers',
'google.protobuf.internal.utils',
'google.protobuf.internal.wire_format',
'google.protobuf.descriptor',
'google.protobuf.descriptor_pb2',
'google.protobuf.compiler.plugin_pb2',
'google.protobuf.message',
'google.protobuf.descriptor_database',
'google.protobuf.descriptor_pool',
'google.protobuf.message_factory',
'google.protobuf.reflection',
'google.protobuf.service',
'google.protobuf.service_reflection',
'google.protobuf.text_format' ],
cmdclass = { 'clean': clean, 'build_py': build_py },
install_requires = ['setuptools'],
ext_modules = ext_module_list,
url = 'https://github.com/GreatFruitOmsk/protobuf-py3',
maintainer = maintainer_email,
maintainer_email = 'kulakov.ilya@gmail.com',
license = 'New BSD License',
description = 'Protocol Buffers',
long_description =
"Unofficial version of Google Protocol Buffers that provides support for Python 3.",
)
| kastnerkyle/protobuf-py3 | python/setup.py | Python | bsd-3-clause | 8,425 |
import asyncio
import logging
import json
import unittest
import socket
import random
import ddcm
from . import const
from . import utils
class StoreTest(unittest.TestCase):
async def handle_events(self, service):
pong_count = 0
self.ping_sent = []
self.ping_recved = []
self.pong_sent = []
self.pong_recved = []
self.pair_sent, self.pair_recved = [], []
while pong_count < const.test.STORE_COUNT:
event = await service.debugQueue.get()
if event["type"] is ddcm.const.kad.event.SEND_STORE:
self.ping_sent.append(event["data"]["echo"])
self.pair_sent.append(event["data"]["data"])
if event["type"] is ddcm.const.kad.event.HANDLE_PONG_STORE:
self.pong_recved.append(event["data"]["echo"])
pong_count += 1
if event["type"] is ddcm.const.kad.event.SEND_PONG_STORE:
self.pong_sent.append(event["data"]["echo"])
if event["type"] is ddcm.const.kad.event.HANDLE_STORE:
self.ping_recved.append(event["data"]["echo"])
self.pair_recved.append(event["data"]["data"])
def StoreTestCase(func):
async def _deco(*args, **kwargs):
ret = await func(*args, **kwargs)
loop, service, config, self = kwargs['loop'], kwargs['service'], kwargs['config'], kwargs['self']
await asyncio.wait(
[asyncio.ensure_future(
self.handle_events(service)
)],
timeout = const.test.STORE_TIMEOUT
)
for event_list in [self.ping_sent, self.ping_recved, self.pong_sent, self.pong_recved, self.pair_sent, self.pair_recved]:
event_list.sort()
self.assertEqual(len(self.ping_sent), const.test.STORE_COUNT)
self.assertEqual(len(self.ping_recved), const.test.STORE_COUNT)
self.assertEqual(len(self.pong_sent), const.test.STORE_COUNT)
self.assertEqual(len(self.pong_recved), const.test.STORE_COUNT)
self.assertEqual(self.ping_sent, self.pong_recved)
self.assertEqual(self.ping_sent, self.ping_recved)
self.assertEqual(self.ping_sent, self.pong_sent)
for sent, recved in zip(self.pair_sent, self.pair_recved):
self.assertEqual(sent, recved)
for data in self.pair_sent:
self.assertEqual(await service.storage.exist(data[0]), True)
self.assertEqual(await service.storage.get(data[0]), data[1])
return ret
return _deco
def get_key_pair(self):
return bytes(random.getrandbits(8) for i in range(20)), bytes(random.getrandbits(8) for i in range(120))
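    # Editor's note: the 20-byte key matches a 160-bit Kademlia-style key ID;
    # the 120-byte value is just an arbitrary test payload.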
@utils.NetworkTestCase
@StoreTestCase
async def test_store(self, loop, config, service):
await asyncio.wait(
[service.tcpService.call.store(ddcm.Remote(
host = "127.0.0.1",
port = config["server"]["port"]
), *self.get_key_pair()) for i in range(const.test.STORE_COUNT)]
)
"""
@utils.NetworkTestCase
@PingTestCase
async def test_ping_ipv6(self, loop, config, service):
await asyncio.wait(
[service.tcpService.call.ping(ddcm.Remote(
host = "::1",
port = config["server"]["port"]
)) for i in range(const.test.PING_COUNT)]
)
"""
| SkyZH/ddcm-protocol | ddcm/test/store_test.py | Python | bsd-3-clause | 3,487 |
# Copyright (c) 2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the `points` module."""
import logging
import numpy as np
import pytest
from scipy.spatial import cKDTree, Delaunay
from metpy.cbook import get_test_data
from metpy.interpolate import (interpolate_to_points, inverse_distance_to_points,
natural_neighbor_to_points)
from metpy.interpolate.geometry import dist_2, find_natural_neighbors
from metpy.interpolate.points import barnes_point, cressman_point, natural_neighbor_point
from metpy.testing import assert_almost_equal, assert_array_almost_equal
from metpy.units import units
logging.getLogger('metpy.interpolate.points').setLevel(logging.ERROR)
@pytest.fixture()
def test_data():
r"""Return data used for tests in this file."""
x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)
y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)
z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156,
0.225, 3.364], dtype=float)
return x, y, z
@pytest.fixture()
def test_points():
r"""Return point locations used for tests in this file."""
with get_test_data('interpolation_test_grid.npz') as fobj:
data = np.load(fobj)
return np.stack([data['xg'].reshape(-1), data['yg'].reshape(-1)], axis=1)
def test_nn_point(test_data):
r"""Test find natural neighbors for a point interpolation function."""
xp, yp, z = test_data
tri = Delaunay(list(zip(xp, yp)))
sim_gridx = [30]
sim_gridy = [30]
members, tri_info = find_natural_neighbors(tri,
list(zip(sim_gridx, sim_gridy)))
val = natural_neighbor_point(xp, yp, z, (sim_gridx[0], sim_gridy[0]),
tri, members[0], tri_info)
truth = 1.009
assert_almost_equal(truth, val, 3)
def test_cressman_point(test_data):
r"""Test Cressman interpolation for a point function."""
xp, yp, z = test_data
r = 40
obs_tree = cKDTree(list(zip(xp, yp)))
indices = obs_tree.query_ball_point([30, 30], r=r)
dists = dist_2(30, 30, xp[indices], yp[indices])
values = z[indices]
truth = 1.05499444404
value = cressman_point(dists, values, r)
assert_almost_equal(truth, value)
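# Editor's note: cressman_point applies the classic Cressman (1959) weights
# w_i = (r**2 - d_i**2) / (r**2 + d_i**2) to every observation within radius
# r of the target point; the hard-coded truth value was derived from that
# scheme once and is only checked for regression here.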
def test_barnes_point(test_data):
r"""Test Barnes interpolation for a point function."""
xp, yp, z = test_data
r = 40
obs_tree = cKDTree(list(zip(xp, yp)))
indices = obs_tree.query_ball_point([60, 60], r=r)
dists = dist_2(60, 60, xp[indices], yp[indices])
values = z[indices]
assert_almost_equal(barnes_point(dists, values, 5762.7), 4.0871824)
def test_natural_neighbor_to_points(test_data, test_points):
r"""Test natural neighbor interpolation to grid function."""
xp, yp, z = test_data
obs_points = np.vstack([xp, yp]).transpose()
img = natural_neighbor_to_points(obs_points, z, test_points)
with get_test_data('nn_bbox0to100.npz') as fobj:
truth = np.load(fobj)['img'].reshape(-1)
assert_array_almost_equal(truth, img)
def test_inverse_distance_to_points_invalid(test_data, test_points):
"""Test that inverse_distance_to_points raises when given an invalid method."""
xp, yp, z = test_data
obs_points = np.vstack([xp, yp]).transpose()
with pytest.raises(ValueError):
inverse_distance_to_points(obs_points, z, test_points, kind='shouldraise', r=40)
@pytest.mark.parametrize('assume_units', [None, 'mbar'])
@pytest.mark.parametrize('method', ['cressman', 'barnes'])
def test_inverse_distance_to_points(method, assume_units, test_data, test_points):
r"""Test inverse distance interpolation to points function."""
xp, yp, z = test_data
obs_points = np.vstack([xp, yp]).transpose()
extra_kw, test_file = {'cressman': ({'r': 20, 'min_neighbors': 1}, 'cressman_r20_mn1.npz'),
'barnes': ({'r': 40, 'kappa': 100}, 'barnes_r40_k100.npz')}[method]
with get_test_data(test_file) as fobj:
truth = np.load(fobj)['img'].reshape(-1)
if assume_units:
z = units.Quantity(z, assume_units)
truth = units.Quantity(truth, assume_units)
img = inverse_distance_to_points(obs_points, z, test_points, kind=method, **extra_kw)
assert_array_almost_equal(truth, img)
def test_interpolate_to_points_invalid(test_data):
"""Test that interpolate_to_points raises when given an invalid method."""
xp, yp, z = test_data
obs_points = np.vstack([xp, yp]).transpose() * 10
with get_test_data('interpolation_test_points.npz') as fobj:
test_points = np.load(fobj)['points']
with pytest.raises(ValueError):
interpolate_to_points(obs_points, z, test_points, interp_type='shouldraise')
@pytest.mark.parametrize('assume_units', [None, 'mbar'])
@pytest.mark.parametrize('method', ['natural_neighbor', 'cressman', 'barnes', 'linear',
'nearest', 'rbf', 'cubic'])
def test_interpolate_to_points(method, assume_units, test_data):
r"""Test main grid interpolation function."""
xp, yp, z = test_data
obs_points = np.vstack([xp, yp]).transpose() * 10
with get_test_data('interpolation_test_points.npz') as fobj:
test_points = np.load(fobj)['points']
if method == 'cressman':
extra_kw = {'search_radius': 200, 'minimum_neighbors': 1}
elif method == 'barnes':
extra_kw = {'search_radius': 400, 'minimum_neighbors': 1, 'gamma': 1}
else:
extra_kw = {}
with get_test_data(f'{method}_test.npz') as fobj:
truth = np.load(fobj)['img'].reshape(-1)
if assume_units:
z = units.Quantity(z, assume_units)
truth = units.Quantity(truth, assume_units)
img = interpolate_to_points(obs_points, z, test_points, interp_type=method, **extra_kw)
assert_array_almost_equal(truth, img)
| Unidata/MetPy | tests/interpolate/test_points.py | Python | bsd-3-clause | 5,986 |
"""Loyalty domain objects for LCP services
The domain objects derived from LCPResource can be used for basic CRUD
operations to support internal services.
"""
from future import standard_library
standard_library.install_aliases() # NOQA
from builtins import object
try:
from http.client import NO_CONTENT
except ImportError:
from httplib import NO_CONTENT
import requests
import simplejson as json
class LCPResource(object):
"""Base class for loyalty domain objects for LCP services
When defining new domain objects, overrides of __init__ and create must call
the superclass implementations to ensure that the common id and url properties
are correctly initialized.
"""
def __init__(self, response=None):
self._json = None
self.response = response
self._url = None
if response is not None:
if response.status_code != NO_CONTENT:
self._json = response.json()
try:
self._url = self._self_link()
except KeyError:
pass
if 'location' in response.headers:
self._url = response.headers['location']
@property
def url(self):
return self._url
@property
def json(self):
return self.response.json()
def _self_link(self):
return self._json['links']['self']['href']
def __getitem__(self, key):
return self._json[key]
class LCPCrud(object):
"""Cruds are responsible for translating CRUD operations into http
requests (method, url-path, querystring, payload) and interpreting http
responses (success vs failure).
    :param http_client: Must be a subclass of requests.Session
"""
def __init__(self, http_client):
self.http_client = http_client
@property
def resource_class(self):
return LCPResource
def create(self, path, payload):
return self._resource_from_http('post', path, payload)
def read(self, path):
return self._resource_from_http('get', path)
def update(self, path, payload):
return self._resource_from_http('put', path, payload)
def delete(self, path):
return self._resource_from_http('delete', path)
def search(self, path, params=None):
return self._resource_from_http('get', path, params=params)
def _resource_from_http(self, method, path, payload=None, params=None):
response = None
try:
response = self._http_method(method)(path, data=payload, params=params)
response.raise_for_status()
except requests.RequestException:
raise CRUDError(path, method, response, **{'request_payload': payload, 'request_parameters': params})
return self.resource_class(response)
def _http_method(self, method):
return getattr(self.http_client, method.lower())
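# Editor's sketch: typical use of LCPCrud (the URL and session setup are
# hypothetical; any requests.Session subclass works, per the docstring above):
#
#     session = requests.Session()
#     crud = LCPCrud(session)
#     account = crud.read("https://lcp.example.com/accounts/123")
#     print(account.url, account["links"])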
class CRUDError(Exception):
def __init__(self, url, method, response, **request_kwargs):
formatted_request = self._format_optional_args(request_kwargs)
super(CRUDError, self).__init__(
"{status_code} returned.\n"
"Method: {method}\n"
"Correlation ID: {cid}\n"
"URL: {url}\n"
"{formatted_request}"
"Response: {response}".format(
url=url,
method=method,
status_code=response.status_code,
cid=response.headers.get('pts-lcp-cid', 'none'),
formatted_request=formatted_request,
response=response.text,
))
def _format_optional_args(self, request_kwargs):
formatted_request = ''
for key in list(request_kwargs.keys()):
value = request_kwargs[key]
label = self._format_label(key)
if isinstance(value, dict):
formatted_value = self._format_dictionary(label, value)
else:
formatted_value = u'{}: "{}"\n'.format(label, value)
formatted_request += formatted_value
return formatted_request
def _format_label(self, text):
return text.replace('_', ' ').capitalize()
def _format_dictionary(self, label, dict_to_format):
formatted_dictionary = ''
if dict_to_format is not None:
formatted_dictionary = u'{}: {}\n'.format(label, json.dumps(dict_to_format, indent=2, sort_keys=True))
return formatted_dictionary
| Points/PyLCP | pylcp/crud/base.py | Python | bsd-3-clause | 4,481 |
# coding: utf-8
from django import forms
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from registration.models import RegistrationProfile
from models import Account, Document, Comment
import const
latin = r'^[a-zA-Z0-9]+$'
datetime_input_format = '%d.%m.%Y %H:%M'
class RURegForm(forms.Form):
u""" форма регистрации русскоязычного аккаунта. требуем уникальности логина
"""
email = forms.EmailField(label=u'Email',
widget=forms.TextInput(attrs={'class':'text'})
)
password1 = forms.RegexField(label=u'Пароль',
regex=latin,
max_length=20,
help_text='допустимы латиница и цифры',
widget=forms.PasswordInput(attrs={'class':'text'}))
password2 = forms.RegexField(label=u'Пароль(повтор)',
regex=latin,
max_length=20,
widget=forms.PasswordInput(attrs={'class':'text'}))
family_name = forms.CharField(label=u'Фамилия')
first_name = forms.CharField(label=u'Имя')
parent_name = forms.CharField(label=u'Отчество')
country = forms.CharField(label=u'Страна')
city = forms.CharField(label=u'Город')
degree = forms.CharField(label=u'Учёная степень и звание', required=False)
work_place = forms.CharField(label=u'Организация', required=False)
post = forms.CharField(label=u'Должность', required=False)
contact = forms.CharField(label=u'Моб.телефон')
tos = forms.BooleanField(label=u'Согласен на публикацию своих данных в целях конференции')
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
if User.objects.filter(email__iexact=self.cleaned_data['email']):
raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.'))
return self.cleaned_data['email']
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError(_(u'You must type the same password each time'))
return self.cleaned_data
def save(self, profile_callback=None):
"""
Create the new ``User`` and ``RegistrationProfile``, and
        return the ``User``.
This is essentially a light wrapper around
``RegistrationProfile.objects.create_inactive_user()``,
feeding it the form data and a profile callback (see the
documentation on ``create_inactive_user()`` for details) if
supplied.
"""
args = self.cleaned_data.copy()
args.pop('email')
args.pop('password1')
args.pop('password2')
args.pop('tos')
args['typ'] = const.RU_TYP
acc = Account.objects.create(**args)
new_user = RegistrationProfile.objects.create_inactive_user(username=str(acc.id),
password=self.cleaned_data['password1'],
email=self.cleaned_data['email'].lower(),
profile_callback=profile_callback)
acc.user = new_user
acc.save()
return new_user
class ENRegForm(forms.Form):
u""" форма регистрации английского аккаунта. требуем уникальности логина
"""
email = forms.EmailField(label=u'Email',
widget=forms.TextInput(attrs={'class':'text'})
)
password1 = forms.RegexField(label=u'Password',
regex=latin,
max_length=20,
help_text='Latin characters and numerals',
widget=forms.PasswordInput(attrs={'class':'text'}))
password2 = forms.RegexField(label=u'Password(repeat)',
regex=latin,
max_length=20,
widget=forms.PasswordInput(attrs={'class':'text'}))
deal = forms.ChoiceField(label=u'Title', choices=const.EN_DEAL_CHOICES)
first_name = forms.CharField(label=u'First name')
family_name = forms.CharField(label=u'Family name')
country = forms.CharField(label=u'Country')
city = forms.CharField(label=u'City')
degree = forms.CharField(label=u'IRA affiliation', required=False)
work_place = forms.CharField(label=u'Place of work', required=False)
post = forms.CharField(label=u'Position', required=False)
contact = forms.CharField(label=u'Mobile phone')
tos = forms.BooleanField(label=u'I give my consent to the publishing of my personal data.')
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
if User.objects.filter(email__iexact=self.cleaned_data['email']):
raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.'))
return self.cleaned_data['email']
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError(_(u'You must type the same password each time'))
return self.cleaned_data
def save(self, profile_callback=None):
"""
Create the new ``User`` and ``RegistrationProfile``, and
        return the ``User``.
This is essentially a light wrapper around
``RegistrationProfile.objects.create_inactive_user()``,
feeding it the form data and a profile callback (see the
documentation on ``create_inactive_user()`` for details) if
supplied.
"""
args = self.cleaned_data.copy()
args.pop('email')
args.pop('password1')
args.pop('password2')
args.pop('tos')
args['typ'] = const.EN_TYP
acc = Account.objects.create(**args)
new_user = RegistrationProfile.objects.create_inactive_user(username=str(acc.id),
password=self.cleaned_data['password1'],
email=self.cleaned_data['email'].lower(),
profile_callback=profile_callback)
acc.user = new_user
acc.save()
return new_user
class DocumentForm(forms.ModelForm):
#section = forms.ChoiceField(label=(u'Секция'), widget=forms.RadioSelect)
class Meta:
model = Document
fields = ('title', 'filename', 'section', 'desc')
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('text',)
class DocumentStatusForm(forms.ModelForm):
class Meta:
model = Document
fields = ('status_comment',)
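# A minimal view sketch, assuming a standard Django function-based view;
# `redirect`, `render` and the template name are assumptions, not part of
# this app:
#
#   def register_ru(request):
#       form = RURegForm(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           form.save()  # creates the Account and an inactive User
#           return redirect('registration_complete')
#       return render(request, 'registration/ru_form.html', {'form': form})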
|
m-garanin/conference
|
app/forms.py
|
Python
|
bsd-3-clause
| 8,103
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
)
import pandas._testing as tm
from pandas.core.util.hashing import hash_tuples
from pandas.util import (
hash_array,
hash_pandas_object,
)
@pytest.fixture(
params=[
Series([1, 2, 3] * 3, dtype="int32"),
Series([None, 2.5, 3.5] * 3, dtype="float32"),
Series(["a", "b", "c"] * 3, dtype="category"),
Series(["d", "e", "f"] * 3),
Series([True, False, True] * 3),
Series(pd.date_range("20130101", periods=9)),
Series(pd.date_range("20130101", periods=9, tz="US/Eastern")),
Series(pd.timedelta_range("2000", periods=9)),
]
)
def series(request):
return request.param
@pytest.fixture(params=[True, False])
def index(request):
return request.param
def test_consistency():
# Check that our hash doesn't change because of a mistake
# in the actual code; this is the ground truth.
result = hash_pandas_object(Index(["foo", "bar", "baz"]))
expected = Series(
np.array(
[3600424527151052760, 1374399572096150070, 477881037637427054],
dtype="uint64",
),
index=["foo", "bar", "baz"],
)
tm.assert_series_equal(result, expected)
def test_hash_array(series):
arr = series.values
tm.assert_numpy_array_equal(hash_array(arr), hash_array(arr))
@pytest.mark.parametrize("dtype", ["U", object])
def test_hash_array_mixed(dtype):
result1 = hash_array(np.array(["3", "4", "All"]))
result2 = hash_array(np.array([3, 4, "All"], dtype=dtype))
tm.assert_numpy_array_equal(result1, result2)
@pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")])
def test_hash_array_errors(val):
msg = "must pass a ndarray-like"
with pytest.raises(TypeError, match=msg):
hash_array(val)
def test_hash_array_index_exception():
# GH42003 TypeError instead of AttributeError
obj = pd.DatetimeIndex(["2018-10-28 01:20:00"], tz="Europe/Berlin")
msg = "Use hash_pandas_object instead"
with pytest.raises(TypeError, match=msg):
hash_array(obj)
def test_hash_tuples():
tuples = [(1, "one"), (1, "two"), (2, "one")]
result = hash_tuples(tuples)
expected = hash_pandas_object(MultiIndex.from_tuples(tuples)).values
tm.assert_numpy_array_equal(result, expected)
# We only need to support MultiIndex and list-of-tuples
msg = "|".join(["object is not iterable", "zip argument #1 must support iteration"])
with pytest.raises(TypeError, match=msg):
hash_tuples(tuples[0])
@pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")])
def test_hash_tuples_err(val):
msg = "must be convertible to a list-of-tuples"
with pytest.raises(TypeError, match=msg):
hash_tuples(val)
def test_multiindex_unique():
mi = MultiIndex.from_tuples([(118, 472), (236, 118), (51, 204), (102, 51)])
assert mi.is_unique is True
result = hash_pandas_object(mi)
assert result.is_unique is True
def test_multiindex_objects():
mi = MultiIndex(
levels=[["b", "d", "a"], [1, 2, 3]],
codes=[[0, 1, 0, 2], [2, 0, 0, 1]],
names=["col1", "col2"],
)
recons = mi._sort_levels_monotonic()
# These are equal.
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
@pytest.mark.parametrize(
"obj",
[
Series([1, 2, 3]),
Series([1.0, 1.5, 3.2]),
Series([1.0, 1.5, np.nan]),
Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),
Series(["a", "b", "c"]),
Series(["a", np.nan, "c"]),
Series(["a", None, "c"]),
Series([True, False, True]),
Series(dtype=object),
DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]}),
DataFrame(),
tm.makeMissingDataframe(),
tm.makeMixedDataFrame(),
tm.makeTimeDataFrame(),
tm.makeTimeSeries(),
Series(tm.makePeriodIndex()),
Series(pd.date_range("20130101", periods=3, tz="US/Eastern")),
],
)
def test_hash_pandas_object(obj, index):
a = hash_pandas_object(obj, index=index)
b = hash_pandas_object(obj, index=index)
tm.assert_series_equal(a, b)
@pytest.mark.parametrize(
"obj",
[
Series([1, 2, 3]),
Series([1.0, 1.5, 3.2]),
Series([1.0, 1.5, np.nan]),
Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),
Series(["a", "b", "c"]),
Series(["a", np.nan, "c"]),
Series(["a", None, "c"]),
Series([True, False, True]),
DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]}),
tm.makeMissingDataframe(),
tm.makeMixedDataFrame(),
tm.makeTimeDataFrame(),
tm.makeTimeSeries(),
Series(tm.makePeriodIndex()),
Series(pd.date_range("20130101", periods=3, tz="US/Eastern")),
],
)
def test_hash_pandas_object_diff_index_non_empty(obj):
a = hash_pandas_object(obj, index=True)
b = hash_pandas_object(obj, index=False)
assert not (a == b).all()
@pytest.mark.parametrize(
"obj",
[
Index([1, 2, 3]),
Index([True, False, True]),
tm.makeTimedeltaIndex(),
tm.makePeriodIndex(),
MultiIndex.from_product(
[range(5), ["foo", "bar", "baz"], pd.date_range("20130101", periods=2)]
),
MultiIndex.from_product([pd.CategoricalIndex(list("aabc")), range(3)]),
],
)
def test_hash_pandas_index(obj, index):
a = hash_pandas_object(obj, index=index)
b = hash_pandas_object(obj, index=index)
tm.assert_series_equal(a, b)
def test_hash_pandas_series(series, index):
a = hash_pandas_object(series, index=index)
b = hash_pandas_object(series, index=index)
tm.assert_series_equal(a, b)
def test_hash_pandas_series_diff_index(series):
a = hash_pandas_object(series, index=True)
b = hash_pandas_object(series, index=False)
assert not (a == b).all()
@pytest.mark.parametrize(
"obj", [Series([], dtype="float64"), Series([], dtype="object"), Index([])]
)
def test_hash_pandas_empty_object(obj, index):
# These are by-definition the same with
# or without the index as the data is empty.
a = hash_pandas_object(obj, index=index)
b = hash_pandas_object(obj, index=index)
tm.assert_series_equal(a, b)
@pytest.mark.parametrize(
"s1",
[
Series(["a", "b", "c", "d"]),
Series([1000, 2000, 3000, 4000]),
Series(pd.date_range(0, periods=4)),
],
)
@pytest.mark.parametrize("categorize", [True, False])
def test_categorical_consistency(s1, categorize):
# see gh-15143
#
# Check that categoricals hash consistent with their values,
# not codes. This should work for categoricals of any dtype.
s2 = s1.astype("category").cat.set_categories(s1)
s3 = s2.cat.set_categories(list(reversed(s1)))
# These should all hash identically.
h1 = hash_pandas_object(s1, categorize=categorize)
h2 = hash_pandas_object(s2, categorize=categorize)
h3 = hash_pandas_object(s3, categorize=categorize)
tm.assert_series_equal(h1, h2)
tm.assert_series_equal(h1, h3)
def test_categorical_with_nan_consistency():
c = pd.Categorical.from_codes(
[-1, 0, 1, 2, 3, 4], categories=pd.date_range("2012-01-01", periods=5, name="B")
)
expected = hash_array(c, categorize=False)
c = pd.Categorical.from_codes([-1, 0], categories=[pd.Timestamp("2012-01-01")])
result = hash_array(c, categorize=False)
assert result[0] in expected
assert result[1] in expected
def test_pandas_errors():
msg = "Unexpected type for hashing"
with pytest.raises(TypeError, match=msg):
hash_pandas_object(pd.Timestamp("20130101"))
def test_hash_keys():
# Using different hash keys, should have
# different hashes for the same data.
#
# This only matters for object dtypes.
obj = Series(list("abc"))
a = hash_pandas_object(obj, hash_key="9876543210123456")
b = hash_pandas_object(obj, hash_key="9876543210123465")
assert (a != b).all()
def test_df_hash_keys():
# DataFrame version of the test_hash_keys.
# https://github.com/pandas-dev/pandas/issues/41404
obj = DataFrame({"x": np.arange(3), "y": list("abc")})
a = hash_pandas_object(obj, hash_key="9876543210123456")
b = hash_pandas_object(obj, hash_key="9876543210123465")
assert (a != b).all()
def test_df_encoding():
# Check that DataFrame recognizes optional encoding.
# https://github.com/pandas-dev/pandas/issues/41404
# https://github.com/pandas-dev/pandas/pull/42049
obj = DataFrame({"x": np.arange(3), "y": list("a+c")})
a = hash_pandas_object(obj, encoding="utf8")
b = hash_pandas_object(obj, encoding="utf7")
# Note that the "+" is encoded as "+-" in utf-7.
assert a[0] == b[0]
assert a[1] != b[1]
assert a[2] == b[2]
def test_invalid_key():
# This only matters for object dtypes.
msg = "key should be a 16-byte string encoded"
with pytest.raises(ValueError, match=msg):
hash_pandas_object(Series(list("abc")), hash_key="foo")
def test_already_encoded(index):
# If already encoded, then ok.
obj = Series(list("abc")).str.encode("utf8")
a = hash_pandas_object(obj, index=index)
b = hash_pandas_object(obj, index=index)
tm.assert_series_equal(a, b)
def test_alternate_encoding(index):
obj = Series(list("abc"))
a = hash_pandas_object(obj, index=index)
b = hash_pandas_object(obj, index=index)
tm.assert_series_equal(a, b)
@pytest.mark.parametrize("l_exp", range(8))
@pytest.mark.parametrize("l_add", [0, 1])
def test_same_len_hash_collisions(l_exp, l_add):
length = 2 ** (l_exp + 8) + l_add
s = tm.rands_array(length, 2)
result = hash_array(s, "utf8")
assert not result[0] == result[1]
def test_hash_collisions():
# Hash collisions are bad.
#
# https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726
hashes = [
"Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9", # noqa: E501
"Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe", # noqa: E501
]
# These should be different.
result1 = hash_array(np.asarray(hashes[0:1], dtype=object), "utf8")
expected1 = np.array([14963968704024874985], dtype=np.uint64)
tm.assert_numpy_array_equal(result1, expected1)
result2 = hash_array(np.asarray(hashes[1:2], dtype=object), "utf8")
expected2 = np.array([16428432627716348016], dtype=np.uint64)
tm.assert_numpy_array_equal(result2, expected2)
result = hash_array(np.asarray(hashes, dtype=object), "utf8")
tm.assert_numpy_array_equal(result, np.concatenate([expected1, expected2], axis=0))
@pytest.mark.parametrize(
"data, result_data",
[
[[tuple("1"), tuple("2")], [10345501319357378243, 8331063931016360761]],
[[(1,), (2,)], [9408946347443669104, 3278256261030523334]],
],
)
def test_hash_with_tuple(data, result_data):
# GH#28969 array containing a tuple raises on call to arr.astype(str)
# apparently a numpy bug github.com/numpy/numpy/issues/9441
df = DataFrame({"data": data})
result = hash_pandas_object(df)
expected = Series(result_data, dtype=np.uint64)
tm.assert_series_equal(result, expected)
def test_hashable_tuple_args():
# require that the elements of such tuples are themselves hashable
df3 = DataFrame(
{
"data": [
(
1,
[],
),
(
2,
{},
),
]
}
)
with pytest.raises(TypeError, match="unhashable type: 'list'"):
hash_pandas_object(df3)
def test_hash_object_none_key():
# https://github.com/pandas-dev/pandas/issues/30887
result = pd.util.hash_pandas_object(Series(["a", "b"]), hash_key=None)
expected = Series([4578374827886788867, 17338122309987883691], dtype="uint64")
tm.assert_series_equal(result, expected)
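if __name__ == "__main__":
    # A minimal demonstration of the properties the tests above assert
    # piecewise: hashing is deterministic across calls and sensitive to
    # whether the index is included.
    s = Series(["foo", "bar", "baz"])
    print(hash_pandas_object(s, index=True))
    print(hash_pandas_object(s, index=False))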
|
pandas-dev/pandas
|
pandas/tests/util/test_hashing.py
|
Python
|
bsd-3-clause
| 13,000
|
import numpy as np
import h5py
import constants as const
from circuit import Circuit
def load_neurohdf(filename, hdf5path, memmapped=False):
""" Loads the circuit from a NeuroHDF file as exported from CATMAID
Parameters
----------
filename : str
Path to the NeuroHDF file
    hdf5path : str
        HDF5 path to the irregular dataset containing the circuit,
        e.g. /Microcircuit
    memmapped : bool
        Whether to memory-map the HDF5 arrays (not yet implemented)
"""
if memmapped:
raise NotImplementedError('Memmapped HDF5 reading not yet implemented')
circuit = Circuit()
f = h5py.File(filename, 'r')
circuitdata_group=f[hdf5path]
vertices_group = circuitdata_group.get('vertices')
connectivity_group = circuitdata_group.get('connectivity')
metadata_group = circuitdata_group.get('metadata')
def helpdict(v):
helpdict = dict.fromkeys( v.attrs.keys() )
for k in helpdict:
helpdict[k] = v.attrs.get(k)
return helpdict
for k,v in vertices_group.items():
if k == 'id':
circuit.vertices = vertices_group[k].value
else:
circuit.vertices_properties[k] = dict.fromkeys( [const.DATA, const.METADATA] )
circuit.vertices_properties[k][const.DATA] = v.value
circuit.vertices_properties[k][const.METADATA] = helpdict(v)
print('Added vertices {0}'.format(k))
for k,v in connectivity_group.items():
if k == 'id':
circuit.connectivity = connectivity_group[k].value
else:
circuit.connectivity_properties[k] = dict.fromkeys( [const.DATA, const.METADATA] )
circuit.connectivity_properties[k][const.DATA] = v.value
circuit.connectivity_properties[k][const.METADATA] = helpdict(v)
print('Added connectivity {0}'.format(k))
if metadata_group:
for k,v in metadata_group.items():
circuit.metadata[k] = dict.fromkeys( [const.DATA, const.METADATA] )
circuit.metadata[k][const.DATA] = v.value
circuit.metadata[k][const.METADATA] = helpdict(v)
print('Added metadata {0}'.format(k))
circuit._remap_vertices_id2indices()
f.close()
return circuit
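# A minimal usage sketch; the file name is a placeholder and the HDF5 path
# follows the /Microcircuit example from the docstring:
#
#   circuit = load_neurohdf('catmaid_export.hdf', '/Microcircuit')
#   print(circuit.vertices_properties.keys())
#   print(circuit.connectivity_properties.keys())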
|
unidesigner/microcircuit
|
microcircuit/io.py
|
Python
|
bsd-3-clause
| 2,171
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='SiteProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('short_name', models.CharField(max_length=50)),
('neighbor_sites', models.ManyToManyField(related_name='neighbors', to='sites.Site', blank=True)),
('site', models.OneToOneField(to='sites.Site', on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
]
|
ugoertz/django-familio
|
base/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 805
|
"""Job.repo_fs
Revision ID: 5773ec3bc06
Revises: 3ce0ed6fff6
Create Date: 2015-12-01 02:03:19.042318
"""
# revision identifiers, used by Alembic.
revision = '5773ec3bc06'
down_revision = '3ce0ed6fff6'
branch_labels = None
depends_on = None
from alembic import op
def upgrade():
op.alter_column('job', 'repo', new_column_name='repo_fs')
def downgrade():
op.alter_column('job', 'repo_fs', new_column_name='repo')
|
RickyCook/DockCI
|
alembic/versions/5773ec3bc06_job_repo_fs.py
|
Python
|
isc
| 427
|
import datetime
from typing import Union
from .utils import to_camel_case
def streaming_market_filter(
market_ids: list = None,
bsp_market: bool = None,
betting_types: list = None,
event_type_ids: list = None,
event_ids: list = None,
turn_in_play_enabled: bool = None,
market_types: list = None,
venues: list = None,
country_codes: list = None,
race_types: list = None,
) -> dict:
"""
:param list market_ids: filter market data to data pertaining to specific marketIds.
:param list event_type_ids: filter market data to data pertaining to specific event_type ids.
:param list event_ids: filter market data to data pertaining to specific event ids.
:param bool turn_in_play_enabled: restriction on whether market will turn in play or not, not supplied returns all.
:param list venues: restrict markets by venue (only horse racing has venue at the moment)
:param bool bsp_market: restriction on bsp, not supplied will return all.
:param list betting_types: filter on betting types
:param list market_types: filter market data by market types.
:param list country_codes: filter based on country codes
:param list race_types: filter race types
:return: dict
"""
args = locals().copy()
return {to_camel_case(k): v for k, v in args.items() if v is not None}
def streaming_market_data_filter(
fields: list = None, ladder_levels: int = None
) -> dict:
"""
:param list fields: EX_BEST_OFFERS_DISP, EX_BEST_OFFERS, EX_ALL_OFFERS, EX_TRADED,
EX_TRADED_VOL, EX_LTP, EX_MARKET_DEF, SP_TRADED, SP_PROJECTED
:param int ladder_levels: 1->10
:return: dict
"""
args = locals().copy()
return {to_camel_case(k): v for k, v in args.items() if v is not None}
def streaming_order_filter(
include_overall_position: bool = None,
customer_strategy_refs: list = None,
partition_matched_by_strategy_ref: bool = None,
) -> dict:
"""
:param bool include_overall_position: Returns overall / net position (OrderRunnerChange.mb / OrderRunnerChange.ml)
:param list customer_strategy_refs: Restricts to specified customerStrategyRefs; this will filter orders and
    StrategyMatchChanges accordingly (Note: overall position is not filtered)
:param bool partition_matched_by_strategy_ref: Returns strategy positions (OrderRunnerChange.smc=
Map<customerStrategyRef, StrategyMatchChange>) - these are sent in delta format as per overall position.
:return: dict
"""
args = locals().copy()
return {to_camel_case(k): v for k, v in args.items() if v is not None}
def time_range(
from_: Union[str, datetime.datetime] = None,
to: Union[str, datetime.datetime] = None,
) -> dict:
"""
:param Union[str, datetime.datetime] from_:
:param Union[str, datetime.datetime] to:
:return: dict
"""
    if from_ is not None:
if isinstance(from_, datetime.datetime):
from_ = from_.isoformat()
elif not isinstance(from_, str):
raise TypeError("The 'from_' value must be string or datetime (not date)")
    if to is not None:
if isinstance(to, datetime.datetime):
to = to.isoformat()
elif not isinstance(to, str):
raise TypeError("The 'to' value must be string or datetime (not date)")
args = locals().copy()
return {k.replace("_", ""): v for k, v in args.items()}
def market_filter(
text_query: str = None,
event_type_ids: list = None,
event_ids: list = None,
competition_ids: list = None,
market_ids: list = None,
venues: list = None,
bsp_only: bool = None,
turn_in_play_enabled: bool = None,
in_play_only: bool = None,
market_betting_types: list = None,
market_countries: list = None,
market_type_codes: list = None,
market_start_time: dict = None,
with_orders: str = None,
race_types: list = None,
) -> dict:
"""
:param str text_query: restrict markets by text associated with it, e.g name, event, comp.
:param list event_type_ids: filter market data to data pertaining to specific event_type ids.
:param list event_ids: filter market data to data pertaining to specific event ids.
:param list competition_ids: filter market data to data pertaining to specific competition ids.
:param list market_ids: filter market data to data pertaining to specific marketIds.
:param list venues: restrict markets by venue (only horse racing has venue at the moment)
:param bool bsp_only: restriction on bsp, not supplied will return all.
:param bool turn_in_play_enabled: restriction on whether market will turn in play or not, not supplied returns all.
:param bool in_play_only: restriction to currently inplay, not supplied returns all.
:param list market_betting_types: filter market data by market betting types.
:param list market_countries: filter market data by country codes.
:param list market_type_codes: filter market data to match the type of market e.g. MATCH_ODDS.
:param dict market_start_time: filter market data by time at which it starts.
:param str with_orders: filter market data by specified order status.
:param list race_types: filter race types.
:return: dict
"""
args = locals().copy()
return {to_camel_case(k): v for k, v in args.items() if v is not None}
def price_data(
sp_available: bool = False,
sp_traded: bool = False,
ex_best_offers: bool = False,
ex_all_offers: bool = False,
ex_traded: bool = False,
) -> list:
"""
Create PriceData filter list from all args passed as True.
:param bool sp_available: Amount available for the BSP auction.
:param bool sp_traded: Amount traded in the BSP auction.
:param bool ex_best_offers: Only the best prices available for each runner, to requested price depth.
:param bool ex_all_offers: trumps EX_BEST_OFFERS if both settings are present
:param bool ex_traded: Amount traded on the exchange.
:returns: string values of all args specified as True.
:rtype: list
"""
args = locals().copy()
return [k.upper() for k, v in args.items() if v is True]
def ex_best_offers_overrides(
best_prices_depth: int = None,
rollup_model: str = None,
rollup_limit: int = None,
rollup_liability_threshold: float = None,
rollup_liability_factor: int = None,
) -> dict:
"""
Create filter to specify whether to accumulate market volume info, how deep a book to return and rollup methods if
accumulation is selected.
:param int best_prices_depth: The maximum number of prices to return on each side for each runner.
:param str rollup_model: method to use to accumulate market orders.
:param int rollup_limit: The volume limit to use when rolling up returned sizes. The exact definition of the limit
depends on the rollupModel.
If no limit is provided it will use minimum stake
:param float rollup_liability_threshold: Only applicable when rollupModel is MANAGED_LIABILITY. The rollup model
switches from being stake based to liability based at the smallest lay price which is >= rollupLiabilityThreshold
:param int rollup_liability_factor: Only applicable when rollupModel is MANAGED_LIABILITY. (rollupLiabilityFactor *
    rollupLimit) is the minimum liability the user is deemed to be comfortable with. After the rollupLiabilityThreshold
    price, subsequent volumes will be rolled up to the minimum value such that the liability >= the minimum liability.
:returns: parameters for inclusion in market data requests.
:rtype: dict
"""
args = locals().copy()
return {to_camel_case(k): v for k, v in args.items() if v is not None}
def price_projection(
price_data: list = None,
ex_best_offers_overrides: dict = None,
virtualise: bool = True,
rollover_stakes: bool = False,
) -> dict:
"""
Selection criteria of the returning price data.
:param list price_data: PriceData filter to specify what market data we wish to receive.
:param dict ex_best_offers_overrides: define order book depth, rollup method.
:param bool virtualise: whether to receive virtualised prices also.
:param bool rollover_stakes: whether to accumulate volume at each price as sum of volume at that price and all better
prices.
:returns: price data criteria for market data.
:rtype: dict
"""
if price_data is None:
price_data = []
if ex_best_offers_overrides is None:
ex_best_offers_overrides = {}
args = locals().copy()
return {to_camel_case(k): v for k, v in args.items() if v is not None}
def place_instruction(
order_type: str,
selection_id: int,
side: str,
handicap: float = None,
limit_order: dict = None,
limit_on_close_order: dict = None,
market_on_close_order: dict = None,
customer_order_ref: str = None,
) -> dict:
"""
Create order instructions to place an order at exchange.
:param str order_type: define type of order to place.
:param int selection_id: selection on which to place order
    :param float handicap: handicap if placing an order on an Asian Handicap type market
:param str side: side of order
:param dict limit_order: if orderType is a limitOrder structure details of the order.
:param dict limit_on_close_order: if orderType is a limitOnCloseOrder structure details of the order.
:param dict market_on_close_order: if orderType is a marketOnCloseOrder structure details of the order.
    :param str customer_order_ref: an optional reference customers can set to identify instructions.
:return: orders to place.
:rtype: dict
"""
args = locals().copy()
return {to_camel_case(k): v for k, v in args.items() if v is not None}
def limit_order(
price: float,
persistence_type: str = None,
size: float = None,
time_in_force: str = None,
min_fill_size: float = None,
bet_target_type: str = None,
bet_target_size: float = None,
) -> dict:
"""
Create a limit order to send to exchange.
:param float size: amount in account currency to be sent.
:param float price: price at which the order is to be sent.
:param str persistence_type: what happens to order at turn in play.
:param str time_in_force: specify if it is FillOrKill/FillAndKill. This value takes precedence over any
PersistenceType value chosen.
:param float min_fill_size: the minimum amount to be filled for FillAndKill.
:param str bet_target_type: Specify the type of Target, bet to certain backer profit or certain payout value.
Used to adjust to lower stakes if filled at better levels.
:param float bet_target_size: Size of payout of profit to bet.
:returns: Order information to place a limit order.
:rtype: dict
"""
args = locals().copy()
return {to_camel_case(k): v for k, v in args.items() if v is not None}
def limit_on_close_order(liability: float, price: float) -> dict:
"""
Create limit order for the closing auction.
:param float liability: amount to bet.
    :param float price: price at which to bet.
:returns: Order information to place a limit on close order.
:rtype: dict
"""
return locals().copy()
def market_on_close_order(liability: float) -> dict:
"""
Create market order to be placed in the closing auction.
:param float liability: amount to bet.
:returns: Order information to place a market on close order.
:rtype: dict
"""
return locals().copy()
def cancel_instruction(bet_id: str, size_reduction: float = None) -> dict:
"""
Instruction to fully or partially cancel an order (only applies to LIMIT orders)
:param str bet_id: identifier of the bet to cancel.
:param float size_reduction: If supplied then this is a partial cancel.
:returns: cancellation report detailing status, cancellation requested and actual cancellation details.
:rtype: dict
"""
args = locals().copy()
return {to_camel_case(k): v for k, v in args.items() if v is not None}
def replace_instruction(bet_id: str, new_price: float) -> dict:
"""
Instruction to replace a LIMIT or LIMIT_ON_CLOSE order at a new price.
Original order will be cancelled and a new order placed at the new price for the remaining stake.
:param str bet_id: Unique identifier for the bet
:param float new_price: The price to replace the bet at
:returns: replace report detailing status, replace requested and actual replace details.
:rtype: dict
"""
args = locals().copy()
return {to_camel_case(k): v for k, v in args.items() if v is not None}
def update_instruction(bet_id: str, new_persistence_type: str) -> dict:
"""
    Instruction to update a LIMIT bet's persistence type; this does not affect exposure.
:param str bet_id: Unique identifier for the bet
:param str new_persistence_type: The new persistence type to update this bet to.
:returns: update report detailing status, update requested and update details.
:rtype: dict
"""
args = locals().copy()
return {to_camel_case(k): v for k, v in args.items() if v is not None}
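if __name__ == "__main__":
    # A minimal sketch composing the helpers above; the event type id and
    # country code are made-up values. Because of the relative import at the
    # top of this module, run it as `python -m betfairlightweight.filters`.
    example_filter = market_filter(
        event_type_ids=["7"],
        market_countries=["GB"],
        market_type_codes=["WIN"],
    )
    projection = price_projection(
        price_data=price_data(ex_best_offers=True, ex_traded=True)
    )
    print(example_filter)
    print(projection)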
|
liampauling/betfairlightweight
|
betfairlightweight/filters.py
|
Python
|
mit
| 13,217
|
from django.db import models
class Address(models.Model):
teryt = models.IntegerField(default=0)
territory = models.CharField(max_length=7)
number_of_district = models.IntegerField(default=0)
address = models.CharField(max_length=500, default=None, null=True, blank=True)
district = models.CharField(max_length=50, default=None, null=True, blank=True)
commune = models.CharField(max_length=200, default=None, null=True, blank=True)
commune_type = models.CharField(max_length=200, default=None, null=True, blank=True)
county = models.CharField(max_length=200, default=None, null=True, blank=True)
voivodeship = models.CharField(max_length=200, default=None, null=True, blank=True)
number_of_electoral_circuit = models.IntegerField(default=0)
number_electoral_circuits = models.IntegerField(default=0, null=True, blank=True)
class Meta:
abstract = True
class Votes_data(Address):
type = models.CharField(max_length=63, default=None, null=True, blank=True)
number_of_voters = models.IntegerField(default=0)
number_of_proxies = models.IntegerField(default=0)
cards_given = models.IntegerField(default=0)
cards_taken = models.IntegerField(default=0)
cards_taken_from_box = models.IntegerField(default=0)
votes_valid = models.IntegerField(default=0)
votes_invalid = models.IntegerField(default=0)
cards_received = models.IntegerField(default=0)
cards_valid = models.IntegerField(default=0)
cards_invalid = models.IntegerField(default=0)
cards_invalid_x = models.IntegerField(default=0)
cards_invalid_xx = models.IntegerField(default=0)
cards_unused = models.IntegerField(default=0)
polish_citizens = models.IntegerField(default=0)
polish_citizens_b = models.IntegerField(default=0)
envelope_unsealed = models.IntegerField(default=0)
envelopes_thrown_into_box = models.IntegerField(default=0)
envelopes_without_statement = models.IntegerField(default=0)
envelopes_returned = models.IntegerField(default=0)
envelopes_returned_without_envelope = models.IntegerField(default=0)
unsigned_statements = models.IntegerField(default=0)
eu_citizens = models.IntegerField(default=0)
eu_citiznes_b = models.IntegerField(default=0)
electoral_packages = models.IntegerField(default=0)
class Meta:
abstract = True
class Election(Votes_data):
election_type = models.CharField(max_length=511, default=None)
notes = models.TextField(default='[]')
class Vote(models.Model):
election = models.ForeignKey(Election, null=True, blank=True)
political_party = models.CharField(max_length=2047, default=None)
amount = models.IntegerField(default=0)
class Candidate(models.Model):
election_type = models.CharField(max_length=511, default=None)
surname = models.CharField(max_length=255, default=None)
names = models.CharField(max_length=255, default=None)
age = models.IntegerField(default=0)
sex = models.CharField(max_length=3, default=None)
place_of_living = models.CharField(max_length=511, default=None)
voivodeship = models.CharField(max_length=127, default=None)
nationality = models.CharField(max_length=127, default=None)
votes = models.IntegerField(default=0)
    election_committee = models.CharField(max_length=2047, default=None)  # coded, e.g. 'kw1', 'kwp2'
number_of_list = models.IntegerField(default=0)
pos = models.IntegerField(default=0)
number_of_district = models.IntegerField(default=0)
grade = models.CharField(max_length=10, default=None)
mandate = models.CharField(max_length=2, default='n')
supported_by = models.TextField(default=None, null=True, blank=True)
teryt = models.IntegerField(default=0)
|
miastojestnasze/wyborySam2014
|
stats/models.py
|
Python
|
mit
| 3,755
|
#! /usr/bin/env nix-shell
#! nix-shell -i python2 -p "with python2Packages; [python debian]"
# Script to build a Nix script to actually build a Steam runtime.
# Patched version of https://github.com/ValveSoftware/steam-runtime/blob/master/build-runtime.py
import os
import re
import sys
import urllib
import gzip
import cStringIO
import subprocess
from debian import deb822
import argparse
destdir="newpkg"
arches=["amd64", "i386"]
REPO="http://repo.steampowered.com/steamrt"
DIST="scout"
COMPONENT="main"
out = open("runtime-generated.nix", "w")
out.write("# This file is autogenerated! Do not edit it yourself, use update-runtime.py for regeneration.\n")
out.write("{ fetchurl }:\n")
out.write("\n")
out.write("{\n")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--beta", help="build beta runtime", action="store_true")
parser.add_argument("-d", "--debug", help="build debug runtime", action="store_true")
parser.add_argument("--symbols", help="include debugging symbols", action="store_true")
parser.add_argument("--repo", help="source repository", default=REPO)
return parser.parse_args()
def download_file(file_base, file_name, file_url):
file_shortname = file_base + ".deb"
sha256 = subprocess.check_output(["nix-prefetch-url", "--type", "sha256", "--name", file_shortname, file_url])
out.write(" rec {\n")
out.write(" name = \"%s\";\n" % file_name)
out.write(" sha256 = \"%s\";\n" % sha256.strip())
out.write(" url = \"%s\";\n" % file_url.replace(REPO, "mirror://steamrt", 1))
out.write(" source = fetchurl {\n")
out.write(" inherit url sha256;\n")
out.write(" name = \"%s\";\n" % file_shortname)
out.write(" };\n")
out.write(" }\n")
def install_binaries (arch, binarylist):
installset = binarylist.copy()
#
# Load the Packages file so we can find the location of each binary package
#
packages_url = "%s/dists/%s/%s/binary-%s/Packages" % (REPO, DIST, COMPONENT, arch)
print("Downloading %s binaries from %s" % (arch, packages_url))
for stanza in deb822.Packages.iter_paragraphs(urllib.urlopen(packages_url)):
p = stanza['Package']
if p in installset:
print("DOWNLOADING BINARY: %s" % p)
#
# Download the package and install it
#
file_url="%s/%s" % (REPO,stanza['Filename'])
download_file(p, os.path.splitext(os.path.basename(stanza['Filename']))[0], file_url)
installset.remove(p)
for p in installset:
#
# There was a binary package in the list to be installed that is not in the repo
#
e = "ERROR: Package %s not found in Packages file %s\n" % (p, packages_url)
sys.stderr.write(e)
def install_symbols (arch, binarylist):
#
# Load the Packages file to find the location of each symbol package
#
packages_url = "%s/dists/%s/%s/debug/binary-%s/Packages" % (REPO, DIST, COMPONENT, arch)
print("Downloading %s symbols from %s" % (arch, packages_url))
for stanza in deb822.Packages.iter_paragraphs(urllib.urlopen(packages_url)):
p = stanza['Package']
m = re.match('([\w\-\.]+)\-dbgsym', p)
if m and m.group(1) in binarylist:
print("DOWNLOADING SYMBOLS: %s" % p)
#
# Download the package and install it
#
file_url="%s/%s" % (REPO,stanza['Filename'])
download_file(p, os.path.splitext(os.path.basename(stanza['Filename']))[0], file_url)
args = parse_args()
REPO=args.repo
if args.beta:
DIST="steam_beta"
if args.debug:
COMPONENT = "debug"
# Process packages.txt to get the list of source and binary packages
source_pkgs = set()
binary_pkgs = set()
print ("Creating runtime-generated.nix")
pkgs_list = urllib.urlopen("https://raw.githubusercontent.com/ValveSoftware/steam-runtime/master/packages.txt").readlines()
for line in pkgs_list:
if line[0] != '#':
toks = line.split()
if len(toks) > 1:
source_pkgs.add(toks[0])
binary_pkgs.update(toks[1:])
# remove development packages for end-user runtime
if not args.debug:
binary_pkgs -= {x for x in binary_pkgs if re.search('-dbg$|-dev$|-multidev$',x)}
for arch in arches:
out.write(" %s = [\n" % arch)
install_binaries(arch, binary_pkgs)
if args.symbols:
install_symbols(arch, binary_pkgs)
out.write(" ];\n");
out.write("}\n")
# vi: set noexpandtab:
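# A typical invocation, using only the flags defined in parse_args() above
# (the nix-shell shebang makes the script directly executable):
#
#   ./update-runtime.py --symbols        # include debugging symbols
#   ./update-runtime.py --beta --debug   # steam_beta dist, debug component
#
# The output is written to runtime-generated.nix in the current directory.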
|
SymbiFlow/nixpkgs
|
pkgs/games/steam/update-runtime.py
|
Python
|
mit
| 4,219
|
from unittest import TestCase
from six import assertRaisesRegex
from dark.reads import DNARead
from dark.summarize import summarizeReads, sequenceCategoryLengths
from dark.utils import StringIO
class TestSummarizeReads(TestCase):
"""
Test the summarizeReads function.
"""
def testReadNumberEmptyInput(self):
result = summarizeReads(StringIO(), 'fasta')
self.assertEqual(result['read_number'], 0)
def testReadNumberOneSequenceCount(self):
seq = '>hey\nagtcagtcagtc'
result = summarizeReads(StringIO(seq), 'fasta')
self.assertEqual(result['read_number'], 1)
def testReadNumberTwoSequencesCount(self):
seq = '>hey\nagtcagtcagtc\n>you\nacctg'
result = summarizeReads(StringIO(seq), 'fasta')
self.assertEqual(result['read_number'], 2)
def testTotalLengthEmptyInput(self):
result = summarizeReads(StringIO(), 'fasta')
self.assertEqual(result['total_length'], 0)
def testTotalLengthOneString(self):
seq = '>hey\nagtcagtcagtc'
result = summarizeReads(StringIO(seq), 'fasta')
self.assertEqual(result['total_length'], 12)
def testTotalLengthTwoStrings(self):
seq = '>hey\nagtcagtcagtc\n>you\nacctg'
result = summarizeReads(StringIO(seq), 'fasta')
self.assertEqual(result['total_length'], 17)
    def testBaseCountsEmptyInput(self):
result = summarizeReads(StringIO(), 'fasta')
self.assertEqual(result['base_counts'], {})
def testBaseCountsOneRead(self):
seq = '>hey\nagtcagtcagtc'
result = summarizeReads(StringIO(seq), 'fasta')
self.assertEqual(result['base_counts'],
{'a': 3, 'c': 3, 't': 3, 'g': 3})
def testBaseCountsTwoReads(self):
seq = '>hey\nagtcagtcagtc\n>you\nacctg'
result = summarizeReads(StringIO(seq), 'fasta')
self.assertEqual(result['base_counts'],
{'a': 4, 'c': 5, 't': 4, 'g': 4})
def testMaxLengthListEmptyInput(self):
result = summarizeReads(StringIO(), 'fasta')
self.assertEqual(result['max_length'], 0)
def testMaxLengthListTwoStrings(self):
seq = '>hey\nagtcagtcagtc\n>you\nacctg'
result = summarizeReads(StringIO(seq), 'fasta')
self.assertEqual(result['max_length'], 12)
def testMinLengthListEmptyInput(self):
result = summarizeReads(StringIO(), 'fasta')
self.assertEqual(result['min_length'], 0)
def testMinLengthListTwoStrings(self):
seq = '>hey\nagtcagtcagtc\n>you\nacctg'
result = summarizeReads(StringIO(seq), 'fasta')
self.assertEqual(result['min_length'], 5)
def testMedianEmptyInput(self):
result = summarizeReads(StringIO(), 'fasta')
self.assertEqual(result['median_length'], 0)
def testMedianOneString(self):
seq = '>hey\nagtcagtcagtc'
result = summarizeReads(StringIO(seq), 'fasta')
self.assertEqual(result['median_length'], 12)
def testMedianThreeStrings(self):
seq = '>hey\nagtcagtcagtc\n>you\nacctg\n>how\natgggtc'
result = summarizeReads(StringIO(seq), 'fasta')
self.assertEqual(result['median_length'], 7)
def testMedianFourStrings(self):
        seq = ('>hey\nagtcagtcagtc\n>you\nacctg\n>how\natgggtc\n>are\n'
               'atggctattgaactgtatct')
result = summarizeReads(StringIO(seq), 'fasta')
self.assertEqual(result['median_length'], 9.5)
class TestSequenceCategoryLengths(TestCase):
"""
Test the sequenceCategoryLengths function.
"""
def testInvalidMinLength(self):
"""
If a minLength value less than 1 is passed, a ValueError must be
raised.
"""
read = DNARead('id', '')
error = '^minLength must be at least 1$'
assertRaisesRegex(self, ValueError, error, sequenceCategoryLengths,
read, {}, minLength=0)
def testEmpty(self):
"""
An empty sequence should result in an empty category summary.
"""
read = DNARead('id', '')
self.assertEqual([], sequenceCategoryLengths(read, {}))
def testOneCategoryPerBase(self):
"""
If each base is in its own category, the summary must be correct.
"""
read = DNARead('id', 'ACGT')
categories = {
'A': 0,
'C': 1,
'G': 2,
'T': 3,
}
self.assertEqual([(0, 1), (1, 1), (2, 1), (3, 1)],
sequenceCategoryLengths(read, categories))
def testRepeatedCategory(self):
"""
If categories are repeated in a sequence, the summary must have the
correct length for the categories.
"""
read = DNARead('id', 'ACCGGTTT')
categories = {
'A': 'a',
'C': 'c',
'G': 'g',
'T': 't',
}
self.assertEqual([('a', 1), ('c', 2), ('g', 2), ('t', 3)],
sequenceCategoryLengths(read, categories))
def testUnknownCategory(self):
"""
If a base has no category, the summary must have C{None} as the
category for those bases.
"""
read = DNARead('id', 'ACCGGTTT')
categories = {
'A': 'a',
'G': 'g',
'T': 't',
}
self.assertEqual([('a', 1), (None, 2), ('g', 2), ('t', 3)],
sequenceCategoryLengths(read, categories))
def testUnknownCategoryWithDefault(self):
"""
If a base has no category, the summary must have the passed default
category as the category for those bases.
"""
read = DNARead('id', 'ACCGGTTT')
categories = {
'A': 'a',
'G': 'g',
'T': 't',
}
self.assertEqual([('a', 1), ('xxx', 2), ('g', 2), ('t', 3)],
sequenceCategoryLengths(read, categories, 'xxx'))
def testSuppressAtStart(self):
"""
If a region at the start of the sequence is shorter than the passed
        minimum length, the result should suppress the category information.
"""
read = DNARead('id', 'ACCGGTTT')
categories = {
'A': 'a',
'C': 'c',
'G': 'g',
'T': 't',
}
self.assertEqual([('...', 1), ('c', 2), ('g', 2), ('t', 3)],
sequenceCategoryLengths(read, categories,
minLength=2))
def testSuppressTwoAtStart(self):
"""
If 2 regions at the start of the sequence are shorter than the passed
        minimum length, the result should suppress the category information
and the length of the suppressed region must be the sum of the lengths
of the regions.
"""
read = DNARead('id', 'AGCCGGTTT')
categories = {
'A': 'a',
'C': 'c',
'G': 'g',
'T': 't',
}
self.assertEqual([('...', 2), ('c', 2), ('g', 2), ('t', 3)],
sequenceCategoryLengths(read, categories,
minLength=2))
def testSuppressAtEnd(self):
"""
If a region at the end of the sequence is shorter than the passed
        minimum length, the result should suppress the category information.
"""
read = DNARead('id', 'CCGGTTTA')
categories = {
'A': 'a',
'C': 'c',
'G': 'g',
'T': 't',
}
self.assertEqual([('c', 2), ('g', 2), ('t', 3), ('...', 1)],
sequenceCategoryLengths(read, categories,
minLength=2))
def testSuppressTwoAtEnd(self):
"""
If 2 regions at the end of the sequence are shorter than the passed
        minimum length, the result should suppress the category information
and the length of the suppressed region must be the sum of the lengths
of the regions.
"""
read = DNARead('id', 'CCGGTTTAC')
categories = {
'A': 'a',
'C': 'c',
'G': 'g',
'T': 't',
}
self.assertEqual([('c', 2), ('g', 2), ('t', 3), ('...', 2)],
sequenceCategoryLengths(read, categories,
minLength=2))
    def testSuppressWithNonDefaultSuppressedCategory(self):
"""
If a region of the sequence is shorter than the passed minimum length,
        the result should suppress the category information and the suppressed
        category returned must be the one that is passed.
"""
read = DNARead('id', 'ACCGGTTT')
categories = {
'A': 'a',
'C': 'c',
'G': 'g',
'T': 't',
}
self.assertEqual([('s', 1), ('c', 2), ('g', 2), ('t', 3)],
sequenceCategoryLengths(
read, categories, minLength=2,
suppressedCategory='s'))
def testAllSuppressed(self):
"""
If all regions of the sequence are shorter than the passed
        minimum length, the result should suppress the category information
and the suppressed region length must be the sum of the region lengths.
"""
read = DNARead('id', 'ACCGGGTTT')
categories = {
'A': 'a',
'C': 'c',
'G': 'g',
'T': 't',
}
self.assertEqual([('...', 9)],
sequenceCategoryLengths(read, categories,
minLength=5))
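if __name__ == '__main__':
    # A minimal demonstration mirroring the fixtures above: summarize a
    # two-read FASTA held in memory.
    seq = '>hey\nagtcagtcagtc\n>you\nacctg'
    print(summarizeReads(StringIO(seq), 'fasta'))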
|
terrycojones/dark-matter
|
test/test_summarize.py
|
Python
|
mit
| 9,767
|
#!/usr/bin/env python
import argparse
import bcrypt
import ftp2http
import getpass
import os
import sys
from .version import version
def main():
parser = argparse.ArgumentParser(version=version)
parser.add_argument(
"-f", "--configuration-file",
default="/etc/ftp2http.conf",
help="Specifies the configuration file path.",
)
parser.add_argument(
"-a", "--generate-account",
action="store_true",
help="Generate details for adding a user account."
)
parser.add_argument(
"--fd",
type=int, default=-1,
help="Specifies a socket file descriptor.",
)
args = parser.parse_args()
if args.generate_account:
name = raw_input("Enter a username: ")
password = getpass.getpass("Enter a password: ")
password_repeated = getpass.getpass("Confirm password: ")
if password != password_repeated:
sys.stderr.write("Error! The passwords did not match.\n")
sys.exit(1)
hashed_password = bcrypt.hashpw(password, bcrypt.gensalt())
print
print "Add the following line to your configuration file."
print "user: %s:%s" % (name, hashed_password)
print
else:
configuration_path = os.path.abspath(args.configuration_file)
config = ftp2http.read_configuration_file(configuration_path)
config["listen_fd"] = args.fd
ftp2http.start_ftp_server(**config)
if __name__ == "__main__":
main()
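# Typical flow, grounded in the options defined above: `ftp2http -a` prompts
# for a username and password and prints a `user: <name>:<bcrypt hash>` line
# to paste into the configuration file (default /etc/ftp2http.conf); running
# without -a reads that file and starts the server.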
|
apn-online/ftp2http
|
ftp2http/__main__.py
|
Python
|
mit
| 1,514
|
import time
import random
nums = []
for i in range(100):
    nums.append(int(random.random() * 10000))  # fills nums with 100 random ints in the range 0-9999
print "Before everything happens:", nums
start_time=time.time() #records start time of sorting
for index1 in range(1, len(nums)):
    for index2 in range(0, index1):
        if nums[index1] < nums[index2]:
            temp = nums[index1]
            del nums[index1]  # works much better than .remove(element) in this scenario
            nums.insert(index2, temp)
            break  # the prefix is already sorted, so no further shifts are needed
elapsed_time = time.time()-start_time #saves time elapsed for sorting
print "After everything happens:", nums
print 'Elapsed time taken for sorting (seconds): ' + str(elapsed_time)
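# Sanity check: the result should match Python's built-in sort.
assert nums == sorted(nums), "insertion sort produced an unsorted list"
print "Matches sorted():", nums == sorted(nums)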
|
jiobert/python
|
Tan_ShinYi/Assignments/Python_Fundamentals/Insertion_Sort.py
|
Python
|
mit
| 710
|
from sfa.rspecs.elements.element import Element
from sfa.rspecs.elements.pltag import PLTag
class NITOSv1PLTag:
@staticmethod
def add_pl_tag(xml, name, value):
        pl_tag_elem = xml.add_element(name)
        pl_tag_elem.set_text(value)
@staticmethod
def get_pl_tags(xml, ignore=None):
if ignore is None: ignore=[]
pl_tags = []
for elem in xml.iterchildren():
if elem.tag not in ignore:
pl_tag = PLTag({'tagname': elem.tag, 'value': elem.text})
pl_tags.append(pl_tag)
return pl_tags
|
yippeecw/sfa
|
sfa/rspecs/elements/versions/nitosv1PLTag.py
|
Python
|
mit
| 640
|
from __future__ import unicode_literals
import pytz
from django.contrib.auth.models import User
from django.http import Http404
from django.utils.translation import ugettext_lazy as _
from djblets.datagrid.grids import (
Column,
DateTimeColumn,
DataGrid as DjbletsDataGrid,
AlphanumericDataGrid as DjbletsAlphanumericDataGrid)
from djblets.util.templatetags.djblets_utils import ageid
from reviewboard.accounts.models import (LocalSiteProfile, Profile,
ReviewRequestVisit)
from reviewboard.datagrids.columns import (BugsColumn,
DateTimeSinceColumn,
DiffSizeColumn,
DiffUpdatedColumn,
DiffUpdatedSinceColumn,
GroupMemberCountColumn,
GroupsColumn,
MyCommentsColumn,
NewUpdatesColumn,
PendingCountColumn,
PeopleColumn,
RepositoryColumn,
ReviewCountColumn,
ReviewGroupStarColumn,
ReviewRequestCheckboxColumn,
ReviewRequestIDColumn,
ReviewRequestStarColumn,
ReviewSubmitterColumn,
ReviewSummaryColumn,
ShipItColumn,
SubmitterColumn,
SummaryColumn,
ToMeColumn)
from reviewboard.datagrids.sidebar import Sidebar, DataGridSidebarMixin
from reviewboard.datagrids.builtin_items import (IncomingSection,
OutgoingSection,
UserGroupsItem,
UserProfileItem)
from reviewboard.reviews.models import Group, ReviewRequest, Review
from reviewboard.site.urlresolvers import local_site_reverse
class ShowClosedReviewRequestsMixin(object):
"""A mixin for showing or hiding closed review requests."""
def load_extra_state(self, profile, allow_hide_closed=True):
"""Load extra state for the datagrid."""
if profile:
self.show_closed = profile.show_closed
try:
self.show_closed = (
int(self.request.GET.get('show-closed',
self.show_closed)) != 0)
except ValueError:
# do nothing
pass
if allow_hide_closed and not self.show_closed:
self.queryset = self.queryset.filter(**{
self.status_query_field: 'P',
})
self.queryset = self.queryset.filter(**{
self.site_query_field: self.local_site,
})
if profile and self.show_closed != profile.show_closed:
profile.show_closed = self.show_closed
return True
return False
class DataGridJSMixin(object):
"""Mixin that provides enhanced JavaScript support for datagrids.
This contains additional information on the JavaScript views/models
to load for the page, allowing for enhanced functionality in datagrids.
"""
#: A list of extra CSS static bundles to load on the page.
css_bundle_names = []
#: A list of extra JavaScript static bundles to load on the page.
js_bundle_names = []
#: The JavaScript Model to use for the page state.
js_model_class = 'RB.DatagridPage'
#: The JavaScript View to use for the page rendering.
js_view_class = 'RB.DatagridPageView'
#: Whether or not to periodically reload the contents of the datagrid.
periodic_reload = False
#: Extra data to pass to the JavaScript Model.
extra_js_model_data = None
class DataGrid(DataGridJSMixin, DjbletsDataGrid):
"""Base class for a datagrid in Review Board.
This contains additional information on JavaScript views/models
to load for the page.
"""
class AlphanumericDataGrid(DataGridJSMixin, DjbletsAlphanumericDataGrid):
"""Base class for an alphanumeric datagrid in Review Board.
This contains additional information on JavaScript views/models
to load for the page.
"""
class ReviewRequestDataGrid(ShowClosedReviewRequestsMixin, DataGrid):
"""A datagrid that displays a list of review requests.
This datagrid accepts the show_closed parameter in the URL, allowing
submitted review requests to be filtered out or displayed.
"""
new_updates = NewUpdatesColumn()
my_comments = MyCommentsColumn()
star = ReviewRequestStarColumn()
ship_it = ShipItColumn()
summary = SummaryColumn()
submitter = SubmitterColumn()
branch = Column(
label=_('Branch'),
db_field='branch',
shrink=True,
sortable=True,
link=False)
bugs_closed = BugsColumn()
repository = RepositoryColumn()
time_added = DateTimeColumn(
label=_('Posted'),
detailed_label=_('Posted Time'),
format='F jS, Y, P',
shrink=True,
css_class=lambda r: ageid(r.time_added))
last_updated = DateTimeColumn(
label=_('Last Updated'),
format='F jS, Y, P',
shrink=True,
db_field='last_updated',
field_name='last_updated',
css_class=lambda r: ageid(r.last_updated))
diff_updated = DiffUpdatedColumn(
format='F jS, Y, P',
shrink=True,
css_class=lambda r: ageid(r.diffset_history.last_diff_updated))
time_added_since = DateTimeSinceColumn(
label=_('Posted'),
detailed_label=_('Posted Time (Relative)'),
field_name='time_added', shrink=True,
css_class=lambda r: ageid(r.time_added))
last_updated_since = DateTimeSinceColumn(
label=_('Last Updated'),
detailed_label=_('Last Updated (Relative)'), shrink=True,
db_field='last_updated',
field_name='last_updated',
css_class=lambda r: ageid(r.last_updated))
diff_updated_since = DiffUpdatedSinceColumn(
detailed_label=_('Diff Updated (Relative)'),
shrink=True,
css_class=lambda r: ageid(r.diffset_history.last_diff_updated))
diff_size = DiffSizeColumn()
review_count = ReviewCountColumn()
target_groups = GroupsColumn()
target_people = PeopleColumn()
to_me = ToMeColumn()
review_id = ReviewRequestIDColumn()
status_query_field = 'status'
site_query_field = 'local_site'
def __init__(self, *args, **kwargs):
"""Initialize the datagrid."""
self.local_site = kwargs.pop('local_site', None)
super(ReviewRequestDataGrid, self).__init__(*args, **kwargs)
self.listview_template = 'datagrids/review_request_listview.html'
self.profile_sort_field = 'sort_review_request_columns'
self.profile_columns_field = 'review_request_columns'
self.show_closed = True
self.submitter_url_name = 'user'
self.default_sort = ['-last_updated']
self.default_columns = [
'star', 'summary', 'submitter', 'time_added', 'last_updated_since'
]
# Add local timezone info to the columns
user = self.request.user
if user.is_authenticated():
profile, is_new = Profile.objects.get_or_create(user=user)
self.timezone = pytz.timezone(profile.timezone)
self.time_added.timezone = self.timezone
self.last_updated.timezone = self.timezone
self.diff_updated.timezone = self.timezone
def load_extra_state(self, profile, allow_hide_closed=True):
"""Load extra state for the datagrid."""
return super(ReviewRequestDataGrid, self).load_extra_state(
profile, allow_hide_closed)
def post_process_queryset(self, queryset):
"""Add additional data to the queryset."""
q = queryset.with_counts(self.request.user)
return super(ReviewRequestDataGrid, self).post_process_queryset(q)
def link_to_object(self, state, obj, value):
"""Return a link to the given object."""
if value and isinstance(value, User):
return local_site_reverse('user', request=self.request,
args=[value])
return obj.get_absolute_url()
class ReviewDataGrid(ShowClosedReviewRequestsMixin, DataGrid):
"""A datagrid that displays a list of reviews.
This datagrid accepts the show_closed parameter in the URL, allowing
submitted review requests to be filtered out or displayed.
"""
timestamp = DateTimeColumn(
label=_('Date Reviewed'),
format='F jS, Y',
shrink=True)
submitter = ReviewSubmitterColumn()
review_summary = ReviewSummaryColumn()
status_query_field = 'review_request__status'
site_query_field = 'review_request__local_site'
def __init__(self, *args, **kwargs):
"""Initialize the datagrid."""
self.local_site = kwargs.pop('local_site', None)
super(ReviewDataGrid, self).__init__(*args, **kwargs)
self.listview_template = 'datagrids/review_request_listview.html'
self.profile_columns_field = 'review_columns'
self.show_closed = True
self.default_sort = ['-timestamp']
self.default_columns = [
'submitter', 'review_summary', 'timestamp',
]
# Add local timezone info to the columns
user = self.request.user
if user.is_authenticated():
profile, is_new = Profile.objects.get_or_create(user=user)
self.timezone = pytz.timezone(profile.timezone)
self.timestamp.timezone = self.timezone
class DashboardDataGrid(DataGridSidebarMixin, ReviewRequestDataGrid):
"""Displays the dashboard.
The dashboard is the main place where users see what review requests
are out there that may need their attention.
"""
new_updates = NewUpdatesColumn()
my_comments = MyCommentsColumn()
selected = ReviewRequestCheckboxColumn()
sidebar = Sidebar(
[
OutgoingSection,
IncomingSection,
],
default_view_id='incoming')
js_model_class = 'RB.Dashboard'
js_view_class = 'RB.DashboardView'
periodic_reload = True
def __init__(self, *args, **kwargs):
"""Initialize the datagrid."""
local_site = kwargs.get('local_site', None)
super(DashboardDataGrid, self).__init__(*args, **kwargs)
self.listview_template = 'datagrids/hideable_listview.html'
self.profile_sort_field = 'sort_dashboard_columns'
self.profile_columns_field = 'dashboard_columns'
self.default_view = 'incoming'
self.show_closed = False
self.show_archived = False
self.default_sort = ['-last_updated']
self.default_columns = [
'selected', 'new_updates', 'ship_it', 'my_comments', 'summary',
'submitter', 'last_updated_since'
]
self.extra_js_model_data = {
'show_archived': self.show_archived,
}
self.local_site = local_site
self.user = self.request.user
self.profile = Profile.objects.get_or_create(user=self.user)[0]
self.site_profile = LocalSiteProfile.objects.get_or_create(
user=self.user,
local_site=local_site,
profile=self.profile)[0]
def load_extra_state(self, profile):
"""Load extra state for the datagrid."""
group_name = self.request.GET.get('group', '')
view = self.request.GET.get('view', self.default_view)
user = self.request.user
if view == 'outgoing':
self.queryset = ReviewRequest.objects.from_user(
user, user, local_site=self.local_site)
self.title = _('All Outgoing Review Requests')
elif view == 'mine':
self.queryset = ReviewRequest.objects.from_user(
user, user, None, local_site=self.local_site)
self.title = _('All My Review Requests')
elif view == 'to-me':
self.queryset = ReviewRequest.objects.to_user_directly(
user, user, local_site=self.local_site)
self.title = _('Incoming Review Requests to Me')
elif view in ('to-group', 'to-watched-group'):
if group_name:
# to-group is special because we want to make sure that the
# group exists and show a 404 if it doesn't. Otherwise, we'll
# show an empty datagrid with the name.
try:
group = Group.objects.get(name=group_name,
local_site=self.local_site)
if not group.is_accessible_by(user):
raise Http404
except Group.DoesNotExist:
raise Http404
self.queryset = ReviewRequest.objects.to_group(
group_name, self.local_site, user)
self.title = _('Incoming Review Requests to %s') % group_name
else:
self.queryset = ReviewRequest.objects.to_user_groups(
user, user, local_site=self.local_site)
self.title = _('All Incoming Review Requests to My Groups')
elif view == 'starred':
self.queryset = self.profile.starred_review_requests.public(
user=user, local_site=self.local_site, status=None)
self.title = _('Starred Review Requests')
elif view == 'incoming':
self.queryset = ReviewRequest.objects.to_user(
user, user, local_site=self.local_site)
self.title = _('All Incoming Review Requests')
else:
raise Http404
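        # Illustrative request URLs for the dispatch above (the 'view' and
        # 'group' parameter names come from the GET lookups at the top of
        # this method; the '/dashboard/' prefix is a placeholder):
        #
        #     /dashboard/?view=outgoing
        #     /dashboard/?view=to-group&group=<group-name>
        #     /dashboard/?view=starred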
if profile and 'show_archived' in profile.extra_data:
self.show_archived = profile.extra_data['show_archived']
try:
show = self.request.GET.get('show-archived', self.show_archived)
self.show_archived = int(show) != 0
except ValueError:
pass
if not self.show_archived:
hidden_q = ReviewRequestVisit.objects.filter(
user=user).exclude(visibility=ReviewRequestVisit.VISIBLE)
hidden_q = hidden_q.values_list('review_request_id', flat=True)
self.queryset = self.queryset.exclude(pk__in=hidden_q)
if (profile and
self.show_archived != profile.extra_data.get('show_archived')):
profile.extra_data['show_archived'] = self.show_archived
profile_changed = True
else:
profile_changed = False
self.extra_js_model_data['show_archived'] = self.show_archived
parent_profile_changed = \
super(DashboardDataGrid, self).load_extra_state(
profile, allow_hide_closed=False)
return profile_changed or parent_profile_changed
class UsersDataGrid(AlphanumericDataGrid):
"""A datagrid showing a list of users registered on Review Board."""
username = Column(_('Username'), link=True, sortable=True)
fullname = Column(_('Full Name'), field_name='get_full_name',
link=True, expand=True)
pending_count = PendingCountColumn(_('Open Review Requests'),
field_name='directed_review_requests',
shrink=True)
def __init__(self, request,
queryset=User.objects.all(),
title=_('All users'),
local_site=None):
"""Initialize the datagrid."""
if local_site:
qs = queryset.filter(local_site=local_site)
else:
qs = queryset
super(UsersDataGrid, self).__init__(request, qs, title=title,
sortable_column='username',
                                            extra_regex=r'^[0-9_\-\.].*')
self.listview_template = 'datagrids/user_listview.html'
self.default_sort = ['username']
self.profile_sort_field = 'sort_submitter_columns'
self.profile_columns_field = 'submitter_columns'
self.default_columns = [
'username', 'fullname', 'pending_count'
]
self.show_inactive = False
def link_to_object(self, state, obj, value):
"""Return a link to the given object."""
return local_site_reverse('user', request=self.request,
args=[obj.username])
def load_extra_state(self, profile):
"""Load extra state for the datagrid.
This handles hiding or showing inactive users.
Args:
profile (reviewboard.accounts.models.Profile):
The user profile which contains some basic
configurable settings.
Returns:
bool:
Always returns False.
"""
show_inactive = self.request.GET.get('show-inactive', 0)
try:
self.show_inactive = int(show_inactive)
except ValueError:
pass
if not self.show_inactive:
self.queryset = self.queryset.filter(is_active=True)
return False
class GroupDataGrid(DataGrid):
"""A datagrid showing a list of review groups accessible by the user."""
star = ReviewGroupStarColumn()
name = Column(_('Group ID'), link=True, sortable=True)
displayname = Column(_('Group Name'), field_name='display_name',
link=True, expand=True)
pending_count = PendingCountColumn(_('Open Review Requests'),
field_name='review_requests',
link=True,
shrink=True)
member_count = GroupMemberCountColumn(_('Members'),
field_name='members',
shrink=True)
def __init__(self, request, title=_('All groups'), *args, **kwargs):
"""Initialize the datagrid."""
local_site = kwargs.pop('local_site', None)
queryset = Group.objects.accessible(request.user,
local_site=local_site)
super(GroupDataGrid, self).__init__(request, queryset=queryset,
title=title, *args, **kwargs)
self.profile_sort_field = 'sort_group_columns'
self.profile_columns_field = 'group_columns'
self.default_sort = ['name']
self.default_columns = [
'star', 'name', 'displayname', 'pending_count'
]
@staticmethod
def link_to_object(state, obj, value):
"""Return a link to the given object."""
return obj.get_absolute_url()
class UserPageDataGridMixin(DataGridSidebarMixin):
"""An abstract class for data grids on the user page.
This will display information about the user on the side.
"""
sidebar = Sidebar([
UserProfileItem,
UserGroupsItem,
])
class UserPageReviewRequestDataGrid(UserPageDataGridMixin,
ReviewRequestDataGrid):
"""A data grid for the user page.
This will show the review requests the user has out for review.
"""
tab_title = _('Review Requests')
def __init__(self, request, user, *args, **kwargs):
"""Initialize the datagrid."""
queryset = ReviewRequest.objects.from_user(
user.username,
user=request.user,
status=None,
with_counts=True,
local_site=kwargs.get('local_site'),
filter_private=True,
show_inactive=True)
super(UserPageReviewRequestDataGrid, self).__init__(
request,
queryset=queryset,
title=_("%s's Review Requests") % user.username,
*args, **kwargs)
self.groups = user.review_groups.accessible(request.user)
self.user = user
class UserPageReviewsDataGrid(UserPageDataGridMixin, ReviewDataGrid):
"""A data grid for the user page.
This will show reviews the user has made on other review requests.
"""
tab_title = _('Reviews')
def __init__(self, request, user, *args, **kwargs):
"""Initialize the datagrid."""
queryset = Review.objects.from_user(
user.username,
user=request.user,
public=True,
filter_private=True,
status=None,
local_site=kwargs.get('local_site'))
super(UserPageReviewsDataGrid, self).__init__(
request,
queryset=queryset,
title=_("%s's Reviews") % user.username,
*args, **kwargs)
self.groups = user.review_groups.accessible(request.user)
self.user = user
|
davidt/reviewboard
|
reviewboard/datagrids/grids.py
|
Python
|
mit
| 21,259
|
"""
Define URLs for api/v1.
"""
from django.conf.urls import url, include
from rest_framework_bulk.routes import BulkRouter
from seven23 import settings
from seven23.api.accounts.views import AccountsList
ROUTER = BulkRouter(trailing_slash=False)
ROUTER.register(r'^', AccountsList, basename='accounts')
urlpatterns = ROUTER.urls
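# A rough sketch of what this wires up (illustrative; the exact patterns
# depend on the installed djangorestframework / rest_framework_bulk
# versions): registering AccountsList at prefix r'^' with
# trailing_slash=False mounts the list route at the include() root, and
# BulkRouter additionally maps PUT/PATCH/DELETE on that route to the bulk
# update/delete actions alongside the usual GET (list) and POST (create).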
|
sebastienbarbier/723e_server
|
seven23/api/accounts/urls.py
|
Python
|
mit
| 337
|
#!/usr/bin/env python
""" Mappings from Adobe glyph names to Unicode characters.
In some CMap tables, Adobe glyph names are used for specifying
Unicode characters instead of using decimal/hex character code.
The following data was generated with:
$ wget https://partners.adobe.com/public/developer/en/opentype/glyphlist.txt
$ python tools/conv_glyphlist.py glyphlist.txt > glyphlist.py
"""
# ###################################################################################
# Copyright (c) 1997,1998,2002,2007 Adobe Systems Incorporated
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this documentation file to use, copy, publish, distribute,
# sublicense, and/or sell copies of the documentation, and to permit
# others to do the same, provided that:
# - No modification, editing or other alteration of this document is
# allowed; and
# - The above copyright notice and this permission notice shall be
# included in all copies of the documentation.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this documentation file, to create their own derivative works
# from the content of this document to use, copy, publish, distribute,
# sublicense, and/or sell the derivative works, and to permit others to do
# the same, provided that the derived work is not represented as being a
# copy or version of this document.
#
# Adobe shall not be liable to any party for any loss of revenue or profit
# or for indirect, incidental, special, consequential, or other similar
# damages, whether based on tort (including without limitation negligence
# or strict liability), contract or other legal or equitable grounds even
# if Adobe has been advised or had reason to know of the possibility of
# such damages. The Adobe materials are provided on an "AS IS" basis.
# Adobe specifically disclaims all express, statutory, or implied
# warranties relating to the Adobe materials, including but not limited to
# those concerning merchantability or fitness for a particular purpose or
# non-infringement of any third party rights regarding the Adobe
# materials.
# ###################################################################################
# Name: Adobe Glyph List
# Table version: 2.0
# Date: September 20, 2002
#
# See http://partners.adobe.com/asn/developer/typeforum/unicodegn.html
#
# Format: Semicolon-delimited fields:
# (1) glyph name
# (2) Unicode scalar value
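# A sample glyphlist.txt line and the mapping it becomes in this module
# (illustrative; names with multiple scalar values map to multi-character
# strings, e.g. 'dalethatafpatah' below):
#
#     Aacute;00C1   ->   glyphname2unicode['Aacute'] == u'\u00C1'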
glyphname2unicode = {
'A': u'\u0041',
'AE': u'\u00C6',
'AEacute': u'\u01FC',
'AEmacron': u'\u01E2',
'AEsmall': u'\uF7E6',
'Aacute': u'\u00C1',
'Aacutesmall': u'\uF7E1',
'Abreve': u'\u0102',
'Abreveacute': u'\u1EAE',
'Abrevecyrillic': u'\u04D0',
'Abrevedotbelow': u'\u1EB6',
'Abrevegrave': u'\u1EB0',
'Abrevehookabove': u'\u1EB2',
'Abrevetilde': u'\u1EB4',
'Acaron': u'\u01CD',
'Acircle': u'\u24B6',
'Acircumflex': u'\u00C2',
'Acircumflexacute': u'\u1EA4',
'Acircumflexdotbelow': u'\u1EAC',
'Acircumflexgrave': u'\u1EA6',
'Acircumflexhookabove': u'\u1EA8',
'Acircumflexsmall': u'\uF7E2',
'Acircumflextilde': u'\u1EAA',
'Acute': u'\uF6C9',
'Acutesmall': u'\uF7B4',
'Acyrillic': u'\u0410',
'Adblgrave': u'\u0200',
'Adieresis': u'\u00C4',
'Adieresiscyrillic': u'\u04D2',
'Adieresismacron': u'\u01DE',
'Adieresissmall': u'\uF7E4',
'Adotbelow': u'\u1EA0',
'Adotmacron': u'\u01E0',
'Agrave': u'\u00C0',
'Agravesmall': u'\uF7E0',
'Ahookabove': u'\u1EA2',
'Aiecyrillic': u'\u04D4',
'Ainvertedbreve': u'\u0202',
'Alpha': u'\u0391',
'Alphatonos': u'\u0386',
'Amacron': u'\u0100',
'Amonospace': u'\uFF21',
'Aogonek': u'\u0104',
'Aring': u'\u00C5',
'Aringacute': u'\u01FA',
'Aringbelow': u'\u1E00',
'Aringsmall': u'\uF7E5',
'Asmall': u'\uF761',
'Atilde': u'\u00C3',
'Atildesmall': u'\uF7E3',
'Aybarmenian': u'\u0531',
'B': u'\u0042',
'Bcircle': u'\u24B7',
'Bdotaccent': u'\u1E02',
'Bdotbelow': u'\u1E04',
'Becyrillic': u'\u0411',
'Benarmenian': u'\u0532',
'Beta': u'\u0392',
'Bhook': u'\u0181',
'Blinebelow': u'\u1E06',
'Bmonospace': u'\uFF22',
'Brevesmall': u'\uF6F4',
'Bsmall': u'\uF762',
'Btopbar': u'\u0182',
'C': u'\u0043',
'Caarmenian': u'\u053E',
'Cacute': u'\u0106',
'Caron': u'\uF6CA',
'Caronsmall': u'\uF6F5',
'Ccaron': u'\u010C',
'Ccedilla': u'\u00C7',
'Ccedillaacute': u'\u1E08',
'Ccedillasmall': u'\uF7E7',
'Ccircle': u'\u24B8',
'Ccircumflex': u'\u0108',
'Cdot': u'\u010A',
'Cdotaccent': u'\u010A',
'Cedillasmall': u'\uF7B8',
'Chaarmenian': u'\u0549',
'Cheabkhasiancyrillic': u'\u04BC',
'Checyrillic': u'\u0427',
'Chedescenderabkhasiancyrillic': u'\u04BE',
'Chedescendercyrillic': u'\u04B6',
'Chedieresiscyrillic': u'\u04F4',
'Cheharmenian': u'\u0543',
'Chekhakassiancyrillic': u'\u04CB',
'Cheverticalstrokecyrillic': u'\u04B8',
'Chi': u'\u03A7',
'Chook': u'\u0187',
'Circumflexsmall': u'\uF6F6',
'Cmonospace': u'\uFF23',
'Coarmenian': u'\u0551',
'Csmall': u'\uF763',
'D': u'\u0044',
'DZ': u'\u01F1',
'DZcaron': u'\u01C4',
'Daarmenian': u'\u0534',
'Dafrican': u'\u0189',
'Dcaron': u'\u010E',
'Dcedilla': u'\u1E10',
'Dcircle': u'\u24B9',
'Dcircumflexbelow': u'\u1E12',
'Dcroat': u'\u0110',
'Ddotaccent': u'\u1E0A',
'Ddotbelow': u'\u1E0C',
'Decyrillic': u'\u0414',
'Deicoptic': u'\u03EE',
'Delta': u'\u2206',
'Deltagreek': u'\u0394',
'Dhook': u'\u018A',
'Dieresis': u'\uF6CB',
'DieresisAcute': u'\uF6CC',
'DieresisGrave': u'\uF6CD',
'Dieresissmall': u'\uF7A8',
'Digammagreek': u'\u03DC',
'Djecyrillic': u'\u0402',
'Dlinebelow': u'\u1E0E',
'Dmonospace': u'\uFF24',
'Dotaccentsmall': u'\uF6F7',
'Dslash': u'\u0110',
'Dsmall': u'\uF764',
'Dtopbar': u'\u018B',
'Dz': u'\u01F2',
'Dzcaron': u'\u01C5',
'Dzeabkhasiancyrillic': u'\u04E0',
'Dzecyrillic': u'\u0405',
'Dzhecyrillic': u'\u040F',
'E': u'\u0045',
'Eacute': u'\u00C9',
'Eacutesmall': u'\uF7E9',
'Ebreve': u'\u0114',
'Ecaron': u'\u011A',
'Ecedillabreve': u'\u1E1C',
'Echarmenian': u'\u0535',
'Ecircle': u'\u24BA',
'Ecircumflex': u'\u00CA',
'Ecircumflexacute': u'\u1EBE',
'Ecircumflexbelow': u'\u1E18',
'Ecircumflexdotbelow': u'\u1EC6',
'Ecircumflexgrave': u'\u1EC0',
'Ecircumflexhookabove': u'\u1EC2',
'Ecircumflexsmall': u'\uF7EA',
'Ecircumflextilde': u'\u1EC4',
'Ecyrillic': u'\u0404',
'Edblgrave': u'\u0204',
'Edieresis': u'\u00CB',
'Edieresissmall': u'\uF7EB',
'Edot': u'\u0116',
'Edotaccent': u'\u0116',
'Edotbelow': u'\u1EB8',
'Efcyrillic': u'\u0424',
'Egrave': u'\u00C8',
'Egravesmall': u'\uF7E8',
'Eharmenian': u'\u0537',
'Ehookabove': u'\u1EBA',
'Eightroman': u'\u2167',
'Einvertedbreve': u'\u0206',
'Eiotifiedcyrillic': u'\u0464',
'Elcyrillic': u'\u041B',
'Elevenroman': u'\u216A',
'Emacron': u'\u0112',
'Emacronacute': u'\u1E16',
'Emacrongrave': u'\u1E14',
'Emcyrillic': u'\u041C',
'Emonospace': u'\uFF25',
'Encyrillic': u'\u041D',
'Endescendercyrillic': u'\u04A2',
'Eng': u'\u014A',
'Enghecyrillic': u'\u04A4',
'Enhookcyrillic': u'\u04C7',
'Eogonek': u'\u0118',
'Eopen': u'\u0190',
'Epsilon': u'\u0395',
'Epsilontonos': u'\u0388',
'Ercyrillic': u'\u0420',
'Ereversed': u'\u018E',
'Ereversedcyrillic': u'\u042D',
'Escyrillic': u'\u0421',
'Esdescendercyrillic': u'\u04AA',
'Esh': u'\u01A9',
'Esmall': u'\uF765',
'Eta': u'\u0397',
'Etarmenian': u'\u0538',
'Etatonos': u'\u0389',
'Eth': u'\u00D0',
'Ethsmall': u'\uF7F0',
'Etilde': u'\u1EBC',
'Etildebelow': u'\u1E1A',
'Euro': u'\u20AC',
'Ezh': u'\u01B7',
'Ezhcaron': u'\u01EE',
'Ezhreversed': u'\u01B8',
'F': u'\u0046',
'Fcircle': u'\u24BB',
'Fdotaccent': u'\u1E1E',
'Feharmenian': u'\u0556',
'Feicoptic': u'\u03E4',
'Fhook': u'\u0191',
'Fitacyrillic': u'\u0472',
'Fiveroman': u'\u2164',
'Fmonospace': u'\uFF26',
'Fourroman': u'\u2163',
'Fsmall': u'\uF766',
'G': u'\u0047',
'GBsquare': u'\u3387',
'Gacute': u'\u01F4',
'Gamma': u'\u0393',
'Gammaafrican': u'\u0194',
'Gangiacoptic': u'\u03EA',
'Gbreve': u'\u011E',
'Gcaron': u'\u01E6',
'Gcedilla': u'\u0122',
'Gcircle': u'\u24BC',
'Gcircumflex': u'\u011C',
'Gcommaaccent': u'\u0122',
'Gdot': u'\u0120',
'Gdotaccent': u'\u0120',
'Gecyrillic': u'\u0413',
'Ghadarmenian': u'\u0542',
'Ghemiddlehookcyrillic': u'\u0494',
'Ghestrokecyrillic': u'\u0492',
'Gheupturncyrillic': u'\u0490',
'Ghook': u'\u0193',
'Gimarmenian': u'\u0533',
'Gjecyrillic': u'\u0403',
'Gmacron': u'\u1E20',
'Gmonospace': u'\uFF27',
'Grave': u'\uF6CE',
'Gravesmall': u'\uF760',
'Gsmall': u'\uF767',
'Gsmallhook': u'\u029B',
'Gstroke': u'\u01E4',
'H': u'\u0048',
'H18533': u'\u25CF',
'H18543': u'\u25AA',
'H18551': u'\u25AB',
'H22073': u'\u25A1',
'HPsquare': u'\u33CB',
'Haabkhasiancyrillic': u'\u04A8',
'Hadescendercyrillic': u'\u04B2',
'Hardsigncyrillic': u'\u042A',
'Hbar': u'\u0126',
'Hbrevebelow': u'\u1E2A',
'Hcedilla': u'\u1E28',
'Hcircle': u'\u24BD',
'Hcircumflex': u'\u0124',
'Hdieresis': u'\u1E26',
'Hdotaccent': u'\u1E22',
'Hdotbelow': u'\u1E24',
'Hmonospace': u'\uFF28',
'Hoarmenian': u'\u0540',
'Horicoptic': u'\u03E8',
'Hsmall': u'\uF768',
'Hungarumlaut': u'\uF6CF',
'Hungarumlautsmall': u'\uF6F8',
'Hzsquare': u'\u3390',
'I': u'\u0049',
'IAcyrillic': u'\u042F',
'IJ': u'\u0132',
'IUcyrillic': u'\u042E',
'Iacute': u'\u00CD',
'Iacutesmall': u'\uF7ED',
'Ibreve': u'\u012C',
'Icaron': u'\u01CF',
'Icircle': u'\u24BE',
'Icircumflex': u'\u00CE',
'Icircumflexsmall': u'\uF7EE',
'Icyrillic': u'\u0406',
'Idblgrave': u'\u0208',
'Idieresis': u'\u00CF',
'Idieresisacute': u'\u1E2E',
'Idieresiscyrillic': u'\u04E4',
'Idieresissmall': u'\uF7EF',
'Idot': u'\u0130',
'Idotaccent': u'\u0130',
'Idotbelow': u'\u1ECA',
'Iebrevecyrillic': u'\u04D6',
'Iecyrillic': u'\u0415',
'Ifraktur': u'\u2111',
'Igrave': u'\u00CC',
'Igravesmall': u'\uF7EC',
'Ihookabove': u'\u1EC8',
'Iicyrillic': u'\u0418',
'Iinvertedbreve': u'\u020A',
'Iishortcyrillic': u'\u0419',
'Imacron': u'\u012A',
'Imacroncyrillic': u'\u04E2',
'Imonospace': u'\uFF29',
'Iniarmenian': u'\u053B',
'Iocyrillic': u'\u0401',
'Iogonek': u'\u012E',
'Iota': u'\u0399',
'Iotaafrican': u'\u0196',
'Iotadieresis': u'\u03AA',
'Iotatonos': u'\u038A',
'Ismall': u'\uF769',
'Istroke': u'\u0197',
'Itilde': u'\u0128',
'Itildebelow': u'\u1E2C',
'Izhitsacyrillic': u'\u0474',
'Izhitsadblgravecyrillic': u'\u0476',
'J': u'\u004A',
'Jaarmenian': u'\u0541',
'Jcircle': u'\u24BF',
'Jcircumflex': u'\u0134',
'Jecyrillic': u'\u0408',
'Jheharmenian': u'\u054B',
'Jmonospace': u'\uFF2A',
'Jsmall': u'\uF76A',
'K': u'\u004B',
'KBsquare': u'\u3385',
'KKsquare': u'\u33CD',
'Kabashkircyrillic': u'\u04A0',
'Kacute': u'\u1E30',
'Kacyrillic': u'\u041A',
'Kadescendercyrillic': u'\u049A',
'Kahookcyrillic': u'\u04C3',
'Kappa': u'\u039A',
'Kastrokecyrillic': u'\u049E',
'Kaverticalstrokecyrillic': u'\u049C',
'Kcaron': u'\u01E8',
'Kcedilla': u'\u0136',
'Kcircle': u'\u24C0',
'Kcommaaccent': u'\u0136',
'Kdotbelow': u'\u1E32',
'Keharmenian': u'\u0554',
'Kenarmenian': u'\u053F',
'Khacyrillic': u'\u0425',
'Kheicoptic': u'\u03E6',
'Khook': u'\u0198',
'Kjecyrillic': u'\u040C',
'Klinebelow': u'\u1E34',
'Kmonospace': u'\uFF2B',
'Koppacyrillic': u'\u0480',
'Koppagreek': u'\u03DE',
'Ksicyrillic': u'\u046E',
'Ksmall': u'\uF76B',
'L': u'\u004C',
'LJ': u'\u01C7',
'LL': u'\uF6BF',
'Lacute': u'\u0139',
'Lambda': u'\u039B',
'Lcaron': u'\u013D',
'Lcedilla': u'\u013B',
'Lcircle': u'\u24C1',
'Lcircumflexbelow': u'\u1E3C',
'Lcommaaccent': u'\u013B',
'Ldot': u'\u013F',
'Ldotaccent': u'\u013F',
'Ldotbelow': u'\u1E36',
'Ldotbelowmacron': u'\u1E38',
'Liwnarmenian': u'\u053C',
'Lj': u'\u01C8',
'Ljecyrillic': u'\u0409',
'Llinebelow': u'\u1E3A',
'Lmonospace': u'\uFF2C',
'Lslash': u'\u0141',
'Lslashsmall': u'\uF6F9',
'Lsmall': u'\uF76C',
'M': u'\u004D',
'MBsquare': u'\u3386',
'Macron': u'\uF6D0',
'Macronsmall': u'\uF7AF',
'Macute': u'\u1E3E',
'Mcircle': u'\u24C2',
'Mdotaccent': u'\u1E40',
'Mdotbelow': u'\u1E42',
'Menarmenian': u'\u0544',
'Mmonospace': u'\uFF2D',
'Msmall': u'\uF76D',
'Mturned': u'\u019C',
'Mu': u'\u039C',
'N': u'\u004E',
'NJ': u'\u01CA',
'Nacute': u'\u0143',
'Ncaron': u'\u0147',
'Ncedilla': u'\u0145',
'Ncircle': u'\u24C3',
'Ncircumflexbelow': u'\u1E4A',
'Ncommaaccent': u'\u0145',
'Ndotaccent': u'\u1E44',
'Ndotbelow': u'\u1E46',
'Nhookleft': u'\u019D',
'Nineroman': u'\u2168',
'Nj': u'\u01CB',
'Njecyrillic': u'\u040A',
'Nlinebelow': u'\u1E48',
'Nmonospace': u'\uFF2E',
'Nowarmenian': u'\u0546',
'Nsmall': u'\uF76E',
'Ntilde': u'\u00D1',
'Ntildesmall': u'\uF7F1',
'Nu': u'\u039D',
'O': u'\u004F',
'OE': u'\u0152',
'OEsmall': u'\uF6FA',
'Oacute': u'\u00D3',
'Oacutesmall': u'\uF7F3',
'Obarredcyrillic': u'\u04E8',
'Obarreddieresiscyrillic': u'\u04EA',
'Obreve': u'\u014E',
'Ocaron': u'\u01D1',
'Ocenteredtilde': u'\u019F',
'Ocircle': u'\u24C4',
'Ocircumflex': u'\u00D4',
'Ocircumflexacute': u'\u1ED0',
'Ocircumflexdotbelow': u'\u1ED8',
'Ocircumflexgrave': u'\u1ED2',
'Ocircumflexhookabove': u'\u1ED4',
'Ocircumflexsmall': u'\uF7F4',
'Ocircumflextilde': u'\u1ED6',
'Ocyrillic': u'\u041E',
'Odblacute': u'\u0150',
'Odblgrave': u'\u020C',
'Odieresis': u'\u00D6',
'Odieresiscyrillic': u'\u04E6',
'Odieresissmall': u'\uF7F6',
'Odotbelow': u'\u1ECC',
'Ogoneksmall': u'\uF6FB',
'Ograve': u'\u00D2',
'Ogravesmall': u'\uF7F2',
'Oharmenian': u'\u0555',
'Ohm': u'\u2126',
'Ohookabove': u'\u1ECE',
'Ohorn': u'\u01A0',
'Ohornacute': u'\u1EDA',
'Ohorndotbelow': u'\u1EE2',
'Ohorngrave': u'\u1EDC',
'Ohornhookabove': u'\u1EDE',
'Ohorntilde': u'\u1EE0',
'Ohungarumlaut': u'\u0150',
'Oi': u'\u01A2',
'Oinvertedbreve': u'\u020E',
'Omacron': u'\u014C',
'Omacronacute': u'\u1E52',
'Omacrongrave': u'\u1E50',
'Omega': u'\u2126',
'Omegacyrillic': u'\u0460',
'Omegagreek': u'\u03A9',
'Omegaroundcyrillic': u'\u047A',
'Omegatitlocyrillic': u'\u047C',
'Omegatonos': u'\u038F',
'Omicron': u'\u039F',
'Omicrontonos': u'\u038C',
'Omonospace': u'\uFF2F',
'Oneroman': u'\u2160',
'Oogonek': u'\u01EA',
'Oogonekmacron': u'\u01EC',
'Oopen': u'\u0186',
'Oslash': u'\u00D8',
'Oslashacute': u'\u01FE',
'Oslashsmall': u'\uF7F8',
'Osmall': u'\uF76F',
'Ostrokeacute': u'\u01FE',
'Otcyrillic': u'\u047E',
'Otilde': u'\u00D5',
'Otildeacute': u'\u1E4C',
'Otildedieresis': u'\u1E4E',
'Otildesmall': u'\uF7F5',
'P': u'\u0050',
'Pacute': u'\u1E54',
'Pcircle': u'\u24C5',
'Pdotaccent': u'\u1E56',
'Pecyrillic': u'\u041F',
'Peharmenian': u'\u054A',
'Pemiddlehookcyrillic': u'\u04A6',
'Phi': u'\u03A6',
'Phook': u'\u01A4',
'Pi': u'\u03A0',
'Piwrarmenian': u'\u0553',
'Pmonospace': u'\uFF30',
'Psi': u'\u03A8',
'Psicyrillic': u'\u0470',
'Psmall': u'\uF770',
'Q': u'\u0051',
'Qcircle': u'\u24C6',
'Qmonospace': u'\uFF31',
'Qsmall': u'\uF771',
'R': u'\u0052',
'Raarmenian': u'\u054C',
'Racute': u'\u0154',
'Rcaron': u'\u0158',
'Rcedilla': u'\u0156',
'Rcircle': u'\u24C7',
'Rcommaaccent': u'\u0156',
'Rdblgrave': u'\u0210',
'Rdotaccent': u'\u1E58',
'Rdotbelow': u'\u1E5A',
'Rdotbelowmacron': u'\u1E5C',
'Reharmenian': u'\u0550',
'Rfraktur': u'\u211C',
'Rho': u'\u03A1',
'Ringsmall': u'\uF6FC',
'Rinvertedbreve': u'\u0212',
'Rlinebelow': u'\u1E5E',
'Rmonospace': u'\uFF32',
'Rsmall': u'\uF772',
'Rsmallinverted': u'\u0281',
'Rsmallinvertedsuperior': u'\u02B6',
'S': u'\u0053',
'SF010000': u'\u250C',
'SF020000': u'\u2514',
'SF030000': u'\u2510',
'SF040000': u'\u2518',
'SF050000': u'\u253C',
'SF060000': u'\u252C',
'SF070000': u'\u2534',
'SF080000': u'\u251C',
'SF090000': u'\u2524',
'SF100000': u'\u2500',
'SF110000': u'\u2502',
'SF190000': u'\u2561',
'SF200000': u'\u2562',
'SF210000': u'\u2556',
'SF220000': u'\u2555',
'SF230000': u'\u2563',
'SF240000': u'\u2551',
'SF250000': u'\u2557',
'SF260000': u'\u255D',
'SF270000': u'\u255C',
'SF280000': u'\u255B',
'SF360000': u'\u255E',
'SF370000': u'\u255F',
'SF380000': u'\u255A',
'SF390000': u'\u2554',
'SF400000': u'\u2569',
'SF410000': u'\u2566',
'SF420000': u'\u2560',
'SF430000': u'\u2550',
'SF440000': u'\u256C',
'SF450000': u'\u2567',
'SF460000': u'\u2568',
'SF470000': u'\u2564',
'SF480000': u'\u2565',
'SF490000': u'\u2559',
'SF500000': u'\u2558',
'SF510000': u'\u2552',
'SF520000': u'\u2553',
'SF530000': u'\u256B',
'SF540000': u'\u256A',
'Sacute': u'\u015A',
'Sacutedotaccent': u'\u1E64',
'Sampigreek': u'\u03E0',
'Scaron': u'\u0160',
'Scarondotaccent': u'\u1E66',
'Scaronsmall': u'\uF6FD',
'Scedilla': u'\u015E',
'Schwa': u'\u018F',
'Schwacyrillic': u'\u04D8',
'Schwadieresiscyrillic': u'\u04DA',
'Scircle': u'\u24C8',
'Scircumflex': u'\u015C',
'Scommaaccent': u'\u0218',
'Sdotaccent': u'\u1E60',
'Sdotbelow': u'\u1E62',
'Sdotbelowdotaccent': u'\u1E68',
'Seharmenian': u'\u054D',
'Sevenroman': u'\u2166',
'Shaarmenian': u'\u0547',
'Shacyrillic': u'\u0428',
'Shchacyrillic': u'\u0429',
'Sheicoptic': u'\u03E2',
'Shhacyrillic': u'\u04BA',
'Shimacoptic': u'\u03EC',
'Sigma': u'\u03A3',
'Sixroman': u'\u2165',
'Smonospace': u'\uFF33',
'Softsigncyrillic': u'\u042C',
'Ssmall': u'\uF773',
'Stigmagreek': u'\u03DA',
'T': u'\u0054',
'Tau': u'\u03A4',
'Tbar': u'\u0166',
'Tcaron': u'\u0164',
'Tcedilla': u'\u0162',
'Tcircle': u'\u24C9',
'Tcircumflexbelow': u'\u1E70',
'Tcommaaccent': u'\u0162',
'Tdotaccent': u'\u1E6A',
'Tdotbelow': u'\u1E6C',
'Tecyrillic': u'\u0422',
'Tedescendercyrillic': u'\u04AC',
'Tenroman': u'\u2169',
'Tetsecyrillic': u'\u04B4',
'Theta': u'\u0398',
'Thook': u'\u01AC',
'Thorn': u'\u00DE',
'Thornsmall': u'\uF7FE',
'Threeroman': u'\u2162',
'Tildesmall': u'\uF6FE',
'Tiwnarmenian': u'\u054F',
'Tlinebelow': u'\u1E6E',
'Tmonospace': u'\uFF34',
'Toarmenian': u'\u0539',
'Tonefive': u'\u01BC',
'Tonesix': u'\u0184',
'Tonetwo': u'\u01A7',
'Tretroflexhook': u'\u01AE',
'Tsecyrillic': u'\u0426',
'Tshecyrillic': u'\u040B',
'Tsmall': u'\uF774',
'Twelveroman': u'\u216B',
'Tworoman': u'\u2161',
'U': u'\u0055',
'Uacute': u'\u00DA',
'Uacutesmall': u'\uF7FA',
'Ubreve': u'\u016C',
'Ucaron': u'\u01D3',
'Ucircle': u'\u24CA',
'Ucircumflex': u'\u00DB',
'Ucircumflexbelow': u'\u1E76',
'Ucircumflexsmall': u'\uF7FB',
'Ucyrillic': u'\u0423',
'Udblacute': u'\u0170',
'Udblgrave': u'\u0214',
'Udieresis': u'\u00DC',
'Udieresisacute': u'\u01D7',
'Udieresisbelow': u'\u1E72',
'Udieresiscaron': u'\u01D9',
'Udieresiscyrillic': u'\u04F0',
'Udieresisgrave': u'\u01DB',
'Udieresismacron': u'\u01D5',
'Udieresissmall': u'\uF7FC',
'Udotbelow': u'\u1EE4',
'Ugrave': u'\u00D9',
'Ugravesmall': u'\uF7F9',
'Uhookabove': u'\u1EE6',
'Uhorn': u'\u01AF',
'Uhornacute': u'\u1EE8',
'Uhorndotbelow': u'\u1EF0',
'Uhorngrave': u'\u1EEA',
'Uhornhookabove': u'\u1EEC',
'Uhorntilde': u'\u1EEE',
'Uhungarumlaut': u'\u0170',
'Uhungarumlautcyrillic': u'\u04F2',
'Uinvertedbreve': u'\u0216',
'Ukcyrillic': u'\u0478',
'Umacron': u'\u016A',
'Umacroncyrillic': u'\u04EE',
'Umacrondieresis': u'\u1E7A',
'Umonospace': u'\uFF35',
'Uogonek': u'\u0172',
'Upsilon': u'\u03A5',
'Upsilon1': u'\u03D2',
'Upsilonacutehooksymbolgreek': u'\u03D3',
'Upsilonafrican': u'\u01B1',
'Upsilondieresis': u'\u03AB',
'Upsilondieresishooksymbolgreek': u'\u03D4',
'Upsilonhooksymbol': u'\u03D2',
'Upsilontonos': u'\u038E',
'Uring': u'\u016E',
'Ushortcyrillic': u'\u040E',
'Usmall': u'\uF775',
'Ustraightcyrillic': u'\u04AE',
'Ustraightstrokecyrillic': u'\u04B0',
'Utilde': u'\u0168',
'Utildeacute': u'\u1E78',
'Utildebelow': u'\u1E74',
'V': u'\u0056',
'Vcircle': u'\u24CB',
'Vdotbelow': u'\u1E7E',
'Vecyrillic': u'\u0412',
'Vewarmenian': u'\u054E',
'Vhook': u'\u01B2',
'Vmonospace': u'\uFF36',
'Voarmenian': u'\u0548',
'Vsmall': u'\uF776',
'Vtilde': u'\u1E7C',
'W': u'\u0057',
'Wacute': u'\u1E82',
'Wcircle': u'\u24CC',
'Wcircumflex': u'\u0174',
'Wdieresis': u'\u1E84',
'Wdotaccent': u'\u1E86',
'Wdotbelow': u'\u1E88',
'Wgrave': u'\u1E80',
'Wmonospace': u'\uFF37',
'Wsmall': u'\uF777',
'X': u'\u0058',
'Xcircle': u'\u24CD',
'Xdieresis': u'\u1E8C',
'Xdotaccent': u'\u1E8A',
'Xeharmenian': u'\u053D',
'Xi': u'\u039E',
'Xmonospace': u'\uFF38',
'Xsmall': u'\uF778',
'Y': u'\u0059',
'Yacute': u'\u00DD',
'Yacutesmall': u'\uF7FD',
'Yatcyrillic': u'\u0462',
'Ycircle': u'\u24CE',
'Ycircumflex': u'\u0176',
'Ydieresis': u'\u0178',
'Ydieresissmall': u'\uF7FF',
'Ydotaccent': u'\u1E8E',
'Ydotbelow': u'\u1EF4',
'Yericyrillic': u'\u042B',
'Yerudieresiscyrillic': u'\u04F8',
'Ygrave': u'\u1EF2',
'Yhook': u'\u01B3',
'Yhookabove': u'\u1EF6',
'Yiarmenian': u'\u0545',
'Yicyrillic': u'\u0407',
'Yiwnarmenian': u'\u0552',
'Ymonospace': u'\uFF39',
'Ysmall': u'\uF779',
'Ytilde': u'\u1EF8',
'Yusbigcyrillic': u'\u046A',
'Yusbigiotifiedcyrillic': u'\u046C',
'Yuslittlecyrillic': u'\u0466',
'Yuslittleiotifiedcyrillic': u'\u0468',
'Z': u'\u005A',
'Zaarmenian': u'\u0536',
'Zacute': u'\u0179',
'Zcaron': u'\u017D',
'Zcaronsmall': u'\uF6FF',
'Zcircle': u'\u24CF',
'Zcircumflex': u'\u1E90',
'Zdot': u'\u017B',
'Zdotaccent': u'\u017B',
'Zdotbelow': u'\u1E92',
'Zecyrillic': u'\u0417',
'Zedescendercyrillic': u'\u0498',
'Zedieresiscyrillic': u'\u04DE',
'Zeta': u'\u0396',
'Zhearmenian': u'\u053A',
'Zhebrevecyrillic': u'\u04C1',
'Zhecyrillic': u'\u0416',
'Zhedescendercyrillic': u'\u0496',
'Zhedieresiscyrillic': u'\u04DC',
'Zlinebelow': u'\u1E94',
'Zmonospace': u'\uFF3A',
'Zsmall': u'\uF77A',
'Zstroke': u'\u01B5',
'a': u'\u0061',
'aabengali': u'\u0986',
'aacute': u'\u00E1',
'aadeva': u'\u0906',
'aagujarati': u'\u0A86',
'aagurmukhi': u'\u0A06',
'aamatragurmukhi': u'\u0A3E',
'aarusquare': u'\u3303',
'aavowelsignbengali': u'\u09BE',
'aavowelsigndeva': u'\u093E',
'aavowelsigngujarati': u'\u0ABE',
'abbreviationmarkarmenian': u'\u055F',
'abbreviationsigndeva': u'\u0970',
'abengali': u'\u0985',
'abopomofo': u'\u311A',
'abreve': u'\u0103',
'abreveacute': u'\u1EAF',
'abrevecyrillic': u'\u04D1',
'abrevedotbelow': u'\u1EB7',
'abrevegrave': u'\u1EB1',
'abrevehookabove': u'\u1EB3',
'abrevetilde': u'\u1EB5',
'acaron': u'\u01CE',
'acircle': u'\u24D0',
'acircumflex': u'\u00E2',
'acircumflexacute': u'\u1EA5',
'acircumflexdotbelow': u'\u1EAD',
'acircumflexgrave': u'\u1EA7',
'acircumflexhookabove': u'\u1EA9',
'acircumflextilde': u'\u1EAB',
'acute': u'\u00B4',
'acutebelowcmb': u'\u0317',
'acutecmb': u'\u0301',
'acutecomb': u'\u0301',
'acutedeva': u'\u0954',
'acutelowmod': u'\u02CF',
'acutetonecmb': u'\u0341',
'acyrillic': u'\u0430',
'adblgrave': u'\u0201',
'addakgurmukhi': u'\u0A71',
'adeva': u'\u0905',
'adieresis': u'\u00E4',
'adieresiscyrillic': u'\u04D3',
'adieresismacron': u'\u01DF',
'adotbelow': u'\u1EA1',
'adotmacron': u'\u01E1',
'ae': u'\u00E6',
'aeacute': u'\u01FD',
'aekorean': u'\u3150',
'aemacron': u'\u01E3',
'afii00208': u'\u2015',
'afii08941': u'\u20A4',
'afii10017': u'\u0410',
'afii10018': u'\u0411',
'afii10019': u'\u0412',
'afii10020': u'\u0413',
'afii10021': u'\u0414',
'afii10022': u'\u0415',
'afii10023': u'\u0401',
'afii10024': u'\u0416',
'afii10025': u'\u0417',
'afii10026': u'\u0418',
'afii10027': u'\u0419',
'afii10028': u'\u041A',
'afii10029': u'\u041B',
'afii10030': u'\u041C',
'afii10031': u'\u041D',
'afii10032': u'\u041E',
'afii10033': u'\u041F',
'afii10034': u'\u0420',
'afii10035': u'\u0421',
'afii10036': u'\u0422',
'afii10037': u'\u0423',
'afii10038': u'\u0424',
'afii10039': u'\u0425',
'afii10040': u'\u0426',
'afii10041': u'\u0427',
'afii10042': u'\u0428',
'afii10043': u'\u0429',
'afii10044': u'\u042A',
'afii10045': u'\u042B',
'afii10046': u'\u042C',
'afii10047': u'\u042D',
'afii10048': u'\u042E',
'afii10049': u'\u042F',
'afii10050': u'\u0490',
'afii10051': u'\u0402',
'afii10052': u'\u0403',
'afii10053': u'\u0404',
'afii10054': u'\u0405',
'afii10055': u'\u0406',
'afii10056': u'\u0407',
'afii10057': u'\u0408',
'afii10058': u'\u0409',
'afii10059': u'\u040A',
'afii10060': u'\u040B',
'afii10061': u'\u040C',
'afii10062': u'\u040E',
'afii10063': u'\uF6C4',
'afii10064': u'\uF6C5',
'afii10065': u'\u0430',
'afii10066': u'\u0431',
'afii10067': u'\u0432',
'afii10068': u'\u0433',
'afii10069': u'\u0434',
'afii10070': u'\u0435',
'afii10071': u'\u0451',
'afii10072': u'\u0436',
'afii10073': u'\u0437',
'afii10074': u'\u0438',
'afii10075': u'\u0439',
'afii10076': u'\u043A',
'afii10077': u'\u043B',
'afii10078': u'\u043C',
'afii10079': u'\u043D',
'afii10080': u'\u043E',
'afii10081': u'\u043F',
'afii10082': u'\u0440',
'afii10083': u'\u0441',
'afii10084': u'\u0442',
'afii10085': u'\u0443',
'afii10086': u'\u0444',
'afii10087': u'\u0445',
'afii10088': u'\u0446',
'afii10089': u'\u0447',
'afii10090': u'\u0448',
'afii10091': u'\u0449',
'afii10092': u'\u044A',
'afii10093': u'\u044B',
'afii10094': u'\u044C',
'afii10095': u'\u044D',
'afii10096': u'\u044E',
'afii10097': u'\u044F',
'afii10098': u'\u0491',
'afii10099': u'\u0452',
'afii10100': u'\u0453',
'afii10101': u'\u0454',
'afii10102': u'\u0455',
'afii10103': u'\u0456',
'afii10104': u'\u0457',
'afii10105': u'\u0458',
'afii10106': u'\u0459',
'afii10107': u'\u045A',
'afii10108': u'\u045B',
'afii10109': u'\u045C',
'afii10110': u'\u045E',
'afii10145': u'\u040F',
'afii10146': u'\u0462',
'afii10147': u'\u0472',
'afii10148': u'\u0474',
'afii10192': u'\uF6C6',
'afii10193': u'\u045F',
'afii10194': u'\u0463',
'afii10195': u'\u0473',
'afii10196': u'\u0475',
'afii10831': u'\uF6C7',
'afii10832': u'\uF6C8',
'afii10846': u'\u04D9',
'afii299': u'\u200E',
'afii300': u'\u200F',
'afii301': u'\u200D',
'afii57381': u'\u066A',
'afii57388': u'\u060C',
'afii57392': u'\u0660',
'afii57393': u'\u0661',
'afii57394': u'\u0662',
'afii57395': u'\u0663',
'afii57396': u'\u0664',
'afii57397': u'\u0665',
'afii57398': u'\u0666',
'afii57399': u'\u0667',
'afii57400': u'\u0668',
'afii57401': u'\u0669',
'afii57403': u'\u061B',
'afii57407': u'\u061F',
'afii57409': u'\u0621',
'afii57410': u'\u0622',
'afii57411': u'\u0623',
'afii57412': u'\u0624',
'afii57413': u'\u0625',
'afii57414': u'\u0626',
'afii57415': u'\u0627',
'afii57416': u'\u0628',
'afii57417': u'\u0629',
'afii57418': u'\u062A',
'afii57419': u'\u062B',
'afii57420': u'\u062C',
'afii57421': u'\u062D',
'afii57422': u'\u062E',
'afii57423': u'\u062F',
'afii57424': u'\u0630',
'afii57425': u'\u0631',
'afii57426': u'\u0632',
'afii57427': u'\u0633',
'afii57428': u'\u0634',
'afii57429': u'\u0635',
'afii57430': u'\u0636',
'afii57431': u'\u0637',
'afii57432': u'\u0638',
'afii57433': u'\u0639',
'afii57434': u'\u063A',
'afii57440': u'\u0640',
'afii57441': u'\u0641',
'afii57442': u'\u0642',
'afii57443': u'\u0643',
'afii57444': u'\u0644',
'afii57445': u'\u0645',
'afii57446': u'\u0646',
'afii57448': u'\u0648',
'afii57449': u'\u0649',
'afii57450': u'\u064A',
'afii57451': u'\u064B',
'afii57452': u'\u064C',
'afii57453': u'\u064D',
'afii57454': u'\u064E',
'afii57455': u'\u064F',
'afii57456': u'\u0650',
'afii57457': u'\u0651',
'afii57458': u'\u0652',
'afii57470': u'\u0647',
'afii57505': u'\u06A4',
'afii57506': u'\u067E',
'afii57507': u'\u0686',
'afii57508': u'\u0698',
'afii57509': u'\u06AF',
'afii57511': u'\u0679',
'afii57512': u'\u0688',
'afii57513': u'\u0691',
'afii57514': u'\u06BA',
'afii57519': u'\u06D2',
'afii57534': u'\u06D5',
'afii57636': u'\u20AA',
'afii57645': u'\u05BE',
'afii57658': u'\u05C3',
'afii57664': u'\u05D0',
'afii57665': u'\u05D1',
'afii57666': u'\u05D2',
'afii57667': u'\u05D3',
'afii57668': u'\u05D4',
'afii57669': u'\u05D5',
'afii57670': u'\u05D6',
'afii57671': u'\u05D7',
'afii57672': u'\u05D8',
'afii57673': u'\u05D9',
'afii57674': u'\u05DA',
'afii57675': u'\u05DB',
'afii57676': u'\u05DC',
'afii57677': u'\u05DD',
'afii57678': u'\u05DE',
'afii57679': u'\u05DF',
'afii57680': u'\u05E0',
'afii57681': u'\u05E1',
'afii57682': u'\u05E2',
'afii57683': u'\u05E3',
'afii57684': u'\u05E4',
'afii57685': u'\u05E5',
'afii57686': u'\u05E6',
'afii57687': u'\u05E7',
'afii57688': u'\u05E8',
'afii57689': u'\u05E9',
'afii57690': u'\u05EA',
'afii57694': u'\uFB2A',
'afii57695': u'\uFB2B',
'afii57700': u'\uFB4B',
'afii57705': u'\uFB1F',
'afii57716': u'\u05F0',
'afii57717': u'\u05F1',
'afii57718': u'\u05F2',
'afii57723': u'\uFB35',
'afii57793': u'\u05B4',
'afii57794': u'\u05B5',
'afii57795': u'\u05B6',
'afii57796': u'\u05BB',
'afii57797': u'\u05B8',
'afii57798': u'\u05B7',
'afii57799': u'\u05B0',
'afii57800': u'\u05B2',
'afii57801': u'\u05B1',
'afii57802': u'\u05B3',
'afii57803': u'\u05C2',
'afii57804': u'\u05C1',
'afii57806': u'\u05B9',
'afii57807': u'\u05BC',
'afii57839': u'\u05BD',
'afii57841': u'\u05BF',
'afii57842': u'\u05C0',
'afii57929': u'\u02BC',
'afii61248': u'\u2105',
'afii61289': u'\u2113',
'afii61352': u'\u2116',
'afii61573': u'\u202C',
'afii61574': u'\u202D',
'afii61575': u'\u202E',
'afii61664': u'\u200C',
'afii63167': u'\u066D',
'afii64937': u'\u02BD',
'agrave': u'\u00E0',
'agujarati': u'\u0A85',
'agurmukhi': u'\u0A05',
'ahiragana': u'\u3042',
'ahookabove': u'\u1EA3',
'aibengali': u'\u0990',
'aibopomofo': u'\u311E',
'aideva': u'\u0910',
'aiecyrillic': u'\u04D5',
'aigujarati': u'\u0A90',
'aigurmukhi': u'\u0A10',
'aimatragurmukhi': u'\u0A48',
'ainarabic': u'\u0639',
'ainfinalarabic': u'\uFECA',
'aininitialarabic': u'\uFECB',
'ainmedialarabic': u'\uFECC',
'ainvertedbreve': u'\u0203',
'aivowelsignbengali': u'\u09C8',
'aivowelsigndeva': u'\u0948',
'aivowelsigngujarati': u'\u0AC8',
'akatakana': u'\u30A2',
'akatakanahalfwidth': u'\uFF71',
'akorean': u'\u314F',
'alef': u'\u05D0',
'alefarabic': u'\u0627',
'alefdageshhebrew': u'\uFB30',
'aleffinalarabic': u'\uFE8E',
'alefhamzaabovearabic': u'\u0623',
'alefhamzaabovefinalarabic': u'\uFE84',
'alefhamzabelowarabic': u'\u0625',
'alefhamzabelowfinalarabic': u'\uFE88',
'alefhebrew': u'\u05D0',
'aleflamedhebrew': u'\uFB4F',
'alefmaddaabovearabic': u'\u0622',
'alefmaddaabovefinalarabic': u'\uFE82',
'alefmaksuraarabic': u'\u0649',
'alefmaksurafinalarabic': u'\uFEF0',
'alefmaksurainitialarabic': u'\uFEF3',
'alefmaksuramedialarabic': u'\uFEF4',
'alefpatahhebrew': u'\uFB2E',
'alefqamatshebrew': u'\uFB2F',
'aleph': u'\u2135',
'allequal': u'\u224C',
'alpha': u'\u03B1',
'alphatonos': u'\u03AC',
'amacron': u'\u0101',
'amonospace': u'\uFF41',
'ampersand': u'\u0026',
'ampersandmonospace': u'\uFF06',
'ampersandsmall': u'\uF726',
'amsquare': u'\u33C2',
'anbopomofo': u'\u3122',
'angbopomofo': u'\u3124',
'angkhankhuthai': u'\u0E5A',
'angle': u'\u2220',
'anglebracketleft': u'\u3008',
'anglebracketleftvertical': u'\uFE3F',
'anglebracketright': u'\u3009',
'anglebracketrightvertical': u'\uFE40',
'angleleft': u'\u2329',
'angleright': u'\u232A',
'angstrom': u'\u212B',
'anoteleia': u'\u0387',
'anudattadeva': u'\u0952',
'anusvarabengali': u'\u0982',
'anusvaradeva': u'\u0902',
'anusvaragujarati': u'\u0A82',
'aogonek': u'\u0105',
'apaatosquare': u'\u3300',
'aparen': u'\u249C',
'apostrophearmenian': u'\u055A',
'apostrophemod': u'\u02BC',
'apple': u'\uF8FF',
'approaches': u'\u2250',
'approxequal': u'\u2248',
'approxequalorimage': u'\u2252',
'approximatelyequal': u'\u2245',
'araeaekorean': u'\u318E',
'araeakorean': u'\u318D',
'arc': u'\u2312',
'arighthalfring': u'\u1E9A',
'aring': u'\u00E5',
'aringacute': u'\u01FB',
'aringbelow': u'\u1E01',
'arrowboth': u'\u2194',
'arrowdashdown': u'\u21E3',
'arrowdashleft': u'\u21E0',
'arrowdashright': u'\u21E2',
'arrowdashup': u'\u21E1',
'arrowdblboth': u'\u21D4',
'arrowdbldown': u'\u21D3',
'arrowdblleft': u'\u21D0',
'arrowdblright': u'\u21D2',
'arrowdblup': u'\u21D1',
'arrowdown': u'\u2193',
'arrowdownleft': u'\u2199',
'arrowdownright': u'\u2198',
'arrowdownwhite': u'\u21E9',
'arrowheaddownmod': u'\u02C5',
'arrowheadleftmod': u'\u02C2',
'arrowheadrightmod': u'\u02C3',
'arrowheadupmod': u'\u02C4',
'arrowhorizex': u'\uF8E7',
'arrowleft': u'\u2190',
'arrowleftdbl': u'\u21D0',
'arrowleftdblstroke': u'\u21CD',
'arrowleftoverright': u'\u21C6',
'arrowleftwhite': u'\u21E6',
'arrowright': u'\u2192',
'arrowrightdblstroke': u'\u21CF',
'arrowrightheavy': u'\u279E',
'arrowrightoverleft': u'\u21C4',
'arrowrightwhite': u'\u21E8',
'arrowtableft': u'\u21E4',
'arrowtabright': u'\u21E5',
'arrowup': u'\u2191',
'arrowupdn': u'\u2195',
'arrowupdnbse': u'\u21A8',
'arrowupdownbase': u'\u21A8',
'arrowupleft': u'\u2196',
'arrowupleftofdown': u'\u21C5',
'arrowupright': u'\u2197',
'arrowupwhite': u'\u21E7',
'arrowvertex': u'\uF8E6',
'asciicircum': u'\u005E',
'asciicircummonospace': u'\uFF3E',
'asciitilde': u'\u007E',
'asciitildemonospace': u'\uFF5E',
'ascript': u'\u0251',
'ascriptturned': u'\u0252',
'asmallhiragana': u'\u3041',
'asmallkatakana': u'\u30A1',
'asmallkatakanahalfwidth': u'\uFF67',
'asterisk': u'\u002A',
'asteriskaltonearabic': u'\u066D',
'asteriskarabic': u'\u066D',
'asteriskmath': u'\u2217',
'asteriskmonospace': u'\uFF0A',
'asterisksmall': u'\uFE61',
'asterism': u'\u2042',
'asuperior': u'\uF6E9',
'asymptoticallyequal': u'\u2243',
'at': u'\u0040',
'atilde': u'\u00E3',
'atmonospace': u'\uFF20',
'atsmall': u'\uFE6B',
'aturned': u'\u0250',
'aubengali': u'\u0994',
'aubopomofo': u'\u3120',
'audeva': u'\u0914',
'augujarati': u'\u0A94',
'augurmukhi': u'\u0A14',
'aulengthmarkbengali': u'\u09D7',
'aumatragurmukhi': u'\u0A4C',
'auvowelsignbengali': u'\u09CC',
'auvowelsigndeva': u'\u094C',
'auvowelsigngujarati': u'\u0ACC',
'avagrahadeva': u'\u093D',
'aybarmenian': u'\u0561',
'ayin': u'\u05E2',
'ayinaltonehebrew': u'\uFB20',
'ayinhebrew': u'\u05E2',
'b': u'\u0062',
'babengali': u'\u09AC',
'backslash': u'\u005C',
'backslashmonospace': u'\uFF3C',
'badeva': u'\u092C',
'bagujarati': u'\u0AAC',
'bagurmukhi': u'\u0A2C',
'bahiragana': u'\u3070',
'bahtthai': u'\u0E3F',
'bakatakana': u'\u30D0',
'bar': u'\u007C',
'barmonospace': u'\uFF5C',
'bbopomofo': u'\u3105',
'bcircle': u'\u24D1',
'bdotaccent': u'\u1E03',
'bdotbelow': u'\u1E05',
'beamedsixteenthnotes': u'\u266C',
'because': u'\u2235',
'becyrillic': u'\u0431',
'beharabic': u'\u0628',
'behfinalarabic': u'\uFE90',
'behinitialarabic': u'\uFE91',
'behiragana': u'\u3079',
'behmedialarabic': u'\uFE92',
'behmeeminitialarabic': u'\uFC9F',
'behmeemisolatedarabic': u'\uFC08',
'behnoonfinalarabic': u'\uFC6D',
'bekatakana': u'\u30D9',
'benarmenian': u'\u0562',
'bet': u'\u05D1',
'beta': u'\u03B2',
'betasymbolgreek': u'\u03D0',
'betdagesh': u'\uFB31',
'betdageshhebrew': u'\uFB31',
'bethebrew': u'\u05D1',
'betrafehebrew': u'\uFB4C',
'bhabengali': u'\u09AD',
'bhadeva': u'\u092D',
'bhagujarati': u'\u0AAD',
'bhagurmukhi': u'\u0A2D',
'bhook': u'\u0253',
'bihiragana': u'\u3073',
'bikatakana': u'\u30D3',
'bilabialclick': u'\u0298',
'bindigurmukhi': u'\u0A02',
'birusquare': u'\u3331',
'blackcircle': u'\u25CF',
'blackdiamond': u'\u25C6',
'blackdownpointingtriangle': u'\u25BC',
'blackleftpointingpointer': u'\u25C4',
'blackleftpointingtriangle': u'\u25C0',
'blacklenticularbracketleft': u'\u3010',
'blacklenticularbracketleftvertical': u'\uFE3B',
'blacklenticularbracketright': u'\u3011',
'blacklenticularbracketrightvertical': u'\uFE3C',
'blacklowerlefttriangle': u'\u25E3',
'blacklowerrighttriangle': u'\u25E2',
'blackrectangle': u'\u25AC',
'blackrightpointingpointer': u'\u25BA',
'blackrightpointingtriangle': u'\u25B6',
'blacksmallsquare': u'\u25AA',
'blacksmilingface': u'\u263B',
'blacksquare': u'\u25A0',
'blackstar': u'\u2605',
'blackupperlefttriangle': u'\u25E4',
'blackupperrighttriangle': u'\u25E5',
'blackuppointingsmalltriangle': u'\u25B4',
'blackuppointingtriangle': u'\u25B2',
'blank': u'\u2423',
'blinebelow': u'\u1E07',
'block': u'\u2588',
'bmonospace': u'\uFF42',
'bobaimaithai': u'\u0E1A',
'bohiragana': u'\u307C',
'bokatakana': u'\u30DC',
'bparen': u'\u249D',
'bqsquare': u'\u33C3',
'braceex': u'\uF8F4',
'braceleft': u'\u007B',
'braceleftbt': u'\uF8F3',
'braceleftmid': u'\uF8F2',
'braceleftmonospace': u'\uFF5B',
'braceleftsmall': u'\uFE5B',
'bracelefttp': u'\uF8F1',
'braceleftvertical': u'\uFE37',
'braceright': u'\u007D',
'bracerightbt': u'\uF8FE',
'bracerightmid': u'\uF8FD',
'bracerightmonospace': u'\uFF5D',
'bracerightsmall': u'\uFE5C',
'bracerighttp': u'\uF8FC',
'bracerightvertical': u'\uFE38',
'bracketleft': u'\u005B',
'bracketleftbt': u'\uF8F0',
'bracketleftex': u'\uF8EF',
'bracketleftmonospace': u'\uFF3B',
'bracketlefttp': u'\uF8EE',
'bracketright': u'\u005D',
'bracketrightbt': u'\uF8FB',
'bracketrightex': u'\uF8FA',
'bracketrightmonospace': u'\uFF3D',
'bracketrighttp': u'\uF8F9',
'breve': u'\u02D8',
'brevebelowcmb': u'\u032E',
'brevecmb': u'\u0306',
'breveinvertedbelowcmb': u'\u032F',
'breveinvertedcmb': u'\u0311',
'breveinverteddoublecmb': u'\u0361',
'bridgebelowcmb': u'\u032A',
'bridgeinvertedbelowcmb': u'\u033A',
'brokenbar': u'\u00A6',
'bstroke': u'\u0180',
'bsuperior': u'\uF6EA',
'btopbar': u'\u0183',
'buhiragana': u'\u3076',
'bukatakana': u'\u30D6',
'bullet': u'\u2022',
'bulletinverse': u'\u25D8',
'bulletoperator': u'\u2219',
'bullseye': u'\u25CE',
'c': u'\u0063',
'caarmenian': u'\u056E',
'cabengali': u'\u099A',
'cacute': u'\u0107',
'cadeva': u'\u091A',
'cagujarati': u'\u0A9A',
'cagurmukhi': u'\u0A1A',
'calsquare': u'\u3388',
'candrabindubengali': u'\u0981',
'candrabinducmb': u'\u0310',
'candrabindudeva': u'\u0901',
'candrabindugujarati': u'\u0A81',
'capslock': u'\u21EA',
'careof': u'\u2105',
'caron': u'\u02C7',
'caronbelowcmb': u'\u032C',
'caroncmb': u'\u030C',
'carriagereturn': u'\u21B5',
'cbopomofo': u'\u3118',
'ccaron': u'\u010D',
'ccedilla': u'\u00E7',
'ccedillaacute': u'\u1E09',
'ccircle': u'\u24D2',
'ccircumflex': u'\u0109',
'ccurl': u'\u0255',
'cdot': u'\u010B',
'cdotaccent': u'\u010B',
'cdsquare': u'\u33C5',
'cedilla': u'\u00B8',
'cedillacmb': u'\u0327',
'cent': u'\u00A2',
'centigrade': u'\u2103',
'centinferior': u'\uF6DF',
'centmonospace': u'\uFFE0',
'centoldstyle': u'\uF7A2',
'centsuperior': u'\uF6E0',
'chaarmenian': u'\u0579',
'chabengali': u'\u099B',
'chadeva': u'\u091B',
'chagujarati': u'\u0A9B',
'chagurmukhi': u'\u0A1B',
'chbopomofo': u'\u3114',
'cheabkhasiancyrillic': u'\u04BD',
'checkmark': u'\u2713',
'checyrillic': u'\u0447',
'chedescenderabkhasiancyrillic': u'\u04BF',
'chedescendercyrillic': u'\u04B7',
'chedieresiscyrillic': u'\u04F5',
'cheharmenian': u'\u0573',
'chekhakassiancyrillic': u'\u04CC',
'cheverticalstrokecyrillic': u'\u04B9',
'chi': u'\u03C7',
'chieuchacirclekorean': u'\u3277',
'chieuchaparenkorean': u'\u3217',
'chieuchcirclekorean': u'\u3269',
'chieuchkorean': u'\u314A',
'chieuchparenkorean': u'\u3209',
'chochangthai': u'\u0E0A',
'chochanthai': u'\u0E08',
'chochingthai': u'\u0E09',
'chochoethai': u'\u0E0C',
'chook': u'\u0188',
'cieucacirclekorean': u'\u3276',
'cieucaparenkorean': u'\u3216',
'cieuccirclekorean': u'\u3268',
'cieuckorean': u'\u3148',
'cieucparenkorean': u'\u3208',
'cieucuparenkorean': u'\u321C',
'circle': u'\u25CB',
'circlemultiply': u'\u2297',
'circleot': u'\u2299',
'circleplus': u'\u2295',
'circlepostalmark': u'\u3036',
'circlewithlefthalfblack': u'\u25D0',
'circlewithrighthalfblack': u'\u25D1',
'circumflex': u'\u02C6',
'circumflexbelowcmb': u'\u032D',
'circumflexcmb': u'\u0302',
'clear': u'\u2327',
'clickalveolar': u'\u01C2',
'clickdental': u'\u01C0',
'clicklateral': u'\u01C1',
'clickretroflex': u'\u01C3',
'club': u'\u2663',
'clubsuitblack': u'\u2663',
'clubsuitwhite': u'\u2667',
'cmcubedsquare': u'\u33A4',
'cmonospace': u'\uFF43',
'cmsquaredsquare': u'\u33A0',
'coarmenian': u'\u0581',
'colon': u'\u003A',
'colonmonetary': u'\u20A1',
'colonmonospace': u'\uFF1A',
'colonsign': u'\u20A1',
'colonsmall': u'\uFE55',
'colontriangularhalfmod': u'\u02D1',
'colontriangularmod': u'\u02D0',
'comma': u'\u002C',
'commaabovecmb': u'\u0313',
'commaaboverightcmb': u'\u0315',
'commaaccent': u'\uF6C3',
'commaarabic': u'\u060C',
'commaarmenian': u'\u055D',
'commainferior': u'\uF6E1',
'commamonospace': u'\uFF0C',
'commareversedabovecmb': u'\u0314',
'commareversedmod': u'\u02BD',
'commasmall': u'\uFE50',
'commasuperior': u'\uF6E2',
'commaturnedabovecmb': u'\u0312',
'commaturnedmod': u'\u02BB',
'compass': u'\u263C',
'congruent': u'\u2245',
'contourintegral': u'\u222E',
'control': u'\u2303',
'controlACK': u'\u0006',
'controlBEL': u'\u0007',
'controlBS': u'\u0008',
'controlCAN': u'\u0018',
'controlCR': u'\u000D',
'controlDC1': u'\u0011',
'controlDC2': u'\u0012',
'controlDC3': u'\u0013',
'controlDC4': u'\u0014',
'controlDEL': u'\u007F',
'controlDLE': u'\u0010',
'controlEM': u'\u0019',
'controlENQ': u'\u0005',
'controlEOT': u'\u0004',
'controlESC': u'\u001B',
'controlETB': u'\u0017',
'controlETX': u'\u0003',
'controlFF': u'\u000C',
'controlFS': u'\u001C',
'controlGS': u'\u001D',
'controlHT': u'\u0009',
'controlLF': u'\u000A',
'controlNAK': u'\u0015',
'controlRS': u'\u001E',
'controlSI': u'\u000F',
'controlSO': u'\u000E',
'controlSOT': u'\u0002',
'controlSTX': u'\u0001',
'controlSUB': u'\u001A',
'controlSYN': u'\u0016',
'controlUS': u'\u001F',
'controlVT': u'\u000B',
'copyright': u'\u00A9',
'copyrightsans': u'\uF8E9',
'copyrightserif': u'\uF6D9',
'cornerbracketleft': u'\u300C',
'cornerbracketlefthalfwidth': u'\uFF62',
'cornerbracketleftvertical': u'\uFE41',
'cornerbracketright': u'\u300D',
'cornerbracketrighthalfwidth': u'\uFF63',
'cornerbracketrightvertical': u'\uFE42',
'corporationsquare': u'\u337F',
'cosquare': u'\u33C7',
'coverkgsquare': u'\u33C6',
'cparen': u'\u249E',
'cruzeiro': u'\u20A2',
'cstretched': u'\u0297',
'curlyand': u'\u22CF',
'curlyor': u'\u22CE',
'currency': u'\u00A4',
'cyrBreve': u'\uF6D1',
'cyrFlex': u'\uF6D2',
'cyrbreve': u'\uF6D4',
'cyrflex': u'\uF6D5',
'd': u'\u0064',
'daarmenian': u'\u0564',
'dabengali': u'\u09A6',
'dadarabic': u'\u0636',
'dadeva': u'\u0926',
'dadfinalarabic': u'\uFEBE',
'dadinitialarabic': u'\uFEBF',
'dadmedialarabic': u'\uFEC0',
'dagesh': u'\u05BC',
'dageshhebrew': u'\u05BC',
'dagger': u'\u2020',
'daggerdbl': u'\u2021',
'dagujarati': u'\u0AA6',
'dagurmukhi': u'\u0A26',
'dahiragana': u'\u3060',
'dakatakana': u'\u30C0',
'dalarabic': u'\u062F',
'dalet': u'\u05D3',
'daletdagesh': u'\uFB33',
'daletdageshhebrew': u'\uFB33',
'dalethatafpatah': u'\u05D3\u05B2',
'dalethatafpatahhebrew': u'\u05D3\u05B2',
'dalethatafsegol': u'\u05D3\u05B1',
'dalethatafsegolhebrew': u'\u05D3\u05B1',
'dalethebrew': u'\u05D3',
'dalethiriq': u'\u05D3\u05B4',
'dalethiriqhebrew': u'\u05D3\u05B4',
'daletholam': u'\u05D3\u05B9',
'daletholamhebrew': u'\u05D3\u05B9',
'daletpatah': u'\u05D3\u05B7',
'daletpatahhebrew': u'\u05D3\u05B7',
'daletqamats': u'\u05D3\u05B8',
'daletqamatshebrew': u'\u05D3\u05B8',
'daletqubuts': u'\u05D3\u05BB',
'daletqubutshebrew': u'\u05D3\u05BB',
'daletsegol': u'\u05D3\u05B6',
'daletsegolhebrew': u'\u05D3\u05B6',
'daletsheva': u'\u05D3\u05B0',
'daletshevahebrew': u'\u05D3\u05B0',
'dalettsere': u'\u05D3\u05B5',
'dalettserehebrew': u'\u05D3\u05B5',
'dalfinalarabic': u'\uFEAA',
'dammaarabic': u'\u064F',
'dammalowarabic': u'\u064F',
'dammatanaltonearabic': u'\u064C',
'dammatanarabic': u'\u064C',
'danda': u'\u0964',
'dargahebrew': u'\u05A7',
'dargalefthebrew': u'\u05A7',
'dasiapneumatacyrilliccmb': u'\u0485',
'dblGrave': u'\uF6D3',
'dblanglebracketleft': u'\u300A',
'dblanglebracketleftvertical': u'\uFE3D',
'dblanglebracketright': u'\u300B',
'dblanglebracketrightvertical': u'\uFE3E',
'dblarchinvertedbelowcmb': u'\u032B',
'dblarrowleft': u'\u21D4',
'dblarrowright': u'\u21D2',
'dbldanda': u'\u0965',
'dblgrave': u'\uF6D6',
'dblgravecmb': u'\u030F',
'dblintegral': u'\u222C',
'dbllowline': u'\u2017',
'dbllowlinecmb': u'\u0333',
'dbloverlinecmb': u'\u033F',
'dblprimemod': u'\u02BA',
'dblverticalbar': u'\u2016',
'dblverticallineabovecmb': u'\u030E',
'dbopomofo': u'\u3109',
'dbsquare': u'\u33C8',
'dcaron': u'\u010F',
'dcedilla': u'\u1E11',
'dcircle': u'\u24D3',
'dcircumflexbelow': u'\u1E13',
'dcroat': u'\u0111',
'ddabengali': u'\u09A1',
'ddadeva': u'\u0921',
'ddagujarati': u'\u0AA1',
'ddagurmukhi': u'\u0A21',
'ddalarabic': u'\u0688',
'ddalfinalarabic': u'\uFB89',
'dddhadeva': u'\u095C',
'ddhabengali': u'\u09A2',
'ddhadeva': u'\u0922',
'ddhagujarati': u'\u0AA2',
'ddhagurmukhi': u'\u0A22',
'ddotaccent': u'\u1E0B',
'ddotbelow': u'\u1E0D',
'decimalseparatorarabic': u'\u066B',
'decimalseparatorpersian': u'\u066B',
'decyrillic': u'\u0434',
'degree': u'\u00B0',
'dehihebrew': u'\u05AD',
'dehiragana': u'\u3067',
'deicoptic': u'\u03EF',
'dekatakana': u'\u30C7',
'deleteleft': u'\u232B',
'deleteright': u'\u2326',
'delta': u'\u03B4',
'deltaturned': u'\u018D',
'denominatorminusonenumeratorbengali': u'\u09F8',
'dezh': u'\u02A4',
'dhabengali': u'\u09A7',
'dhadeva': u'\u0927',
'dhagujarati': u'\u0AA7',
'dhagurmukhi': u'\u0A27',
'dhook': u'\u0257',
'dialytikatonos': u'\u0385',
'dialytikatonoscmb': u'\u0344',
'diamond': u'\u2666',
'diamondsuitwhite': u'\u2662',
'dieresis': u'\u00A8',
'dieresisacute': u'\uF6D7',
'dieresisbelowcmb': u'\u0324',
'dieresiscmb': u'\u0308',
'dieresisgrave': u'\uF6D8',
'dieresistonos': u'\u0385',
'dihiragana': u'\u3062',
'dikatakana': u'\u30C2',
'dittomark': u'\u3003',
'divide': u'\u00F7',
'divides': u'\u2223',
'divisionslash': u'\u2215',
'djecyrillic': u'\u0452',
'dkshade': u'\u2593',
'dlinebelow': u'\u1E0F',
'dlsquare': u'\u3397',
'dmacron': u'\u0111',
'dmonospace': u'\uFF44',
'dnblock': u'\u2584',
'dochadathai': u'\u0E0E',
'dodekthai': u'\u0E14',
'dohiragana': u'\u3069',
'dokatakana': u'\u30C9',
'dollar': u'\u0024',
'dollarinferior': u'\uF6E3',
'dollarmonospace': u'\uFF04',
'dollaroldstyle': u'\uF724',
'dollarsmall': u'\uFE69',
'dollarsuperior': u'\uF6E4',
'dong': u'\u20AB',
'dorusquare': u'\u3326',
'dotaccent': u'\u02D9',
'dotaccentcmb': u'\u0307',
'dotbelowcmb': u'\u0323',
'dotbelowcomb': u'\u0323',
'dotkatakana': u'\u30FB',
'dotlessi': u'\u0131',
'dotlessj': u'\uF6BE',
'dotlessjstrokehook': u'\u0284',
'dotmath': u'\u22C5',
'dottedcircle': u'\u25CC',
'doubleyodpatah': u'\uFB1F',
'doubleyodpatahhebrew': u'\uFB1F',
'downtackbelowcmb': u'\u031E',
'downtackmod': u'\u02D5',
'dparen': u'\u249F',
'dsuperior': u'\uF6EB',
'dtail': u'\u0256',
'dtopbar': u'\u018C',
'duhiragana': u'\u3065',
'dukatakana': u'\u30C5',
'dz': u'\u01F3',
'dzaltone': u'\u02A3',
'dzcaron': u'\u01C6',
'dzcurl': u'\u02A5',
'dzeabkhasiancyrillic': u'\u04E1',
'dzecyrillic': u'\u0455',
'dzhecyrillic': u'\u045F',
'e': u'\u0065',
'eacute': u'\u00E9',
'earth': u'\u2641',
'ebengali': u'\u098F',
'ebopomofo': u'\u311C',
'ebreve': u'\u0115',
'ecandradeva': u'\u090D',
'ecandragujarati': u'\u0A8D',
'ecandravowelsigndeva': u'\u0945',
'ecandravowelsigngujarati': u'\u0AC5',
'ecaron': u'\u011B',
'ecedillabreve': u'\u1E1D',
'echarmenian': u'\u0565',
'echyiwnarmenian': u'\u0587',
'ecircle': u'\u24D4',
'ecircumflex': u'\u00EA',
'ecircumflexacute': u'\u1EBF',
'ecircumflexbelow': u'\u1E19',
'ecircumflexdotbelow': u'\u1EC7',
'ecircumflexgrave': u'\u1EC1',
'ecircumflexhookabove': u'\u1EC3',
'ecircumflextilde': u'\u1EC5',
'ecyrillic': u'\u0454',
'edblgrave': u'\u0205',
'edeva': u'\u090F',
'edieresis': u'\u00EB',
'edot': u'\u0117',
'edotaccent': u'\u0117',
'edotbelow': u'\u1EB9',
'eegurmukhi': u'\u0A0F',
'eematragurmukhi': u'\u0A47',
'efcyrillic': u'\u0444',
'egrave': u'\u00E8',
'egujarati': u'\u0A8F',
'eharmenian': u'\u0567',
'ehbopomofo': u'\u311D',
'ehiragana': u'\u3048',
'ehookabove': u'\u1EBB',
'eibopomofo': u'\u311F',
'eight': u'\u0038',
'eightarabic': u'\u0668',
'eightbengali': u'\u09EE',
'eightcircle': u'\u2467',
'eightcircleinversesansserif': u'\u2791',
'eightdeva': u'\u096E',
'eighteencircle': u'\u2471',
'eighteenparen': u'\u2485',
'eighteenperiod': u'\u2499',
'eightgujarati': u'\u0AEE',
'eightgurmukhi': u'\u0A6E',
'eighthackarabic': u'\u0668',
'eighthangzhou': u'\u3028',
'eighthnotebeamed': u'\u266B',
'eightideographicparen': u'\u3227',
'eightinferior': u'\u2088',
'eightmonospace': u'\uFF18',
'eightoldstyle': u'\uF738',
'eightparen': u'\u247B',
'eightperiod': u'\u248F',
'eightpersian': u'\u06F8',
'eightroman': u'\u2177',
'eightsuperior': u'\u2078',
'eightthai': u'\u0E58',
'einvertedbreve': u'\u0207',
'eiotifiedcyrillic': u'\u0465',
'ekatakana': u'\u30A8',
'ekatakanahalfwidth': u'\uFF74',
'ekonkargurmukhi': u'\u0A74',
'ekorean': u'\u3154',
'elcyrillic': u'\u043B',
'element': u'\u2208',
'elevencircle': u'\u246A',
'elevenparen': u'\u247E',
'elevenperiod': u'\u2492',
'elevenroman': u'\u217A',
'ellipsis': u'\u2026',
'ellipsisvertical': u'\u22EE',
'emacron': u'\u0113',
'emacronacute': u'\u1E17',
'emacrongrave': u'\u1E15',
'emcyrillic': u'\u043C',
'emdash': u'\u2014',
'emdashvertical': u'\uFE31',
'emonospace': u'\uFF45',
'emphasismarkarmenian': u'\u055B',
'emptyset': u'\u2205',
'enbopomofo': u'\u3123',
'encyrillic': u'\u043D',
'endash': u'\u2013',
'endashvertical': u'\uFE32',
'endescendercyrillic': u'\u04A3',
'eng': u'\u014B',
'engbopomofo': u'\u3125',
'enghecyrillic': u'\u04A5',
'enhookcyrillic': u'\u04C8',
'enspace': u'\u2002',
'eogonek': u'\u0119',
'eokorean': u'\u3153',
'eopen': u'\u025B',
'eopenclosed': u'\u029A',
'eopenreversed': u'\u025C',
'eopenreversedclosed': u'\u025E',
'eopenreversedhook': u'\u025D',
'eparen': u'\u24A0',
'epsilon': u'\u03B5',
'epsilontonos': u'\u03AD',
'equal': u'\u003D',
'equalmonospace': u'\uFF1D',
'equalsmall': u'\uFE66',
'equalsuperior': u'\u207C',
'equivalence': u'\u2261',
'erbopomofo': u'\u3126',
'ercyrillic': u'\u0440',
'ereversed': u'\u0258',
'ereversedcyrillic': u'\u044D',
'escyrillic': u'\u0441',
'esdescendercyrillic': u'\u04AB',
'esh': u'\u0283',
'eshcurl': u'\u0286',
'eshortdeva': u'\u090E',
'eshortvowelsigndeva': u'\u0946',
'eshreversedloop': u'\u01AA',
'eshsquatreversed': u'\u0285',
'esmallhiragana': u'\u3047',
'esmallkatakana': u'\u30A7',
'esmallkatakanahalfwidth': u'\uFF6A',
'estimated': u'\u212E',
'esuperior': u'\uF6EC',
'eta': u'\u03B7',
'etarmenian': u'\u0568',
'etatonos': u'\u03AE',
'eth': u'\u00F0',
'etilde': u'\u1EBD',
'etildebelow': u'\u1E1B',
'etnahtafoukhhebrew': u'\u0591',
'etnahtafoukhlefthebrew': u'\u0591',
'etnahtahebrew': u'\u0591',
'etnahtalefthebrew': u'\u0591',
'eturned': u'\u01DD',
'eukorean': u'\u3161',
'euro': u'\u20AC',
'evowelsignbengali': u'\u09C7',
'evowelsigndeva': u'\u0947',
'evowelsigngujarati': u'\u0AC7',
'exclam': u'\u0021',
'exclamarmenian': u'\u055C',
'exclamdbl': u'\u203C',
'exclamdown': u'\u00A1',
'exclamdownsmall': u'\uF7A1',
'exclammonospace': u'\uFF01',
'exclamsmall': u'\uF721',
'existential': u'\u2203',
'ezh': u'\u0292',
'ezhcaron': u'\u01EF',
'ezhcurl': u'\u0293',
'ezhreversed': u'\u01B9',
'ezhtail': u'\u01BA',
'f': u'\u0066',
'fadeva': u'\u095E',
'fagurmukhi': u'\u0A5E',
'fahrenheit': u'\u2109',
'fathaarabic': u'\u064E',
'fathalowarabic': u'\u064E',
'fathatanarabic': u'\u064B',
'fbopomofo': u'\u3108',
'fcircle': u'\u24D5',
'fdotaccent': u'\u1E1F',
'feharabic': u'\u0641',
'feharmenian': u'\u0586',
'fehfinalarabic': u'\uFED2',
'fehinitialarabic': u'\uFED3',
'fehmedialarabic': u'\uFED4',
'feicoptic': u'\u03E5',
'female': u'\u2640',
'ff': u'\uFB00',
'ffi': u'\uFB03',
'ffl': u'\uFB04',
'fi': u'\uFB01',
'fifteencircle': u'\u246E',
'fifteenparen': u'\u2482',
'fifteenperiod': u'\u2496',
'figuredash': u'\u2012',
'filledbox': u'\u25A0',
'filledrect': u'\u25AC',
'finalkaf': u'\u05DA',
'finalkafdagesh': u'\uFB3A',
'finalkafdageshhebrew': u'\uFB3A',
'finalkafhebrew': u'\u05DA',
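    # Some entries map a glyph name to a multi-codepoint sequence (a base letter
    # followed by combining marks), so values are not always a single character.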
'finalkafqamats': u'\u05DA\u05B8',
'finalkafqamatshebrew': u'\u05DA\u05B8',
'finalkafsheva': u'\u05DA\u05B0',
'finalkafshevahebrew': u'\u05DA\u05B0',
'finalmem': u'\u05DD',
'finalmemhebrew': u'\u05DD',
'finalnun': u'\u05DF',
'finalnunhebrew': u'\u05DF',
'finalpe': u'\u05E3',
'finalpehebrew': u'\u05E3',
'finaltsadi': u'\u05E5',
'finaltsadihebrew': u'\u05E5',
'firsttonechinese': u'\u02C9',
'fisheye': u'\u25C9',
'fitacyrillic': u'\u0473',
'five': u'\u0035',
'fivearabic': u'\u0665',
'fivebengali': u'\u09EB',
'fivecircle': u'\u2464',
'fivecircleinversesansserif': u'\u278E',
'fivedeva': u'\u096B',
'fiveeighths': u'\u215D',
'fivegujarati': u'\u0AEB',
'fivegurmukhi': u'\u0A6B',
'fivehackarabic': u'\u0665',
'fivehangzhou': u'\u3025',
'fiveideographicparen': u'\u3224',
'fiveinferior': u'\u2085',
'fivemonospace': u'\uFF15',
'fiveoldstyle': u'\uF735',
'fiveparen': u'\u2478',
'fiveperiod': u'\u248C',
'fivepersian': u'\u06F5',
'fiveroman': u'\u2174',
'fivesuperior': u'\u2075',
'fivethai': u'\u0E55',
'fl': u'\uFB02',
'florin': u'\u0192',
'fmonospace': u'\uFF46',
'fmsquare': u'\u3399',
'fofanthai': u'\u0E1F',
'fofathai': u'\u0E1D',
'fongmanthai': u'\u0E4F',
'forall': u'\u2200',
'four': u'\u0034',
'fourarabic': u'\u0664',
'fourbengali': u'\u09EA',
'fourcircle': u'\u2463',
'fourcircleinversesansserif': u'\u278D',
'fourdeva': u'\u096A',
'fourgujarati': u'\u0AEA',
'fourgurmukhi': u'\u0A6A',
'fourhackarabic': u'\u0664',
'fourhangzhou': u'\u3024',
'fourideographicparen': u'\u3223',
'fourinferior': u'\u2084',
'fourmonospace': u'\uFF14',
'fournumeratorbengali': u'\u09F7',
'fouroldstyle': u'\uF734',
'fourparen': u'\u2477',
'fourperiod': u'\u248B',
'fourpersian': u'\u06F4',
'fourroman': u'\u2173',
'foursuperior': u'\u2074',
'fourteencircle': u'\u246D',
'fourteenparen': u'\u2481',
'fourteenperiod': u'\u2495',
'fourthai': u'\u0E54',
'fourthtonechinese': u'\u02CB',
'fparen': u'\u24A1',
'fraction': u'\u2044',
'franc': u'\u20A3',
'g': u'\u0067',
'gabengali': u'\u0997',
'gacute': u'\u01F5',
'gadeva': u'\u0917',
'gafarabic': u'\u06AF',
'gaffinalarabic': u'\uFB93',
'gafinitialarabic': u'\uFB94',
'gafmedialarabic': u'\uFB95',
'gagujarati': u'\u0A97',
'gagurmukhi': u'\u0A17',
'gahiragana': u'\u304C',
'gakatakana': u'\u30AC',
'gamma': u'\u03B3',
'gammalatinsmall': u'\u0263',
'gammasuperior': u'\u02E0',
'gangiacoptic': u'\u03EB',
'gbopomofo': u'\u310D',
'gbreve': u'\u011F',
'gcaron': u'\u01E7',
'gcedilla': u'\u0123',
'gcircle': u'\u24D6',
'gcircumflex': u'\u011D',
'gcommaaccent': u'\u0123',
'gdot': u'\u0121',
'gdotaccent': u'\u0121',
'gecyrillic': u'\u0433',
'gehiragana': u'\u3052',
'gekatakana': u'\u30B2',
'geometricallyequal': u'\u2251',
'gereshaccenthebrew': u'\u059C',
'gereshhebrew': u'\u05F3',
'gereshmuqdamhebrew': u'\u059D',
'germandbls': u'\u00DF',
'gershayimaccenthebrew': u'\u059E',
'gershayimhebrew': u'\u05F4',
'getamark': u'\u3013',
'ghabengali': u'\u0998',
'ghadarmenian': u'\u0572',
'ghadeva': u'\u0918',
'ghagujarati': u'\u0A98',
'ghagurmukhi': u'\u0A18',
'ghainarabic': u'\u063A',
'ghainfinalarabic': u'\uFECE',
'ghaininitialarabic': u'\uFECF',
'ghainmedialarabic': u'\uFED0',
'ghemiddlehookcyrillic': u'\u0495',
'ghestrokecyrillic': u'\u0493',
'gheupturncyrillic': u'\u0491',
'ghhadeva': u'\u095A',
'ghhagurmukhi': u'\u0A5A',
'ghook': u'\u0260',
'ghzsquare': u'\u3393',
'gihiragana': u'\u304E',
'gikatakana': u'\u30AE',
'gimarmenian': u'\u0563',
'gimel': u'\u05D2',
'gimeldagesh': u'\uFB32',
'gimeldageshhebrew': u'\uFB32',
'gimelhebrew': u'\u05D2',
'gjecyrillic': u'\u0453',
'glottalinvertedstroke': u'\u01BE',
'glottalstop': u'\u0294',
'glottalstopinverted': u'\u0296',
'glottalstopmod': u'\u02C0',
'glottalstopreversed': u'\u0295',
'glottalstopreversedmod': u'\u02C1',
'glottalstopreversedsuperior': u'\u02E4',
'glottalstopstroke': u'\u02A1',
'glottalstopstrokereversed': u'\u02A2',
'gmacron': u'\u1E21',
'gmonospace': u'\uFF47',
'gohiragana': u'\u3054',
'gokatakana': u'\u30B4',
'gparen': u'\u24A2',
'gpasquare': u'\u33AC',
'gradient': u'\u2207',
'grave': u'\u0060',
'gravebelowcmb': u'\u0316',
'gravecmb': u'\u0300',
'gravecomb': u'\u0300',
'gravedeva': u'\u0953',
'gravelowmod': u'\u02CE',
'gravemonospace': u'\uFF40',
'gravetonecmb': u'\u0340',
'greater': u'\u003E',
'greaterequal': u'\u2265',
'greaterequalorless': u'\u22DB',
'greatermonospace': u'\uFF1E',
'greaterorequivalent': u'\u2273',
'greaterorless': u'\u2277',
'greateroverequal': u'\u2267',
'greatersmall': u'\uFE65',
'gscript': u'\u0261',
'gstroke': u'\u01E5',
'guhiragana': u'\u3050',
'guillemotleft': u'\u00AB',
'guillemotright': u'\u00BB',
'guilsinglleft': u'\u2039',
'guilsinglright': u'\u203A',
'gukatakana': u'\u30B0',
'guramusquare': u'\u3318',
'gysquare': u'\u33C9',
'h': u'\u0068',
'haabkhasiancyrillic': u'\u04A9',
'haaltonearabic': u'\u06C1',
'habengali': u'\u09B9',
'hadescendercyrillic': u'\u04B3',
'hadeva': u'\u0939',
'hagujarati': u'\u0AB9',
'hagurmukhi': u'\u0A39',
'haharabic': u'\u062D',
'hahfinalarabic': u'\uFEA2',
'hahinitialarabic': u'\uFEA3',
'hahiragana': u'\u306F',
'hahmedialarabic': u'\uFEA4',
'haitusquare': u'\u332A',
'hakatakana': u'\u30CF',
'hakatakanahalfwidth': u'\uFF8A',
'halantgurmukhi': u'\u0A4D',
'hamzaarabic': u'\u0621',
'hamzadammaarabic': u'\u0621\u064F',
'hamzadammatanarabic': u'\u0621\u064C',
'hamzafathaarabic': u'\u0621\u064E',
'hamzafathatanarabic': u'\u0621\u064B',
'hamzalowarabic': u'\u0621',
'hamzalowkasraarabic': u'\u0621\u0650',
'hamzalowkasratanarabic': u'\u0621\u064D',
'hamzasukunarabic': u'\u0621\u0652',
'hangulfiller': u'\u3164',
'hardsigncyrillic': u'\u044A',
'harpoonleftbarbup': u'\u21BC',
'harpoonrightbarbup': u'\u21C0',
'hasquare': u'\u33CA',
'hatafpatah': u'\u05B2',
'hatafpatah16': u'\u05B2',
'hatafpatah23': u'\u05B2',
'hatafpatah2f': u'\u05B2',
'hatafpatahhebrew': u'\u05B2',
'hatafpatahnarrowhebrew': u'\u05B2',
'hatafpatahquarterhebrew': u'\u05B2',
'hatafpatahwidehebrew': u'\u05B2',
'hatafqamats': u'\u05B3',
'hatafqamats1b': u'\u05B3',
'hatafqamats28': u'\u05B3',
'hatafqamats34': u'\u05B3',
'hatafqamatshebrew': u'\u05B3',
'hatafqamatsnarrowhebrew': u'\u05B3',
'hatafqamatsquarterhebrew': u'\u05B3',
'hatafqamatswidehebrew': u'\u05B3',
'hatafsegol': u'\u05B1',
'hatafsegol17': u'\u05B1',
'hatafsegol24': u'\u05B1',
'hatafsegol30': u'\u05B1',
'hatafsegolhebrew': u'\u05B1',
'hatafsegolnarrowhebrew': u'\u05B1',
'hatafsegolquarterhebrew': u'\u05B1',
'hatafsegolwidehebrew': u'\u05B1',
'hbar': u'\u0127',
'hbopomofo': u'\u310F',
'hbrevebelow': u'\u1E2B',
'hcedilla': u'\u1E29',
'hcircle': u'\u24D7',
'hcircumflex': u'\u0125',
'hdieresis': u'\u1E27',
'hdotaccent': u'\u1E23',
'hdotbelow': u'\u1E25',
'he': u'\u05D4',
'heart': u'\u2665',
'heartsuitblack': u'\u2665',
'heartsuitwhite': u'\u2661',
'hedagesh': u'\uFB34',
'hedageshhebrew': u'\uFB34',
'hehaltonearabic': u'\u06C1',
'heharabic': u'\u0647',
'hehebrew': u'\u05D4',
'hehfinalaltonearabic': u'\uFBA7',
'hehfinalalttwoarabic': u'\uFEEA',
'hehfinalarabic': u'\uFEEA',
'hehhamzaabovefinalarabic': u'\uFBA5',
'hehhamzaaboveisolatedarabic': u'\uFBA4',
'hehinitialaltonearabic': u'\uFBA8',
'hehinitialarabic': u'\uFEEB',
'hehiragana': u'\u3078',
'hehmedialaltonearabic': u'\uFBA9',
'hehmedialarabic': u'\uFEEC',
'heiseierasquare': u'\u337B',
'hekatakana': u'\u30D8',
'hekatakanahalfwidth': u'\uFF8D',
'hekutaarusquare': u'\u3336',
'henghook': u'\u0267',
'herutusquare': u'\u3339',
'het': u'\u05D7',
'hethebrew': u'\u05D7',
'hhook': u'\u0266',
'hhooksuperior': u'\u02B1',
'hieuhacirclekorean': u'\u327B',
'hieuhaparenkorean': u'\u321B',
'hieuhcirclekorean': u'\u326D',
'hieuhkorean': u'\u314E',
'hieuhparenkorean': u'\u320D',
'hihiragana': u'\u3072',
'hikatakana': u'\u30D2',
'hikatakanahalfwidth': u'\uFF8B',
'hiriq': u'\u05B4',
'hiriq14': u'\u05B4',
'hiriq21': u'\u05B4',
'hiriq2d': u'\u05B4',
'hiriqhebrew': u'\u05B4',
'hiriqnarrowhebrew': u'\u05B4',
'hiriqquarterhebrew': u'\u05B4',
'hiriqwidehebrew': u'\u05B4',
'hlinebelow': u'\u1E96',
'hmonospace': u'\uFF48',
'hoarmenian': u'\u0570',
'hohipthai': u'\u0E2B',
'hohiragana': u'\u307B',
'hokatakana': u'\u30DB',
'hokatakanahalfwidth': u'\uFF8E',
'holam': u'\u05B9',
'holam19': u'\u05B9',
'holam26': u'\u05B9',
'holam32': u'\u05B9',
'holamhebrew': u'\u05B9',
'holamnarrowhebrew': u'\u05B9',
'holamquarterhebrew': u'\u05B9',
'holamwidehebrew': u'\u05B9',
'honokhukthai': u'\u0E2E',
'hookabovecomb': u'\u0309',
'hookcmb': u'\u0309',
'hookpalatalizedbelowcmb': u'\u0321',
'hookretroflexbelowcmb': u'\u0322',
'hoonsquare': u'\u3342',
'horicoptic': u'\u03E9',
'horizontalbar': u'\u2015',
'horncmb': u'\u031B',
'hotsprings': u'\u2668',
'house': u'\u2302',
'hparen': u'\u24A3',
'hsuperior': u'\u02B0',
'hturned': u'\u0265',
'huhiragana': u'\u3075',
'huiitosquare': u'\u3333',
'hukatakana': u'\u30D5',
'hukatakanahalfwidth': u'\uFF8C',
'hungarumlaut': u'\u02DD',
'hungarumlautcmb': u'\u030B',
'hv': u'\u0195',
'hyphen': u'\u002D',
'hypheninferior': u'\uF6E5',
'hyphenmonospace': u'\uFF0D',
'hyphensmall': u'\uFE63',
'hyphensuperior': u'\uF6E6',
'hyphentwo': u'\u2010',
'i': u'\u0069',
'iacute': u'\u00ED',
'iacyrillic': u'\u044F',
'ibengali': u'\u0987',
'ibopomofo': u'\u3127',
'ibreve': u'\u012D',
'icaron': u'\u01D0',
'icircle': u'\u24D8',
'icircumflex': u'\u00EE',
'icyrillic': u'\u0456',
'idblgrave': u'\u0209',
'ideographearthcircle': u'\u328F',
'ideographfirecircle': u'\u328B',
'ideographicallianceparen': u'\u323F',
'ideographiccallparen': u'\u323A',
'ideographiccentrecircle': u'\u32A5',
'ideographicclose': u'\u3006',
'ideographiccomma': u'\u3001',
'ideographiccommaleft': u'\uFF64',
'ideographiccongratulationparen': u'\u3237',
'ideographiccorrectcircle': u'\u32A3',
'ideographicearthparen': u'\u322F',
'ideographicenterpriseparen': u'\u323D',
'ideographicexcellentcircle': u'\u329D',
'ideographicfestivalparen': u'\u3240',
'ideographicfinancialcircle': u'\u3296',
'ideographicfinancialparen': u'\u3236',
'ideographicfireparen': u'\u322B',
'ideographichaveparen': u'\u3232',
'ideographichighcircle': u'\u32A4',
'ideographiciterationmark': u'\u3005',
'ideographiclaborcircle': u'\u3298',
'ideographiclaborparen': u'\u3238',
'ideographicleftcircle': u'\u32A7',
'ideographiclowcircle': u'\u32A6',
'ideographicmedicinecircle': u'\u32A9',
'ideographicmetalparen': u'\u322E',
'ideographicmoonparen': u'\u322A',
'ideographicnameparen': u'\u3234',
'ideographicperiod': u'\u3002',
'ideographicprintcircle': u'\u329E',
'ideographicreachparen': u'\u3243',
'ideographicrepresentparen': u'\u3239',
'ideographicresourceparen': u'\u323E',
'ideographicrightcircle': u'\u32A8',
'ideographicsecretcircle': u'\u3299',
'ideographicselfparen': u'\u3242',
'ideographicsocietyparen': u'\u3233',
'ideographicspace': u'\u3000',
'ideographicspecialparen': u'\u3235',
'ideographicstockparen': u'\u3231',
'ideographicstudyparen': u'\u323B',
'ideographicsunparen': u'\u3230',
'ideographicsuperviseparen': u'\u323C',
'ideographicwaterparen': u'\u322C',
'ideographicwoodparen': u'\u322D',
'ideographiczero': u'\u3007',
'ideographmetalcircle': u'\u328E',
'ideographmooncircle': u'\u328A',
'ideographnamecircle': u'\u3294',
'ideographsuncircle': u'\u3290',
'ideographwatercircle': u'\u328C',
'ideographwoodcircle': u'\u328D',
'ideva': u'\u0907',
'idieresis': u'\u00EF',
'idieresisacute': u'\u1E2F',
'idieresiscyrillic': u'\u04E5',
'idotbelow': u'\u1ECB',
'iebrevecyrillic': u'\u04D7',
'iecyrillic': u'\u0435',
'ieungacirclekorean': u'\u3275',
'ieungaparenkorean': u'\u3215',
'ieungcirclekorean': u'\u3267',
'ieungkorean': u'\u3147',
'ieungparenkorean': u'\u3207',
'igrave': u'\u00EC',
'igujarati': u'\u0A87',
'igurmukhi': u'\u0A07',
'ihiragana': u'\u3044',
'ihookabove': u'\u1EC9',
'iibengali': u'\u0988',
'iicyrillic': u'\u0438',
'iideva': u'\u0908',
'iigujarati': u'\u0A88',
'iigurmukhi': u'\u0A08',
'iimatragurmukhi': u'\u0A40',
'iinvertedbreve': u'\u020B',
'iishortcyrillic': u'\u0439',
'iivowelsignbengali': u'\u09C0',
'iivowelsigndeva': u'\u0940',
'iivowelsigngujarati': u'\u0AC0',
'ij': u'\u0133',
'ikatakana': u'\u30A4',
'ikatakanahalfwidth': u'\uFF72',
'ikorean': u'\u3163',
'ilde': u'\u02DC',
'iluyhebrew': u'\u05AC',
'imacron': u'\u012B',
'imacroncyrillic': u'\u04E3',
'imageorapproximatelyequal': u'\u2253',
'imatragurmukhi': u'\u0A3F',
'imonospace': u'\uFF49',
'increment': u'\u2206',
'infinity': u'\u221E',
'iniarmenian': u'\u056B',
'integral': u'\u222B',
'integralbottom': u'\u2321',
'integralbt': u'\u2321',
'integralex': u'\uF8F5',
'integraltop': u'\u2320',
'integraltp': u'\u2320',
'intersection': u'\u2229',
'intisquare': u'\u3305',
'invbullet': u'\u25D8',
'invcircle': u'\u25D9',
'invsmileface': u'\u263B',
'iocyrillic': u'\u0451',
'iogonek': u'\u012F',
'iota': u'\u03B9',
'iotadieresis': u'\u03CA',
'iotadieresistonos': u'\u0390',
'iotalatin': u'\u0269',
'iotatonos': u'\u03AF',
'iparen': u'\u24A4',
'irigurmukhi': u'\u0A72',
'ismallhiragana': u'\u3043',
'ismallkatakana': u'\u30A3',
'ismallkatakanahalfwidth': u'\uFF68',
'issharbengali': u'\u09FA',
'istroke': u'\u0268',
'isuperior': u'\uF6ED',
'iterationhiragana': u'\u309D',
'iterationkatakana': u'\u30FD',
'itilde': u'\u0129',
'itildebelow': u'\u1E2D',
'iubopomofo': u'\u3129',
'iucyrillic': u'\u044E',
'ivowelsignbengali': u'\u09BF',
'ivowelsigndeva': u'\u093F',
'ivowelsigngujarati': u'\u0ABF',
'izhitsacyrillic': u'\u0475',
'izhitsadblgravecyrillic': u'\u0477',
'j': u'\u006A',
'jaarmenian': u'\u0571',
'jabengali': u'\u099C',
'jadeva': u'\u091C',
'jagujarati': u'\u0A9C',
'jagurmukhi': u'\u0A1C',
'jbopomofo': u'\u3110',
'jcaron': u'\u01F0',
'jcircle': u'\u24D9',
'jcircumflex': u'\u0135',
'jcrossedtail': u'\u029D',
'jdotlessstroke': u'\u025F',
'jecyrillic': u'\u0458',
'jeemarabic': u'\u062C',
'jeemfinalarabic': u'\uFE9E',
'jeeminitialarabic': u'\uFE9F',
'jeemmedialarabic': u'\uFEA0',
'jeharabic': u'\u0698',
'jehfinalarabic': u'\uFB8B',
'jhabengali': u'\u099D',
'jhadeva': u'\u091D',
'jhagujarati': u'\u0A9D',
'jhagurmukhi': u'\u0A1D',
'jheharmenian': u'\u057B',
'jis': u'\u3004',
'jmonospace': u'\uFF4A',
'jparen': u'\u24A5',
'jsuperior': u'\u02B2',
'k': u'\u006B',
'kabashkircyrillic': u'\u04A1',
'kabengali': u'\u0995',
'kacute': u'\u1E31',
'kacyrillic': u'\u043A',
'kadescendercyrillic': u'\u049B',
'kadeva': u'\u0915',
'kaf': u'\u05DB',
'kafarabic': u'\u0643',
'kafdagesh': u'\uFB3B',
'kafdageshhebrew': u'\uFB3B',
'kaffinalarabic': u'\uFEDA',
'kafhebrew': u'\u05DB',
'kafinitialarabic': u'\uFEDB',
'kafmedialarabic': u'\uFEDC',
'kafrafehebrew': u'\uFB4D',
'kagujarati': u'\u0A95',
'kagurmukhi': u'\u0A15',
'kahiragana': u'\u304B',
'kahookcyrillic': u'\u04C4',
'kakatakana': u'\u30AB',
'kakatakanahalfwidth': u'\uFF76',
'kappa': u'\u03BA',
'kappasymbolgreek': u'\u03F0',
'kapyeounmieumkorean': u'\u3171',
'kapyeounphieuphkorean': u'\u3184',
'kapyeounpieupkorean': u'\u3178',
'kapyeounssangpieupkorean': u'\u3179',
'karoriisquare': u'\u330D',
'kashidaautoarabic': u'\u0640',
'kashidaautonosidebearingarabic': u'\u0640',
'kasmallkatakana': u'\u30F5',
'kasquare': u'\u3384',
'kasraarabic': u'\u0650',
'kasratanarabic': u'\u064D',
'kastrokecyrillic': u'\u049F',
'katahiraprolongmarkhalfwidth': u'\uFF70',
'kaverticalstrokecyrillic': u'\u049D',
'kbopomofo': u'\u310E',
'kcalsquare': u'\u3389',
'kcaron': u'\u01E9',
'kcedilla': u'\u0137',
'kcircle': u'\u24DA',
'kcommaaccent': u'\u0137',
'kdotbelow': u'\u1E33',
'keharmenian': u'\u0584',
'kehiragana': u'\u3051',
'kekatakana': u'\u30B1',
'kekatakanahalfwidth': u'\uFF79',
'kenarmenian': u'\u056F',
'kesmallkatakana': u'\u30F6',
'kgreenlandic': u'\u0138',
'khabengali': u'\u0996',
'khacyrillic': u'\u0445',
'khadeva': u'\u0916',
'khagujarati': u'\u0A96',
'khagurmukhi': u'\u0A16',
'khaharabic': u'\u062E',
'khahfinalarabic': u'\uFEA6',
'khahinitialarabic': u'\uFEA7',
'khahmedialarabic': u'\uFEA8',
'kheicoptic': u'\u03E7',
'khhadeva': u'\u0959',
'khhagurmukhi': u'\u0A59',
'khieukhacirclekorean': u'\u3278',
'khieukhaparenkorean': u'\u3218',
'khieukhcirclekorean': u'\u326A',
'khieukhkorean': u'\u314B',
'khieukhparenkorean': u'\u320A',
'khokhaithai': u'\u0E02',
'khokhonthai': u'\u0E05',
'khokhuatthai': u'\u0E03',
'khokhwaithai': u'\u0E04',
'khomutthai': u'\u0E5B',
'khook': u'\u0199',
'khorakhangthai': u'\u0E06',
'khzsquare': u'\u3391',
'kihiragana': u'\u304D',
'kikatakana': u'\u30AD',
'kikatakanahalfwidth': u'\uFF77',
'kiroguramusquare': u'\u3315',
'kiromeetorusquare': u'\u3316',
'kirosquare': u'\u3314',
'kiyeokacirclekorean': u'\u326E',
'kiyeokaparenkorean': u'\u320E',
'kiyeokcirclekorean': u'\u3260',
'kiyeokkorean': u'\u3131',
'kiyeokparenkorean': u'\u3200',
'kiyeoksioskorean': u'\u3133',
'kjecyrillic': u'\u045C',
'klinebelow': u'\u1E35',
'klsquare': u'\u3398',
'kmcubedsquare': u'\u33A6',
'kmonospace': u'\uFF4B',
'kmsquaredsquare': u'\u33A2',
'kohiragana': u'\u3053',
'kohmsquare': u'\u33C0',
'kokaithai': u'\u0E01',
'kokatakana': u'\u30B3',
'kokatakanahalfwidth': u'\uFF7A',
'kooposquare': u'\u331E',
'koppacyrillic': u'\u0481',
'koreanstandardsymbol': u'\u327F',
'koroniscmb': u'\u0343',
'kparen': u'\u24A6',
'kpasquare': u'\u33AA',
'ksicyrillic': u'\u046F',
'ktsquare': u'\u33CF',
'kturned': u'\u029E',
'kuhiragana': u'\u304F',
'kukatakana': u'\u30AF',
'kukatakanahalfwidth': u'\uFF78',
'kvsquare': u'\u33B8',
'kwsquare': u'\u33BE',
'l': u'\u006C',
'labengali': u'\u09B2',
'lacute': u'\u013A',
'ladeva': u'\u0932',
'lagujarati': u'\u0AB2',
'lagurmukhi': u'\u0A32',
'lakkhangyaothai': u'\u0E45',
'lamaleffinalarabic': u'\uFEFC',
'lamalefhamzaabovefinalarabic': u'\uFEF8',
'lamalefhamzaaboveisolatedarabic': u'\uFEF7',
'lamalefhamzabelowfinalarabic': u'\uFEFA',
'lamalefhamzabelowisolatedarabic': u'\uFEF9',
'lamalefisolatedarabic': u'\uFEFB',
'lamalefmaddaabovefinalarabic': u'\uFEF6',
'lamalefmaddaaboveisolatedarabic': u'\uFEF5',
'lamarabic': u'\u0644',
'lambda': u'\u03BB',
'lambdastroke': u'\u019B',
'lamed': u'\u05DC',
'lameddagesh': u'\uFB3C',
'lameddageshhebrew': u'\uFB3C',
'lamedhebrew': u'\u05DC',
'lamedholam': u'\u05DC\u05B9',
'lamedholamdagesh': u'\u05DC\u05B9\u05BC',
'lamedholamdageshhebrew': u'\u05DC\u05B9\u05BC',
'lamedholamhebrew': u'\u05DC\u05B9',
'lamfinalarabic': u'\uFEDE',
'lamhahinitialarabic': u'\uFCCA',
'laminitialarabic': u'\uFEDF',
'lamjeeminitialarabic': u'\uFCC9',
'lamkhahinitialarabic': u'\uFCCB',
'lamlamhehisolatedarabic': u'\uFDF2',
'lammedialarabic': u'\uFEE0',
'lammeemhahinitialarabic': u'\uFD88',
'lammeeminitialarabic': u'\uFCCC',
'lammeemjeeminitialarabic': u'\uFEDF\uFEE4\uFEA0',
'lammeemkhahinitialarabic': u'\uFEDF\uFEE4\uFEA8',
'largecircle': u'\u25EF',
'lbar': u'\u019A',
'lbelt': u'\u026C',
'lbopomofo': u'\u310C',
'lcaron': u'\u013E',
'lcedilla': u'\u013C',
'lcircle': u'\u24DB',
'lcircumflexbelow': u'\u1E3D',
'lcommaaccent': u'\u013C',
'ldot': u'\u0140',
'ldotaccent': u'\u0140',
'ldotbelow': u'\u1E37',
'ldotbelowmacron': u'\u1E39',
'leftangleabovecmb': u'\u031A',
'lefttackbelowcmb': u'\u0318',
'less': u'\u003C',
'lessequal': u'\u2264',
'lessequalorgreater': u'\u22DA',
'lessmonospace': u'\uFF1C',
'lessorequivalent': u'\u2272',
'lessorgreater': u'\u2276',
'lessoverequal': u'\u2266',
'lesssmall': u'\uFE64',
'lezh': u'\u026E',
'lfblock': u'\u258C',
'lhookretroflex': u'\u026D',
'lira': u'\u20A4',
'liwnarmenian': u'\u056C',
'lj': u'\u01C9',
'ljecyrillic': u'\u0459',
'll': u'\uF6C0',
'lladeva': u'\u0933',
'llagujarati': u'\u0AB3',
'llinebelow': u'\u1E3B',
'llladeva': u'\u0934',
'llvocalicbengali': u'\u09E1',
'llvocalicdeva': u'\u0961',
'llvocalicvowelsignbengali': u'\u09E3',
'llvocalicvowelsigndeva': u'\u0963',
'lmiddletilde': u'\u026B',
'lmonospace': u'\uFF4C',
'lmsquare': u'\u33D0',
'lochulathai': u'\u0E2C',
'logicaland': u'\u2227',
'logicalnot': u'\u00AC',
'logicalnotreversed': u'\u2310',
'logicalor': u'\u2228',
'lolingthai': u'\u0E25',
'longs': u'\u017F',
'lowlinecenterline': u'\uFE4E',
'lowlinecmb': u'\u0332',
'lowlinedashed': u'\uFE4D',
'lozenge': u'\u25CA',
'lparen': u'\u24A7',
'lslash': u'\u0142',
'lsquare': u'\u2113',
'lsuperior': u'\uF6EE',
'ltshade': u'\u2591',
'luthai': u'\u0E26',
'lvocalicbengali': u'\u098C',
'lvocalicdeva': u'\u090C',
'lvocalicvowelsignbengali': u'\u09E2',
'lvocalicvowelsigndeva': u'\u0962',
'lxsquare': u'\u33D3',
'm': u'\u006D',
'mabengali': u'\u09AE',
'macron': u'\u00AF',
'macronbelowcmb': u'\u0331',
'macroncmb': u'\u0304',
'macronlowmod': u'\u02CD',
'macronmonospace': u'\uFFE3',
'macute': u'\u1E3F',
'madeva': u'\u092E',
'magujarati': u'\u0AAE',
'magurmukhi': u'\u0A2E',
'mahapakhhebrew': u'\u05A4',
'mahapakhlefthebrew': u'\u05A4',
'mahiragana': u'\u307E',
'maichattawalowleftthai': u'\uF895',
'maichattawalowrightthai': u'\uF894',
'maichattawathai': u'\u0E4B',
'maichattawaupperleftthai': u'\uF893',
'maieklowleftthai': u'\uF88C',
'maieklowrightthai': u'\uF88B',
'maiekthai': u'\u0E48',
'maiekupperleftthai': u'\uF88A',
'maihanakatleftthai': u'\uF884',
'maihanakatthai': u'\u0E31',
'maitaikhuleftthai': u'\uF889',
'maitaikhuthai': u'\u0E47',
'maitholowleftthai': u'\uF88F',
'maitholowrightthai': u'\uF88E',
'maithothai': u'\u0E49',
'maithoupperleftthai': u'\uF88D',
'maitrilowleftthai': u'\uF892',
'maitrilowrightthai': u'\uF891',
'maitrithai': u'\u0E4A',
'maitriupperleftthai': u'\uF890',
'maiyamokthai': u'\u0E46',
'makatakana': u'\u30DE',
'makatakanahalfwidth': u'\uFF8F',
'male': u'\u2642',
'mansyonsquare': u'\u3347',
'maqafhebrew': u'\u05BE',
'mars': u'\u2642',
'masoracirclehebrew': u'\u05AF',
'masquare': u'\u3383',
'mbopomofo': u'\u3107',
'mbsquare': u'\u33D4',
'mcircle': u'\u24DC',
'mcubedsquare': u'\u33A5',
'mdotaccent': u'\u1E41',
'mdotbelow': u'\u1E43',
'meemarabic': u'\u0645',
'meemfinalarabic': u'\uFEE2',
'meeminitialarabic': u'\uFEE3',
'meemmedialarabic': u'\uFEE4',
'meemmeeminitialarabic': u'\uFCD1',
'meemmeemisolatedarabic': u'\uFC48',
'meetorusquare': u'\u334D',
'mehiragana': u'\u3081',
'meizierasquare': u'\u337E',
'mekatakana': u'\u30E1',
'mekatakanahalfwidth': u'\uFF92',
'mem': u'\u05DE',
'memdagesh': u'\uFB3E',
'memdageshhebrew': u'\uFB3E',
'memhebrew': u'\u05DE',
'menarmenian': u'\u0574',
'merkhahebrew': u'\u05A5',
'merkhakefulahebrew': u'\u05A6',
'merkhakefulalefthebrew': u'\u05A6',
'merkhalefthebrew': u'\u05A5',
'mhook': u'\u0271',
'mhzsquare': u'\u3392',
'middledotkatakanahalfwidth': u'\uFF65',
'middot': u'\u00B7',
'mieumacirclekorean': u'\u3272',
'mieumaparenkorean': u'\u3212',
'mieumcirclekorean': u'\u3264',
'mieumkorean': u'\u3141',
'mieumpansioskorean': u'\u3170',
'mieumparenkorean': u'\u3204',
'mieumpieupkorean': u'\u316E',
'mieumsioskorean': u'\u316F',
'mihiragana': u'\u307F',
'mikatakana': u'\u30DF',
'mikatakanahalfwidth': u'\uFF90',
'minus': u'\u2212',
'minusbelowcmb': u'\u0320',
'minuscircle': u'\u2296',
'minusmod': u'\u02D7',
'minusplus': u'\u2213',
'minute': u'\u2032',
'miribaarusquare': u'\u334A',
'mirisquare': u'\u3349',
'mlonglegturned': u'\u0270',
'mlsquare': u'\u3396',
'mmcubedsquare': u'\u33A3',
'mmonospace': u'\uFF4D',
'mmsquaredsquare': u'\u339F',
'mohiragana': u'\u3082',
'mohmsquare': u'\u33C1',
'mokatakana': u'\u30E2',
'mokatakanahalfwidth': u'\uFF93',
'molsquare': u'\u33D6',
'momathai': u'\u0E21',
'moverssquare': u'\u33A7',
'moverssquaredsquare': u'\u33A8',
'mparen': u'\u24A8',
'mpasquare': u'\u33AB',
'mssquare': u'\u33B3',
'msuperior': u'\uF6EF',
'mturned': u'\u026F',
'mu': u'\u00B5',
'mu1': u'\u00B5',
'muasquare': u'\u3382',
'muchgreater': u'\u226B',
'muchless': u'\u226A',
'mufsquare': u'\u338C',
'mugreek': u'\u03BC',
'mugsquare': u'\u338D',
'muhiragana': u'\u3080',
'mukatakana': u'\u30E0',
'mukatakanahalfwidth': u'\uFF91',
'mulsquare': u'\u3395',
'multiply': u'\u00D7',
'mumsquare': u'\u339B',
'munahhebrew': u'\u05A3',
'munahlefthebrew': u'\u05A3',
'musicalnote': u'\u266A',
'musicalnotedbl': u'\u266B',
'musicflatsign': u'\u266D',
'musicsharpsign': u'\u266F',
'mussquare': u'\u33B2',
'muvsquare': u'\u33B6',
'muwsquare': u'\u33BC',
'mvmegasquare': u'\u33B9',
'mvsquare': u'\u33B7',
'mwmegasquare': u'\u33BF',
'mwsquare': u'\u33BD',
'n': u'\u006E',
'nabengali': u'\u09A8',
'nabla': u'\u2207',
'nacute': u'\u0144',
'nadeva': u'\u0928',
'nagujarati': u'\u0AA8',
'nagurmukhi': u'\u0A28',
'nahiragana': u'\u306A',
'nakatakana': u'\u30CA',
'nakatakanahalfwidth': u'\uFF85',
'napostrophe': u'\u0149',
'nasquare': u'\u3381',
'nbopomofo': u'\u310B',
'nbspace': u'\u00A0',
'ncaron': u'\u0148',
'ncedilla': u'\u0146',
'ncircle': u'\u24DD',
'ncircumflexbelow': u'\u1E4B',
'ncommaaccent': u'\u0146',
'ndotaccent': u'\u1E45',
'ndotbelow': u'\u1E47',
'nehiragana': u'\u306D',
'nekatakana': u'\u30CD',
'nekatakanahalfwidth': u'\uFF88',
'newsheqelsign': u'\u20AA',
'nfsquare': u'\u338B',
'ngabengali': u'\u0999',
'ngadeva': u'\u0919',
'ngagujarati': u'\u0A99',
'ngagurmukhi': u'\u0A19',
'ngonguthai': u'\u0E07',
'nhiragana': u'\u3093',
'nhookleft': u'\u0272',
'nhookretroflex': u'\u0273',
'nieunacirclekorean': u'\u326F',
'nieunaparenkorean': u'\u320F',
'nieuncieuckorean': u'\u3135',
'nieuncirclekorean': u'\u3261',
'nieunhieuhkorean': u'\u3136',
'nieunkorean': u'\u3134',
'nieunpansioskorean': u'\u3168',
'nieunparenkorean': u'\u3201',
'nieunsioskorean': u'\u3167',
'nieuntikeutkorean': u'\u3166',
'nihiragana': u'\u306B',
'nikatakana': u'\u30CB',
'nikatakanahalfwidth': u'\uFF86',
'nikhahitleftthai': u'\uF899',
'nikhahitthai': u'\u0E4D',
'nine': u'\u0039',
'ninearabic': u'\u0669',
'ninebengali': u'\u09EF',
'ninecircle': u'\u2468',
'ninecircleinversesansserif': u'\u2792',
'ninedeva': u'\u096F',
'ninegujarati': u'\u0AEF',
'ninegurmukhi': u'\u0A6F',
'ninehackarabic': u'\u0669',
'ninehangzhou': u'\u3029',
'nineideographicparen': u'\u3228',
'nineinferior': u'\u2089',
'ninemonospace': u'\uFF19',
'nineoldstyle': u'\uF739',
'nineparen': u'\u247C',
'nineperiod': u'\u2490',
'ninepersian': u'\u06F9',
'nineroman': u'\u2178',
'ninesuperior': u'\u2079',
'nineteencircle': u'\u2472',
'nineteenparen': u'\u2486',
'nineteenperiod': u'\u249A',
'ninethai': u'\u0E59',
'nj': u'\u01CC',
'njecyrillic': u'\u045A',
'nkatakana': u'\u30F3',
'nkatakanahalfwidth': u'\uFF9D',
'nlegrightlong': u'\u019E',
'nlinebelow': u'\u1E49',
'nmonospace': u'\uFF4E',
'nmsquare': u'\u339A',
'nnabengali': u'\u09A3',
'nnadeva': u'\u0923',
'nnagujarati': u'\u0AA3',
'nnagurmukhi': u'\u0A23',
'nnnadeva': u'\u0929',
'nohiragana': u'\u306E',
'nokatakana': u'\u30CE',
'nokatakanahalfwidth': u'\uFF89',
'nonbreakingspace': u'\u00A0',
'nonenthai': u'\u0E13',
'nonuthai': u'\u0E19',
'noonarabic': u'\u0646',
'noonfinalarabic': u'\uFEE6',
'noonghunnaarabic': u'\u06BA',
'noonghunnafinalarabic': u'\uFB9F',
'noonhehinitialarabic': u'\uFEE7\uFEEC',
'nooninitialarabic': u'\uFEE7',
'noonjeeminitialarabic': u'\uFCD2',
'noonjeemisolatedarabic': u'\uFC4B',
'noonmedialarabic': u'\uFEE8',
'noonmeeminitialarabic': u'\uFCD5',
'noonmeemisolatedarabic': u'\uFC4E',
'noonnoonfinalarabic': u'\uFC8D',
'notcontains': u'\u220C',
'notelement': u'\u2209',
'notelementof': u'\u2209',
'notequal': u'\u2260',
'notgreater': u'\u226F',
'notgreaternorequal': u'\u2271',
'notgreaternorless': u'\u2279',
'notidentical': u'\u2262',
'notless': u'\u226E',
'notlessnorequal': u'\u2270',
'notparallel': u'\u2226',
'notprecedes': u'\u2280',
'notsubset': u'\u2284',
'notsucceeds': u'\u2281',
'notsuperset': u'\u2285',
'nowarmenian': u'\u0576',
'nparen': u'\u24A9',
'nssquare': u'\u33B1',
'nsuperior': u'\u207F',
'ntilde': u'\u00F1',
'nu': u'\u03BD',
'nuhiragana': u'\u306C',
'nukatakana': u'\u30CC',
'nukatakanahalfwidth': u'\uFF87',
'nuktabengali': u'\u09BC',
'nuktadeva': u'\u093C',
'nuktagujarati': u'\u0ABC',
'nuktagurmukhi': u'\u0A3C',
'numbersign': u'\u0023',
'numbersignmonospace': u'\uFF03',
'numbersignsmall': u'\uFE5F',
'numeralsigngreek': u'\u0374',
'numeralsignlowergreek': u'\u0375',
'numero': u'\u2116',
'nun': u'\u05E0',
'nundagesh': u'\uFB40',
'nundageshhebrew': u'\uFB40',
'nunhebrew': u'\u05E0',
'nvsquare': u'\u33B5',
'nwsquare': u'\u33BB',
'nyabengali': u'\u099E',
'nyadeva': u'\u091E',
'nyagujarati': u'\u0A9E',
'nyagurmukhi': u'\u0A1E',
'o': u'\u006F',
'oacute': u'\u00F3',
'oangthai': u'\u0E2D',
'obarred': u'\u0275',
'obarredcyrillic': u'\u04E9',
'obarreddieresiscyrillic': u'\u04EB',
'obengali': u'\u0993',
'obopomofo': u'\u311B',
'obreve': u'\u014F',
'ocandradeva': u'\u0911',
'ocandragujarati': u'\u0A91',
'ocandravowelsigndeva': u'\u0949',
'ocandravowelsigngujarati': u'\u0AC9',
'ocaron': u'\u01D2',
'ocircle': u'\u24DE',
'ocircumflex': u'\u00F4',
'ocircumflexacute': u'\u1ED1',
'ocircumflexdotbelow': u'\u1ED9',
'ocircumflexgrave': u'\u1ED3',
'ocircumflexhookabove': u'\u1ED5',
'ocircumflextilde': u'\u1ED7',
'ocyrillic': u'\u043E',
'odblacute': u'\u0151',
'odblgrave': u'\u020D',
'odeva': u'\u0913',
'odieresis': u'\u00F6',
'odieresiscyrillic': u'\u04E7',
'odotbelow': u'\u1ECD',
'oe': u'\u0153',
'oekorean': u'\u315A',
'ogonek': u'\u02DB',
'ogonekcmb': u'\u0328',
'ograve': u'\u00F2',
'ogujarati': u'\u0A93',
'oharmenian': u'\u0585',
'ohiragana': u'\u304A',
'ohookabove': u'\u1ECF',
'ohorn': u'\u01A1',
'ohornacute': u'\u1EDB',
'ohorndotbelow': u'\u1EE3',
'ohorngrave': u'\u1EDD',
'ohornhookabove': u'\u1EDF',
'ohorntilde': u'\u1EE1',
'ohungarumlaut': u'\u0151',
'oi': u'\u01A3',
'oinvertedbreve': u'\u020F',
'okatakana': u'\u30AA',
'okatakanahalfwidth': u'\uFF75',
'okorean': u'\u3157',
'olehebrew': u'\u05AB',
'omacron': u'\u014D',
'omacronacute': u'\u1E53',
'omacrongrave': u'\u1E51',
'omdeva': u'\u0950',
'omega': u'\u03C9',
'omega1': u'\u03D6',
'omegacyrillic': u'\u0461',
'omegalatinclosed': u'\u0277',
'omegaroundcyrillic': u'\u047B',
'omegatitlocyrillic': u'\u047D',
'omegatonos': u'\u03CE',
'omgujarati': u'\u0AD0',
'omicron': u'\u03BF',
'omicrontonos': u'\u03CC',
'omonospace': u'\uFF4F',
'one': u'\u0031',
'onearabic': u'\u0661',
'onebengali': u'\u09E7',
'onecircle': u'\u2460',
'onecircleinversesansserif': u'\u278A',
'onedeva': u'\u0967',
'onedotenleader': u'\u2024',
'oneeighth': u'\u215B',
'onefitted': u'\uF6DC',
'onegujarati': u'\u0AE7',
'onegurmukhi': u'\u0A67',
'onehackarabic': u'\u0661',
'onehalf': u'\u00BD',
'onehangzhou': u'\u3021',
'oneideographicparen': u'\u3220',
'oneinferior': u'\u2081',
'onemonospace': u'\uFF11',
'onenumeratorbengali': u'\u09F4',
'oneoldstyle': u'\uF731',
'oneparen': u'\u2474',
'oneperiod': u'\u2488',
'onepersian': u'\u06F1',
'onequarter': u'\u00BC',
'oneroman': u'\u2170',
'onesuperior': u'\u00B9',
'onethai': u'\u0E51',
'onethird': u'\u2153',
'oogonek': u'\u01EB',
'oogonekmacron': u'\u01ED',
'oogurmukhi': u'\u0A13',
'oomatragurmukhi': u'\u0A4B',
'oopen': u'\u0254',
'oparen': u'\u24AA',
'openbullet': u'\u25E6',
'option': u'\u2325',
'ordfeminine': u'\u00AA',
'ordmasculine': u'\u00BA',
'orthogonal': u'\u221F',
'oshortdeva': u'\u0912',
'oshortvowelsigndeva': u'\u094A',
'oslash': u'\u00F8',
'oslashacute': u'\u01FF',
'osmallhiragana': u'\u3049',
'osmallkatakana': u'\u30A9',
'osmallkatakanahalfwidth': u'\uFF6B',
'ostrokeacute': u'\u01FF',
'osuperior': u'\uF6F0',
'otcyrillic': u'\u047F',
'otilde': u'\u00F5',
'otildeacute': u'\u1E4D',
'otildedieresis': u'\u1E4F',
'oubopomofo': u'\u3121',
'overline': u'\u203E',
'overlinecenterline': u'\uFE4A',
'overlinecmb': u'\u0305',
'overlinedashed': u'\uFE49',
'overlinedblwavy': u'\uFE4C',
'overlinewavy': u'\uFE4B',
'overscore': u'\u00AF',
'ovowelsignbengali': u'\u09CB',
'ovowelsigndeva': u'\u094B',
'ovowelsigngujarati': u'\u0ACB',
'p': u'\u0070',
'paampssquare': u'\u3380',
'paasentosquare': u'\u332B',
'pabengali': u'\u09AA',
'pacute': u'\u1E55',
'padeva': u'\u092A',
'pagedown': u'\u21DF',
'pageup': u'\u21DE',
'pagujarati': u'\u0AAA',
'pagurmukhi': u'\u0A2A',
'pahiragana': u'\u3071',
'paiyannoithai': u'\u0E2F',
'pakatakana': u'\u30D1',
'palatalizationcyrilliccmb': u'\u0484',
'palochkacyrillic': u'\u04C0',
'pansioskorean': u'\u317F',
'paragraph': u'\u00B6',
'parallel': u'\u2225',
'parenleft': u'\u0028',
'parenleftaltonearabic': u'\uFD3E',
'parenleftbt': u'\uF8ED',
'parenleftex': u'\uF8EC',
'parenleftinferior': u'\u208D',
'parenleftmonospace': u'\uFF08',
'parenleftsmall': u'\uFE59',
'parenleftsuperior': u'\u207D',
'parenlefttp': u'\uF8EB',
'parenleftvertical': u'\uFE35',
'parenright': u'\u0029',
'parenrightaltonearabic': u'\uFD3F',
'parenrightbt': u'\uF8F8',
'parenrightex': u'\uF8F7',
'parenrightinferior': u'\u208E',
'parenrightmonospace': u'\uFF09',
'parenrightsmall': u'\uFE5A',
'parenrightsuperior': u'\u207E',
'parenrighttp': u'\uF8F6',
'parenrightvertical': u'\uFE36',
'partialdiff': u'\u2202',
'paseqhebrew': u'\u05C0',
'pashtahebrew': u'\u0599',
'pasquare': u'\u33A9',
'patah': u'\u05B7',
'patah11': u'\u05B7',
'patah1d': u'\u05B7',
'patah2a': u'\u05B7',
'patahhebrew': u'\u05B7',
'patahnarrowhebrew': u'\u05B7',
'patahquarterhebrew': u'\u05B7',
'patahwidehebrew': u'\u05B7',
'pazerhebrew': u'\u05A1',
'pbopomofo': u'\u3106',
'pcircle': u'\u24DF',
'pdotaccent': u'\u1E57',
'pe': u'\u05E4',
'pecyrillic': u'\u043F',
'pedagesh': u'\uFB44',
'pedageshhebrew': u'\uFB44',
'peezisquare': u'\u333B',
'pefinaldageshhebrew': u'\uFB43',
'peharabic': u'\u067E',
'peharmenian': u'\u057A',
'pehebrew': u'\u05E4',
'pehfinalarabic': u'\uFB57',
'pehinitialarabic': u'\uFB58',
'pehiragana': u'\u307A',
'pehmedialarabic': u'\uFB59',
'pekatakana': u'\u30DA',
'pemiddlehookcyrillic': u'\u04A7',
'perafehebrew': u'\uFB4E',
'percent': u'\u0025',
'percentarabic': u'\u066A',
'percentmonospace': u'\uFF05',
'percentsmall': u'\uFE6A',
'period': u'\u002E',
'periodarmenian': u'\u0589',
'periodcentered': u'\u00B7',
'periodhalfwidth': u'\uFF61',
'periodinferior': u'\uF6E7',
'periodmonospace': u'\uFF0E',
'periodsmall': u'\uFE52',
'periodsuperior': u'\uF6E8',
'perispomenigreekcmb': u'\u0342',
'perpendicular': u'\u22A5',
'perthousand': u'\u2030',
'peseta': u'\u20A7',
'pfsquare': u'\u338A',
'phabengali': u'\u09AB',
'phadeva': u'\u092B',
'phagujarati': u'\u0AAB',
'phagurmukhi': u'\u0A2B',
'phi': u'\u03C6',
'phi1': u'\u03D5',
'phieuphacirclekorean': u'\u327A',
'phieuphaparenkorean': u'\u321A',
'phieuphcirclekorean': u'\u326C',
'phieuphkorean': u'\u314D',
'phieuphparenkorean': u'\u320C',
'philatin': u'\u0278',
'phinthuthai': u'\u0E3A',
'phisymbolgreek': u'\u03D5',
'phook': u'\u01A5',
'phophanthai': u'\u0E1E',
'phophungthai': u'\u0E1C',
'phosamphaothai': u'\u0E20',
'pi': u'\u03C0',
'pieupacirclekorean': u'\u3273',
'pieupaparenkorean': u'\u3213',
'pieupcieuckorean': u'\u3176',
'pieupcirclekorean': u'\u3265',
'pieupkiyeokkorean': u'\u3172',
'pieupkorean': u'\u3142',
'pieupparenkorean': u'\u3205',
'pieupsioskiyeokkorean': u'\u3174',
'pieupsioskorean': u'\u3144',
'pieupsiostikeutkorean': u'\u3175',
'pieupthieuthkorean': u'\u3177',
'pieuptikeutkorean': u'\u3173',
'pihiragana': u'\u3074',
'pikatakana': u'\u30D4',
'pisymbolgreek': u'\u03D6',
'piwrarmenian': u'\u0583',
'plus': u'\u002B',
'plusbelowcmb': u'\u031F',
'pluscircle': u'\u2295',
'plusminus': u'\u00B1',
'plusmod': u'\u02D6',
'plusmonospace': u'\uFF0B',
'plussmall': u'\uFE62',
'plussuperior': u'\u207A',
'pmonospace': u'\uFF50',
'pmsquare': u'\u33D8',
'pohiragana': u'\u307D',
'pointingindexdownwhite': u'\u261F',
'pointingindexleftwhite': u'\u261C',
'pointingindexrightwhite': u'\u261E',
'pointingindexupwhite': u'\u261D',
'pokatakana': u'\u30DD',
'poplathai': u'\u0E1B',
'postalmark': u'\u3012',
'postalmarkface': u'\u3020',
'pparen': u'\u24AB',
'precedes': u'\u227A',
'prescription': u'\u211E',
'primemod': u'\u02B9',
'primereversed': u'\u2035',
'product': u'\u220F',
'projective': u'\u2305',
'prolongedkana': u'\u30FC',
'propellor': u'\u2318',
'propersubset': u'\u2282',
'propersuperset': u'\u2283',
'proportion': u'\u2237',
'proportional': u'\u221D',
'psi': u'\u03C8',
'psicyrillic': u'\u0471',
'psilipneumatacyrilliccmb': u'\u0486',
'pssquare': u'\u33B0',
'puhiragana': u'\u3077',
'pukatakana': u'\u30D7',
'pvsquare': u'\u33B4',
'pwsquare': u'\u33BA',
'q': u'\u0071',
'qadeva': u'\u0958',
'qadmahebrew': u'\u05A8',
'qafarabic': u'\u0642',
'qaffinalarabic': u'\uFED6',
'qafinitialarabic': u'\uFED7',
'qafmedialarabic': u'\uFED8',
'qamats': u'\u05B8',
'qamats10': u'\u05B8',
'qamats1a': u'\u05B8',
'qamats1c': u'\u05B8',
'qamats27': u'\u05B8',
'qamats29': u'\u05B8',
'qamats33': u'\u05B8',
'qamatsde': u'\u05B8',
'qamatshebrew': u'\u05B8',
'qamatsnarrowhebrew': u'\u05B8',
'qamatsqatanhebrew': u'\u05B8',
'qamatsqatannarrowhebrew': u'\u05B8',
'qamatsqatanquarterhebrew': u'\u05B8',
'qamatsqatanwidehebrew': u'\u05B8',
'qamatsquarterhebrew': u'\u05B8',
'qamatswidehebrew': u'\u05B8',
'qarneyparahebrew': u'\u059F',
'qbopomofo': u'\u3111',
'qcircle': u'\u24E0',
'qhook': u'\u02A0',
'qmonospace': u'\uFF51',
'qof': u'\u05E7',
'qofdagesh': u'\uFB47',
'qofdageshhebrew': u'\uFB47',
'qofhatafpatah': u'\u05E7\u05B2',
'qofhatafpatahhebrew': u'\u05E7\u05B2',
'qofhatafsegol': u'\u05E7\u05B1',
'qofhatafsegolhebrew': u'\u05E7\u05B1',
'qofhebrew': u'\u05E7',
'qofhiriq': u'\u05E7\u05B4',
'qofhiriqhebrew': u'\u05E7\u05B4',
'qofholam': u'\u05E7\u05B9',
'qofholamhebrew': u'\u05E7\u05B9',
'qofpatah': u'\u05E7\u05B7',
'qofpatahhebrew': u'\u05E7\u05B7',
'qofqamats': u'\u05E7\u05B8',
'qofqamatshebrew': u'\u05E7\u05B8',
'qofqubuts': u'\u05E7\u05BB',
'qofqubutshebrew': u'\u05E7\u05BB',
'qofsegol': u'\u05E7\u05B6',
'qofsegolhebrew': u'\u05E7\u05B6',
'qofsheva': u'\u05E7\u05B0',
'qofshevahebrew': u'\u05E7\u05B0',
'qoftsere': u'\u05E7\u05B5',
'qoftserehebrew': u'\u05E7\u05B5',
'qparen': u'\u24AC',
'quarternote': u'\u2669',
'qubuts': u'\u05BB',
'qubuts18': u'\u05BB',
'qubuts25': u'\u05BB',
'qubuts31': u'\u05BB',
'qubutshebrew': u'\u05BB',
'qubutsnarrowhebrew': u'\u05BB',
'qubutsquarterhebrew': u'\u05BB',
'qubutswidehebrew': u'\u05BB',
'question': u'\u003F',
'questionarabic': u'\u061F',
'questionarmenian': u'\u055E',
'questiondown': u'\u00BF',
'questiondownsmall': u'\uF7BF',
'questiongreek': u'\u037E',
'questionmonospace': u'\uFF1F',
'questionsmall': u'\uF73F',
'quotedbl': u'\u0022',
'quotedblbase': u'\u201E',
'quotedblleft': u'\u201C',
'quotedblmonospace': u'\uFF02',
'quotedblprime': u'\u301E',
'quotedblprimereversed': u'\u301D',
'quotedblright': u'\u201D',
'quoteleft': u'\u2018',
'quoteleftreversed': u'\u201B',
'quotereversed': u'\u201B',
'quoteright': u'\u2019',
'quoterightn': u'\u0149',
'quotesinglbase': u'\u201A',
'quotesingle': u'\u0027',
'quotesinglemonospace': u'\uFF07',
'r': u'\u0072',
'raarmenian': u'\u057C',
'rabengali': u'\u09B0',
'racute': u'\u0155',
'radeva': u'\u0930',
'radical': u'\u221A',
'radicalex': u'\uF8E5',
'radoverssquare': u'\u33AE',
'radoverssquaredsquare': u'\u33AF',
'radsquare': u'\u33AD',
'rafe': u'\u05BF',
'rafehebrew': u'\u05BF',
'ragujarati': u'\u0AB0',
'ragurmukhi': u'\u0A30',
'rahiragana': u'\u3089',
'rakatakana': u'\u30E9',
'rakatakanahalfwidth': u'\uFF97',
'ralowerdiagonalbengali': u'\u09F1',
'ramiddlediagonalbengali': u'\u09F0',
'ramshorn': u'\u0264',
'ratio': u'\u2236',
'rbopomofo': u'\u3116',
'rcaron': u'\u0159',
'rcedilla': u'\u0157',
'rcircle': u'\u24E1',
'rcommaaccent': u'\u0157',
'rdblgrave': u'\u0211',
'rdotaccent': u'\u1E59',
'rdotbelow': u'\u1E5B',
'rdotbelowmacron': u'\u1E5D',
'referencemark': u'\u203B',
'reflexsubset': u'\u2286',
'reflexsuperset': u'\u2287',
'registered': u'\u00AE',
'registersans': u'\uF8E8',
'registerserif': u'\uF6DA',
'reharabic': u'\u0631',
'reharmenian': u'\u0580',
'rehfinalarabic': u'\uFEAE',
'rehiragana': u'\u308C',
'rehyehaleflamarabic': u'\u0631\uFEF3\uFE8E\u0644',
'rekatakana': u'\u30EC',
'rekatakanahalfwidth': u'\uFF9A',
'resh': u'\u05E8',
'reshdageshhebrew': u'\uFB48',
'reshhatafpatah': u'\u05E8\u05B2',
'reshhatafpatahhebrew': u'\u05E8\u05B2',
'reshhatafsegol': u'\u05E8\u05B1',
'reshhatafsegolhebrew': u'\u05E8\u05B1',
'reshhebrew': u'\u05E8',
'reshhiriq': u'\u05E8\u05B4',
'reshhiriqhebrew': u'\u05E8\u05B4',
'reshholam': u'\u05E8\u05B9',
'reshholamhebrew': u'\u05E8\u05B9',
'reshpatah': u'\u05E8\u05B7',
'reshpatahhebrew': u'\u05E8\u05B7',
'reshqamats': u'\u05E8\u05B8',
'reshqamatshebrew': u'\u05E8\u05B8',
'reshqubuts': u'\u05E8\u05BB',
'reshqubutshebrew': u'\u05E8\u05BB',
'reshsegol': u'\u05E8\u05B6',
'reshsegolhebrew': u'\u05E8\u05B6',
'reshsheva': u'\u05E8\u05B0',
'reshshevahebrew': u'\u05E8\u05B0',
'reshtsere': u'\u05E8\u05B5',
'reshtserehebrew': u'\u05E8\u05B5',
'reversedtilde': u'\u223D',
'reviahebrew': u'\u0597',
'reviamugrashhebrew': u'\u0597',
'revlogicalnot': u'\u2310',
'rfishhook': u'\u027E',
'rfishhookreversed': u'\u027F',
'rhabengali': u'\u09DD',
'rhadeva': u'\u095D',
'rho': u'\u03C1',
'rhook': u'\u027D',
'rhookturned': u'\u027B',
'rhookturnedsuperior': u'\u02B5',
'rhosymbolgreek': u'\u03F1',
'rhotichookmod': u'\u02DE',
'rieulacirclekorean': u'\u3271',
'rieulaparenkorean': u'\u3211',
'rieulcirclekorean': u'\u3263',
'rieulhieuhkorean': u'\u3140',
'rieulkiyeokkorean': u'\u313A',
'rieulkiyeoksioskorean': u'\u3169',
'rieulkorean': u'\u3139',
'rieulmieumkorean': u'\u313B',
'rieulpansioskorean': u'\u316C',
'rieulparenkorean': u'\u3203',
'rieulphieuphkorean': u'\u313F',
'rieulpieupkorean': u'\u313C',
'rieulpieupsioskorean': u'\u316B',
'rieulsioskorean': u'\u313D',
'rieulthieuthkorean': u'\u313E',
'rieultikeutkorean': u'\u316A',
'rieulyeorinhieuhkorean': u'\u316D',
'rightangle': u'\u221F',
'righttackbelowcmb': u'\u0319',
'righttriangle': u'\u22BF',
'rihiragana': u'\u308A',
'rikatakana': u'\u30EA',
'rikatakanahalfwidth': u'\uFF98',
'ring': u'\u02DA',
'ringbelowcmb': u'\u0325',
'ringcmb': u'\u030A',
'ringhalfleft': u'\u02BF',
'ringhalfleftarmenian': u'\u0559',
'ringhalfleftbelowcmb': u'\u031C',
'ringhalfleftcentered': u'\u02D3',
'ringhalfright': u'\u02BE',
'ringhalfrightbelowcmb': u'\u0339',
'ringhalfrightcentered': u'\u02D2',
'rinvertedbreve': u'\u0213',
'rittorusquare': u'\u3351',
'rlinebelow': u'\u1E5F',
'rlongleg': u'\u027C',
'rlonglegturned': u'\u027A',
'rmonospace': u'\uFF52',
'rohiragana': u'\u308D',
'rokatakana': u'\u30ED',
'rokatakanahalfwidth': u'\uFF9B',
'roruathai': u'\u0E23',
'rparen': u'\u24AD',
'rrabengali': u'\u09DC',
'rradeva': u'\u0931',
'rragurmukhi': u'\u0A5C',
'rreharabic': u'\u0691',
'rrehfinalarabic': u'\uFB8D',
'rrvocalicbengali': u'\u09E0',
'rrvocalicdeva': u'\u0960',
'rrvocalicgujarati': u'\u0AE0',
'rrvocalicvowelsignbengali': u'\u09C4',
'rrvocalicvowelsigndeva': u'\u0944',
'rrvocalicvowelsigngujarati': u'\u0AC4',
'rsuperior': u'\uF6F1',
'rtblock': u'\u2590',
'rturned': u'\u0279',
'rturnedsuperior': u'\u02B4',
'ruhiragana': u'\u308B',
'rukatakana': u'\u30EB',
'rukatakanahalfwidth': u'\uFF99',
'rupeemarkbengali': u'\u09F2',
'rupeesignbengali': u'\u09F3',
'rupiah': u'\uF6DD',
'ruthai': u'\u0E24',
'rvocalicbengali': u'\u098B',
'rvocalicdeva': u'\u090B',
'rvocalicgujarati': u'\u0A8B',
'rvocalicvowelsignbengali': u'\u09C3',
'rvocalicvowelsigndeva': u'\u0943',
'rvocalicvowelsigngujarati': u'\u0AC3',
's': u'\u0073',
'sabengali': u'\u09B8',
'sacute': u'\u015B',
'sacutedotaccent': u'\u1E65',
'sadarabic': u'\u0635',
'sadeva': u'\u0938',
'sadfinalarabic': u'\uFEBA',
'sadinitialarabic': u'\uFEBB',
'sadmedialarabic': u'\uFEBC',
'sagujarati': u'\u0AB8',
'sagurmukhi': u'\u0A38',
'sahiragana': u'\u3055',
'sakatakana': u'\u30B5',
'sakatakanahalfwidth': u'\uFF7B',
'sallallahoualayhewasallamarabic': u'\uFDFA',
'samekh': u'\u05E1',
'samekhdagesh': u'\uFB41',
'samekhdageshhebrew': u'\uFB41',
'samekhhebrew': u'\u05E1',
'saraaathai': u'\u0E32',
'saraaethai': u'\u0E41',
'saraaimaimalaithai': u'\u0E44',
'saraaimaimuanthai': u'\u0E43',
'saraamthai': u'\u0E33',
'saraathai': u'\u0E30',
'saraethai': u'\u0E40',
'saraiileftthai': u'\uF886',
'saraiithai': u'\u0E35',
'saraileftthai': u'\uF885',
'saraithai': u'\u0E34',
'saraothai': u'\u0E42',
'saraueeleftthai': u'\uF888',
'saraueethai': u'\u0E37',
'saraueleftthai': u'\uF887',
'sarauethai': u'\u0E36',
'sarauthai': u'\u0E38',
'sarauuthai': u'\u0E39',
'sbopomofo': u'\u3119',
'scaron': u'\u0161',
'scarondotaccent': u'\u1E67',
'scedilla': u'\u015F',
'schwa': u'\u0259',
'schwacyrillic': u'\u04D9',
'schwadieresiscyrillic': u'\u04DB',
'schwahook': u'\u025A',
'scircle': u'\u24E2',
'scircumflex': u'\u015D',
'scommaaccent': u'\u0219',
'sdotaccent': u'\u1E61',
'sdotbelow': u'\u1E63',
'sdotbelowdotaccent': u'\u1E69',
'seagullbelowcmb': u'\u033C',
'second': u'\u2033',
'secondtonechinese': u'\u02CA',
'section': u'\u00A7',
'seenarabic': u'\u0633',
'seenfinalarabic': u'\uFEB2',
'seeninitialarabic': u'\uFEB3',
'seenmedialarabic': u'\uFEB4',
'segol': u'\u05B6',
'segol13': u'\u05B6',
'segol1f': u'\u05B6',
'segol2c': u'\u05B6',
'segolhebrew': u'\u05B6',
'segolnarrowhebrew': u'\u05B6',
'segolquarterhebrew': u'\u05B6',
'segoltahebrew': u'\u0592',
'segolwidehebrew': u'\u05B6',
'seharmenian': u'\u057D',
'sehiragana': u'\u305B',
'sekatakana': u'\u30BB',
'sekatakanahalfwidth': u'\uFF7E',
'semicolon': u'\u003B',
'semicolonarabic': u'\u061B',
'semicolonmonospace': u'\uFF1B',
'semicolonsmall': u'\uFE54',
'semivoicedmarkkana': u'\u309C',
'semivoicedmarkkanahalfwidth': u'\uFF9F',
'sentisquare': u'\u3322',
'sentosquare': u'\u3323',
'seven': u'\u0037',
'sevenarabic': u'\u0667',
'sevenbengali': u'\u09ED',
'sevencircle': u'\u2466',
'sevencircleinversesansserif': u'\u2790',
'sevendeva': u'\u096D',
'seveneighths': u'\u215E',
'sevengujarati': u'\u0AED',
'sevengurmukhi': u'\u0A6D',
'sevenhackarabic': u'\u0667',
'sevenhangzhou': u'\u3027',
'sevenideographicparen': u'\u3226',
'seveninferior': u'\u2087',
'sevenmonospace': u'\uFF17',
'sevenoldstyle': u'\uF737',
'sevenparen': u'\u247A',
'sevenperiod': u'\u248E',
'sevenpersian': u'\u06F7',
'sevenroman': u'\u2176',
'sevensuperior': u'\u2077',
'seventeencircle': u'\u2470',
'seventeenparen': u'\u2484',
'seventeenperiod': u'\u2498',
'seventhai': u'\u0E57',
'sfthyphen': u'\u00AD',
'shaarmenian': u'\u0577',
'shabengali': u'\u09B6',
'shacyrillic': u'\u0448',
'shaddaarabic': u'\u0651',
'shaddadammaarabic': u'\uFC61',
'shaddadammatanarabic': u'\uFC5E',
'shaddafathaarabic': u'\uFC60',
'shaddafathatanarabic': u'\u0651\u064B',
'shaddakasraarabic': u'\uFC62',
'shaddakasratanarabic': u'\uFC5F',
'shade': u'\u2592',
'shadedark': u'\u2593',
'shadelight': u'\u2591',
'shademedium': u'\u2592',
'shadeva': u'\u0936',
'shagujarati': u'\u0AB6',
'shagurmukhi': u'\u0A36',
'shalshelethebrew': u'\u0593',
'shbopomofo': u'\u3115',
'shchacyrillic': u'\u0449',
'sheenarabic': u'\u0634',
'sheenfinalarabic': u'\uFEB6',
'sheeninitialarabic': u'\uFEB7',
'sheenmedialarabic': u'\uFEB8',
'sheicoptic': u'\u03E3',
'sheqel': u'\u20AA',
'sheqelhebrew': u'\u20AA',
'sheva': u'\u05B0',
'sheva115': u'\u05B0',
'sheva15': u'\u05B0',
'sheva22': u'\u05B0',
'sheva2e': u'\u05B0',
'shevahebrew': u'\u05B0',
'shevanarrowhebrew': u'\u05B0',
'shevaquarterhebrew': u'\u05B0',
'shevawidehebrew': u'\u05B0',
'shhacyrillic': u'\u04BB',
'shimacoptic': u'\u03ED',
'shin': u'\u05E9',
'shindagesh': u'\uFB49',
'shindageshhebrew': u'\uFB49',
'shindageshshindot': u'\uFB2C',
'shindageshshindothebrew': u'\uFB2C',
'shindageshsindot': u'\uFB2D',
'shindageshsindothebrew': u'\uFB2D',
'shindothebrew': u'\u05C1',
'shinhebrew': u'\u05E9',
'shinshindot': u'\uFB2A',
'shinshindothebrew': u'\uFB2A',
'shinsindot': u'\uFB2B',
'shinsindothebrew': u'\uFB2B',
'shook': u'\u0282',
'sigma': u'\u03C3',
'sigma1': u'\u03C2',
'sigmafinal': u'\u03C2',
'sigmalunatesymbolgreek': u'\u03F2',
'sihiragana': u'\u3057',
'sikatakana': u'\u30B7',
'sikatakanahalfwidth': u'\uFF7C',
'siluqhebrew': u'\u05BD',
'siluqlefthebrew': u'\u05BD',
'similar': u'\u223C',
'sindothebrew': u'\u05C2',
'siosacirclekorean': u'\u3274',
'siosaparenkorean': u'\u3214',
'sioscieuckorean': u'\u317E',
'sioscirclekorean': u'\u3266',
'sioskiyeokkorean': u'\u317A',
'sioskorean': u'\u3145',
'siosnieunkorean': u'\u317B',
'siosparenkorean': u'\u3206',
'siospieupkorean': u'\u317D',
'siostikeutkorean': u'\u317C',
'six': u'\u0036',
'sixarabic': u'\u0666',
'sixbengali': u'\u09EC',
'sixcircle': u'\u2465',
'sixcircleinversesansserif': u'\u278F',
'sixdeva': u'\u096C',
'sixgujarati': u'\u0AEC',
'sixgurmukhi': u'\u0A6C',
'sixhackarabic': u'\u0666',
'sixhangzhou': u'\u3026',
'sixideographicparen': u'\u3225',
'sixinferior': u'\u2086',
'sixmonospace': u'\uFF16',
'sixoldstyle': u'\uF736',
'sixparen': u'\u2479',
'sixperiod': u'\u248D',
'sixpersian': u'\u06F6',
'sixroman': u'\u2175',
'sixsuperior': u'\u2076',
'sixteencircle': u'\u246F',
'sixteencurrencydenominatorbengali': u'\u09F9',
'sixteenparen': u'\u2483',
'sixteenperiod': u'\u2497',
'sixthai': u'\u0E56',
'slash': u'\u002F',
'slashmonospace': u'\uFF0F',
'slong': u'\u017F',
'slongdotaccent': u'\u1E9B',
'smileface': u'\u263A',
'smonospace': u'\uFF53',
'sofpasuqhebrew': u'\u05C3',
'softhyphen': u'\u00AD',
'softsigncyrillic': u'\u044C',
'sohiragana': u'\u305D',
'sokatakana': u'\u30BD',
'sokatakanahalfwidth': u'\uFF7F',
'soliduslongoverlaycmb': u'\u0338',
'solidusshortoverlaycmb': u'\u0337',
'sorusithai': u'\u0E29',
'sosalathai': u'\u0E28',
'sosothai': u'\u0E0B',
'sosuathai': u'\u0E2A',
'space': u'\u0020',
'spacehackarabic': u'\u0020',
'spade': u'\u2660',
'spadesuitblack': u'\u2660',
'spadesuitwhite': u'\u2664',
'sparen': u'\u24AE',
'squarebelowcmb': u'\u033B',
'squarecc': u'\u33C4',
'squarecm': u'\u339D',
'squarediagonalcrosshatchfill': u'\u25A9',
'squarehorizontalfill': u'\u25A4',
'squarekg': u'\u338F',
'squarekm': u'\u339E',
'squarekmcapital': u'\u33CE',
'squareln': u'\u33D1',
'squarelog': u'\u33D2',
'squaremg': u'\u338E',
'squaremil': u'\u33D5',
'squaremm': u'\u339C',
'squaremsquared': u'\u33A1',
'squareorthogonalcrosshatchfill': u'\u25A6',
'squareupperlefttolowerrightfill': u'\u25A7',
'squareupperrighttolowerleftfill': u'\u25A8',
'squareverticalfill': u'\u25A5',
'squarewhitewithsmallblack': u'\u25A3',
'srsquare': u'\u33DB',
'ssabengali': u'\u09B7',
'ssadeva': u'\u0937',
'ssagujarati': u'\u0AB7',
'ssangcieuckorean': u'\u3149',
'ssanghieuhkorean': u'\u3185',
'ssangieungkorean': u'\u3180',
'ssangkiyeokkorean': u'\u3132',
'ssangnieunkorean': u'\u3165',
'ssangpieupkorean': u'\u3143',
'ssangsioskorean': u'\u3146',
'ssangtikeutkorean': u'\u3138',
'ssuperior': u'\uF6F2',
'sterling': u'\u00A3',
'sterlingmonospace': u'\uFFE1',
'strokelongoverlaycmb': u'\u0336',
'strokeshortoverlaycmb': u'\u0335',
'subset': u'\u2282',
'subsetnotequal': u'\u228A',
'subsetorequal': u'\u2286',
'succeeds': u'\u227B',
'suchthat': u'\u220B',
'suhiragana': u'\u3059',
'sukatakana': u'\u30B9',
'sukatakanahalfwidth': u'\uFF7D',
'sukunarabic': u'\u0652',
'summation': u'\u2211',
'sun': u'\u263C',
'superset': u'\u2283',
'supersetnotequal': u'\u228B',
'supersetorequal': u'\u2287',
'svsquare': u'\u33DC',
'syouwaerasquare': u'\u337C',
't': u'\u0074',
'tabengali': u'\u09A4',
'tackdown': u'\u22A4',
'tackleft': u'\u22A3',
'tadeva': u'\u0924',
'tagujarati': u'\u0AA4',
'tagurmukhi': u'\u0A24',
'taharabic': u'\u0637',
'tahfinalarabic': u'\uFEC2',
'tahinitialarabic': u'\uFEC3',
'tahiragana': u'\u305F',
'tahmedialarabic': u'\uFEC4',
'taisyouerasquare': u'\u337D',
'takatakana': u'\u30BF',
'takatakanahalfwidth': u'\uFF80',
'tatweelarabic': u'\u0640',
'tau': u'\u03C4',
'tav': u'\u05EA',
'tavdages': u'\uFB4A',
'tavdagesh': u'\uFB4A',
'tavdageshhebrew': u'\uFB4A',
'tavhebrew': u'\u05EA',
'tbar': u'\u0167',
'tbopomofo': u'\u310A',
'tcaron': u'\u0165',
'tccurl': u'\u02A8',
'tcedilla': u'\u0163',
'tcheharabic': u'\u0686',
'tchehfinalarabic': u'\uFB7B',
'tchehinitialarabic': u'\uFB7C',
'tchehmedialarabic': u'\uFB7D',
'tchehmeeminitialarabic': u'\uFB7C\uFEE4',
'tcircle': u'\u24E3',
'tcircumflexbelow': u'\u1E71',
'tcommaaccent': u'\u0163',
'tdieresis': u'\u1E97',
'tdotaccent': u'\u1E6B',
'tdotbelow': u'\u1E6D',
'tecyrillic': u'\u0442',
'tedescendercyrillic': u'\u04AD',
'teharabic': u'\u062A',
'tehfinalarabic': u'\uFE96',
'tehhahinitialarabic': u'\uFCA2',
'tehhahisolatedarabic': u'\uFC0C',
'tehinitialarabic': u'\uFE97',
'tehiragana': u'\u3066',
'tehjeeminitialarabic': u'\uFCA1',
'tehjeemisolatedarabic': u'\uFC0B',
'tehmarbutaarabic': u'\u0629',
'tehmarbutafinalarabic': u'\uFE94',
'tehmedialarabic': u'\uFE98',
'tehmeeminitialarabic': u'\uFCA4',
'tehmeemisolatedarabic': u'\uFC0E',
'tehnoonfinalarabic': u'\uFC73',
'tekatakana': u'\u30C6',
'tekatakanahalfwidth': u'\uFF83',
'telephone': u'\u2121',
'telephoneblack': u'\u260E',
'telishagedolahebrew': u'\u05A0',
'telishaqetanahebrew': u'\u05A9',
'tencircle': u'\u2469',
'tenideographicparen': u'\u3229',
'tenparen': u'\u247D',
'tenperiod': u'\u2491',
'tenroman': u'\u2179',
'tesh': u'\u02A7',
'tet': u'\u05D8',
'tetdagesh': u'\uFB38',
'tetdageshhebrew': u'\uFB38',
'tethebrew': u'\u05D8',
'tetsecyrillic': u'\u04B5',
'tevirhebrew': u'\u059B',
'tevirlefthebrew': u'\u059B',
'thabengali': u'\u09A5',
'thadeva': u'\u0925',
'thagujarati': u'\u0AA5',
'thagurmukhi': u'\u0A25',
'thalarabic': u'\u0630',
'thalfinalarabic': u'\uFEAC',
'thanthakhatlowleftthai': u'\uF898',
'thanthakhatlowrightthai': u'\uF897',
'thanthakhatthai': u'\u0E4C',
'thanthakhatupperleftthai': u'\uF896',
'theharabic': u'\u062B',
'thehfinalarabic': u'\uFE9A',
'thehinitialarabic': u'\uFE9B',
'thehmedialarabic': u'\uFE9C',
'thereexists': u'\u2203',
'therefore': u'\u2234',
'theta': u'\u03B8',
'theta1': u'\u03D1',
'thetasymbolgreek': u'\u03D1',
'thieuthacirclekorean': u'\u3279',
'thieuthaparenkorean': u'\u3219',
'thieuthcirclekorean': u'\u326B',
'thieuthkorean': u'\u314C',
'thieuthparenkorean': u'\u320B',
'thirteencircle': u'\u246C',
'thirteenparen': u'\u2480',
'thirteenperiod': u'\u2494',
'thonangmonthothai': u'\u0E11',
'thook': u'\u01AD',
'thophuthaothai': u'\u0E12',
'thorn': u'\u00FE',
'thothahanthai': u'\u0E17',
'thothanthai': u'\u0E10',
'thothongthai': u'\u0E18',
'thothungthai': u'\u0E16',
'thousandcyrillic': u'\u0482',
'thousandsseparatorarabic': u'\u066C',
'thousandsseparatorpersian': u'\u066C',
'three': u'\u0033',
'threearabic': u'\u0663',
'threebengali': u'\u09E9',
'threecircle': u'\u2462',
'threecircleinversesansserif': u'\u278C',
'threedeva': u'\u0969',
'threeeighths': u'\u215C',
'threegujarati': u'\u0AE9',
'threegurmukhi': u'\u0A69',
'threehackarabic': u'\u0663',
'threehangzhou': u'\u3023',
'threeideographicparen': u'\u3222',
'threeinferior': u'\u2083',
'threemonospace': u'\uFF13',
'threenumeratorbengali': u'\u09F6',
'threeoldstyle': u'\uF733',
'threeparen': u'\u2476',
'threeperiod': u'\u248A',
'threepersian': u'\u06F3',
'threequarters': u'\u00BE',
'threequartersemdash': u'\uF6DE',
'threeroman': u'\u2172',
'threesuperior': u'\u00B3',
'threethai': u'\u0E53',
'thzsquare': u'\u3394',
'tihiragana': u'\u3061',
'tikatakana': u'\u30C1',
'tikatakanahalfwidth': u'\uFF81',
'tikeutacirclekorean': u'\u3270',
'tikeutaparenkorean': u'\u3210',
'tikeutcirclekorean': u'\u3262',
'tikeutkorean': u'\u3137',
'tikeutparenkorean': u'\u3202',
'tilde': u'\u02DC',
'tildebelowcmb': u'\u0330',
'tildecmb': u'\u0303',
'tildecomb': u'\u0303',
'tildedoublecmb': u'\u0360',
'tildeoperator': u'\u223C',
'tildeoverlaycmb': u'\u0334',
'tildeverticalcmb': u'\u033E',
'timescircle': u'\u2297',
'tipehahebrew': u'\u0596',
'tipehalefthebrew': u'\u0596',
'tippigurmukhi': u'\u0A70',
'titlocyrilliccmb': u'\u0483',
'tiwnarmenian': u'\u057F',
'tlinebelow': u'\u1E6F',
'tmonospace': u'\uFF54',
'toarmenian': u'\u0569',
'tohiragana': u'\u3068',
'tokatakana': u'\u30C8',
'tokatakanahalfwidth': u'\uFF84',
'tonebarextrahighmod': u'\u02E5',
'tonebarextralowmod': u'\u02E9',
'tonebarhighmod': u'\u02E6',
'tonebarlowmod': u'\u02E8',
'tonebarmidmod': u'\u02E7',
'tonefive': u'\u01BD',
'tonesix': u'\u0185',
'tonetwo': u'\u01A8',
'tonos': u'\u0384',
'tonsquare': u'\u3327',
'topatakthai': u'\u0E0F',
'tortoiseshellbracketleft': u'\u3014',
'tortoiseshellbracketleftsmall': u'\uFE5D',
'tortoiseshellbracketleftvertical': u'\uFE39',
'tortoiseshellbracketright': u'\u3015',
'tortoiseshellbracketrightsmall': u'\uFE5E',
'tortoiseshellbracketrightvertical': u'\uFE3A',
'totaothai': u'\u0E15',
'tpalatalhook': u'\u01AB',
'tparen': u'\u24AF',
'trademark': u'\u2122',
'trademarksans': u'\uF8EA',
'trademarkserif': u'\uF6DB',
'tretroflexhook': u'\u0288',
'triagdn': u'\u25BC',
'triaglf': u'\u25C4',
'triagrt': u'\u25BA',
'triagup': u'\u25B2',
'ts': u'\u02A6',
'tsadi': u'\u05E6',
'tsadidagesh': u'\uFB46',
'tsadidageshhebrew': u'\uFB46',
'tsadihebrew': u'\u05E6',
'tsecyrillic': u'\u0446',
'tsere': u'\u05B5',
'tsere12': u'\u05B5',
'tsere1e': u'\u05B5',
'tsere2b': u'\u05B5',
'tserehebrew': u'\u05B5',
'tserenarrowhebrew': u'\u05B5',
'tserequarterhebrew': u'\u05B5',
'tserewidehebrew': u'\u05B5',
'tshecyrillic': u'\u045B',
'tsuperior': u'\uF6F3',
'ttabengali': u'\u099F',
'ttadeva': u'\u091F',
'ttagujarati': u'\u0A9F',
'ttagurmukhi': u'\u0A1F',
'tteharabic': u'\u0679',
'ttehfinalarabic': u'\uFB67',
'ttehinitialarabic': u'\uFB68',
'ttehmedialarabic': u'\uFB69',
'tthabengali': u'\u09A0',
'tthadeva': u'\u0920',
'tthagujarati': u'\u0AA0',
'tthagurmukhi': u'\u0A20',
'tturned': u'\u0287',
'tuhiragana': u'\u3064',
'tukatakana': u'\u30C4',
'tukatakanahalfwidth': u'\uFF82',
'tusmallhiragana': u'\u3063',
'tusmallkatakana': u'\u30C3',
'tusmallkatakanahalfwidth': u'\uFF6F',
'twelvecircle': u'\u246B',
'twelveparen': u'\u247F',
'twelveperiod': u'\u2493',
'twelveroman': u'\u217B',
'twentycircle': u'\u2473',
'twentyhangzhou': u'\u5344',
'twentyparen': u'\u2487',
'twentyperiod': u'\u249B',
'two': u'\u0032',
'twoarabic': u'\u0662',
'twobengali': u'\u09E8',
'twocircle': u'\u2461',
'twocircleinversesansserif': u'\u278B',
'twodeva': u'\u0968',
'twodotenleader': u'\u2025',
'twodotleader': u'\u2025',
'twodotleadervertical': u'\uFE30',
'twogujarati': u'\u0AE8',
'twogurmukhi': u'\u0A68',
'twohackarabic': u'\u0662',
'twohangzhou': u'\u3022',
'twoideographicparen': u'\u3221',
'twoinferior': u'\u2082',
'twomonospace': u'\uFF12',
'twonumeratorbengali': u'\u09F5',
'twooldstyle': u'\uF732',
'twoparen': u'\u2475',
'twoperiod': u'\u2489',
'twopersian': u'\u06F2',
'tworoman': u'\u2171',
'twostroke': u'\u01BB',
'twosuperior': u'\u00B2',
'twothai': u'\u0E52',
'twothirds': u'\u2154',
'u': u'\u0075',
'uacute': u'\u00FA',
'ubar': u'\u0289',
'ubengali': u'\u0989',
'ubopomofo': u'\u3128',
'ubreve': u'\u016D',
'ucaron': u'\u01D4',
'ucircle': u'\u24E4',
'ucircumflex': u'\u00FB',
'ucircumflexbelow': u'\u1E77',
'ucyrillic': u'\u0443',
'udattadeva': u'\u0951',
'udblacute': u'\u0171',
'udblgrave': u'\u0215',
'udeva': u'\u0909',
'udieresis': u'\u00FC',
'udieresisacute': u'\u01D8',
'udieresisbelow': u'\u1E73',
'udieresiscaron': u'\u01DA',
'udieresiscyrillic': u'\u04F1',
'udieresisgrave': u'\u01DC',
'udieresismacron': u'\u01D6',
'udotbelow': u'\u1EE5',
'ugrave': u'\u00F9',
'ugujarati': u'\u0A89',
'ugurmukhi': u'\u0A09',
'uhiragana': u'\u3046',
'uhookabove': u'\u1EE7',
'uhorn': u'\u01B0',
'uhornacute': u'\u1EE9',
'uhorndotbelow': u'\u1EF1',
'uhorngrave': u'\u1EEB',
'uhornhookabove': u'\u1EED',
'uhorntilde': u'\u1EEF',
'uhungarumlaut': u'\u0171',
'uhungarumlautcyrillic': u'\u04F3',
'uinvertedbreve': u'\u0217',
'ukatakana': u'\u30A6',
'ukatakanahalfwidth': u'\uFF73',
'ukcyrillic': u'\u0479',
'ukorean': u'\u315C',
'umacron': u'\u016B',
'umacroncyrillic': u'\u04EF',
'umacrondieresis': u'\u1E7B',
'umatragurmukhi': u'\u0A41',
'umonospace': u'\uFF55',
'underscore': u'\u005F',
'underscoredbl': u'\u2017',
'underscoremonospace': u'\uFF3F',
'underscorevertical': u'\uFE33',
'underscorewavy': u'\uFE4F',
'union': u'\u222A',
'universal': u'\u2200',
'uogonek': u'\u0173',
'uparen': u'\u24B0',
'upblock': u'\u2580',
'upperdothebrew': u'\u05C4',
'upsilon': u'\u03C5',
'upsilondieresis': u'\u03CB',
'upsilondieresistonos': u'\u03B0',
'upsilonlatin': u'\u028A',
'upsilontonos': u'\u03CD',
'uptackbelowcmb': u'\u031D',
'uptackmod': u'\u02D4',
'uragurmukhi': u'\u0A73',
'uring': u'\u016F',
'ushortcyrillic': u'\u045E',
'usmallhiragana': u'\u3045',
'usmallkatakana': u'\u30A5',
'usmallkatakanahalfwidth': u'\uFF69',
'ustraightcyrillic': u'\u04AF',
'ustraightstrokecyrillic': u'\u04B1',
'utilde': u'\u0169',
'utildeacute': u'\u1E79',
'utildebelow': u'\u1E75',
'uubengali': u'\u098A',
'uudeva': u'\u090A',
'uugujarati': u'\u0A8A',
'uugurmukhi': u'\u0A0A',
'uumatragurmukhi': u'\u0A42',
'uuvowelsignbengali': u'\u09C2',
'uuvowelsigndeva': u'\u0942',
'uuvowelsigngujarati': u'\u0AC2',
'uvowelsignbengali': u'\u09C1',
'uvowelsigndeva': u'\u0941',
'uvowelsigngujarati': u'\u0AC1',
'v': u'\u0076',
'vadeva': u'\u0935',
'vagujarati': u'\u0AB5',
'vagurmukhi': u'\u0A35',
'vakatakana': u'\u30F7',
'vav': u'\u05D5',
'vavdagesh': u'\uFB35',
'vavdagesh65': u'\uFB35',
'vavdageshhebrew': u'\uFB35',
'vavhebrew': u'\u05D5',
'vavholam': u'\uFB4B',
'vavholamhebrew': u'\uFB4B',
'vavvavhebrew': u'\u05F0',
'vavyodhebrew': u'\u05F1',
'vcircle': u'\u24E5',
'vdotbelow': u'\u1E7F',
'vecyrillic': u'\u0432',
'veharabic': u'\u06A4',
'vehfinalarabic': u'\uFB6B',
'vehinitialarabic': u'\uFB6C',
'vehmedialarabic': u'\uFB6D',
'vekatakana': u'\u30F9',
'venus': u'\u2640',
'verticalbar': u'\u007C',
'verticallineabovecmb': u'\u030D',
'verticallinebelowcmb': u'\u0329',
'verticallinelowmod': u'\u02CC',
'verticallinemod': u'\u02C8',
'vewarmenian': u'\u057E',
'vhook': u'\u028B',
'vikatakana': u'\u30F8',
'viramabengali': u'\u09CD',
'viramadeva': u'\u094D',
'viramagujarati': u'\u0ACD',
'visargabengali': u'\u0983',
'visargadeva': u'\u0903',
'visargagujarati': u'\u0A83',
'vmonospace': u'\uFF56',
'voarmenian': u'\u0578',
'voicediterationhiragana': u'\u309E',
'voicediterationkatakana': u'\u30FE',
'voicedmarkkana': u'\u309B',
'voicedmarkkanahalfwidth': u'\uFF9E',
'vokatakana': u'\u30FA',
'vparen': u'\u24B1',
'vtilde': u'\u1E7D',
'vturned': u'\u028C',
'vuhiragana': u'\u3094',
'vukatakana': u'\u30F4',
'w': u'\u0077',
'wacute': u'\u1E83',
'waekorean': u'\u3159',
'wahiragana': u'\u308F',
'wakatakana': u'\u30EF',
'wakatakanahalfwidth': u'\uFF9C',
'wakorean': u'\u3158',
'wasmallhiragana': u'\u308E',
'wasmallkatakana': u'\u30EE',
'wattosquare': u'\u3357',
'wavedash': u'\u301C',
'wavyunderscorevertical': u'\uFE34',
'wawarabic': u'\u0648',
'wawfinalarabic': u'\uFEEE',
'wawhamzaabovearabic': u'\u0624',
'wawhamzaabovefinalarabic': u'\uFE86',
'wbsquare': u'\u33DD',
'wcircle': u'\u24E6',
'wcircumflex': u'\u0175',
'wdieresis': u'\u1E85',
'wdotaccent': u'\u1E87',
'wdotbelow': u'\u1E89',
'wehiragana': u'\u3091',
'weierstrass': u'\u2118',
'wekatakana': u'\u30F1',
'wekorean': u'\u315E',
'weokorean': u'\u315D',
'wgrave': u'\u1E81',
'whitebullet': u'\u25E6',
'whitecircle': u'\u25CB',
'whitecircleinverse': u'\u25D9',
'whitecornerbracketleft': u'\u300E',
'whitecornerbracketleftvertical': u'\uFE43',
'whitecornerbracketright': u'\u300F',
'whitecornerbracketrightvertical': u'\uFE44',
'whitediamond': u'\u25C7',
'whitediamondcontainingblacksmalldiamond': u'\u25C8',
'whitedownpointingsmalltriangle': u'\u25BF',
'whitedownpointingtriangle': u'\u25BD',
'whiteleftpointingsmalltriangle': u'\u25C3',
'whiteleftpointingtriangle': u'\u25C1',
'whitelenticularbracketleft': u'\u3016',
'whitelenticularbracketright': u'\u3017',
'whiterightpointingsmalltriangle': u'\u25B9',
'whiterightpointingtriangle': u'\u25B7',
'whitesmallsquare': u'\u25AB',
'whitesmilingface': u'\u263A',
'whitesquare': u'\u25A1',
'whitestar': u'\u2606',
'whitetelephone': u'\u260F',
'whitetortoiseshellbracketleft': u'\u3018',
'whitetortoiseshellbracketright': u'\u3019',
'whiteuppointingsmalltriangle': u'\u25B5',
'whiteuppointingtriangle': u'\u25B3',
'wihiragana': u'\u3090',
'wikatakana': u'\u30F0',
'wikorean': u'\u315F',
'wmonospace': u'\uFF57',
'wohiragana': u'\u3092',
'wokatakana': u'\u30F2',
'wokatakanahalfwidth': u'\uFF66',
'won': u'\u20A9',
'wonmonospace': u'\uFFE6',
'wowaenthai': u'\u0E27',
'wparen': u'\u24B2',
'wring': u'\u1E98',
'wsuperior': u'\u02B7',
'wturned': u'\u028D',
'wynn': u'\u01BF',
'x': u'\u0078',
'xabovecmb': u'\u033D',
'xbopomofo': u'\u3112',
'xcircle': u'\u24E7',
'xdieresis': u'\u1E8D',
'xdotaccent': u'\u1E8B',
'xeharmenian': u'\u056D',
'xi': u'\u03BE',
'xmonospace': u'\uFF58',
'xparen': u'\u24B3',
'xsuperior': u'\u02E3',
'y': u'\u0079',
'yaadosquare': u'\u334E',
'yabengali': u'\u09AF',
'yacute': u'\u00FD',
'yadeva': u'\u092F',
'yaekorean': u'\u3152',
'yagujarati': u'\u0AAF',
'yagurmukhi': u'\u0A2F',
'yahiragana': u'\u3084',
'yakatakana': u'\u30E4',
'yakatakanahalfwidth': u'\uFF94',
'yakorean': u'\u3151',
'yamakkanthai': u'\u0E4E',
'yasmallhiragana': u'\u3083',
'yasmallkatakana': u'\u30E3',
'yasmallkatakanahalfwidth': u'\uFF6C',
'yatcyrillic': u'\u0463',
'ycircle': u'\u24E8',
'ycircumflex': u'\u0177',
'ydieresis': u'\u00FF',
'ydotaccent': u'\u1E8F',
'ydotbelow': u'\u1EF5',
'yeharabic': u'\u064A',
'yehbarreearabic': u'\u06D2',
'yehbarreefinalarabic': u'\uFBAF',
'yehfinalarabic': u'\uFEF2',
'yehhamzaabovearabic': u'\u0626',
'yehhamzaabovefinalarabic': u'\uFE8A',
'yehhamzaaboveinitialarabic': u'\uFE8B',
'yehhamzaabovemedialarabic': u'\uFE8C',
'yehinitialarabic': u'\uFEF3',
'yehmedialarabic': u'\uFEF4',
'yehmeeminitialarabic': u'\uFCDD',
'yehmeemisolatedarabic': u'\uFC58',
'yehnoonfinalarabic': u'\uFC94',
'yehthreedotsbelowarabic': u'\u06D1',
'yekorean': u'\u3156',
'yen': u'\u00A5',
'yenmonospace': u'\uFFE5',
'yeokorean': u'\u3155',
'yeorinhieuhkorean': u'\u3186',
'yerahbenyomohebrew': u'\u05AA',
'yerahbenyomolefthebrew': u'\u05AA',
'yericyrillic': u'\u044B',
'yerudieresiscyrillic': u'\u04F9',
'yesieungkorean': u'\u3181',
'yesieungpansioskorean': u'\u3183',
'yesieungsioskorean': u'\u3182',
'yetivhebrew': u'\u059A',
'ygrave': u'\u1EF3',
'yhook': u'\u01B4',
'yhookabove': u'\u1EF7',
'yiarmenian': u'\u0575',
'yicyrillic': u'\u0457',
'yikorean': u'\u3162',
'yinyang': u'\u262F',
'yiwnarmenian': u'\u0582',
'ymonospace': u'\uFF59',
'yod': u'\u05D9',
'yoddagesh': u'\uFB39',
'yoddageshhebrew': u'\uFB39',
'yodhebrew': u'\u05D9',
'yodyodhebrew': u'\u05F2',
'yodyodpatahhebrew': u'\uFB1F',
'yohiragana': u'\u3088',
'yoikorean': u'\u3189',
'yokatakana': u'\u30E8',
'yokatakanahalfwidth': u'\uFF96',
'yokorean': u'\u315B',
'yosmallhiragana': u'\u3087',
'yosmallkatakana': u'\u30E7',
'yosmallkatakanahalfwidth': u'\uFF6E',
'yotgreek': u'\u03F3',
'yoyaekorean': u'\u3188',
'yoyakorean': u'\u3187',
'yoyakthai': u'\u0E22',
'yoyingthai': u'\u0E0D',
'yparen': u'\u24B4',
'ypogegrammeni': u'\u037A',
'ypogegrammenigreekcmb': u'\u0345',
'yr': u'\u01A6',
'yring': u'\u1E99',
'ysuperior': u'\u02B8',
'ytilde': u'\u1EF9',
'yturned': u'\u028E',
'yuhiragana': u'\u3086',
'yuikorean': u'\u318C',
'yukatakana': u'\u30E6',
'yukatakanahalfwidth': u'\uFF95',
'yukorean': u'\u3160',
'yusbigcyrillic': u'\u046B',
'yusbigiotifiedcyrillic': u'\u046D',
'yuslittlecyrillic': u'\u0467',
'yuslittleiotifiedcyrillic': u'\u0469',
'yusmallhiragana': u'\u3085',
'yusmallkatakana': u'\u30E5',
'yusmallkatakanahalfwidth': u'\uFF6D',
'yuyekorean': u'\u318B',
'yuyeokorean': u'\u318A',
'yyabengali': u'\u09DF',
'yyadeva': u'\u095F',
'z': u'\u007A',
'zaarmenian': u'\u0566',
'zacute': u'\u017A',
'zadeva': u'\u095B',
'zagurmukhi': u'\u0A5B',
'zaharabic': u'\u0638',
'zahfinalarabic': u'\uFEC6',
'zahinitialarabic': u'\uFEC7',
'zahiragana': u'\u3056',
'zahmedialarabic': u'\uFEC8',
'zainarabic': u'\u0632',
'zainfinalarabic': u'\uFEB0',
'zakatakana': u'\u30B6',
'zaqefgadolhebrew': u'\u0595',
'zaqefqatanhebrew': u'\u0594',
'zarqahebrew': u'\u0598',
'zayin': u'\u05D6',
'zayindagesh': u'\uFB36',
'zayindageshhebrew': u'\uFB36',
'zayinhebrew': u'\u05D6',
'zbopomofo': u'\u3117',
'zcaron': u'\u017E',
'zcircle': u'\u24E9',
'zcircumflex': u'\u1E91',
'zcurl': u'\u0291',
'zdot': u'\u017C',
'zdotaccent': u'\u017C',
'zdotbelow': u'\u1E93',
'zecyrillic': u'\u0437',
'zedescendercyrillic': u'\u0499',
'zedieresiscyrillic': u'\u04DF',
'zehiragana': u'\u305C',
'zekatakana': u'\u30BC',
'zero': u'\u0030',
'zeroarabic': u'\u0660',
'zerobengali': u'\u09E6',
'zerodeva': u'\u0966',
'zerogujarati': u'\u0AE6',
'zerogurmukhi': u'\u0A66',
'zerohackarabic': u'\u0660',
'zeroinferior': u'\u2080',
'zeromonospace': u'\uFF10',
'zerooldstyle': u'\uF730',
'zeropersian': u'\u06F0',
'zerosuperior': u'\u2070',
'zerothai': u'\u0E50',
'zerowidthjoiner': u'\uFEFF',
'zerowidthnonjoiner': u'\u200C',
'zerowidthspace': u'\u200B',
'zeta': u'\u03B6',
'zhbopomofo': u'\u3113',
'zhearmenian': u'\u056A',
'zhebrevecyrillic': u'\u04C2',
'zhecyrillic': u'\u0436',
'zhedescendercyrillic': u'\u0497',
'zhedieresiscyrillic': u'\u04DD',
'zihiragana': u'\u3058',
'zikatakana': u'\u30B8',
'zinorhebrew': u'\u05AE',
'zlinebelow': u'\u1E95',
'zmonospace': u'\uFF5A',
'zohiragana': u'\u305E',
'zokatakana': u'\u30BE',
'zparen': u'\u24B5',
'zretroflexhook': u'\u0290',
'zstroke': u'\u01B6',
'zuhiragana': u'\u305A',
'zukatakana': u'\u30BA',
}
#--end
|
euske/pdfminer
|
pdfminer/glyphlist.py
|
Python
|
mit
| 121,510
|
# author: Fei Gao
#
# Add Two Numbers
#
# You are given two linked lists representing two non-negative
# numbers. The digits are stored in reverse order and each of
# their nodes contain a single digit. Add the two numbers and
# return it as a linked list.
#
# Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
# Output: 7 -> 0 -> 8
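# Worked example of the carry arithmetic (illustrative):
#   342 + 465: 2+5=7 (carry 0); 4+6=10 -> digit 0, carry 1; 3+4+1=8;
#   hence the result list 7 -> 0 -> 8 shown above.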
from node.sllist import ListNode, SinglyLinkedList
class Solution:
# @return a ListNode
def addTwoNumbers(self, l1, l2):
h1 = l1
h2 = l2
h = ListNode(0)
p = h
carry = 0
while h1 and h2:
p.next = ListNode(h1.val + h2.val + carry)
p = p.next
carry = p.val // 10
p.val %= 10
h1 = h1.next
h2 = h2.next
h3 = h1 if h1 else h2
while h3:
p.next = ListNode(h3.val + carry)
p = p.next
carry = p.val // 10
p.val %= 10
h3 = h3.next
while carry != 0:
p.next = ListNode(carry)
p = p.next
carry = p.val // 10
p.val %= 10
return h.next
def main():
solver = Solution()
tests = [[[2, 4, 3], [5, 6, 4]],
[[1], [9] * 3]]
for test in tests:
l1 = SinglyLinkedList(test[0])
print(l1)
l2 = SinglyLinkedList(test[1])
print(l2)
print(' ->')
result = SinglyLinkedList(solver.addTwoNumbers(l1.head, l2.head))
print(result)
print('~' * 10)
pass
if __name__ == '__main__':
main()
pass
|
feigaochn/leetcode
|
p2_add_two_numbers.py
|
Python
|
mit
| 1,551
|
import pytest
from spacy.util import get_lang_class
# fmt: off
# Only include languages with no external dependencies
# excluded: ja, ko, th, vi, zh
LANGUAGES = ["af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el",
"en", "es", "et", "eu", "fa", "fi", "fr", "ga", "gu", "he", "hi",
"hr", "hu", "hy", "id", "is", "it", "kn", "ky", "lb", "lt", "lv",
"mk", "ml", "mr", "nb", "ne", "nl", "pl", "pt", "ro", "ru", "sa",
"si", "sk", "sl", "sq", "sr", "sv", "ta", "te", "ti", "tl", "tn",
"tr", "tt", "uk", "ur", "xx", "yo"]
# fmt: on
@pytest.mark.parametrize("lang", LANGUAGES)
def test_lang_initialize(lang, capfd):
"""Test that languages can be initialized."""
nlp = get_lang_class(lang)()
# Check for stray print statements (see #3342)
doc = nlp("test") # noqa: F841
captured = capfd.readouterr()
assert not captured.out
|
honnibal/spaCy
|
spacy/tests/lang/test_initialize.py
|
Python
|
mit
| 922
|
"""Contains the base class for the Snux driver overlay.
This class overlays an existing WPC-compatible platform interface to work with
Mark Sunnucks's System 11 interface board.
"""
# snux.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
import time
from mpf.system.tasks import DelayManager
from mpf.system.timing import Timer
class Snux(object):
def __init__(self, machine, platform):
self.log = logging.getLogger('Platform.Snux')
self.delay = DelayManager()
self.machine = machine
self.platform = platform
self.system11_config = None
self.snux_config = None
self.ac_relay_delay_ms = 100
self.special_drivers = set()
self.diag_led = None
'''Diagnostics LED (LED 3) on the Snux board turns on solid when MPF
first connects, then starts flashing once the MPF init is done.'''
self.ac_relay = None
self.flipper_relay = None
self.ac_relay_enabled = False # disabled = A, enabled = C
self.a_side_queue = set()
self.c_side_queue = set()
self.a_drivers = set()
self.c_drivers = set()
self.a_side_done_time = 0
self.c_side_done_time = 0
self.drivers_holding_a_side = set()
self.drivers_holding_c_side = set()
# self.a_side_busy = False # This is a property
# self.c_side_active = False # This is a property
self.a_side_enabled = True
self.c_side_enabled = False
self.ac_relay_in_transition = False
self._morph()
@property
def a_side_busy(self):
if (self.drivers_holding_a_side or
self.a_side_done_time > time.time() or
self.a_side_queue):
return True
else:
return False
@property
def c_side_active(self):
if self.drivers_holding_c_side or self.c_side_done_time > time.time():
return True
else:
return False
def null_log_handler(self, *args, **kwargs):
pass
def _morph(self):
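        # Keep references to the platform's original methods, then swap in
        # this overlay's versions, so driver configuration and hardware-rule
        # writes are routed through the Snux A/C-relay logic first.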
self.platform_configure_driver = self.platform.configure_driver
self.platform.configure_driver = self.configure_driver
self.platform_write_hw_rule = self.platform.write_hw_rule
self.platform.write_hw_rule = self.write_hw_rule
def initialize(self):
"""Automatically called by the Platform class after all the system
modules are loaded.
"""
self._validate_config()
self.log.debug("Configuring Snux Diag LED for driver %s",
self.snux_config['diag_led_driver_number'])
self.diag_led, _ = self.platform_configure_driver(
{'number': self.snux_config['diag_led_driver_number'],
'allow_enable': True})
self.diag_led.log.info = self.null_log_handler
self.diag_led.log.debug = self.null_log_handler
self.diag_led.enable()
self.special_drivers.add(
self.snux_config['diag_led_driver_number'].lower())
self.log.debug("Configuring A/C Select Relay for driver %s",
self.system11_config['ac_relay_driver_number'])
self.ac_relay, _ = self.platform_configure_driver(
{'number': self.system11_config['ac_relay_driver_number'],
'allow_enable': True})
self.special_drivers.add(
self.system11_config['ac_relay_driver_number'].lower())
self.log.debug("Configuring A/C Select Relay transition delay for "
"%sms", self.system11_config['ac_relay_delay_ms'])
self.ac_relay_delay_ms = self.system11_config['ac_relay_delay_ms']
self.flipper_relay, _ = self.platform_configure_driver(
{'number': self.snux_config['flipper_enable_driver_number'],
'allow_enable': True})
self.log.debug("Configuring Flipper Enable for driver %s",
self.snux_config['flipper_enable_driver_number'])
self.machine.events.add_handler('init_phase_5',
self._initialize_phase_2)
def _initialize_phase_2(self):
self.machine.timing.add(
Timer(callback=self.flash_diag_led, frequency=0.5))
self.machine.events.add_handler('timer_tick', self._tick)
def _validate_config(self):
self.system11_config = self.machine.config_processor.process_config2(
'system11', self.machine.config['system11'])
snux = self.machine.config.get('snux', dict())
self.snux_config = self.machine.config_processor.process_config2(
'snux', snux)
def _tick(self):
# Called based on the timer_tick event
if self.a_side_queue:
self._service_a_side()
elif self.c_side_queue:
self._service_c_side()
elif self.c_side_enabled and not self.c_side_active:
self._enable_a_side()
def flash_diag_led(self):
self.diag_led.pulse(250)
def configure_driver(self, config, device_type='coil'):
# If the user has configured one of the special drivers in their
# machine config, don't set it up since that could let them do weird
# things.
if config['number'].lower() in self.special_drivers:
return
orig_number = config['number']
if (config['number'].lower().endswith('a') or
config['number'].lower().endswith('c')):
config['number'] = config['number'][:-1]
platform_driver, _ = (
self.platform_configure_driver(config, device_type))
snux_driver = SnuxDriver(orig_number, platform_driver, self)
if orig_number.lower().endswith('a'):
self._add_a_driver(snux_driver.platform_driver)
elif orig_number.lower().endswith('c'):
self._add_c_driver(snux_driver.platform_driver)
return snux_driver, orig_number
else:
return self.platform_configure_driver(config, device_type)
def write_hw_rule(self, switch_obj, sw_activity, driver_obj, driver_action,
disable_on_release, drive_now,
**driver_settings_overrides):
"""On system 11 machines, Switched drivers cannot be configured with
autofire hardware rules.
"""
if driver_obj in self.a_drivers or driver_obj in self.c_drivers:
self.log.warning("Received a request to set a hardware rule for a"
"switched driver. Ignoring")
else:
self.platform_write_hw_rule(switch_obj, sw_activity, driver_obj,
driver_action, disable_on_release,
drive_now,
**driver_settings_overrides)
def driver_action(self, driver, milliseconds):
"""Adds a driver action for a switched driver to the queue (for either
the A-side or C-side queue).
Args:
driver: A reference to the original platform class Driver instance.
milliseconds: Integer of the number of milliseconds this action is
for. 0 = pulse, -1 = enable (hold), any other value is a timed
action (either pulse or long_pulse)
This action will be serviced immediately if it can, or ASAP otherwise.
"""
if driver in self.a_drivers:
self.a_side_queue.add((driver, milliseconds))
self._service_a_side()
elif driver in self.c_drivers:
self.c_side_queue.add((driver, milliseconds))
if not self.ac_relay_in_transition and not self.a_side_busy:
self._service_c_side()
def _enable_ac_relay(self):
self.ac_relay.enable()
self.ac_relay_in_transition = True
self.a_side_enabled = False
self.c_side_enabled = False
self.delay.add(ms=self.ac_relay_delay_ms,
callback=self._c_side_enabled,
name='enable_ac_relay')
def _disable_ac_relay(self):
self.ac_relay.disable()
self.ac_relay_in_transition = True
self.a_side_enabled = False
self.c_side_enabled = False
self.delay.add(ms=self.ac_relay_delay_ms,
callback=self._a_side_enabled,
name='disable_ac_relay')
# -------------------------------- A SIDE ---------------------------------
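    # Handoff sketch: to return to the A side while C is active, first
    # disable any held C-side drivers, wait ac_relay_delay_ms for them to
    # settle, then drop the relay (a second delay) before servicing the
    # A-side queue.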
def _enable_a_side(self):
if not self.a_side_enabled and not self.ac_relay_in_transition:
if self.c_side_active:
self._disable_all_c_side_drivers()
self.delay.add(ms=self.ac_relay_delay_ms,
callback=self._enable_a_side,
name='enable_a_side')
return
elif self.c_side_enabled:
self._disable_ac_relay()
else:
self._a_side_enabled()
def _a_side_enabled(self):
self.ac_relay_in_transition = False
self.a_side_enabled = True
self.c_side_enabled = False
self._service_a_side()
def _service_a_side(self):
if not self.a_side_queue:
return
elif not self.a_side_enabled:
self._enable_a_side()
return
while self.a_side_queue:
driver, ms = self.a_side_queue.pop()
if ms > 0:
driver.pulse(ms)
self.a_side_done_time = max(self.a_side_done_time,
time.time() + (ms / 1000.0))
elif ms == -1:
driver.enable()
self.drivers_holding_a_side.add(driver)
else: # ms == 0
driver.disable()
try:
self.drivers_holding_a_side.remove(driver)
except KeyError:
pass
def _add_a_driver(self, driver):
self.a_drivers.add(driver)
# -------------------------------- C SIDE ---------------------------------
def _enable_c_side(self):
if (not self.ac_relay_in_transition and
not self.c_side_enabled and
not self.a_side_busy):
self._enable_ac_relay()
elif self.c_side_enabled and self.c_side_queue:
self._service_c_side()
def _c_side_enabled(self):
self.ac_relay_in_transition = False
if self.a_side_queue:
self._enable_a_side()
return
self.a_side_enabled = False
self.c_side_enabled = True
self._service_c_side()
def _service_c_side(self):
if not self.c_side_queue:
return
if self.ac_relay_in_transition or self.a_side_busy:
return
elif not self.c_side_enabled:
self._enable_c_side()
return
while self.c_side_queue:
driver, ms = self.c_side_queue.pop()
if ms > 0:
driver.pulse(ms)
self.c_side_done_time = max(self.c_side_done_time,
time.time() + (ms / 1000.))
elif ms == -1:
driver.enable()
self.drivers_holding_c_side.add(driver)
else: # ms == 0
driver.disable()
try:
self.drivers_holding_c_side.remove(driver)
except KeyError:
pass
def _add_c_driver(self, driver):
self.c_drivers.add(driver)
def _disable_all_c_side_drivers(self):
if self.c_side_active:
for driver in self.c_drivers:
driver.disable()
self.drivers_holding_c_side = set()
self.c_side_done_time = 0
self.c_side_enabled = False
class SnuxDriver(object):
def __init__(self, number, platform_driver, overlay):
self.number = number
self.platform_driver = platform_driver
self.driver_settings = platform_driver.driver_settings
self.overlay = overlay
def __repr__(self):
return "SnuxDriver.{}".format(self.number)
def pulse(self, milliseconds=None, **kwargs):
if milliseconds is None:
milliseconds = self.platform_driver.get_pulse_ms()
self.overlay.driver_action(self.platform_driver, milliseconds)
# Usually pulse() returns the value (in ms) that the driver will pulse
# for so we can update Driver.time_when_done. But with A/C switched
# coils, we don't know when exactly that will be, so we return -1
return -1
def enable(self, **kwargs):
self.overlay.driver_action(self.platform_driver, -1)
def disable(self, **kwargs):
self.overlay.driver_action(self.platform_driver, 0)
driver_overlay_class = Snux
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
spierepf/mpf
|
mpf/platform/snux.py
|
Python
|
mit
| 14,196
|
__author__ = 'beast'
|
mr-robot/granule
|
tests/__init__.py
|
Python
|
mit
| 21
|
"""
MutatePDB
=========
"""
import argparse
import tempfile
from Bio import PDB
from Bio.PDB.Polypeptide import aa1
from Bio.PDB.Polypeptide import aa3
from Bio.PDB.Polypeptide import one_to_three
from ssbio.protein.structure.utils.cleanpdb import CleanPDB
from ssbio.protein.structure.utils.structureio import StructureIO
class MutatePDB(PDB.Select):
"""Selection rules to mutate a PDB file
These rules aim to:
- Mutate a specified residue number to a new amino acid
"""
keep_atom_list = ['N', 'C', 'O', 'CA']
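    # N, CA, C and O are the backbone atoms; when a residue is mutated, all
    # of its other (side-chain) atoms are dropped (see accept_atom below).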
def __init__(self, mutation_list):
"""Initialize the parameters which indicate what mutations will occur
Args:
chain:
residue_number:
mutate_to:
"""
self.mutation_list = [(i[0], int(i[1]), self._standard_resname(i[2])) for i in mutation_list]
self.chains_and_residues = [(i[0], int(i[1])) for i in mutation_list]
def _standard_resname(self, res):
resname3 = res.upper()
if resname3 not in list(aa3) and resname3 not in list(aa1):
# TODO: mutation to selenocysteine (U;SEC) is not working
raise ValueError("Unrecognised residue {}".format(res))
if len(resname3) == 1:
resname3 = one_to_three(resname3)
return resname3
def accept_residue(self, residue):
hetfield, resseq, icode = residue.get_id()
chain = residue.get_parent()
chain_id = chain.get_id()
if (chain_id,resseq) in self.chains_and_residues:
prev_resname = residue.resname
get_index = self.chains_and_residues.index((chain_id,resseq))
residue.resname = self.mutation_list[get_index][2]
print("Mutated {0}.{1}.{2} to {0}.{1}.{3}".format(chain_id, resseq, prev_resname, residue.resname))
return True
def accept_atom(self, atom):
residue = atom.get_parent()
hetfield, resseq, icode = residue.get_id()
chain = residue.get_parent()
chain_id = chain.get_id()
if (chain_id,resseq) in self.chains_and_residues and atom.get_id() not in self.keep_atom_list:
# print("Removing atom {}.{}.{}".format(chain_id, resseq, atom.get_id()))
return False
return True
def parse_mutation_input(instr):
init_split = instr.split(',')
second_split = [tuple(i.split('.')) for i in init_split]
return second_split
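# For example (illustrative): parse_mutation_input('A.4.TYR,B.4.TYR')
# returns [('A', '4', 'TYR'), ('B', '4', 'TYR')].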
if __name__ == '__main__':
p = argparse.ArgumentParser(description='Mutates a PDB file')
p.add_argument('infile', help='PDB file you want to mutate')
p.add_argument('mutations', help='Mutations in the form of Chain1.ResNum1.Mutation1,Chain2.ResNum2.Mutation2. Example: A.4.TYR,B.4.TYR')
p.add_argument('--outsuffix', '-o', default='_mutated', help='Suffix appended to PDB file')
p.add_argument('--clean', '-c', action='store_true', help='Clean PDB and keep only chain with mutation')
args = p.parse_args()
mutations = parse_mutation_input(args.mutations)
my_pdb = StructureIO(args.infile)
if args.clean:
my_cleaner = CleanPDB(keep_chains=[x[0] for x in mutations])
my_clean_pdb = my_pdb.write_pdb(out_suffix='_clean', out_dir=tempfile.gettempdir(), custom_selection=my_cleaner)
my_pdb = StructureIO(my_clean_pdb)
my_mutation = MutatePDB(mutations)
my_mutated_pdb = my_pdb.write_pdb(out_suffix=args.outsuffix, out_dir='mutated_pdbs', custom_selection=my_mutation)
print('Mutated PDB at: {}'.format(my_mutated_pdb))
|
SBRG/ssbio
|
ssbio/protein/structure/utils/mutatepdb.py
|
Python
|
mit
| 3,518
|
import re
import sys
from waflib import Options
import os.path as osp
from waflib import Logs
from waflib import Context
from waflib import Errors
def options(ctx):
import optparse
grp = ctx.parser.get_option_group("--gcc")
if grp==None:
grp=optparse.OptionGroup(ctx.parser,"compiler options")
grp.add_option("--gfortran",action="store_true",default=False,help="Do not test for ifort and only use gfortran")
grp.add_option("--ifort",action="store_true",default=False,help="Do not test for gfortran and only use ifort")
grp.add_option("--fortran_flagline",action="store",default="",help="flagline to link fortran object to c using ld")
ctx.add_option_group(grp)
def configure_(ctx):
if ctx.options.fortran_flagline:
        ctx.parse_flags(ctx.options.fortran_flagline,uselib="fc_runtime")
if sys.platform.lower()=="darwin":
ctx.env.fcshlib_PATTERN = 'lib%s.dylib'
ctx.env.has_ifort = False
if not Options.options.gfortran:
try:
ifort_conf(ctx)
return
        except Exception as e:
if Options.options.ifort:
raise
Logs.pprint("PINK", "ifort not found, defaulting to gfortran (cause: '%s')"%e)
gfortran_conf(ctx)
def configure(ctx):
configure_(ctx)
ctx.env.append_value("FCFLAGS_fcshlib",ctx.env.LINKFLAGS_fcshlib)
ctx.env["FCFLAGS_fpic"]=[]
ctx.env.append_value("FCFLAGS_fpic",[flg for flg in ctx.env.FCFLAGS_fcshlib if "-fpic" in flg.lower()])
def show_linkline(ctx):
ctx.start_msg("fortran link line")
ctx.end_msg(" ".join(["-L%s"%vv for vv in ctx.env.LIBPATH_fc_runtime])+" "+" ".join(["-l%s"%vv for vv in ctx.env.LIB_fc_runtime]))
def ifort_conf(ctx):
import waflib
import os
ctx.env.FC=[]
ctx.load('ifort')
if sys.platform.lower()=="darwin":
ctx.env.LINKFLAGS_fcshlib = ['-dynamiclib']
ctx.env.append_value('FCFLAGS',ctx.env.mopt.split())
ctx.env["FCFLAGS_fc_omp"]=[]
ctx.env.append_value("FCFLAGS_fc_omp","-openmp")
ctx.env.FCSHLIB_MARKER = [""]
ctx.env.FCSTLIB_MARKER = [""]
ctx.check_cc(
errmsg="failed",msg='Compile a test code with ifort',
mandatory=1,fragment = "program test\n WRITE(*,*) 'hello world'\n end program test\n",compile_filename='test.f90',features='fc fcprogram')
if not ctx.options.fortran_flagline:
ctx.start_msg("retrieve ifort link line")
try:
#print "%s %s -dryrun -dynamiclib -shared-intel -no-cxxlib dummy.f90"%(ctx.env.FC," ".join(ctx.env.FCFLAGS))
llgo,llge = ctx.cmd_and_log("%s %s -dryrun -dynamiclib -shared-intel -no-cxxlib dummy.f90"%(ctx.env.FC," ".join(ctx.env.FCFLAGS+ctx.env.FCFLAGS_fc_omp)), output=waflib.Context.BOTH)
#print "RET",llgo,llge
L = set([ll.strip() for ll in re.findall("-L(.+)\s*\\\\", llge.split("ld ")[1]) if ("ifort" in ll.lower()) or ("intel" in ll.lower())])
l = set([ll.strip() for ll in re.findall("-l(.+)\s*\\\\", llge.split("ld ")[1])])
rL = set()
rl = set()
for Li in L:
oli = os.listdir(Li)
for li in l:
if ctx.env.cshlib_PATTERN%li in oli:
rl.add(li)
rL.add(Li)
except:
ctx.end_msg(False)
raise
for pth in list(rL) + ["/lib","/lib64"]:
if osp.exists(pth):
ctx.env.append_value("LIBPATH_fc_runtime",pth)
ctx.env.append_value("RPATH_fc_runtime",pth)
ctx.env.append_value("LIB_fc_runtime",list(rl)+["pthread"])
ctx.end_msg(True)
show_linkline(ctx)
ctx.env.has_ifort = True
def ifort_conf_(ctx):
ctx.env.FC=[]
ctx.load('ifort')
if sys.platform.lower()=="darwin":
ctx.env.LINKFLAGS_fcshlib = ['-dynamiclib']
ctx.env.append_value('FCFLAGS',ctx.env.mopt.split())
ctx.env.append_value("FCFLAGS_fc_omp","-openmp")
ctx.env.FCSHLIB_MARKER = [""]
ctx.env.FCSTLIB_MARKER = [""]
ctx.check_cc(
errmsg="failed",msg='Compile a test code with ifort',
mandatory=1,fragment = "program test\n WRITE(*,*) 'hello world'\n end program test\n",compile_filename='test.f90',features='fc fcprogram')
if not ctx.options.fortran_flagline:
ctx.start_msg("retrieve ifort link line")
if "/" not in ctx.env.FC:
ctx.env.FC = ctx.cmd_and_log("which %s"%ctx.env.FC).strip()
#print ctx.env.FC
ifort_path = osp.dirname(osp.realpath(ctx.env.FC))
#print ifort_path
if ctx.options.m32:
try:
f=open(osp.join(ifort_path,'ifortvars_ia32.sh'))
except:
ctx.end_msg(False)
raise Errors.WafError("Can't locate ifort configuration file")
else:
try:
f=open(osp.join(ifort_path,'ifortvars_intel64.sh'))
except:
ctx.end_msg(False)
raise Errors.WafError("Can't locate ifort configuration file")
txt = f.read()
f.close()
#print txt
if sys.platform.lower()=="darwin":
sp = "DYLD_LIBRARY_PATH"
else:
sp = "LD_LIBRARY_PATH"
res = re.findall("\s"+sp+"\s*=\s*\"(.+)\"",txt)[0]
for pth in res.split(":"):
ctx.env.append_value("LIBPATH_fc_runtime",pth)
ctx.env.append_value("RPATH_fc_runtime",pth)
ctx.env.append_value("LIB_fc_runtime",["ifcore","intlc","ifport","imf","irc","svml","iomp5","pthread"])
ctx.end_msg(True)
show_linkline(ctx)
def gfortran_conf(ctx):
ctx.env.FC=[]
ctx.env.FCFLAGS = []
ctx.load('gfortran')
ctx.env["FCFLAGS_fc_omp"]=[]
ctx.env.append_value("FCFLAGS_fc_omp","-fopenmp")
ctx.env.append_value("FCFLAGS","-DGFORTRAN")
ctx.env.append_value("FCFLAGS","-ffixed-line-length-0")
ctx.env.append_value("FCFLAGS","-ffree-line-length-0")
mopt = ctx.env.mopt
if sys.platform.lower()=="darwin":
if "i386" in ctx.env.mopt:
ctx.env.append_value('FCFLAGS','-m32')
mopt = "-m32"
else:
ctx.env.append_value('FCFLAGS','-m64')
mopt = "-m64"
else:
ctx.env.append_value('FCFLAGS',ctx.env.mopt.split())
ctx.start_msg("Check gfortran version")
v90 = ctx.cmd_and_log(ctx.env.FC+" --version",quiet=Context.STDOUT).split("\n")[0].strip()
version90 = re.findall("(4\.[0-9]\.[0-9])",v90)
if len(version90)<1:
#Logs.pprint("PINK","Can't get gfortran version... Let's hope for the best")
ctx.end_msg("not found, let's hope for the best...",color="PINK")
else:
version90 = version90[0]
vmid = int(version90.split(".")[1])
if vmid<3:
ctx.end_msg(v90,color="YELLOW")
            raise Errors.WafError("gfortran version needs to be above 4.3, got %s"%version90)
ctx.end_msg(v90)
# kludge !
ctx.env.FCSHLIB_MARKER = [""]
ctx.env.FCSTLIB_MARKER = [mopt]
ctx.check_cc(
errmsg="failed",msg='Compile a test code with gfortran',
mandatory=1,fragment = "program test\n WRITE(*,*) 'hello world'\n end program test\n",compile_filename='test.f90',features='fc fcprogram')
ctx.start_msg("retrieve gfortran link line")
lgfpath = ctx.cmd_and_log(ctx.env.FC+" %s -print-file-name=libgfortran.dylib"%mopt,quiet=Context.STDOUT)
lpath = [osp.dirname(osp.realpath(lgfpath))]
lgfpath = ctx.cmd_and_log(ctx.env.FC+" %s -print-file-name=libgomp.dylib"%mopt,quiet=Context.STDOUT)
lpath += [osp.dirname(osp.realpath(lgfpath))]
lpath = set(lpath)
ctx.env.append_value("LIB_fc_runtime",["gfortran","gomp"])
ctx.env.append_value("LIBPATH_fc_runtime",list(lpath))
ctx.env.append_value("RPATH_fc_runtime",list(lpath))
ctx.end_msg(True)
show_linkline(ctx)
|
miguelzuma/montepython_zuma
|
wrapper_wmap/waf_tools/try_ifort.py
|
Python
|
mit
| 7,286
|
import asposepdfcloud
from asposepdfcloud.PdfApi import PdfApi
from asposepdfcloud.PdfApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
apiKey = "XXXXX" #sepcify App Key
appSid = "XXXXX" #sepcify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Pdf API SDK
api_client = asposepdfcloud.ApiClient.ApiClient(apiKey, appSid, True)
pdfApi = PdfApi(api_client)
#set input file name
name = "sample-input.pdf"
pageNumber = 1
try:
#upload file to aspose cloud storage
response = storageApi.PutCreate(name, data_folder + name)
#invoke Aspose.Pdf Cloud SDK API to get fragments from a particular page
response = pdfApi.GetFragments(name, pageNumber)
if response.Status == "OK":
count = len(response.TextItems.List)
print "count :: " + str(count)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
|
asposepdf/Aspose_Pdf_Cloud
|
Examples/Python/Examples/GetFragmentCountPDFPage.py
|
Python
|
mit
| 1,245
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/general/shared_data_terminal_s4.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/static/structure/general/shared_data_terminal_s4.py
|
Python
|
mit
| 454
|
import yaml
def load_catalogs(app):
with app.open_resource('../catalogs.yaml', 'r') as fh:
return yaml.load(fh)
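# Note: yaml.load() without an explicit Loader is deprecated (and unsafe on
# untrusted input) as of PyYAML 5.1; yaml.safe_load(fh) is the usual
# replacement when the catalog file is plain data.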
|
pudo/datameta
|
datameta/catalogs.py
|
Python
|
mit
| 126
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-27 09:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('nomi', '0015_auto_20170527_0937'),
]
operations = [
migrations.AddField(
model_name='post',
name='nomination',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='nomi.Nomination'),
),
]
|
aniketp41/Gymkhana-Nominations
|
nomi/migrations/0016_post_nomination.py
|
Python
|
mit
| 560
|
#!/usr/bin/env python3
import gitlab
from github import Github
from Utils.github_workflow_scripts.utils import timestamped_print, get_env_var
# ANSI Colors
RED = '\033[0;31m'
GREEN = '\033[0;32m'
RESET = '\033[0m'
GITLAB_PROJECT_ID = get_env_var('CI_PROJECT_ID', '2596') # the default is the id of the content project in code.pan.run
GITLAB_SERVER_URL = get_env_var('CI_SERVER_URL', 'https://code.pan.run') # disable-secrets-detection
GITLAB_WRITE_TOKEN = get_env_var('GITLAB_WRITE_TOKEN')
print = timestamped_print
def main():
"""
    Remove branches from the GitLab content repository that do not exist in the Github repository it is mirrored from.
    Head branches in Github that are deleted upon a PR merge event persist in GitLab despite having been deleted
    from the Github repository we mirror from. This script deletes from GitLab the branches which no
    longer exist in Github.
"""
# get github content repo's branches
github = Github(get_env_var('CONTENT_GITHUB_TOKEN'), verify=False)
organization = 'demisto'
repo = 'content'
content_repo = github.get_repo(f'{organization}/{repo}')
github_branches = content_repo.get_branches()
print(f'{github_branches.totalCount=}')
github_branch_names = set()
for github_branch in github_branches:
github_branch_names.add(github_branch.name)
# get gitlab content repo's branches
gitlab_client = gitlab.Gitlab(GITLAB_SERVER_URL, private_token=GITLAB_WRITE_TOKEN, ssl_verify=False)
gl_project = gitlab_client.projects.get(int(GITLAB_PROJECT_ID))
gitlab_branches = gl_project.branches.list(as_list=False)
print(f'{gitlab_branches.total=}')
diff_count = gitlab_branches.total - github_branches.totalCount
print(f'{diff_count} branches require deletion')
# delete gitlab branches
for gitlab_branch in gitlab_branches:
if (gitlab_branch_name := gitlab_branch.name) not in github_branch_names:
try:
gitlab_branch.delete()
print(f'{GREEN}deleted "{gitlab_branch_name}"{RESET}')
except gitlab.exceptions.GitlabError as e:
print(f'{RED}Deletion of {gitlab_branch_name} encountered an issue: {str(e)}{RESET}')
if __name__ == "__main__":
main()
|
demisto/content
|
Utils/delete_mismatched_branches.py
|
Python
|
mit
| 2,283
|
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='pylonsapp',
version='0.1',
description='',
author='',
author_email='',
url='',
install_requires=[
"Pylons",
"SQLAlchemy>=0.5",
],
setup_requires=["PasteScript>=1.6.3"],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
test_suite='nose.collector',
package_data={'pylonsapp': ['i18n/*/LC_MESSAGES/*.mo']},
#message_extractors={'pylonsapp': [
# ('**.py', 'python', None),
# ('templates/**.mako', 'mako', {'input_encoding': 'utf-8'}),
# ('public/**', 'ignore', None)]},
zip_safe=False,
paster_plugins=['PasteScript', 'Pylons'],
entry_points="""
[paste.app_factory]
main = pylonsapp.config.middleware:make_app
[paste.app_install]
main = pylons.util:PylonsInstaller
""",
)
|
FormAlchemy/formalchemy
|
pylonsapp/setup.py
|
Python
|
mit
| 1,025
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
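# For example (illustrative): replace_vars({'TITANIUM_SDK': '/opt/ti'},
# '$(TITANIUM_SDK)/iphone') returns '/opt/ti/iphone'.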
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
print "Couldn't find documentation file at: %s" % docdir
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','ti.mely.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','TiMelyModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
f = open(path)
if not os.path.exists(path): die("missing %s" % path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignoreExt=[]):
if not os.path.exists(dir): return
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] in ignoreExt: continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, '%s/%s'%(basepath,dir), 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
zip_dir(zf,'assets',modulepath,['.pyc','.js'])
zip_dir(zf,'example',modulepath,['.pyc'])
zip_dir(zf,'platform',modulepath,['.pyc','.js'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
|
titanium-forks/benbahrenburg.ti.mely
|
iOS/build.py
|
Python
|
mit
| 6,779
|
#!/usr/bin/env python3
import unittest
class IterablesTests(unittest.TestCase):
"""Test of iterable patterns"""
def test_comprehensions(self):
        squares = [x * x for x in range(5)]
        self.assertEqual(squares, [0, 1, 4, 9, 16])
if __name__ == '__main__':
unittest.main()
|
Shawn1874/CodeSamples
|
CodeSamplesPython/PythonSamples/PythonIterables/PythonIterables.py
|
Python
|
mit
| 213
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class UsagePaged(Paged):
"""
    A paging container for iterating over a list of :class:`Usage <azure.mgmt.network.v2017_11_01.models.Usage>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Usage]'}
}
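    # Shape of a paged REST response (sketch; key names per the map above):
    #   {"value": [<Usage objects>], "nextLink": "<url of the next page>"}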
def __init__(self, *args, **kwargs):
super(UsagePaged, self).__init__(*args, **kwargs)
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/usage_paged.py
|
Python
|
mit
| 917
|
import re
import sys
from datetime import datetime
import discord.game
import discordant.utils as utils
from discordant import Discordant
def _punishment_format(self, server, document):
server = server if server else self.default_server
if "user" not in document:
user_id = document["user_id"]
user = server.get_member(user_id)
document["user"] = user.mention if user else user_id
if "moderator" not in document:
moderator = server.get_member(document["moderator_id"])
document["moderator"] = moderator.mention
document["date"] = document["date"].strftime("%Y/%m/%d %I:%M %p UTC")
document["duration"] = "indefinite" \
if not document["duration"] \
else str(document["duration"]) + " hours"
return ("**{action}**\n" +
"*date*: {date}\n" +
"*user*: {user}\n" +
"*mod*: {moderator}\n" +
"*duration*: {duration}\n" +
"*reason*: {reason}").format(
**document)
async def _punishment_history(self, member, cursor):
output = ""
current = []
if await utils._is_punished(cursor, "warning"):
current.append("**warning**")
if await utils._is_punished(cursor, "mute"):
current.append("**mute**")
if current:
output += "currently active punishments: " + ", ".join(current) + "\n"
output += "\n".join(
[_punishment_format(self, member.server, x) for x in cursor])
return output
def _can_kick(self, user):
return utils.has_permission(user, "kick_members")
def _can_ban(self, user):
return utils.has_permission(user, "ban_members")
#@Discordant.register_command("modhistory", ["modh"], context=True,
#                             arg_func=utils.has_args, perm_func=_can_kick)
async def _moderation_history(self, args, message, context):
"""!modhistory <user>
displays punishment history for a user."""
user = utils.get_user(args, context.server.members, message)
if not user:
await self.send_message(message.channel, "User could not be found.")
return
collection = self.mongodb.punishments
cursor = await collection.find({"user_id": user.id}).to_list(None)
await self.send_message(
message.channel,
await _punishment_history(self, user, cursor)
if cursor else user.name + " has no punishment history.")
_mod_cmd_to_action = {"warn": "warning",
"mute": "mute",
"ban": "ban",
"unwarn": "remove warning",
"unmute": "remove mute",
"unban": "remove ban"}
async def _mod_cmd(self, args, message, context):
keys = ["duration", "reason"]
kwargs = utils.get_kwargs(args, keys)
split = utils.try_shlex(args)
if not kwargs and len(split) > 1:
# has more than one pos arg, no kwargs
args = split[0] + ' reason="' + " ".join(split[1:]) + '"'
kwargs = utils.get_kwargs(args, keys)
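        # e.g. (illustrative) args of 'alice being rude' has no kwargs, so it
        # is rewritten to 'alice reason="being rude"' and re-parsed above.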
user_search = utils.strip_kwargs(args, keys)
user = utils.get_user(user_search, context.server.members, message)
if not user:
await self.send_message(message.channel, "User could not be found.")
return
if not utils.gt_role(self, context.author, user, True):
await self.send_message(
message.channel, "Cannot {} {}".format(context.cmd_name, user.name))
return
duration = utils.get_from_kwargs(
"duration", kwargs,
self.config["moderation"][context.cmd_name + "_duration"])
try:
duration = float(duration)
except ValueError:
await self.send_message(message.channel, "Invalid duration.")
return
reason = utils.get_from_kwargs("reason", kwargs, "No reason given.")
action = _mod_cmd_to_action[context.cmd_name]
collection = self.mongodb.punishments
if await utils.is_punished(self, user, action):
await self.send_message(
message.channel,
user.name + " already has an active " + action + ".")
return
else:
cursor = await collection.find({"user_id": user.id}).to_list(None)
if cursor:
await self.send_message(
message.channel,
user.name + " has a history of:\n" + await _punishment_history(
self, user, cursor) + "\n\nType y/n to continue.")
reply = await self.wait_for_message(
check=lambda m: m.author == message.author and
(m.content.lower() == "y" or
m.content.lower() == "n"),
timeout=60)
if not reply or reply.content.lower() == "n":
await self.send_message(
message.channel, "Cancelled " + action + ".")
return
document = {
"user_id": user.id,
"action": action,
"moderator_id": message.author.id,
"date": datetime.utcnow(),
"duration": duration,
"reason": reason
}
await collection.insert_one(document)
role = utils.action_to_role(self, action)
not_warn = action != "warning"
if role and not_warn: # hide warning change
await self.add_roles(user, role)
await self.send_message(
self.log_channel if not_warn else self.warning_log_channel,
_punishment_format(self, message.server, document))
await utils.add_punishment_timer(self, user, action)
#@Discordant.register_command("warn", context=True,
#                             arg_func=utils.has_args, perm_func=_can_kick)
async def _warn(self, args, message, context):
"""!warn <user> [reason] or !warn <user> [duration=hours] [reason=str]
warns a user."""
await _mod_cmd(self, args, message, context)
#@Discordant.register_command("mute", context=True,
#                             arg_func=utils.has_args, perm_func=_can_kick)
async def _mute(self, args, message, context):
"""!mute <user> [reason] or !mute <user> [duration=hours] [reason=str]
mutes a user."""
await _mod_cmd(self, args, message, context)
async def _mod_remove_cmd(self, args, message, context):
split = utils.try_shlex(args)
user_search = split[0]
reason = " ".join(split[1:]) if len(split) > 1 else "No reason given."
user = utils.get_user(user_search, context.server.members, message)
if not user:
await self.send_message(message.channel, "User could not be found.")
return
if not utils.gt_role(self, context.author, user, True):
await self.send_message(
message.channel, "Cannot {} {}".format(context.cmd_name, user.name))
return
collection = self.mongodb.punishments
action = _mod_cmd_to_action[context.cmd_name]
orig_action = action.replace("remove ", "")
if not await utils.is_punished(self, user, orig_action):
await self.send_message(
message.channel, user.name + " has no active " + orig_action + ".")
return
document = {
"user_id": user.id,
"action": action,
"moderator_id": message.author.id,
"date": datetime.utcnow(),
"duration": 0,
"reason": reason
}
await collection.insert_one(document)
role = utils.action_to_role(self, orig_action)
not_warn = orig_action != "warning"
if role and not_warn: # hide warning change
await self.remove_roles(user, role)
await self.send_message(
self.log_channel if not_warn else self.warning_log_channel,
_punishment_format(self, message.server, document))
#@Discordant.register_command("unwarn", context=True,
#                             arg_func=utils.has_args, perm_func=_can_kick)
async def _unwarn(self, args, message, context):
"""!unwarn <user> [reason]
removes a warning for a user."""
await _mod_remove_cmd(self, args, message, context)
#@Discordant.register_command("unmute", context=True,
#                             arg_func=utils.has_args, perm_func=_can_kick)
async def _unmute(self, args, message, context):
"""!unmute <user> [reason]
removes a mute for a user."""
await _mod_remove_cmd(self, args, message, context)
#@Discordant.register_command("ban", context=True,
#                             arg_func=utils.has_args, perm_func=_can_ban)
async def _ban(self, args, message, context):
"""!ban <user/user id> [reason]
bans a user."""
split = utils.try_shlex(args)
user_search = split[0]
reason = " ".join(split[1:]) if len(split) > 1 else "No reason given."
user = utils.get_user(user_search, context.server.members, message, True)
if not user:
await self.send_message(
message.channel,
"User could not be found. "
"Please use an @ mention string or name#id.\n"
"Search logs? Type y/n.")
reply = await self.wait_for_message(
check=lambda m: m.author == message.author and (
m.content.lower() == "y" or m.content.lower() == "n"),
timeout=60)
if not reply: # if no reply, cancel silently to avoid confusion
return
if reply.content.lower() == "n":
await self.send_message(message.channel, "Cancelled ban.")
return
authors = set()
for channel in context.server.channels:
if channel in [self.staff_channel, self.testing_channel,
self.log_channel, self.warning_log_channel] or \
channel.type != discord.ChannelType.text:
continue
async for msg in self.logs_from(channel, limit=500):
authors.add(msg.author)
user = utils.get_user(user_search, authors)
if not user:
await self.send_message(
message.channel, "User could not be found.")
return
elif not utils.gt_role(self, context.author, user, True):
await self.send_message(message.channel, "Cannot ban " + user.name)
return
collection = self.mongodb.punishments
doc = await collection.find_one({"user_id": user.id, "action": "ban"})
if doc or user in await self.get_bans(context.server):
await self.send_message(
message.channel, user.name + " is already banned.")
return
document = {
"user_id": user.id,
"action": "ban",
"moderator_id": message.author.id,
"date": datetime.utcnow(),
"duration": 0,
"reason": reason
}
await collection.insert_one(document)
await self.send_message(
self.log_channel,
_punishment_format(self, message.server, document))
await self.ban(user)
#@Discordant.register_command("unban", context=True,
#                             arg_func=utils.has_args, perm_func=_can_ban)
async def _unban(self, args, message, context):
"""!unban <user/user id> [reason]
unbans a user."""
split = utils.try_shlex(args)
user_search = split[0]
reason = " ".join(split[1:]) if len(split) > 1 else "No reason given."
bans = await self.get_bans(context.server)
user = utils.get_user(user_search, bans, message, True)
if not user:
await self.send_message(message.channel,
"User could not be found, or is not banned.")
return
collection = self.mongodb.punishments
action = "remove ban"
orig_action = "ban"
if not await utils.is_punished(self, user, orig_action):
await self.send_message(
message.channel, user.name + " has no active " + orig_action + ".")
return
document = {
"user_id": user.id,
"action": action,
"moderator_id": message.author.id,
"date": datetime.utcnow(),
"duration": 0,
"reason": reason
}
await collection.insert_one(document)
await self.send_message(self.log_channel, _punishment_format(
self, message.server, document))
await self.unban(context.server, user)
@Discordant.register_command("bans", context=True, perm_func=_can_ban)
async def _bans(self, args, message, context):
"""!bans [page]
lists the bans in this server."""
page_length = 10
bans = await self.get_bans(context.server)
len_bans = len(bans)
pages = -(-len_bans // page_length) # ceil division
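    # e.g. 25 bans with page_length 10 gives -(-25 // 10) == 3 pages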
page = int(args) - 1 if args.isdigit() else pages - 1
if page >= pages or page < 0:
await self.send_message(
message.channel, "There are only {} pages available.".format(pages))
return
start = page * page_length
end = (page + 1) * page_length
await self.send_message(
message.channel,
utils.python_format(
"\n".join(["{0}. {1} ({1.id})".format(start + index + 1, user)
for index, user in enumerate(bans[start:end])]) +
"\npage {} out of {}".format(page + 1, pages)))
#@Discordant.register_command("reason", context=True,
#                             arg_func=utils.has_args, perm_func=_can_kick)
async def _reason(self, args, message, context):
"""!reason <user> <reason>
edits the reason of the given user's most recent punishment."""
split = utils.try_shlex(args)
if len(split) < 2:
await self.send_message(message.channel, context.cmd.help)
return
user_search = split[0]
reason = " ".join(split[1:])
user = utils.get_user(user_search, context.server.members, message) or \
utils.get_user(user_search, await self.get_bans(context.server))
if not user:
await self.send_message(message.channel, "User could not be found.")
return
collection = self.mongodb.punishments
query = {"user_id": user.id}
cursor = await collection.find(query).sort(
"$natural", -1).limit(1).to_list(None)
if not cursor:
await self.send_message(
message.channel, user.name + " has no punishment history.")
return
doc = cursor[0]
moderator = context.server.get_member(doc["moderator_id"])
if utils.gt_role(self, moderator, context.author):
await self.send_message(
message.channel,
"Cannot edit punishment issued by moderator of higher role.")
return
doc["reason"] = reason
await collection.save(doc)
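    # Locate the log message for this user's punishment and rewrite its
    # "*reason*:" line in place via regex substitution.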
async def edit_message(channel):
async for msg in self.logs_from(channel, limit=sys.maxsize):
if "\n*user*: {}\n".format(user) in msg.content:
await self.edit_message(
msg,
re.sub(r"(\*reason\*: ).*", "\g<1>" + reason, msg.content))
return True
return False
if not await edit_message(self.log_channel):
await edit_message(self.warning_log_channel)
|
jonnyli1125/discordant
|
discordant/commands/mod.py
|
Python
|
mit
| 14,797
|
import util
from flask import (
abort,
Blueprint,
current_app,
render_template,
redirect,
request,
url_for,
flash,
)
import model
from database import db_session
configurations = Blueprint(
"configurations", __name__, template_folder="templates/configurations"
)
@configurations.route("/", methods=["GET", "POST"], defaults={"page": 1})
@configurations.route("/<int:page>", methods=["GET", "POST"])
@util.login_required("operator")
def configurations_view(page):
"""
The config view page
Returns:
a rendered config view template
"""
config_query = model.Configuration.query
configs = util.paginate(
config_query.order_by(model.Configuration.category.asc()), page, 30
)
return render_template("configurations/view.html", configs=configs)
@configurations.route("/add/", methods=["GET", "POST"], defaults={"config_id": None})
@configurations.route("/edit/<int:config_id>/", methods=["GET"])
@util.login_required("operator")
def configurations_add(config_id):
"""
Displays the config adding and updating page and accepts form submits from those pages.
Params:
config_id (int): the config to edit, if this is None a new config will be made
Returns:
a rendered add/edit template or a redirect to the config view page
"""
if request.method == "GET": # display add form
return display_config_add_form(config_id)
elif request.method == "POST": # process added/edited config
return add_config()
else:
current_app.logger.info("invalid config add request method: %s", request.method)
abort(400)
@configurations.route("/del/<config_id>/", methods=["GET"])
@util.login_required("operator")
def configurations_del(config_id):
"""
Deletes a config
Params:
config_id (int): the config to delete
Returns:
a redirect to the config view page
"""
config = model.Configuration.query.filter_by(id=int(config_id)).scalar()
if config is None:
error = "Failed to delete config '{}' as config doesn't exist.".format(
config_id
)
current_app.logger.info(error)
flash(error, "danger")
return redirect(url_for("configurations.configurations_view"))
db_session.delete(config)
db_session.commit()
return redirect(url_for("configurations.configurations_view"))
def add_config():
"""
Adds or edits a config
Note:
must be called from within a request context
Returns:
a redirect to the config view page
"""
config_id = request.form.get("config_id")
key = request.form.get("key")
val = request.form.get("val")
valType = request.form.get("valType")
category = request.form.get("category")
if config_id: # edit
config = model.Configuration.query.filter_by(id=int(config_id)).one()
config.key = key
config.val = val
config.valType = valType
config.category = category
else: # add
if is_dup_config_key(key):
error = "Failed to add config '{}' as config already exists.".format(key)
current_app.logger.info(error)
flash(error, "danger")
return redirect(url_for("configurations.configurations_view"))
config = model.Configuration(
key=key, val=val, valType=valType, category=category
)
db_session.add(config)
db_session.commit()
return redirect(url_for("configurations.configurations_view"))
def display_config_add_form(config_id):
"""
Displays the config add template
Params:
config_id (int): config_id
Returns:
a rendered config add/edit template
"""
if config_id is None: # add
return render_template(
"configurations/add_edit.html", action_label="Add", config=None
)
else: # edit
config = model.Configuration.query.filter_by(id=int(config_id)).scalar()
if config is None:
error = "Failed to edit config '{}' as config doesn't exist.".format(
config_id
)
current_app.logger.info(error)
flash(error, "danger")
return redirect(url_for("configurations.configurations_view"))
return render_template(
"configurations/add_edit.html", action_label="Edit", config=config
)
# Util functions
def is_dup_config_key(key):
"""
Checks if a key is a duplicate of another config
Params:
key (str): the config key to test
Returns:
bool: True if the key is a duplicate, False otherwise
"""
dup_config = model.Configuration.query.filter_by(key=key).scalar()
if dup_config:
return True
else:
return False
|
unoacm/code_court
|
code_court/courthouse/views/admin/configurations.py
|
Python
|
mit
| 4,820
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/space/debris/shared_tie_fighter_debris_e.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/static/space/debris/shared_tie_fighter_debris_e.py
|
Python
|
mit
| 453
|
#! /usr/bin/env python
# @author Billy Wilson Arante
# @created 1/4/2016 PHT
def product(L):
prod = 1
for i in L:
prod = prod * i
return prod
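# For example, product([1, 2, 3, 4, 5]) returns 120.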
def test():
data = [1, 2, 3, 4, 5]
print(product(data))
if __name__ == "__main__":
test()
|
arantebillywilson/python-snippets
|
py3/cs-circle/prod.py
|
Python
|
mit
| 268
|
from pprint import pprint # noqa
from aleph.core import db
from aleph.model import Events, Role
from aleph.logic.roles import update_role
from aleph.logic.notifications import publish, GLOBAL
from aleph.views.util import validate
from aleph.tests.util import TestCase
class NotificationsApiTestCase(TestCase):
def setUp(self):
super(NotificationsApiTestCase, self).setUp()
self.rolex = self.create_user(foreign_id="rolex")
self.admin = self.create_user(foreign_id="admin")
self.col = self.create_collection(creator=self.admin)
update_role(self.rolex)
update_role(self.admin)
event = Events.PUBLISH_COLLECTION
publish(
event, self.admin.id, params={"collection": self.col}, channels=[GLOBAL]
)
event = Events.GRANT_COLLECTION
publish(
event,
self.admin.id,
params={"collection": self.col, "role": self.rolex},
channels=[self.col, self.rolex],
)
db.session.commit()
def test_anonymous(self):
res = self.client.get("/api/2/notifications")
assert res.status_code == 403, res
def test_notifications(self):
_, headers = self.login(foreign_id="admin")
res = self.client.get("/api/2/notifications", headers=headers)
assert res.status_code == 200, res
assert res.json["total"] == 0, res.json
assert self.rolex.type == Role.USER, self.rolex.type
_, headers = self.login(foreign_id=self.rolex.foreign_id)
res = self.client.get("/api/2/notifications", headers=headers)
assert res.status_code == 200, res
assert res.json["total"] == 2, res.json
not0 = res.json["results"][0]
validate(not0, "Notification")
role = not0["params"]["role"]
assert isinstance(role, dict), not0
assert "actor" in not0["params"], not0["params"]
|
pudo/aleph
|
aleph/tests/test_notifications_api.py
|
Python
|
mit
| 1,920
|
#############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015 Jason Pruitt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#############################################################################
class Config(object):
def __init__(self):
self.MEM_OFFSET = 0x56000000
self.MEM_LENGTH = 0x100
self.GPIO_CON_OFFSET = 0x00
self.GPIO_DATA_OFFSET = 0x04
self.GPIO_UPD_OFFSET = 0x08
self.GPIO_SEL_OFFSET = 0x0C
self.UPDN_UP = 1
self.UPDN_DOWN = 0
self.UPDN_NONE = 2
self.FUNC_IN = 0
self.FUNC_OUT = 1
self.FUNC_EINT = 2
self.FUNC_PWM = 2
self.FUNC_RESET = 0
self.DATA_RESET = 0
self.UPDN_RESET = 1
self.EINT_CONT_OFFSET = 0x88
self.EINT_CONT_LENGTH = 0x60
self.EINT_EN_OFFSET = 0xA4
self.EINT_PEND_OFFSET = 0xA8
self.EINT_LOW = 0x00
self.EINT_HIGH = 0x01
self.EINT_FALL = 0x02
self.EINT_RISE = 0x04
self.EINT_BOTH = 0x07
self.EINT_RESET = 0
self.PWMCHIP_ID = 0
self.pins = {7:{'bank':'GPF', 'gpio':{'num': 1 }}, # EINT1/GPF1 (7) 161,
11:{'bank':'GPF', 'gpio':{'num': 2 }}, # EINT2/GPF2 (11) 162,
12:{'bank':'GPF', 'gpio':{'num': 3 }}, # EINT3/GPF3 (12) 163
13:{'bank':'GPF', 'gpio':{'num': 4 }, 'eint': {'num': 4 }}, # EINT4/GPF4 (13) 164
15:{'bank':'GPF', 'gpio':{'num': 5 }, 'eint': {'num': 5 }}, # EINT5/GPF5 (15) 165
16:{'bank':'GPB', 'gpio':{'num': 2 }}, # TOUT2/GPB2 (16) 34
18:{'bank':'GPG', 'gpio':{'num': 1 }, 'eint': {'num': 9 }}, # EINT9/GPG1 (18) 193
22:{'bank':'GPB', 'gpio':{'num': 0 }, 'pwm': {'num': 0 }}, # TOUT0/GPB0 (22) 32
24:{'bank':'GPL', 'gpio':{'num': 13}}, # SS0/GPL13 (24) 333
26:{'bank':'GPB', 'gpio':{'num': 1 }, 'pwm': {'num': 1 }}, # TOUT1/GPB1 (26) 33
27:{'bank':'GPB', 'gpio':{'num': 7 }}, # SDA1/GPB7 (27) 39
28:{'bank':'GPB', 'gpio':{'num': 8 }}, # SCL1/GPB8 (28) 40
29:{'bank':'GPG', 'gpio':{'num': 3 }, 'eint': {'num': 11}}, # EINT11/GPG3 (29) 195
31:{'bank':'GPG', 'gpio':{'num': 4 }, 'eint': {'num': 12}}, # EINT12/GPG4 (31) 196
32:{'bank':'GPG', 'gpio':{'num': 5 }, 'eint': {'num': 13}}, # EINT13/GPG5 (32) 197
33:{'bank':'GPG', 'gpio':{'num': 6 }, 'eint': {'num': 14}}, # EINT14/GPG6 (33) 198
35:{'bank':'GPG', 'gpio':{'num': 7 }, 'eint': {'num': 15}}, # EINT15/GPG7 (35) 199
36:{'bank':'GPG', 'gpio':{'num': 8 }, 'eint': {'num': 16}}, # EINT16/GPG8 (36) 200
37:{'bank':'GPG', 'gpio':{'num': 9 }, 'eint': {'num': 17}}, # EINT17/GPG9 (37) 201
38:{'bank':'GPG', 'gpio':{'num': 10}, 'eint': {'num': 18}}, # EINT18/GPG10 (38) 202
40:{'bank':'GPG', 'gpio':{'num': 11}, 'eint': {'num': 19}} # EINT19/GPG11 (40) 203
}
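        # Each entry maps a physical header pin to its GPIO bank and in-bank
        # number; the optional 'eint' and 'pwm' keys mark pins that also
        # expose external-interrupt or PWM functions.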
self.banks = {'GPB':0x10,
'GPF':0x50,
'GPG':0x60,
'GPL':0xf0}
|
jrspruitt/pyfa_gpio
|
fgpio/boards/nanopi.py
|
Python
|
mit
| 4,871
|
import math
t_max = 100
# for T in range(t_max):
# step = math.exp((alpha*(-T))/t_max)
# print(T)
# print(step)
alpha = 0.1
T=t_max
i = 0
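# Cool T from t_max down toward alpha; the decrement shrinks as T falls since
# step = alpha * exp((T - t_max) / t_max), and i counts the iterations taken.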
while T>alpha:
step = alpha*math.exp(((T-t_max)/t_max))
T -= step
i+=1
print("\t%d" % i)
print(T)
print("step = %f" % step)
|
sergeimoiseev/othodi
|
test_cooling_step.py
|
Python
|
mit
| 299
|
import gzip
import lzma
import bz2
import io
import builtins
WRITE_MODE = "wt"
class ReusableFile(object):
"""
Class which emulates the builtin file except that calling iter() on it will return separate
iterators on different file handlers (which are automatically closed when iteration stops). This
is useful for allowing a file object to be iterated over multiple times while keep evaluation
lazy.
"""
# pylint: disable=too-many-instance-attributes
def __init__(
self,
path,
delimiter=None,
mode="r",
buffering=-1,
encoding=None,
errors=None,
newline=None,
):
"""
Constructor arguments are passed directly to builtins.open
:param path: passed to open
    :param delimiter: line delimiter (stored; not passed to builtins.open)
:param mode: passed to open
:param buffering: passed to open
:param encoding: passed to open
:param errors: passed to open
:param newline: passed to open
:return: ReusableFile from the arguments
"""
self.path = path
self.delimiter = delimiter
self.mode = mode
self.buffering = buffering
self.encoding = encoding
self.errors = errors
self.newline = newline
def __iter__(self):
"""
Returns a new iterator over the file using the arguments from the constructor. Each call
to __iter__ returns a new iterator independent of all others
:return: iterator over file
"""
# pylint: disable=no-member
with builtins.open(
self.path,
mode=self.mode,
buffering=self.buffering,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
for line in file_content:
yield line
def read(self):
# pylint: disable=no-member
with builtins.open(
self.path,
mode=self.mode,
buffering=self.buffering,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
return file_content.read()
class CompressedFile(ReusableFile):
magic_bytes = None
# pylint: disable=too-many-instance-attributes
def __init__(
self,
path,
delimiter=None,
mode="rt",
buffering=-1,
compresslevel=9,
encoding=None,
errors=None,
newline=None,
):
super(CompressedFile, self).__init__(
path,
delimiter=delimiter,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
self.compresslevel = compresslevel
@classmethod
def is_compressed(cls, data):
return data.startswith(cls.magic_bytes)
class GZFile(CompressedFile):
magic_bytes = b"\x1f\x8b\x08"
# pylint: disable=too-many-instance-attributes
def __init__(
self,
path,
delimiter=None,
mode="rt",
buffering=-1,
compresslevel=9,
encoding=None,
errors=None,
newline=None,
):
super(GZFile, self).__init__(
path,
delimiter=delimiter,
mode=mode,
buffering=buffering,
compresslevel=compresslevel,
encoding=encoding,
errors=errors,
newline=newline,
)
def __iter__(self):
if "t" in self.mode:
with gzip.GzipFile(self.path, compresslevel=self.compresslevel) as gz_file:
gz_file.read1 = gz_file.read
with io.TextIOWrapper(
gz_file,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
for line in file_content:
yield line
else:
with gzip.open(
self.path, mode=self.mode, compresslevel=self.compresslevel
) as file_content:
for line in file_content:
yield line
def read(self):
with gzip.GzipFile(self.path, compresslevel=self.compresslevel) as gz_file:
gz_file.read1 = gz_file.read
with io.TextIOWrapper(
gz_file,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
return file_content.read()
class BZ2File(CompressedFile):
magic_bytes = b"\x42\x5a\x68"
# pylint: disable=too-many-instance-attributes
def __init__(
self,
path,
delimiter=None,
mode="rt",
buffering=-1,
compresslevel=9,
encoding=None,
errors=None,
newline=None,
):
super(BZ2File, self).__init__(
path,
delimiter=delimiter,
mode=mode,
buffering=buffering,
compresslevel=compresslevel,
encoding=encoding,
errors=errors,
newline=newline,
)
def __iter__(self):
with bz2.open(
self.path,
mode=self.mode,
compresslevel=self.compresslevel,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
for line in file_content:
yield line
def read(self):
with bz2.open(
self.path,
mode=self.mode,
compresslevel=self.compresslevel,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
return file_content.read()
class XZFile(CompressedFile):
magic_bytes = b"\xfd\x37\x7a\x58\x5a\x00"
# pylint: disable=too-many-instance-attributes
def __init__(
self,
path,
delimiter=None,
mode="rt",
buffering=-1,
compresslevel=9,
encoding=None,
errors=None,
newline=None,
check=-1,
preset=None,
filters=None,
format=None,
):
super(XZFile, self).__init__(
path,
delimiter=delimiter,
mode=mode,
buffering=buffering,
compresslevel=compresslevel,
encoding=encoding,
errors=errors,
newline=newline,
)
self.check = check
self.preset = preset
self.format = format
self.filters = filters
def __iter__(self):
with lzma.open(
self.path,
mode=self.mode,
format=self.format,
check=self.check,
preset=self.preset,
filters=self.filters,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
for line in file_content:
yield line
def read(self):
with lzma.open(
self.path,
mode=self.mode,
format=self.format,
check=self.check,
preset=self.preset,
filters=self.filters,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
return file_content.read()
COMPRESSION_CLASSES = [GZFile, BZ2File, XZFile]
N_COMPRESSION_CHECK_BYTES = max(len(cls.magic_bytes) for cls in COMPRESSION_CLASSES)
def get_read_function(filename, disable_compression):
if disable_compression:
return ReusableFile
else:
with open(filename, "rb") as f:
start_bytes = f.read(N_COMPRESSION_CHECK_BYTES)
for cls in COMPRESSION_CLASSES:
if cls.is_compressed(start_bytes):
return cls
return ReusableFile
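# For example, a file beginning with b"\x1f\x8b\x08" (the gzip magic bytes)
# makes get_read_function return GZFile; unknown leading bytes fall back to
# ReusableFile.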
def universal_write_open(
path,
mode,
buffering=-1,
encoding=None,
errors=None,
newline=None,
compresslevel=9,
format=None,
check=-1,
preset=None,
filters=None,
compression=None,
):
# pylint: disable=unexpected-keyword-arg,no-member
if compression is None:
return builtins.open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
elif compression in ("gz", "gzip"):
return gzip.open(
path,
mode=mode,
compresslevel=compresslevel,
errors=errors,
newline=newline,
encoding=encoding,
)
elif compression in ("lzma", "xz"):
return lzma.open(
path,
mode=mode,
format=format,
check=check,
preset=preset,
filters=filters,
encoding=encoding,
errors=errors,
newline=newline,
)
elif compression == "bz2":
return bz2.open(
path,
mode=mode,
compresslevel=compresslevel,
encoding=encoding,
errors=errors,
newline=newline,
)
else:
raise ValueError(
"compression must be None, gz, gzip, lzma, or xz and was {0}".format(
compression
)
)
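# Example (illustrative path): universal_write_open("out.txt.gz", WRITE_MODE,
# compression="gz") returns a text-mode gzip writer, while compression=None
# falls back to builtins.open.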
|
EntilZha/PyFunctional
|
functional/io.py
|
Python
|
mit
| 9,557
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements an interface to the VAMPIRE code for atomistic
simulations of magnetic materials.
This module depends on a compiled vampire executable available in the path.
Please download at https://vampire.york.ac.uk/download/ and
follow the instructions to compile the executable.
If you use this module, please cite the following:
"Atomistic spin model simulations of magnetic nanomaterials."
R. F. L. Evans, W. J. Fan, P. Chureemart, T. A. Ostler, M. O. A. Ellis
and R. W. Chantrell. J. Phys.: Condens. Matter 26, 103202 (2014)
"""
import subprocess
import logging
import numpy as np
import pandas as pd
from monty.dev import requires
from monty.os.path import which
from pymatgen.analysis.magnetism.heisenberg import HeisenbergMapper
__author__ = "ncfrey"
__version__ = "0.1"
__maintainer__ = "Nathan C. Frey"
__email__ = "ncfrey@lbl.gov"
__status__ = "Development"
__date__ = "June 2019"
VAMPEXE = which("vampire-serial")
class VampireCaller:
"""
Run Vampire on a material with magnetic ordering and exchange parameter information to compute the critical
temperature with classical Monte Carlo.
"""
@requires(
VAMPEXE,
"VampireCaller requires vampire-serial to be in the path."
"Please follow the instructions at https://vampire.york.ac.uk/download/.",
)
def __init__(
self,
ordered_structures,
energies,
mc_box_size=4.0,
equil_timesteps=2000,
mc_timesteps=4000,
save_inputs=False,
hm=None,
user_input_settings=None,
):
"""
user_input_settings is a dictionary that can contain:
* start_t (int): Start MC sim at this temp, defaults to 0 K.
* end_t (int): End MC sim at this temp, defaults to 1500 K.
* temp_increment (int): Temp step size, defaults to 25 K.
Args:
ordered_structures (list): Structure objects with magmoms.
energies (list): Energies of each relaxed magnetic structure.
mc_box_size (float): x=y=z dimensions (nm) of MC simulation box
equil_timesteps (int): number of MC steps for equilibrating
mc_timesteps (int): number of MC steps for averaging
save_inputs (bool): if True, save scratch dir of vampire input files
hm (HeisenbergMapper): object already fit to low energy
magnetic orderings.
user_input_settings (dict): optional commands for VAMPIRE Monte Carlo
Parameters:
sgraph (StructureGraph): Ground state graph.
unique_site_ids (dict): Maps each site to its unique identifier
            nn_interactions (dict): {i: j} pairs of NN interactions
between unique sites.
ex_params (dict): Exchange parameter values (meV/atom)
mft_t (float): Mean field theory estimate of critical T
mat_name (str): Formula unit label for input files
mat_id_dict (dict): Maps sites to material id # for vampire
indexing.
TODO:
* Create input files in a temp folder that gets cleaned up after run terminates
"""
self.mc_box_size = mc_box_size
self.equil_timesteps = equil_timesteps
self.mc_timesteps = mc_timesteps
self.save_inputs = save_inputs
        self.user_input_settings = user_input_settings or {}  # None would break the "in" checks in _create_input
# Sort by energy if not already sorted
ordered_structures = [
s for _, s in sorted(zip(energies, ordered_structures), reverse=False)
]
energies = sorted(energies, reverse=False)
# Get exchange parameters and set instance variables
if not hm:
hm = HeisenbergMapper(ordered_structures, energies, cutoff=7.5, tol=0.02)
# Instance attributes from HeisenbergMapper
self.hm = hm
self.structure = hm.ordered_structures[0] # ground state
self.sgraph = hm.sgraphs[0] # ground state graph
self.unique_site_ids = hm.unique_site_ids
self.nn_interactions = hm.nn_interactions
self.dists = hm.dists
self.tol = hm.tol
self.ex_params = hm.get_exchange()
# Full structure name before reducing to only magnetic ions
self.mat_name = str(hm.ordered_structures_[0].composition.reduced_formula)
        # Switch to scratch dir which automatically cleans up vampire input files unless user specifies to save them
# with ScratchDir('/scratch', copy_from_current_on_enter=self.save_inputs,
# copy_to_current_on_exit=self.save_inputs) as temp_dir:
# os.chdir(temp_dir)
# Create input files
self._create_mat()
self._create_input()
self._create_ucf()
# Call Vampire
process = subprocess.Popen(
["vampire-serial"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = process.communicate()
stdout = stdout.decode()
if stderr:
vanhelsing = stderr.decode()
if len(vanhelsing) > 27: # Suppress blank warning msg
logging.warning(vanhelsing)
if process.returncode != 0:
raise RuntimeError(
"Vampire exited with return code {}.".format(process.returncode)
)
self._stdout = stdout
self._stderr = stderr
# Process output
nmats = max(self.mat_id_dict.values())
self.output = VampireOutput("output", nmats)
def _create_mat(self):
structure = self.structure
mat_name = self.mat_name
magmoms = structure.site_properties["magmom"]
# Maps sites to material id for vampire inputs
mat_id_dict = {}
nmats = 0
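        # One material per unique site id; a second material is appended when
        # a site id hosts both spin-up and spin-down sublattices.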
for key in self.unique_site_ids:
spin_up, spin_down = False, False
nmats += 1 # at least 1 mat for each unique site
# Check which spin sublattices exist for this site id
for site in key:
m = magmoms[site]
if m > 0:
spin_up = True
if m < 0:
spin_down = True
# Assign material id for each site
for site in key:
m = magmoms[site]
if spin_up and not spin_down:
mat_id_dict[site] = nmats
if spin_down and not spin_up:
mat_id_dict[site] = nmats
if spin_up and spin_down:
# Check if spin up or down shows up first
m0 = magmoms[key[0]]
if m > 0 and m0 > 0:
mat_id_dict[site] = nmats
if m < 0 and m0 < 0:
mat_id_dict[site] = nmats
if m > 0 and m0 < 0:
mat_id_dict[site] = nmats + 1
if m < 0 and m0 > 0:
mat_id_dict[site] = nmats + 1
# Increment index if two sublattices
if spin_up and spin_down:
nmats += 1
mat_file = ["material:num-materials=%d" % (nmats)]
for key in self.unique_site_ids:
i = self.unique_site_ids[key] # unique site id
for site in key:
mat_id = mat_id_dict[site]
# Only positive magmoms allowed
m_magnitude = abs(magmoms[site])
if magmoms[site] > 0:
spin = 1
if magmoms[site] < 0:
spin = -1
atom = structure[i].species.reduced_formula
mat_file += ["material[%d]:material-element=%s" % (mat_id, atom)]
mat_file += [
"material[%d]:damping-constant=1.0" % (mat_id),
"material[%d]:uniaxial-anisotropy-constant=1.0e-24"
% (mat_id), # xx - do we need this?
"material[%d]:atomic-spin-moment=%.2f !muB" % (mat_id, m_magnitude),
"material[%d]:initial-spin-direction=0,0,%d" % (mat_id, spin),
]
mat_file = "\n".join(mat_file)
mat_file_name = mat_name + ".mat"
self.mat_id_dict = mat_id_dict
with open(mat_file_name, "w") as f:
f.write(mat_file)
def _create_input(self):
"""Todo:
* How to determine range and increment of simulation?
"""
structure = self.structure
mcbs = self.mc_box_size
equil_timesteps = self.equil_timesteps
mc_timesteps = self.mc_timesteps
mat_name = self.mat_name
input_script = ["material:unit-cell-file=%s.ucf" % (mat_name)]
input_script += ["material:file=%s.mat" % (mat_name)]
# Specify periodic boundary conditions
input_script += [
"create:periodic-boundaries-x",
"create:periodic-boundaries-y",
"create:periodic-boundaries-z",
]
# Unit cell size in Angstrom
abc = structure.lattice.abc
ucx, ucy, ucz = abc[0], abc[1], abc[2]
input_script += ["dimensions:unit-cell-size-x = %.10f !A" % (ucx)]
input_script += ["dimensions:unit-cell-size-y = %.10f !A" % (ucy)]
input_script += ["dimensions:unit-cell-size-z = %.10f !A" % (ucz)]
# System size in nm
input_script += [
"dimensions:system-size-x = %.1f !nm" % (mcbs),
"dimensions:system-size-y = %.1f !nm" % (mcbs),
"dimensions:system-size-z = %.1f !nm" % (mcbs),
]
# Critical temperature Monte Carlo calculation
input_script += [
"sim:integrator = monte-carlo",
"sim:program = curie-temperature",
]
# Default Monte Carlo params
input_script += [
"sim:equilibration-time-steps = %d" % (equil_timesteps),
"sim:loop-time-steps = %d" % (mc_timesteps),
"sim:time-steps-increment = 1",
]
# Set temperature range and step size of simulation
if "start_t" in self.user_input_settings:
start_t = self.user_input_settings["start_t"]
else:
start_t = 0
if "end_t" in self.user_input_settings:
end_t = self.user_input_settings["end_t"]
else:
end_t = 1500
if "temp_increment" in self.user_input_settings:
temp_increment = self.user_input_settings["temp_increment"]
else:
temp_increment = 25
input_script += [
"sim:minimum-temperature = %d" % (start_t),
"sim:maximum-temperature = %d" % (end_t),
"sim:temperature-increment = %d" % (temp_increment),
]
# Output to save
input_script += [
"output:temperature",
"output:mean-magnetisation-length",
"output:material-mean-magnetisation-length",
"output:mean-susceptibility",
]
input_script = "\n".join(input_script)
with open("input", "w") as f:
f.write(input_script)
def _create_ucf(self):
structure = self.structure
mat_name = self.mat_name
abc = structure.lattice.abc
ucx, ucy, ucz = abc[0], abc[1], abc[2]
ucf = ["# Unit cell size:"]
ucf += ["%.10f %.10f %.10f" % (ucx, ucy, ucz)]
ucf += ["# Unit cell lattice vectors:"]
a1 = list(structure.lattice.matrix[0])
ucf += ["%.10f %.10f %.10f" % (a1[0], a1[1], a1[2])]
a2 = list(structure.lattice.matrix[1])
ucf += ["%.10f %.10f %.10f" % (a2[0], a2[1], a2[2])]
a3 = list(structure.lattice.matrix[2])
ucf += ["%.10f %.10f %.10f" % (a3[0], a3[1], a3[2])]
nmats = max(self.mat_id_dict.values())
ucf += ["# Atoms num_materials; id cx cy cz mat cat hcat"]
ucf += ["%d %d" % (len(structure), nmats)]
# Fractional coordinates of atoms
for site, r in enumerate(structure.frac_coords):
# Back to 0 indexing for some reason...
mat_id = self.mat_id_dict[site] - 1
ucf += ["%d %.10f %.10f %.10f %d 0 0" % (site, r[0], r[1], r[2], mat_id)]
# J_ij exchange interaction matrix
sgraph = self.sgraph
ninter = 0
for i, node in enumerate(sgraph.graph.nodes):
ninter += sgraph.get_coordination_of_site(i)
ucf += ["# Interactions"]
ucf += ["%d isotropic" % (ninter)]
iid = 0 # counts number of interaction
for i, node in enumerate(sgraph.graph.nodes):
connections = sgraph.get_connected_sites(i)
for c in connections:
jimage = c[1] # relative integer coordinates of atom j
dx = jimage[0]
dy = jimage[1]
dz = jimage[2]
j = c[2] # index of neighbor
dist = round(c[-1], 2)
# Look up J_ij between the sites
j_exc = self.hm._get_j_exc(i, j, dist)
# Convert J_ij from meV to Joules
j_exc *= 1.6021766e-22
j_exc = str(j_exc) # otherwise this rounds to 0
ucf += ["%d %d %d %d %d %d %s" % (iid, i, j, dx, dy, dz, j_exc)]
iid += 1
ucf = "\n".join(ucf)
ucf_file_name = mat_name + ".ucf"
with open(ucf_file_name, "w") as f:
f.write(ucf)
class VampireOutput:
"""
This class processes results from a Vampire Monte Carlo simulation
and returns the critical temperature.
"""
def __init__(self, vamp_stdout, nmats):
"""
Args:
vamp_stdout (txt file): stdout from running vampire-serial.
Attributes:
critical_temp (float): Monte Carlo Tc result.
"""
self.vamp_stdout = vamp_stdout
self.critical_temp = np.nan
self._parse_stdout(vamp_stdout, nmats)
def _parse_stdout(self, vamp_stdout, nmats):
names = (
["T", "m_total"]
+ ["m_" + str(i) for i in range(1, nmats + 1)]
+ ["X_x", "X_y", "X_z", "X_m", "nan"]
)
# Parsing vampire MC output
df = pd.read_csv(vamp_stdout, sep="\t", skiprows=9, header=None, names=names)
df.drop("nan", axis=1, inplace=True)
df.to_csv("vamp_out.txt")
# Max of susceptibility <-> critical temp
T_crit = df.iloc[df.X_m.idxmax()]["T"]
self.critical_temp = T_crit
|
fraricci/pymatgen
|
pymatgen/command_line/vampire_caller.py
|
Python
|
mit
| 14,662
|
from dodo_commands import Dodo
Dodo.parser.add_argument("what")
Dodo.run(["make", Dodo.args.what], cwd=Dodo.get("/MAKE/cwd"))
|
mnieber/dodo_commands_tutorial
|
part2/after/commands/mk.py
|
Python
|
mit
| 127
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import flt, cint, cstr, now
from frappe.modules import load_doctype_module
from frappe.model.base_document import BaseDocument
from frappe.model.naming import set_new_name
# once_only validation
# methods
def get_doc(arg1, arg2=None):
if isinstance(arg1, BaseDocument):
return arg1
elif isinstance(arg1, basestring):
doctype = arg1
else:
doctype = arg1.get("doctype")
controller = get_controller(doctype)
if controller:
return controller(arg1, arg2)
raise ImportError, arg1
_classes = {}
def get_controller(doctype):
if not doctype in _classes:
module = load_doctype_module(doctype)
classname = doctype.replace(" ", "")
if hasattr(module, classname):
_class = getattr(module, classname)
if issubclass(_class, Document):
_class = getattr(module, classname)
else:
raise ImportError, doctype
else:
raise ImportError, doctype
_classes[doctype] = _class
return _classes[doctype]
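# e.g. an illustrative doctype "Sales Order" maps to classname "SalesOrder";
# the resolved class is cached in _classes for later lookups.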
class Document(BaseDocument):
def __init__(self, arg1, arg2=None):
self.doctype = self.name = None
if arg1 and isinstance(arg1, basestring):
if not arg2:
# single
self.doctype = self.name = arg1
else:
self.doctype = arg1
if isinstance(arg2, dict):
# filter
self.name = frappe.db.get_value(arg1, arg2, "name")
if self.name is None:
raise frappe.DoesNotExistError, (arg1, arg2)
else:
self.name = arg2
self.load_from_db()
elif isinstance(arg1, dict):
super(Document, self).__init__(arg1)
self.init_valid_columns()
else:
# incorrect arguments. let's not proceed.
raise frappe.DataError("Document({0}, {1})".format(arg1, arg2))
def load_from_db(self):
if not getattr(self, "_metaclass", False) and self.meta.issingle:
self.update(frappe.db.get_singles_dict(self.doctype))
self.init_valid_columns()
self._fix_numeric_types()
else:
d = frappe.db.get_value(self.doctype, self.name, "*", as_dict=1)
if not d:
frappe.throw(("{0} {1} not found").format(_(self.doctype), self.name), frappe.DoesNotExistError)
self.update(d)
if self.name=="DocType" and self.doctype=="DocType":
from frappe.model.meta import doctype_table_fields
table_fields = doctype_table_fields
else:
table_fields = self.meta.get_table_fields()
for df in table_fields:
children = frappe.db.get_values(df.options,
{"parent": self.name, "parenttype": self.doctype, "parentfield": df.fieldname},
"*", as_dict=True, order_by="idx asc")
if children:
self.set(df.fieldname, children)
else:
self.set(df.fieldname, [])
def has_permission(self, permtype):
if getattr(self, "ignore_permissions", False):
return True
return frappe.has_permission(self.doctype, permtype, self)
def raise_no_permission_to(self, perm_type):
raise frappe.PermissionError("No permission to {} {} {}".format(perm_type, self.doctype, self.name or ""))
def insert(self, ignore_permissions=None):
if ignore_permissions!=None:
self.ignore_permissions = ignore_permissions
self.set("__islocal", True)
if not self.has_permission("create"):
self.raise_no_permission_to("create")
self._set_defaults()
self._set_docstatus_user_and_timestamp()
self.check_if_latest()
set_new_name(self)
self.run_method("before_insert")
self.set_parent_in_children()
self.run_before_save_methods()
self._validate()
# run validate, on update etc.
# parent
if getattr(self.meta, "issingle", 0):
self.update_single(self.get_valid_dict())
else:
self.db_insert()
# children
for d in self.get_all_children():
d.db_insert()
self.run_method("after_insert")
self.run_post_save_methods()
return self
def save(self, ignore_permissions=None):
if ignore_permissions!=None:
self.ignore_permissions = ignore_permissions
if self.get("__islocal") or not self.get("name"):
self.insert()
return
if not self.has_permission("write"):
self.raise_no_permission_to("save")
self._set_docstatus_user_and_timestamp()
self.check_if_latest()
self.set_parent_in_children()
self.run_before_save_methods()
self._validate()
# parent
if self.meta.issingle:
self.update_single(self.get_valid_dict())
else:
self.db_update()
# children
child_map = {}
ignore_children_type = self.get("ignore_children_type") or []
for d in self.get_all_children():
d.db_update()
child_map.setdefault(d.doctype, []).append(d.name)
for df in self.meta.get_table_fields():
if df.options not in ignore_children_type:
cnames = child_map.get(df.options) or []
if cnames:
frappe.db.sql("""delete from `tab%s` where parent=%s and parenttype=%s and
name not in (%s)""" % (df.options, '%s', '%s', ','.join(['%s'] * len(cnames))),
tuple([self.name, self.doctype] + cnames))
else:
frappe.db.sql("""delete from `tab%s` where parent=%s and parenttype=%s""" \
% (df.options, '%s', '%s'), (self.name, self.doctype))
self.run_post_save_methods()
return self
def update_single(self, d):
frappe.db.sql("""delete from tabSingles where doctype=%s""", self.doctype)
for field, value in d.iteritems():
if field != "doctype":
frappe.db.sql("""insert into tabSingles(doctype, field, value)
values (%s, %s, %s)""", (self.doctype, field, value))
def _set_docstatus_user_and_timestamp(self):
self._original_modified = self.modified
self.modified = now()
self.modified_by = frappe.session.user
if not self.creation:
self.creation = self.modified
if not self.owner:
self.owner = self.modified_by
if self.docstatus==None:
self.docstatus=0
for d in self.get_all_children():
d.docstatus = self.docstatus
d.modified = self.modified
d.modified_by = self.modified_by
if not d.owner:
d.owner = self.owner
if not d.creation:
d.creation = self.creation
def _validate(self):
self._validate_mandatory()
self._validate_links()
self._validate_constants()
for d in self.get_all_children():
d._validate_constants()
self._extract_images_from_text_editor()
def _set_defaults(self):
if frappe.flags.in_import:
return
new_doc = frappe.new_doc(self.doctype)
self.update_if_missing(new_doc)
# children
for df in self.meta.get_table_fields():
new_doc = frappe.new_doc(df.options)
value = self.get(df.fieldname)
if isinstance(value, list):
for d in value:
d.update_if_missing(new_doc)
def check_if_latest(self):
conflict = False
self._action = "save"
if not self.get('__islocal'):
if self.meta.issingle:
modified = frappe.db.get_value(self.doctype, self.name, "modified")
if cstr(modified) and cstr(modified) != cstr(self._original_modified):
conflict = True
else:
tmp = frappe.db.get_value(self.doctype, self.name,
["modified", "docstatus"], as_dict=True)
if not tmp:
frappe.throw(_("Record does not exist"))
modified = cstr(tmp.modified)
if modified and modified != cstr(self._original_modified):
conflict = True
self.check_docstatus_transition(tmp.docstatus)
if conflict:
frappe.msgprint(_("Error: Document has been modified after you have opened it") \
+ (" (%s, %s). " % (modified, self.modified)) \
+ _("Please refresh to get the latest document."),
raise_exception=frappe.TimestampMismatchError)
else:
self.check_docstatus_transition(0)
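    # docstatus semantics: 0 = draft, 1 = submitted, 2 = cancelled.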
def check_docstatus_transition(self, docstatus):
if not self.docstatus:
self.docstatus = 0
if docstatus==0:
if self.docstatus==0:
self._action = "save"
elif self.docstatus==1:
self._action = "submit"
if not self.has_permission("submit"):
self.raise_no_permission_to("submit")
else:
raise frappe.DocstatusTransitionError("Cannot change docstatus from 0 to 2")
elif docstatus==1:
if self.docstatus==1:
self._action = "update_after_submit"
self.validate_update_after_submit()
if not self.has_permission("submit"):
self.raise_no_permission_to("submit")
elif self.docstatus==2:
self._action = "cancel"
if not self.has_permission("cancel"):
self.raise_no_permission_to("cancel")
else:
raise frappe.DocstatusTransitionError("Cannot change docstatus from 1 to 0")
elif docstatus==2:
raise frappe.ValidationError
def set_parent_in_children(self):
for d in self.get_all_children():
d.parent = self.name
d.parenttype = self.doctype
def validate_update_after_submit(self):
if getattr(self, "ignore_validate_update_after_submit", False):
return
self._validate_update_after_submit()
for d in self.get_all_children():
d._validate_update_after_submit()
# TODO check only allowed values are updated
def _validate_mandatory(self):
if self.get("ignore_mandatory"):
return
missing = self._get_missing_mandatory_fields()
for d in self.get_all_children():
missing.extend(d._get_missing_mandatory_fields())
if not missing:
return
for fieldname, msg in missing:
msgprint(msg)
raise frappe.MandatoryError(", ".join((each[0] for each in missing)))
def _validate_links(self):
if self.get("ignore_links"):
return
invalid_links = self.get_invalid_links()
for d in self.get_all_children():
invalid_links.extend(d.get_invalid_links())
if not invalid_links:
return
msg = ", ".join((each[2] for each in invalid_links))
frappe.throw(_("Could not find {0}").format(msg),
frappe.LinkValidationError)
def get_all_children(self, parenttype=None):
ret = []
for df in self.meta.get("fields", {"fieldtype": "Table"}):
if parenttype:
if df.options==parenttype:
return self.get(df.fieldname)
value = self.get(df.fieldname)
if isinstance(value, list):
ret.extend(value)
return ret
def _extract_images_from_text_editor(self):
from frappe.utils.file_manager import extract_images_from_html
if self.doctype != "DocType":
for df in self.meta.get("fields", {"fieldtype":"Text Editor"}):
extract_images_from_html(self, df.fieldname)
def run_method(self, method, *args, **kwargs):
"""run standard triggers, plus those in frappe"""
if hasattr(self, method):
fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)
fn.__name__ = method.encode("utf-8")
return Document.hook(fn)(self, *args, **kwargs)
def submit(self):
self.docstatus = 1
self.save()
def cancel(self):
self.docstatus = 2
self.save()
def run_before_save_methods(self):
if getattr(self, "ignore_validate", False):
return
if self._action=="save":
self.run_method("validate")
self.run_method("before_save")
elif self._action=="submit":
self.run_method("validate")
self.run_method("before_submit")
elif self._action=="cancel":
self.run_method("before_cancel")
elif self._action=="update_after_submit":
self.run_method("before_update_after_submit")
def run_post_save_methods(self):
if self._action=="save":
self.run_method("on_update")
elif self._action=="submit":
self.run_method("on_update")
self.run_method("on_submit")
elif self._action=="cancel":
self.run_method("on_cancel")
elif self._action=="update_after_submit":
self.run_method("on_update_after_submit")
@staticmethod
def hook(f):
def add_to_return_value(self, new_return_value):
if isinstance(new_return_value, dict):
if not self.get("_return_value"):
self._return_value = {}
self._return_value.update(new_return_value)
else:
self._return_value = new_return_value or self.get("_return_value")
def compose(fn, *hooks):
def runner(self, method, *args, **kwargs):
add_to_return_value(self, fn(self, *args, **kwargs))
for f in hooks:
add_to_return_value(self, f(self, method, *args, **kwargs))
return self._return_value
return runner
def composer(self, *args, **kwargs):
hooks = []
method = f.__name__
doc_events = frappe.get_hooks("doc_events", {})
for handler in doc_events.get(self.doctype, {}).get(method, []) \
+ doc_events.get("*", {}).get(method, []):
hooks.append(frappe.get_attr(handler))
composed = compose(f, *hooks)
return composed(self, method, *args, **kwargs)
return composer
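    # In effect, hook() runs handlers registered in hooks.py under
    # "doc_events" (for this doctype or "*") after the wrapped method,
    # merging any dict return values into self._return_value.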
def validate_value(self, fieldname, condition, val2, doc=None, raise_exception=None):
"""check that value of fieldname should be 'condition' val2
else throw exception"""
error_condition_map = {
"in": _("one of"),
"not in": _("none of"),
"^": _("beginning with"),
}
if not doc:
doc = self
df = doc.meta.get_field(fieldname)
val1 = doc.get(fieldname)
if df.fieldtype in ("Currency", "Float"):
val1 = flt(val1, self.precision(df.fieldname, doc.parentfield or None))
val2 = flt(val2, self.precision(df.fieldname, doc.parentfield or None))
elif df.fieldtype in ("Int", "Check"):
val1 = cint(val1)
val2 = cint(val2)
elif df.fieldtype in ("Data", "Text", "Small Text", "Long Text",
"Text Editor", "Select", "Link"):
val1 = cstr(val1)
val2 = cstr(val2)
if not frappe.compare(val1, condition, val2):
label = doc.meta.get_label(fieldname)
condition_str = error_condition_map.get(condition, condition)
if doc.parentfield:
msg = _("Incorrect value in row {0}: {1} must be {2} {3}".format(doc.idx, label, condition_str, val2))
else:
msg = _("Incorrect value: {1} must be {2} {3}".format(label, condition_str, val2))
# raise passed exception or True
msgprint(msg, raise_exception=raise_exception or True)
def validate_table_has_rows(self, parentfield, raise_exception=None):
if not (isinstance(self.get(parentfield), list) and len(self.get(parentfield)) > 0):
label = self.meta.get_label(parentfield)
frappe.throw(_("Table {0} cannot be empty").format(label), raise_exception or frappe.EmptyTableError)
def round_floats_in(self, doc, fieldnames=None):
if not fieldnames:
fieldnames = (df.fieldname for df in
doc.meta.get("fields", {"fieldtype": ["in", ["Currency", "Float"]]}))
for fieldname in fieldnames:
doc.set(fieldname, flt(doc.get(fieldname), self.precision(fieldname, doc.parentfield)))
def precision(self, fieldname, parentfield=None):
from frappe.model.meta import get_field_precision
if parentfield and not isinstance(parentfield, basestring):
parentfield = parentfield.parentfield
if not hasattr(self, "_precision"):
self._precision = frappe._dict({
"default": cint(frappe.db.get_default("float_precision")) or 3,
"options": {}
})
if self._precision.setdefault(parentfield or "main", {}).get(fieldname) is None:
meta = frappe.get_meta(self.meta.get_field(parentfield).options if parentfield else self.doctype)
df = meta.get_field(fieldname)
if df.fieldtype == "Currency" and df.options and not self._precision.options.get(df.options):
self._precision.options[df.options] = get_field_precision(df, self)
if df.fieldtype == "Currency":
self._precision[parentfield or "main"][fieldname] = cint(self._precision.options.get(df.options)) or \
self._precision.default
elif df.fieldtype == "Float":
self._precision[parentfield or "main"][fieldname] = self._precision.default
return self._precision[parentfield or "main"][fieldname]
|
rkawale/Internalhr-frappe
|
frappe/model/document.py
|
Python
|
mit
| 15,295
|
import telegram
import markovify
import builtins
import re
import traceback
import random
def processTextOld(text):
    if builtins.markov is None:
print("Setting to text")
builtins.markov = markovify.Text(text)
else:
print("Combining")
builtins.markov = markovify.combine([builtins.markov, markovify.Text(text)], [0.5, 0.5])
def processText(text):
text.replace(".", "\n", 999)
text
f = open('chatStorage/markov.txt', mode='a')
f.write('\n\n')
f.write(text)
f.close()
def getMarkov(messageText):
#if markov is None, just return "not enough info"
f = open('chatStorage/markov.txt', mode='r')
myText = ""
for line in f:
myText += line + "\n"
builtins.markov = markovify.NewlineText(myText)
ret = ""
lastWord = re.split(r'[@\s:,\'*]', messageText.lower())[-1]
if lastWord.lower() != "/markov" and lastWord in myText:
try:
ret = builtins.markov.make_sentence_with_start(lastWord, tries=50)
except Exception:
print(traceback.format_exc())
ret = ""
if ret == "":
ret = builtins.markov.make_short_sentence(140, tries=30)
if ret == "" or ret == None:
x = random.randint(0, 5)
if x == 0:
ret = "haha"
elif x == 1:
ret = "good, good"
elif x == 2:
ret = "Don't you have someone else to be bothering?"
elif x == 3:
ret = "nah"
elif x == 4:
ret = "lmao"
elif x == 5:
ret = "shhhhhhhhhhh"
return ret.capitalize()
|
noisemaster/AdamTestBot
|
src/atbMarkov.py
|
Python
|
mit
| 1,684
|
#!/usr/bin/env python
import os
from flask import Flask
from flask_restful import Api
from blueprint import Blueprint
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
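# Classes using Singleton as their metaclass are constructed at most once;
# subsequent calls return the cached instance from _instances.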
class CApp(object):
__metaclass__ = Singleton
def __init__(self, *args):
if (len(args)):
name = args[0]
else:
name = __name__
self.app = Flask(name)
self.api = Api(self.app)
self.appbp = Blueprint('api', name)
self.wsgi_app = self.app.wsgi_app
self.app.config['SECRET_KEY'] = "I\x8e\x96\xd1\x15r\xb4\xbf\xb6\x12\x17*\xed\x93!b/\xc3Yg\x93\xebG#" #os.urandom(24)
        self.app.logger.info('constructor completed')
def debug_mode(self):
self.app.config['DEBUG'] = True
self.app.config['TESTING'] = True
return self
def get_app(self):
return self.app
def get_api(self):
return self.api
|
michalgawin/OKNO
|
common/app.py
|
Python
|
mit
| 1,096
|
'''
Backend KivyAgg
=====
.. image:: images/backend_agg_example.jpg
:align: right
The :class:`FigureCanvasKivyAgg` widget is used to create a matplotlib graph.
The render will cover the whole are of the widget unless something different is
specified using a :meth:`blit`.
When you are creating a FigureCanvasKivyAgg widget, you must at least
initialize it with a matplotlib figure object. This class uses agg to get a
static image of the plot and then the image is rendered using a
:class:`~kivy.graphics.texture.Texture`. See backend_kivy documentation for
more information since both backends can be used in the exact same way.
Examples
--------
Example of a simple Hello world matplotlib App::
fig, ax = plt.subplots()
ax.text(0.6, 0.5, "hello", size=50, rotation=30.,
ha="center", va="center",
bbox=dict(boxstyle="round",
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
)
)
ax.text(0.5, 0.4, "world", size=50, rotation=-30.,
ha="right", va="top",
bbox=dict(boxstyle="square",
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
)
)
canvas = FigureCanvasKivyAgg(figure=fig)
The object canvas can be added as a widget into the kivy tree widget.
If a change is done on the figure an update can be performed using
:meth:`~kivy.ext.mpl.backend_kivyagg.FigureCanvasKivyAgg.draw`.::
# update graph
canvas.draw()
The plot can be exported to png with
:meth:`~kivy.ext.mpl.backend_kivyagg.FigureCanvasKivyAgg.print_png`, as an
argument receives the `filename`.::
# export to png
canvas.print_png("my_plot.png")
Backend KivyAgg Events
-----------------------
The events available are the same events available from Backend Kivy.::
def my_callback(event):
print('press released from test', event.x, event.y, event.button)
fig.canvas.mpl_connect('mpl_event', my_callback)
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__all__ = ('FigureCanvasKivyAgg',)
import six
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backend_bases import register_backend, ShowBase
try:
import kivy
except ImportError:
raise ImportError("this backend requires Kivy to be installed.")
from kivy.app import App
from kivy.graphics.texture import Texture
from kivy.graphics import Rectangle, Color
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty
from kivy.base import EventLoop
from kivy.uix.floatlayout import FloatLayout
from kivy.core.image import Image
from kivy.garden.matplotlib.backend_kivy import FigureCanvasKivy,\
FigureManagerKivy, show, new_figure_manager,\
NavigationToolbar2Kivy
register_backend('png', 'backend_kivyagg', 'PNG File Format')
toolbar = None
my_canvas = None
def new_figure_manager(num, *args, **kwargs):
'''Create a new figure manager instance for the figure given.
'''
# if a main-level app must be created, this (and
# new_figure_manager_given_figure) is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
'''Create a new figure manager instance and a new figure canvas instance
for the given figure.
'''
canvas = FigureCanvasKivyAgg(figure)
manager = FigureManagerKivy(canvas, num)
global my_canvas
global toolbar
toolbar = manager.toolbar.actionbar if manager.toolbar else None
my_canvas = canvas
return manager
class MPLKivyApp(App):
'''Creates the App initializing a FloatLayout with a figure and toolbar
widget.
'''
figure = ObjectProperty(None)
toolbar = ObjectProperty(None)
def build(self):
EventLoop.ensure_window()
layout = FloatLayout()
if self.figure:
self.figure.size_hint_y = 0.9
layout.add_widget(self.figure)
if self.toolbar:
self.toolbar.size_hint_y = 0.1
layout.add_widget(self.toolbar)
return layout
class Show(ShowBase):
    '''mainloop needs to be overridden to define the show() behavior for the kivy
framework.
'''
def mainloop(self):
global my_canvas
global toolbar
app = App.get_running_app()
if app is None:
app = MPLKivyApp(figure=my_canvas, toolbar=toolbar)
app.run()
show = Show()
class FigureCanvasKivyAgg(FigureCanvasKivy, FigureCanvasAgg):
'''FigureCanvasKivyAgg class. See module documentation for more
information.
'''
def __init__(self, figure, **kwargs):
self.figure = figure
self.bind(size=self._on_size_changed)
super(FigureCanvasKivyAgg, self).__init__(figure=self.figure, **kwargs)
self.img_texture = None
self.img_rect = None
self.blit()
def draw(self):
'''
Draw the figure using the agg renderer
'''
self.canvas.clear()
FigureCanvasAgg.draw(self)
if self.blitbox is None:
l, b, w, h = self.figure.bbox.bounds
w, h = int(w), int(h)
buf_rgba = self.get_renderer().buffer_rgba()
else:
bbox = self.blitbox
l, b, r, t = bbox.extents
w = int(r) - int(l)
h = int(t) - int(b)
t = int(b) + h
reg = self.copy_from_bbox(bbox)
buf_rgba = reg.to_string()
texture = Texture.create(size=(w, h))
texture.flip_vertical()
color = self.figure.get_facecolor()
with self.canvas:
Color(*color)
Rectangle(pos=self.pos, size=(w, h))
Color(1.0, 1.0, 1.0, 1.0)
self.img_rect = Rectangle(texture=texture, pos=self.pos,
size=(w, h))
texture.blit_buffer(bytes(buf_rgba), colorfmt='rgba', bufferfmt='ubyte')
self.img_texture = texture
filetypes = FigureCanvasKivy.filetypes.copy()
filetypes['png'] = 'Portable Network Graphics'
def _on_pos_changed(self, *args):
if self.img_rect is not None:
self.img_rect.pos = self.pos
def _print_image(self, filename, *args, **kwargs):
'''Write out format png. The image is saved with the filename given.
'''
l, b, w, h = self.figure.bbox.bounds
img = None
if self.img_texture is None:
texture = Texture.create(size=(w, h))
texture.blit_buffer(bytes(self.get_renderer().buffer_rgba()),
colorfmt='rgba', bufferfmt='ubyte')
texture.flip_vertical()
img = Image(texture)
else:
img = Image(self.img_texture)
img.save(filename)
''' Standard names that backend.__init__ is expecting '''
FigureCanvas = FigureCanvasKivyAgg
FigureManager = FigureManagerKivy
NavigationToolbar = NavigationToolbar2Kivy
show = show
|
MartinHeinz/training-and-food-tracker
|
libs/garden/matplotlib/backend_kivyagg.py
|
Python
|
mit
| 7,628
|
import demistomock as demisto
from CommonServerPython import *
res = demisto.executeCommand('ssh', {'cmd': 'cat /var/log/demisto/docker.log', 'using': 'localhost'})
output = 'File: /var/log/demisto/docker.log\n'
output += res[0].get('Contents').get('output')
return_results(output)
|
demisto/content
|
Packs/ServerLogs/Scripts/ServerLogsDocker/ServerLogsDocker.py
|
Python
|
mit
| 284
|
#!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""\
Convenience wrapper for compiling V8 with gn/ninja and running tests.
Sets up build output directories if they don't exist.
Produces simulator builds for non-Intel target architectures.
Uses Goma by default if it is detected (at output directory setup time).
Expects to be run from the root of a V8 checkout.
Usage:
gm.py [<arch>].[<mode>].[<target>] [testname...]
All arguments are optional. Most combinations should work, e.g.:
gm.py ia32.debug x64.release d8
gm.py x64 mjsunit/foo cctest/test-bar/*
"""
# See HELP below for additional documentation.
import os
import subprocess
import sys
BUILD_OPTS_DEFAULT = ""
BUILD_OPTS_GOMA = "-j1000 -l50"
BUILD_TARGETS_TEST = ["d8", "cctest", "unittests"]
BUILD_TARGETS_ALL = ["all"]
# All arches that this script understands.
ARCHES = ["ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
"s390", "s390x", "x87"]
# Arches that get built/run when you don't specify any.
DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"]
# Modes that this script understands.
MODES = ["release", "debug", "optdebug"]
# Modes that get built/run when you don't specify any.
DEFAULT_MODES = ["release", "debug"]
# Build targets that can be manually specified.
TARGETS = ["d8", "cctest", "unittests", "v8_fuzzers", "mkgrokdump"]
# Build targets that get built when you don't specify any (and specified tests
# don't imply any other targets).
DEFAULT_TARGETS = ["d8"]
# Tests that run-tests.py would run by default that can be run with
# BUILD_TARGETS_TESTS.
DEFAULT_TESTS = ["cctest", "debugger", "intl", "message", "mjsunit",
"preparser", "unittests"]
# These can be suffixed to any <arch>.<mode> combo, or used standalone,
# or used as global modifiers (affecting all <arch>.<mode> combos).
ACTIONS = {
"all": {"targets": BUILD_TARGETS_ALL, "tests": []},
"tests": {"targets": BUILD_TARGETS_TEST, "tests": []},
"check": {"targets": BUILD_TARGETS_TEST, "tests": DEFAULT_TESTS},
"checkall": {"targets": BUILD_TARGETS_ALL, "tests": ["ALL"]},
}
HELP = """<arch> can be any of: %(arches)s
<mode> can be any of: %(modes)s
<target> can be any of:
- cctest, d8, unittests, v8_fuzzers (build respective binary)
- all (build all binaries)
- tests (build test binaries)
- check (build test binaries, run most tests)
- checkall (build all binaries, run more tests)
""" % {"arches": " ".join(ARCHES),
"modes": " ".join(MODES)}
TESTSUITES_TARGETS = {"benchmarks": "d8",
"cctest": "cctest",
"debugger": "d8",
"fuzzer": "v8_fuzzers",
"intl": "d8",
"message": "d8",
"mjsunit": "d8",
"mozilla": "d8",
"preparser": "d8",
"test262": "d8",
"unittests": "unittests",
"webkit": "d8"}
OUTDIR = "out"
def DetectGoma():
home_goma = os.path.expanduser("~/goma")
if os.path.exists(home_goma):
return home_goma
if os.environ.get("GOMADIR"):
return os.environ.get("GOMADIR")
return None
GOMADIR = DetectGoma()
IS_GOMA_MACHINE = GOMADIR is not None
USE_GOMA = "true" if IS_GOMA_MACHINE else "false"
BUILD_OPTS = BUILD_OPTS_GOMA if IS_GOMA_MACHINE else BUILD_OPTS_DEFAULT
RELEASE_ARGS_TEMPLATE = """\
is_component_build = false
is_debug = false
%s
use_goma = {GOMA}
v8_enable_backtrace = true
v8_enable_disassembler = true
v8_enable_object_print = true
v8_enable_verify_heap = true
""".replace("{GOMA}", USE_GOMA)
DEBUG_ARGS_TEMPLATE = """\
is_component_build = true
is_debug = true
symbol_level = 2
%s
use_goma = {GOMA}
v8_enable_backtrace = true
v8_enable_slow_dchecks = true
v8_optimized_debug = false
""".replace("{GOMA}", USE_GOMA)
OPTDEBUG_ARGS_TEMPLATE = """\
is_component_build = true
is_debug = true
symbol_level = 1
%s
use_goma = {GOMA}
v8_enable_backtrace = true
v8_enable_verify_heap = true
v8_optimized_debug = true
""".replace("{GOMA}", USE_GOMA)
ARGS_TEMPLATES = {
"release": RELEASE_ARGS_TEMPLATE,
"debug": DEBUG_ARGS_TEMPLATE,
"optdebug": OPTDEBUG_ARGS_TEMPLATE
}
def PrintHelpAndExit():
print(__doc__)
print(HELP)
sys.exit(0)
def _Call(cmd, silent=False):
if not silent: print("# %s" % cmd)
return subprocess.call(cmd, shell=True)
def _Which(cmd):
for path in os.environ["PATH"].split(os.pathsep):
if os.path.exists(os.path.join(path, cmd)):
return os.path.join(path, cmd)
return None
def _Write(filename, content):
print("# echo > %s << EOF\n%sEOF" % (filename, content))
with open(filename, "w") as f:
f.write(content)
def _Notify(summary, body):
if _Which('notify-send') is not None:
_Call("notify-send '{}' '{}'".format(summary, body), silent=True)
else:
print("{} - {}".format(summary, body))
def GetPath(arch, mode):
subdir = "%s.%s" % (arch, mode)
return os.path.join(OUTDIR, subdir)
class Config(object):
def __init__(self, arch, mode, targets, tests=[]):
self.arch = arch
self.mode = mode
self.targets = set(targets)
self.tests = set(tests)
def Extend(self, targets, tests=[]):
self.targets.update(targets)
self.tests.update(tests)
def GetTargetCpu(self):
cpu = "x86"
if "64" in self.arch or self.arch == "s390x":
cpu = "x64"
return "target_cpu = \"%s\"" % cpu
def GetV8TargetCpu(self):
if self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
"s390", "s390x"):
return "\nv8_target_cpu = \"%s\"" % self.arch
return ""
def GetGnArgs(self):
template = ARGS_TEMPLATES[self.mode]
arch_specific = self.GetTargetCpu() + self.GetV8TargetCpu()
return template % arch_specific
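  # For example, assuming goma was detected, Config("x64", "release", ["d8"])
  # yields args.gn contents along the lines of:
  #   is_component_build = false
  #   is_debug = false
  #   target_cpu = "x64"
  #   use_goma = true
  #   v8_enable_backtrace = true
  #   ... (remaining RELEASE_ARGS_TEMPLATE settings)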
def Build(self):
path = GetPath(self.arch, self.mode)
args_gn = os.path.join(path, "args.gn")
if not os.path.exists(path):
print("# mkdir -p %s" % path)
os.makedirs(path)
if not os.path.exists(args_gn):
_Write(args_gn, self.GetGnArgs())
code = _Call("gn gen %s" % path)
if code != 0: return code
targets = " ".join(self.targets)
return _Call("ninja -C %s %s %s" % (path, BUILD_OPTS, targets))
def RunTests(self):
if not self.tests: return 0
if "ALL" in self.tests:
tests = ""
else:
tests = " ".join(self.tests)
return _Call("tools/run-tests.py --arch=%s --mode=%s %s" %
(self.arch, self.mode, tests))
def GetTestBinary(argstring):
for suite in TESTSUITES_TARGETS:
if argstring.startswith(suite): return TESTSUITES_TARGETS[suite]
return None
class ArgumentParser(object):
def __init__(self):
self.global_targets = set()
self.global_tests = set()
self.global_actions = set()
self.configs = {}
def PopulateConfigs(self, arches, modes, targets, tests):
for a in arches:
for m in modes:
path = GetPath(a, m)
if path not in self.configs:
self.configs[path] = Config(a, m, targets, tests)
else:
self.configs[path].Extend(targets, tests)
def ProcessGlobalActions(self):
have_configs = len(self.configs) > 0
for action in self.global_actions:
impact = ACTIONS[action]
      if have_configs:
for c in self.configs:
self.configs[c].Extend(**impact)
else:
self.PopulateConfigs(DEFAULT_ARCHES, DEFAULT_MODES, **impact)
def ParseArg(self, argstring):
if argstring in ("-h", "--help", "help"):
PrintHelpAndExit()
arches = []
modes = []
targets = []
actions = []
tests = []
# Specifying a single unit test looks like "unittests/Foo.Bar".
if argstring.startswith("unittests/"):
words = [argstring]
else:
words = argstring.split('.')
if len(words) == 1:
word = words[0]
if word in ACTIONS:
self.global_actions.add(word)
return
if word in TARGETS:
self.global_targets.add(word)
return
maybe_target = GetTestBinary(word)
if maybe_target is not None:
self.global_tests.add(word)
self.global_targets.add(maybe_target)
return
for word in words:
if word in ARCHES:
arches.append(word)
elif word in MODES:
modes.append(word)
elif word in TARGETS:
targets.append(word)
elif word in ACTIONS:
actions.append(word)
else:
print("Didn't understand: %s" % word)
sys.exit(1)
# Process actions.
for action in actions:
impact = ACTIONS[action]
targets += impact["targets"]
tests += impact["tests"]
# Fill in defaults for things that weren't specified.
arches = arches or DEFAULT_ARCHES
modes = modes or DEFAULT_MODES
targets = targets or DEFAULT_TARGETS
# Produce configs.
self.PopulateConfigs(arches, modes, targets, tests)
def ParseArguments(self, argv):
if len(argv) == 0:
PrintHelpAndExit()
for argstring in argv:
self.ParseArg(argstring)
self.ProcessGlobalActions()
for c in self.configs:
self.configs[c].Extend(self.global_targets, self.global_tests)
return self.configs
def Main(argv):
parser = ArgumentParser()
configs = parser.ParseArguments(argv[1:])
return_code = 0
# If we have Goma but it is not running, start it.
if (GOMADIR is not None and
_Call("ps -e | grep compiler_proxy > /dev/null", silent=True) != 0):
_Call("%s/goma_ctl.py ensure_start" % GOMADIR)
for c in configs:
return_code += configs[c].Build()
if return_code == 0:
for c in configs:
return_code += configs[c].RunTests()
if return_code == 0:
_Notify('Done!', 'V8 compilation finished successfully.')
else:
_Notify('Error!', 'V8 compilation finished with errors.')
return return_code
if __name__ == "__main__":
sys.exit(Main(sys.argv))
|
hoho/dosido
|
nodejs/deps/v8/tools/dev/gm.py
|
Python
|
mit
| 9,909
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import logging
import numpy as np
from abc import ABCMeta, abstractmethod
from monty.json import MSONable, MontyDecoder
from functools import lru_cache
from pymatgen.core.structure import Structure, PeriodicSite
from pymatgen.core.composition import Composition
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.defects.utils import kb
__author__ = "Danny Broberg, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "shyamd@lbl.gov"
__status__ = "Development"
__date__ = "Mar 15, 2018"
logger = logging.getLogger(__name__)
class Defect(MSONable, metaclass=ABCMeta):
"""
Abstract class for a single point defect
"""
def __init__(self, structure, defect_site, charge=0., multiplicity=None):
"""
Initializes an abstract defect
Args:
structure: Pymatgen Structure without any defects
defect_site (Site): site for defect within structure
must have same lattice as structure
charge: (int or float) defect charge
default is zero, meaning no change to NELECT after defect is created in the structure
(assuming use_structure_charge=True in vasp input set)
multiplicity (int): multiplicity of defect within
                the supercell; can be supplied by the user. If not
                specified, space group symmetry analysis is
                used to generate the multiplicity.
"""
self._structure = structure
self._charge = int(charge)
self._defect_site = defect_site
if structure.lattice != defect_site.lattice:
raise ValueError("defect_site lattice must be same as structure lattice.")
self._multiplicity = multiplicity if multiplicity else self.get_multiplicity()
@property
def bulk_structure(self):
"""
Returns the structure without any defects.
"""
return self._structure
@property
def charge(self):
"""
Returns the charge of a defect
"""
return self._charge
@property
def site(self):
"""
Returns the defect position as a site object
"""
return self._defect_site
@property
def multiplicity(self):
"""
Returns the multiplicity of a defect site within the structure (needed for concentration analysis)
"""
return self._multiplicity
@property # type: ignore
@abstractmethod
def defect_composition(self):
"""
Returns the defect composition as a Composition object
"""
return
@abstractmethod
def generate_defect_structure(self, supercell=(1, 1, 1)):
"""
        Given the structure and defect_site (and the type of defect), returns a defect_structure decorated with the charge
Args:
supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
"""
return
@property # type: ignore
@abstractmethod
def name(self):
"""
Returns a name for this defect
"""
return
@abstractmethod
def get_multiplicity(self):
"""
Method to determine multiplicity. For non-Interstitial objects, also confirms that defect_site
is a site in bulk_structure.
"""
return
def copy(self):
"""
Convenience method to get a copy of the defect.
Returns:
A copy of the Defect.
"""
return self.from_dict(self.as_dict())
def set_charge(self, new_charge=0.):
"""
Sets the overall charge
Args:
            new_charge (float): new charge to set
"""
self._charge = int(new_charge)
class Vacancy(Defect):
"""
Subclass of Defect to capture essential information for a single Vacancy defect structure.
"""
@property
def defect_composition(self):
temp_comp = self.bulk_structure.composition.as_dict()
temp_comp[str(self.site.specie)] -= 1
return Composition(temp_comp)
def generate_defect_structure(self, supercell=(1, 1, 1)):
"""
Returns Defective Vacancy structure, decorated with charge
Args:
supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
"""
defect_structure = self.bulk_structure.copy()
defect_structure.make_supercell(supercell)
# create a trivial defect structure to find where supercell transformation moves the lattice
struct_for_defect_site = Structure(self.bulk_structure.copy().lattice,
[self.site.specie],
[self.site.frac_coords],
to_unit_cell=True)
struct_for_defect_site.make_supercell(supercell)
defect_site = struct_for_defect_site[0]
poss_deflist = sorted(
defect_structure.get_sites_in_sphere(defect_site.coords, 0.1, include_index=True), key=lambda x: x[1])
defindex = poss_deflist[0][2]
defect_structure.remove_sites([defindex])
defect_structure.set_charge(self.charge)
return defect_structure
def get_multiplicity(self):
"""
Returns the multiplicity of a defect site within the structure (needed for concentration analysis)
and confirms that defect_site is a site in bulk_structure.
"""
sga = SpacegroupAnalyzer(self.bulk_structure)
periodic_struc = sga.get_symmetrized_structure()
poss_deflist = sorted(
periodic_struc.get_sites_in_sphere(self.site.coords, 0.1, include_index=True), key=lambda x: x[1])
if not len(poss_deflist):
raise ValueError("Site {} is not in bulk structure! Cannot create Vacancy object.".format(self.site))
else:
defindex = poss_deflist[0][2]
defect_site = self.bulk_structure[defindex]
equivalent_sites = periodic_struc.find_equivalent_sites(defect_site)
return len(equivalent_sites)
@property
def name(self):
"""
Returns a name for this defect
"""
return "Vac_{}_mult{}".format(self.site.specie, self.multiplicity)
class Substitution(Defect):
"""
Subclass of Defect to capture essential information for a single Substitution defect structure.
"""
@property # type: ignore
@lru_cache(1)
def defect_composition(self):
poss_deflist = sorted(
self.bulk_structure.get_sites_in_sphere(self.site.coords, 0.1, include_index=True), key=lambda x: x[1])
defindex = poss_deflist[0][2]
temp_comp = self.bulk_structure.composition.as_dict()
temp_comp[str(self.site.specie)] += 1
temp_comp[str(self.bulk_structure[defindex].specie)] -= 1
return Composition(temp_comp)
def generate_defect_structure(self, supercell=(1, 1, 1)):
"""
Returns Defective Substitution structure, decorated with charge.
If bulk structure had any site properties, all of these properties are
removed in the resulting defect structure.
Args:
supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
"""
defect_structure = Structure(self.bulk_structure.copy().lattice,
[site.specie for site in self.bulk_structure],
[site.frac_coords for site in self.bulk_structure],
to_unit_cell=True, coords_are_cartesian=False,
site_properties=None) # remove all site_properties
defect_structure.make_supercell(supercell)
# create a trivial defect structure to find where supercell transformation moves the defect
struct_for_defect_site = Structure(self.bulk_structure.copy().lattice,
[self.site.specie],
[self.site.frac_coords],
to_unit_cell=True, coords_are_cartesian=False)
struct_for_defect_site.make_supercell(supercell)
defect_site = struct_for_defect_site[0]
poss_deflist = sorted(
defect_structure.get_sites_in_sphere(defect_site.coords, 0.1, include_index=True), key=lambda x: x[1])
defindex = poss_deflist[0][2]
subsite = defect_structure.pop(defindex)
defect_structure.append(self.site.specie.symbol, subsite.coords, coords_are_cartesian=True,
properties=None)
defect_structure.set_charge(self.charge)
return defect_structure
def get_multiplicity(self):
"""
Returns the multiplicity of a defect site within the structure (needed for concentration analysis)
and confirms that defect_site is a site in bulk_structure.
"""
sga = SpacegroupAnalyzer(self.bulk_structure)
periodic_struc = sga.get_symmetrized_structure()
poss_deflist = sorted(
periodic_struc.get_sites_in_sphere(self.site.coords, 0.1, include_index=True), key=lambda x: x[1])
if not len(poss_deflist):
raise ValueError("Site {} is not in bulk structure! Cannot create Substitution object.".format(self.site))
else:
defindex = poss_deflist[0][2]
defect_site = self.bulk_structure[defindex]
equivalent_sites = periodic_struc.find_equivalent_sites(defect_site)
return len(equivalent_sites)
@property # type: ignore
@lru_cache(1)
def name(self):
"""
Returns a name for this defect
"""
poss_deflist = sorted(
self.bulk_structure.get_sites_in_sphere(self.site.coords, 0.1, include_index=True), key=lambda x: x[1])
defindex = poss_deflist[0][2]
return "Sub_{}_on_{}_mult{}".format(self.site.specie, self.bulk_structure[defindex].specie, self.multiplicity)
class Interstitial(Defect):
"""
Subclass of Defect to capture essential information for a single Interstitial defect structure.
"""
def __init__(self, structure, defect_site, charge=0., site_name='', multiplicity=None):
"""
        Initializes an interstitial defect.
Args:
structure: Pymatgen Structure without any defects
defect_site (Site): the site for the interstitial
charge: (int or float) defect charge
default is zero, meaning no change to NELECT after defect is created in the structure
(assuming use_structure_charge=True in vasp input set)
site_name: allows user to give a unique name to defect, since Wyckoff symbol/multiplicity
is sometimes insufficient to categorize the defect type.
default is no name beyond multiplicity.
multiplicity (int): multiplicity of defect within
                the supercell; can be supplied by the user. If not
                specified, space group symmetry is used
                to generate the interstitial sublattice.
NOTE: multiplicity generation will not work for
interstitial complexes,
where multiplicity may depend on additional
factors (ex. orientation etc.)
If defect is not a complex, then this
process will yield the correct multiplicity,
provided that the defect does not undergo
significant relaxation.
"""
super().__init__(structure=structure, defect_site=defect_site, charge=charge)
self._multiplicity = multiplicity if multiplicity else self.get_multiplicity()
self.site_name = site_name
@property
def defect_composition(self):
temp_comp = self.bulk_structure.composition.as_dict()
temp_comp[str(self.site.specie)] += 1
return Composition(temp_comp)
def generate_defect_structure(self, supercell=(1, 1, 1)):
"""
Returns Defective Interstitial structure, decorated with charge
If bulk structure had any site properties, all of these properties are
removed in the resulting defect structure
Args:
supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
"""
defect_structure = Structure(self.bulk_structure.copy().lattice,
[site.specie for site in self.bulk_structure],
[site.frac_coords for site in self.bulk_structure],
to_unit_cell=True, coords_are_cartesian=False,
site_properties=None) # remove all site_properties
defect_structure.make_supercell(supercell)
# create a trivial defect structure to find where supercell transformation moves the defect site
struct_for_defect_site = Structure(self.bulk_structure.copy().lattice,
[self.site.specie],
[self.site.frac_coords],
to_unit_cell=True, coords_are_cartesian=False)
struct_for_defect_site.make_supercell(supercell)
defect_site = struct_for_defect_site[0]
defect_structure.append(self.site.specie.symbol, defect_site.coords, coords_are_cartesian=True,
properties=None)
defect_structure.set_charge(self.charge)
return defect_structure
def get_multiplicity(self):
"""
Returns the multiplicity of a defect site within the structure (needed for concentration analysis)
"""
try:
d_structure = create_saturated_interstitial_structure(self)
except ValueError:
logger.debug('WARNING! Multiplicity was not able to be calculated adequately '
'for interstitials...setting this to 1 and skipping for now...')
return 1
sga = SpacegroupAnalyzer(d_structure)
periodic_struc = sga.get_symmetrized_structure()
poss_deflist = sorted(
periodic_struc.get_sites_in_sphere(self.site.coords, 0.1, include_index=True),
key=lambda x: x[1])
defindex = poss_deflist[0][2]
equivalent_sites = periodic_struc.find_equivalent_sites(periodic_struc[defindex])
return len(equivalent_sites)
@property
def name(self):
"""
Returns a name for this defect
"""
if self.site_name:
return "Int_{}_{}_mult{}".format(self.site.specie, self.site_name, self.multiplicity)
else:
return "Int_{}_mult{}".format(self.site.specie, self.multiplicity)
def create_saturated_interstitial_structure(interstitial_def, dist_tol=0.1):
"""
    This takes an Interstitial defect object and generates the
    sublattice for it based on the structure's space group.
    Useful for understanding the multiplicity of an interstitial
    defect in thermodynamic analysis.
NOTE: if large relaxation happens to interstitial or
defect involves a complex then there may be additional
degrees of freedom that need to be considered for
the multiplicity.
    Args:
        interstitial_def: the Interstitial defect object whose sublattice is generated
        dist_tol: distance tolerance for the saturated structure,
            allowing for possibly overlapping sites
            while ensuring the space group is maintained
Returns:
Structure object decorated with interstitial site equivalents
"""
sga = SpacegroupAnalyzer(interstitial_def.bulk_structure.copy())
sg_ops = sga.get_symmetry_operations(cartesian=True)
# copy bulk structure to make saturated interstitial structure out of
# artificially lower distance_tolerance to allow for distinct interstitials
# with lower symmetry to be replicated - This is OK because one would never
    # actually use this structure for a practical calculation...
saturated_defect_struct = interstitial_def.bulk_structure.copy()
saturated_defect_struct.DISTANCE_TOLERANCE = dist_tol
for sgo in sg_ops:
new_interstit_coords = sgo.operate(interstitial_def.site.coords[:])
poss_new_site = PeriodicSite(
interstitial_def.site.specie,
new_interstit_coords,
saturated_defect_struct.lattice,
to_unit_cell=True,
coords_are_cartesian=True)
try:
# will raise value error if site already exists in structure
saturated_defect_struct.append(
poss_new_site.specie, poss_new_site.coords,
coords_are_cartesian=True, validate_proximity=True)
except ValueError:
pass
# do final space group analysis to make sure symmetry not lowered by saturating defect structure
saturated_sga = SpacegroupAnalyzer(saturated_defect_struct)
if saturated_sga.get_space_group_number() != sga.get_space_group_number():
raise ValueError("Warning! Interstitial sublattice generation "
"has changed space group symmetry. Recommend "
"reducing dist_tol and trying again...")
return saturated_defect_struct
class DefectEntry(MSONable):
"""
    A lightweight DefectEntry object containing key computed data
    for defect analysis.
"""
def __init__(self, defect, uncorrected_energy, corrections=None, parameters=None, entry_id=None):
"""
Args:
defect:
A Defect object from pymatgen.analysis.defects.core
uncorrected_energy (float): Energy of the defect entry. Usually the difference between
the final calculated energy for the defect supercell - the perfect
supercell energy
corrections (dict):
Dict of corrections for defect formation energy. All values will be summed and
added to the defect formation energy.
parameters (dict): An optional dict of calculation parameters and data to
use with correction schemes
(examples of parameter keys: supercell_size, axis_grid, bulk_planar_averages
defect_planar_averages )
entry_id (obj): An id to uniquely identify this defect, can be any MSONable
type
"""
self.defect = defect
self.uncorrected_energy = uncorrected_energy
self.corrections = corrections if corrections else {}
self.entry_id = entry_id
self.parameters = parameters if parameters else {}
@property
def bulk_structure(self):
return self.defect.bulk_structure
def as_dict(self):
"""
Json-serializable dict representation of DefectEntry
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"defect": self.defect.as_dict(),
"uncorrected_energy": self.uncorrected_energy,
"corrections": self.corrections,
"parameters": self.parameters,
"entry_id": self.entry_id}
return d
@classmethod
def from_dict(cls, d):
"""
Reconstitute a DefectEntry object from a dict representation created using
as_dict().
Args:
d (dict): dict representation of DefectEntry.
Returns:
DefectEntry object
"""
defect = MontyDecoder().process_decoded(d["defect"])
uncorrected_energy = d["uncorrected_energy"]
corrections = d.get("corrections", None)
parameters = d.get("parameters", None)
entry_id = d.get("entry_id", None)
return cls(defect, uncorrected_energy, corrections=corrections,
parameters=parameters, entry_id=entry_id)
@property
def site(self):
return self.defect.site
@property
def multiplicity(self):
return self.defect.multiplicity
@property
def charge(self):
return self.defect.charge
@property
def energy(self):
"""
Returns the *corrected* energy of the entry
"""
return self.uncorrected_energy + np.sum(list(self.corrections.values()))
@property
def name(self):
"""
        Returns the defect name
"""
return self.defect.name
def copy(self):
"""
Convenience method to get a copy of the DefectEntry.
Returns:
A copy of the DefectEntry.
"""
defectentry_dict = self.as_dict()
return DefectEntry.from_dict(defectentry_dict)
def formation_energy(self, chemical_potentials=None, fermi_level=0):
"""
Compute the formation energy for a defect taking into account a given chemical potential and fermi_level
Args:
chemical_potentials (dict): Dictionary of elemental chemical potential values.
Keys are Element objects within the defect structure's composition.
Values are float numbers equal to the atomic chemical potential for that element.
fermi_level (float): Value corresponding to the electron chemical potential.
If "vbm" is supplied in parameters dict, then fermi_level is referenced to the VBM.
If "vbm" is NOT supplied in parameters dict, then fermi_level is referenced to the
calculation's absolute Kohn-Sham potential (and should include the vbm value provided
by a band structure calculation)
Returns:
Formation energy value (float)
"""
chemical_potentials = chemical_potentials if chemical_potentials else {}
chempot_correction = sum([
chem_pot * (self.bulk_structure.composition[el] - self.defect.defect_composition[el])
for el, chem_pot in chemical_potentials.items()
])
formation_energy = self.energy + chempot_correction
if "vbm" in self.parameters:
formation_energy += self.charge * (self.parameters["vbm"] + fermi_level)
else:
formation_energy += self.charge * fermi_level
return formation_energy
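    # Worked example with illustrative numbers: for a q = +1 defect with
    # corrected energy 5.2 eV, a chemical-potential term of -1.0 eV,
    # vbm = 1.5 eV and fermi_level = 0.5 eV, the result is
    # 5.2 - 1.0 + 1 * (1.5 + 0.5) = 6.2 eV.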
def defect_concentration(self, chemical_potentials, temperature=300, fermi_level=0.0):
"""
Compute the defect concentration for a temperature and Fermi level.
Args:
temperature:
the temperature in K
fermi_level:
the fermi level in eV (with respect to the VBM)
Returns:
defects concentration in cm^-3
"""
n = self.multiplicity * 1e24 / self.defect.bulk_structure.volume
conc = n * np.exp(-1.0 * self.formation_energy(chemical_potentials, fermi_level=fermi_level) /
(kb * temperature))
return conc
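    # Note on units: multiplicity * 1e24 / volume converts the per-cell site
    # count (volume in Angstrom^3, 1 Angstrom^3 = 1e-24 cm^3) into a site
    # density in cm^-3, which the Boltzmann factor then scales.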
def __repr__(self):
"""
Human readable string representation of this entry
"""
output = [
"DefectEntry {} - {} - charge {}".format(self.entry_id, self.name, self.charge),
"Energy = {:.4f}".format(self.energy),
"Correction = {:.4f}".format(np.sum(list(self.corrections.values()))),
"Parameters:"
]
for k, v in self.parameters.items():
output.append("\t{} = {}".format(k, v))
return "\n".join(output)
def __str__(self):
return self.__repr__()
class DefectCorrection(MSONable):
"""
A Correction class modeled off the computed entry correction format
"""
@abstractmethod
def get_correction(self, entry):
"""
Returns correction for a single entry.
Args:
entry: A DefectEntry object.
Returns:
A single dictionary with the format
correction_name: energy_correction
Raises:
CompatibilityError if entry is not compatible.
"""
return
def correct_entry(self, entry):
"""
Corrects a single entry.
Args:
entry: A DefectEntry object.
Returns:
            A processed entry.
Raises:
CompatibilityError if entry is not compatible.
"""
        entry.corrections.update(self.get_correction(entry))
return entry
|
fraricci/pymatgen
|
pymatgen/analysis/defects/core.py
|
Python
|
mit
| 24,597
|
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import sys
from unittest import TestCase
import pytest
class TestImports(TestCase):
"""Test Imports - the quickest test to ensure that we haven't
introduced version-incompatible syntax errors."""
def test_toplevel(self):
"""test toplevel import"""
import zmq
def test_core(self):
"""test core imports"""
from zmq import Context
from zmq import Socket
from zmq import Poller
from zmq import Frame
from zmq import constants
from zmq import device, proxy
from zmq import (
zmq_version,
zmq_version_info,
pyzmq_version,
pyzmq_version_info,
)
def test_devices(self):
"""test device imports"""
import zmq.devices
from zmq.devices import basedevice
from zmq.devices import monitoredqueue
from zmq.devices import monitoredqueuedevice
def test_log(self):
"""test log imports"""
import zmq.log
from zmq.log import handlers
def test_eventloop(self):
"""test eventloop imports"""
try:
import tornado
except ImportError:
pytest.skip('requires tornado')
import zmq.eventloop
from zmq.eventloop import ioloop
from zmq.eventloop import zmqstream
def test_utils(self):
"""test util imports"""
import zmq.utils
from zmq.utils import strtypes
from zmq.utils import jsonapi
def test_ssh(self):
"""test ssh imports"""
from zmq.ssh import tunnel
def test_decorators(self):
"""test decorators imports"""
from zmq.decorators import context, socket
|
sserrot/champion_relationships
|
venv/Lib/site-packages/zmq/tests/test_imports.py
|
Python
|
mit
| 1,805
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/item/shared_armor_bone_leggings.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/static/item/shared_armor_bone_leggings.py
|
Python
|
mit
| 444
|
import numpy
from cupy.random import distributions
from cupy.random import generator
from cupy.random import sample as sample_
rand = sample_.rand
randn = sample_.randn
random_sample = sample_.random_sample
random = random_sample
ranf = random_sample
sample = random_sample
bytes = numpy.random.bytes
lognormal = distributions.lognormal
normal = distributions.normal
standard_normal = distributions.standard_normal
uniform = distributions.uniform
RandomState = generator.RandomState
get_random_state = generator.get_random_state
seed = generator.seed
reset_states = generator.reset_states
|
tscohen/chainer
|
cupy/random/__init__.py
|
Python
|
mit
| 594
|
from Jugador import Jugador
from Proyectil import Proyectil
from Enemigo import Enemigo
from Muro import Muro
from Defensa import Defensa
from EntradaEnemigo import EntradaEnemigo
from Estadisticas import Estadisticas
|
DavidRamirez19/Total-Demolition---PYGAME
|
Total Demolition 1.2/Clases/__init__.py
|
Python
|
mit
| 218
|
'''OpenGL extension ARB.texture_buffer_object_rgb32
This module customises the behaviour of the
OpenGL.raw.GL.ARB.texture_buffer_object_rgb32 to provide a more
Python-friendly API
Overview (from the spec)
This extension adds three new buffer texture formats - RGB32F, RGB32I,
and RGB32UI. This partially addresses one of the limitations of buffer
textures in the original EXT_texture_buffer_object extension and in
OpenGL 3.1, which provide no support for three-component formats.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/texture_buffer_object_rgb32.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ARB.texture_buffer_object_rgb32 import *
### END AUTOGENERATED SECTION
|
Universal-Model-Converter/UMC3.0a
|
data/Python/x86/Lib/site-packages/OpenGL/GL/ARB/texture_buffer_object_rgb32.py
|
Python
|
mit
| 861
|
import os
_basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TESTING = True
ADMINS = frozenset(['youremail@yourdomain.com'])
SECRET_KEY = 'SecretKeyForSessionSigning'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(_basedir, 'app.db')
DATABASE_CONNECT_OPTIONS = {}
THREADS_PER_PAGE = 8
CSRF_ENABLED=True
CSRF_SESSION_KEY="somethingimpossibletoguess"
|
open-iot-stack/open-iot-web
|
app/config.py
|
Python
|
mit
| 377
|
import json
import datetime
import demistomock as demisto
from OutOfOfficeListCleanup import main
def execute_command(name, args=None):
if name == 'getList':
get_list_response = [{"user": "admin", "offuntil": "2020-04-20", "addedby": "admin"}]
return [{'Contents': json.dumps(get_list_response)}]
else:
return None
def execute_command_with_ooo_user(name, args=None):
if name == 'getList':
        tomorrow = (datetime.date.today() + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
        get_list_response = [{"user": "admin", "offuntil": tomorrow, "addedby": "admin"}]
return [{'Contents': json.dumps(get_list_response)}]
else:
return None
def test_out_of_office_list_cleanup_list_changed(mocker):
"""
Given:
    - List of OOO (out of office) users, one of whom should be removed.
When:
- running OutOfOfficeListCleanup script.
Then:
- Check if the list is now empty.
"""
mocker.patch.object(demisto, 'args', return_value={'listname': 'OOO List'})
mocker.patch.object(demisto, 'executeCommand', side_effect=execute_command)
mocker.patch.object(demisto, 'results')
main()
results = demisto.results.call_args[0][0]
assert demisto.executeCommand.call_args[0][1]['listData'] == '[]'
assert results == 'The following Users were removed from the Out of Office List OOO List:\nadmin'
def test_out_of_office_list_cleanup_list_not_changed(mocker):
"""
Given:
    - List of OOO (out of office) users, one of whom is on vacation until tomorrow.
When:
- running OutOfOfficeListCleanup script.
Then:
- Check if the list stays the same.
"""
mocker.patch.object(demisto, 'args', return_value={'listname': 'OOO List'})
mocker.patch.object(demisto, 'executeCommand', side_effect=execute_command_with_ooo_user)
mocker.patch.object(demisto, 'results')
main()
results = demisto.results.call_args[0][0]
assert results == 'No users removed from the list OOO List'
|
demisto/content
|
Packs/ShiftManagement/Scripts/OutOfOfficeListCleanup/OutOfOfficeListCleanup_test.py
|
Python
|
mit
| 2,038
|
#!/usr/bin/env python
"""
Parts of this file were taken from the luigi project
(https://github.com/spotify/luigi) as reference.
"""
import os
from setuptools import setup, find_packages
# set version
VERSION = '0.2.6'
def get_requirements():
"""Loads contents from requirements.txt."""
requirements = []
with open('requirements.txt') as f:
data = f.read().splitlines()
if any(data):
data = data[1:]
requirements = [item.split(";")[0].split(" ")[0] for item in data]
return requirements
LONG_DESCRIPTION = ''
readme_note = """\
.. note::
For the latest source, discussion, etc, please visit the
`GitHub repository <https://github.com/dbcollection/dbcollection>`_\n\n
"""
with open('README.rst') as fobj:
LONG_DESCRIPTION = readme_note + fobj.read()
setup(
name='dbcollection',
version=VERSION,
author='M. Farrajota',
url='https://github.com/dbcollection/dbcollection',
download_url='https://github.com/dbcollection/dbcollection/archive/' + VERSION + '.tar.gz',
description="A collection of popular datasets for deep learning.",
long_description=LONG_DESCRIPTION,
license='MIT License',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
platforms='any',
packages=find_packages(exclude=['docs',
'notebooks',
'ci',
'conda-recipe',
'tests']),
install_requires=get_requirements(),
include_package_data=True,
)
|
dbcollection/dbcollection
|
setup.py
|
Python
|
mit
| 2,317
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# calculate1.py
# A simple and functional calculator created with Tkinter.
# Exercise 10.6: Python How to Program, 1st Ed. by Deitel, P. & Deitel, H.
#
# Author: Billy Wilson Arante
# Created: 2016/10/31 EDT
# Modified: 2016/11/02 EDT
from Tkinter import *
class MainApp():
def __init__(self, master):
"""Constructor"""
frame = Frame(master)
# Auto-resizeable buttons
frame.grid(row=0, column=0, sticky=W+E+N+S)
frame.master.title("Tkinter Buttons 1")
frame.master.geometry("300x300")
self.buttons = []
self.btn_names = [
"7", "8", "9", "/",
"4", "5", "6", "*",
"1", "2", "3", "-",
"0", ".", "=", "+"
]
        # Auto-resizeable buttons: give every grid row and column equal weight
        for i in range(1, 5):
            Grid.rowconfigure(frame, i, weight=1)
        for j in range(4):
            Grid.columnconfigure(frame, j, weight=1)
self.txt_display = Text(frame, height=1, width=40,
font="Arial 24 bold")
self.txt_display.grid(row=0, column=0, columnspan=4, sticky=W+E+N+S)
r = 1
c = 0
for i in range(16):
btn_name = self.btn_names[i]
self.buttons.append(Button(frame, text=btn_name,
command=lambda btn_name=btn_name: self.gotowork(btn_name)))
self.buttons[i].grid(row=r, column=c, sticky=W+E+N+S)
c = c + 1 # Move to next column
if c % 4 == 0: # Are we on the 4th column?
r = r + 1 # Move to next row then
c = 0 # and go back to 1st column
def gotowork(self, text):
"""Functionalities of buttons"""
if text != "=":
self.display_me(text)
else:
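            # Note: eval() executes whatever expression is in the display;
            # acceptable for this exercise, but unsafe for untrusted input.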
result = eval(self.txt_display.get(1.0, END))
self.txt_display.delete(1.0, END)
#self.display_me(eval(self.txt_display.get(1.0, END)))
self.display_me(result)
def display_me(self, text):
return self.txt_display.insert(INSERT, text)
def main():
"""Main"""
root = Tk()
# Auto-resizeable buttons
Grid.rowconfigure(root, 0, weight=1)
Grid.columnconfigure(root, 0, weight=1)
new_app = MainApp(root)
root.mainloop()
if __name__ == "__main__":
# Executes only if run as script
main()
|
arantebillywilson/python-snippets
|
py2/htp/ex10/calculate1.py
|
Python
|
mit
| 2,474
|
# The MIT License (MIT)
#
# Copyright (c) 2019 Looker Data Sciences, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from looker_sdk.rtl import auth_token
def test_defaults_with_empty_token():
"""Confirm the defaults when initializing AuthToken without arguments."""
actual = auth_token.AuthToken()
assert actual.access_token == ""
assert actual.token_type == ""
assert actual.expires_in == 0
assert actual.is_active is False
def test_is_active_with_full_token():
"""Confirm active token when AuthToken is initialized properly."""
actual = auth_token.AuthToken(
auth_token.AccessToken(
access_token="all-access", token_type="backstage", expires_in=3600
),
)
assert actual.access_token == "all-access"
assert actual.token_type == "backstage"
assert actual.expires_in == 3600
assert actual.is_active is True
def test_lag_time_is_used():
"""Confirm active token when expiration is > lag time."""
actual = auth_token.AuthToken(
auth_token.AccessToken(
access_token="all-access", token_type="backstage", expires_in=9
),
)
assert actual.access_token == "all-access"
assert actual.token_type == "backstage"
assert actual.expires_in == 9
assert actual.is_active is False
actual = auth_token.AuthToken(
auth_token.AccessToken(
access_token="all-access", token_type="backstage", expires_in=11
),
)
assert actual.expires_in == 11
assert actual.is_active is True
|
looker-open-source/sdk-codegen
|
python/tests/rtl/test_auth_token.py
|
Python
|
mit
| 2,557
|
import os
import xbox
class TestBase(object):
def setup_method(self, method):
# reset the client instance
xbox.client = xbox.Client()
# reset auth creds
os.environ.pop('MS_LOGIN', None)
os.environ.pop('MS_PASSWD', None)
|
joealcorn/xbox
|
xbox/tests/__init__.py
|
Python
|
mit
| 269
|
import os
def is_development():
mode = os.getenv("MODE", "prod")
return mode == "dev"
DEBUG = is_development()
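# Example: launching the app with MODE=dev in the environment makes
# is_development() return True, which enables DEBUG.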
|
andrewrothstein/docker-coreos-ipxeserver
|
config.py
|
Python
|
mit
| 116
|
# coding: utf-8
import datetime
from quokka.core.models.channel import Channel
from quokka.core.models.config import Config
from quokka.core.models.content import Content, Link
def configure(app):
@app.context_processor
def inject():
now = datetime.datetime.now()
return dict(
channels=Channel.objects(published=True,
available_at__lte=now,
parent=None),
Config=Config,
Content=Content,
Channel=Channel,
homepage=Channel.get_homepage(),
Link=Link
)
|
alexandre/quokka
|
quokka/ext/context_processors.py
|
Python
|
mit
| 631
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
# VEX.L0.66.0F3A.W1 30 /r
# KSHIFTRW k1, k2, imm8
myVEX = VEX('VEX.L0.66.0F3A.W1')
myVEX.R = 1
Buffer = bytes.fromhex('{}30c911'.format(myVEX.c4()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x30)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'kshiftrw')
assert_equal(myDisasm.repr(), 'kshiftrw k1, k1, 11h')
# VEX.L0.66.0F3A.W0 30 /r
# KSHIFTRB k1, k2, imm8
myVEX = VEX('VEX.L0.66.0F3A.W0')
myVEX.R = 1
Buffer = bytes.fromhex('{}30e011'.format(myVEX.c4()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x30)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'kshiftrb')
assert_equal(myDisasm.repr(), 'kshiftrb k4, k0, 11h')
|
0vercl0k/rp
|
src/third_party/beaengine/tests/0f3a30.py
|
Python
|
mit
| 1,731
|
#!/usr/bin/env python
#
# GrovePi Example for using the Grove - I2C ADC(http://www.seeedstudio.com/depot/Grove-I2C-ADC-p-1580.html)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://forum.dexterindustries.com/c/grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import grove_i2c_adc
import time
# You can initialize with a different address too: grove_i2c_adc.ADC(address=0x56)
adc= grove_i2c_adc.ADC()
while True:
#Print the 12 bit value from the I2C ADC
print(adc.adc_read())
time.sleep(.5)
|
karan259/GrovePi
|
Software/Python/grove_i2c_adc/i2c_adc_example.py
|
Python
|
mit
| 1,831
|
###########################################################
#
# Copyright (c) 2010, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['ProjectTemplateInstallerCmd', 'ProjectTemplateCreatorCmd', 'ProjectTemplateCheckCmd']
import tacticenv
from pyasm.common import Xml, Environment, TacticException, Common
from pyasm.search import Search, SearchType
from pyasm.biz import Project
from pyasm.command import Command
import os, shutil
import re
class ProjectDeleteCmd(Command):
def execute(self):
# drop database pg
# DATA
# delete from file where project_code = 'pg'
# delete from snapshot where project_code = 'pg'
# delete from task where project_code = 'pg'
# delete from work_hour where project_code = 'pg'
# delete from note where project_code = 'pg'
# delete from wdg_settings where project_code = 'pg'
# configuration
# delete from schema where code = 'pg'
# delete from pipeline where project_code = 'pg'
# delete from search_object where namespace = 'pg'
pass
class ProjectTemplateCreatorCmd(Command):
def execute(self):
self.base_dir = self.kwargs.get("base_dir")
if not self.base_dir:
self.base_dir = Environment.get_template_dir()
self.project_code = self.kwargs.get("project_code")
if not self.project_code:
self.project_code = Project.get_project_code()
assert self.project_code
# project code can be called anything, and we want to have a _template suffix for the template code
#self.plugin_code = "%s_template" % self.project_code
#self.template_project_code = re.sub( '_template$', '', self.plugin_code)
self.template_project_code = self.project_code
self.project = Project.get_by_code(self.project_code)
if not self.project:
raise TacticException('This project [%s] does not exist'%self.project_code)
self.project_type = self.project.get_value("type")
if not self.project_type:
self.project_type = self.project_code
Project.set_project(self.project_code)
self.export_template()
def export_template(self):
xml = Xml()
self.xml = xml
xml.create_doc("manifest")
manifest_node = xml.get_root_node()
# Old implementation. Code is now on the data node
xml.set_attribute(manifest_node, "code", self.template_project_code)
# dump the notification entries
data_node = xml.create_element("data")
xml.append_child(manifest_node, data_node)
code_node = xml.create_element("code")
xml.append_child(data_node, code_node)
xml.set_node_value(code_node, self.template_project_code)
version = self.kwargs.get("version") or ""
version_node = xml.create_element("version")
xml.append_child(data_node, version_node)
xml.set_node_value(version_node, version)
# dump the project entry
data_node = xml.create_element("sobject")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "expression", "@SOBJECT(sthpw/project['code','%s'])" % self.project_code)
xml.set_attribute(data_node, "search_type", "sthpw/project")
xml.set_attribute(data_node, "unique", "true")
# dump the project_type entry
data_node = xml.create_element("sobject")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "expression", "@SOBJECT(sthpw/project['code','%s'].sthpw/project_type)" % self.project_code)
xml.set_attribute(data_node, "search_type", "sthpw/project_type")
xml.set_attribute(data_node, "unique", "true")
# dump the schema entry
data_node = xml.create_element("sobject")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "expression", "@SOBJECT(sthpw/schema['code','%s'])" % self.project_code)
xml.set_attribute(data_node, "search_type", "sthpw/schema")
xml.set_attribute(data_node, "unique", "true")
# find the project template search types
namespace = self.project_type
if not namespace or namespace == "default":
namespace = self.project_code
project_search_types = Search.eval("@GET(sthpw/search_object['namespace','%s'].search_type)" % namespace)
#project_types = Search.eval("@GET(sthpw/search_object['namespace','%s'].search_type)" % self.project_code)
# just dump the definition for data
for search_type in project_search_types:
data_node = xml.create_element("search_type")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "code", search_type)
search_types = [
"config/custom_script",
"config/widget_config",
"config/naming",
"config/client_trigger",
"config/process",
"config/trigger",
"config/url",
"config/prod_setting",
#"config/ingest_rule",
#"config/ingest_session",
]
for search_type in search_types:
data_node = xml.create_element("sobject")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "search_type", search_type)
# find the currval
st_obj = SearchType.get(search_type)
# have to call nextval() to initiate this sequence in the session in psql since Postgres 8.1
seq_id = SearchType.sequence_nextval(search_type)
seq_id = SearchType.sequence_currval(search_type)
seq_id -= 1
if seq_id > 0:
SearchType.sequence_setval(search_type, seq_id)
xml.set_attribute(data_node, "seq_max", seq_id)
#xml.set_attribute(data_node, "path", "data.spt")
# dump the login_groups entries
data_node = xml.create_element("sobject")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "expression", "@SOBJECT(sthpw/login_group['project_code','%s'])" % self.project_code)
xml.set_attribute(data_node, "search_type", "sthpw/login_group")
xml.set_attribute(data_node, "unique", "true")
# dump the pipelines entries
data_node = xml.create_element("sobject")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "expression", "@SOBJECT(sthpw/pipeline['project_code','%s'])" % self.project_code)
xml.set_attribute(data_node, "search_type", "sthpw/pipeline")
xml.set_attribute(data_node, "unique", "true")
# dump the notification entries
data_node = xml.create_element("sobject")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "expression", "@SOBJECT(sthpw/notification['project_code','%s'])" % self.project_code)
xml.set_attribute(data_node, "search_type", "sthpw/notification")
from .plugin import PluginCreator
creator = PluginCreator( base_dir=self.base_dir, manifest=xml.to_string(), force=True, version=version )
creator.execute()
self.zip_path = creator.get_zip_path()
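        # The manifest assembled above looks roughly like (abridged; the
        # project code "my_project" is illustrative):
        #   <manifest code="my_project">
        #     <data><code>my_project</code><version>...</version></data>
        #     <sobject search_type="sthpw/project" unique="true"
        #              expression="@SOBJECT(sthpw/project['code','my_project'])"/>
        #     <search_type code="..."/>
        #     ...
        #   </manifest>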
def get_zip_path(self):
return self.zip_path
class ProjectTemplateInstallerCmd(Command):
    '''Install a template project through a zip file or the unzipped folder'''
def execute(self):
self.new_project = self.kwargs.get("new_project")
if self.new_project in [False, 'false']:
self.new_project = False
else:
self.new_project = True
self.mode = self.kwargs.get("mode")
if not self.mode:
self.mode = 'copy'
# if a path is specified, then handle this
self.path = self.kwargs.get("path")
self.project_code = self.kwargs.get("project_code")
self.is_template = self.kwargs.get("is_template")
# check to see if the project already exists
# FIXME: how to determine which project code? pass it in even with path kwarg for now
project = Project.get_by_code(self.project_code)
if self.new_project and project:
raise TacticException("Project [%s] already exists in this installation. Exiting..." % self.project_code)
# handle plugins with manifest
if self.path and os.path.isdir(self.path):
manifest_path = "%s/manifest.xml" % self.path
if not os.path.exists(manifest_path):
raise Exception("Manifest does not exist: ", manifest_path)
elif self.path:
self.handle_path(self.path)
assert self.project_code
# determines which template to use
self.template_code = self.kwargs.get("template_code")
if not self.template_code:
self.template_code = self.project_code
        # template code can end with _template or not, depending on whether it comes from a zip file
#if self.template_code.endswith("_template"):
# self.plugin_code = self.template_code
#else:
# self.plugin_code = "%s_template" % self.template_code
#self.template_project_code = re.sub( '_template$', '', self.template_code)
self.template_project_code = self.template_code
self.force_database = self.kwargs.get("force_database")
try:
self.import_template()
except Exception as e:
#project = Project.get_by_code(self.project_code)
#if project:
# print("Deleting project entry: ", project.get_code())
# project.delete()
raise
def get_template_dir(self, template_dir):
        '''Check whether the template dir (or its _template variant) exists and return the one that does'''
if not os.path.exists(template_dir):
# for backward compatibility
template_dir2 = '%s_template' %template_dir
if not os.path.exists(template_dir2):
return template_dir
else:
return template_dir2
return template_dir
def import_template(self):
if self.path:
base_dir = os.path.dirname(self.path)
else:
base_dir = Environment.get_template_dir()
version = self.kwargs.get("version")
if version:
template_dir = "%s/%s-%s" % (base_dir, self.template_code, version)
else:
template_dir = "%s/%s" % (base_dir, self.template_code)
template_dir = self.get_template_dir(template_dir)
# if the directory does not exist then look for a zip file
use_zip = False
if not os.path.exists(template_dir):
template_zip = "%s.zip" % (template_dir)
if os.path.exists(template_zip):
use_zip = True
else:
hint = "Please check if you have created the Template already using the Update button in the Template Project view."
if version:
raise TacticException("No template found for [%s] version [%s]. %s" % (self.template_code, version, hint))
else:
raise TacticException("No template found for [%s]. %s" % (self.template_code, hint))
# check to see if the database exists in the default
# database implementation
from pyasm.search import DbContainer, DatabaseImpl
impl = DatabaseImpl.get()
exists = impl.database_exists(self.project_code)
# if the database already exists, then raise an exception
if exists and self.new_project:
msg = "WARNING: Database [%s] already exists" % self.project_code
print(msg)
raise TacticException(msg)
# this is the overriding factor:
if self.is_template == True:
title = Common.get_display_title(self.project_code)
elif self.is_template == False:
title = Common.get_display_title(self.project_code)
elif self.is_template == None:
# these 2 is for old usage using the command line script create_template.py
if self.template_project_code != self.project_code:
self.is_template = False
title = Common.get_display_title(self.project_code)
else:
self.is_template = True
title = Common.get_display_title(self.template_project_code)
# create a new project if this was desired
if self.new_project == True:
from .create_project_cmd import CreateProjectCmd
project_image_path = self.kwargs.get("project_image_path")
# the project_type will get updated properly by the PluginInstaller
# but that break the ties to the project_type entry created though,
# which is ok
creator = CreateProjectCmd(
project_code=self.project_code,
project_title=title,
project_type=self.template_project_code,
is_template=self.is_template,
use_default_side_bar=False,
project_image_path=project_image_path
)
creator.execute()
# set the project
Project.set_project(self.project_code)
# import from a plugin
if use_zip:
kwargs = {
'zip_path': template_zip,
'code': self.project_code
}
else:
# is there a manifest in this folder
manifest_path = "%s/manifest.xml" % template_dir
if not os.path.exists(manifest_path):
raise Exception("No manifest file found")
kwargs = {
'plugin_dir': template_dir,
'register': True
}
kwargs['filter_line_handler'] = self.filter_line_handler
kwargs['filter_sobject_handler'] = self.filter_sobject_handler
from .plugin import PluginCreator, PluginInstaller
installer = PluginInstaller( **kwargs )
installer.execute()
def handle_path(self, src_path):
src_path = src_path.replace("\\", "/")
# upload folder
basename = os.path.basename(src_path)
        if self.mode == 'copy':
target_path = src_path
target_dir = os.path.dirname(target_path)
else:
target_dir = Environment.get_upload_dir()
target_path = "%s/%s" % (target_dir, basename)
base_dir = Environment.get_template_dir()
template_dir = "%s/%s" % (base_dir, self.project_code)
if os.path.exists(template_dir):
shutil.rmtree(template_dir)
#raise TacticException("Template is already installed at [%s]" %template_dir)
# unzip the file
from pyasm.common import ZipUtil
paths = ZipUtil.extract(target_path)
        # verify that the paths extracted are the expected ones
rootname, ext = os.path.splitext(basename)
# check if it unzips at the templates folder directly
unzip_at_template_dir = False
# move the plugin zip file to the appropriate folder
if self.mode == 'copy':
# if they manually drop the zip file already here, skip
if target_dir != base_dir:
shutil.copy(target_path, base_dir)
else:
unzip_at_template_dir = True
else:
shutil.move(target_path, base_dir)
# move unzipped files into the plugin area
# remove any version info, only allow 1 particular version installed for now
import re
        rootname = re.sub(r'(.*)(-)(\d.*)', r'\1', rootname)
unzip_path = "%s/%s" % (target_dir, rootname)
dest_dir = '%s/%s'%(base_dir, rootname)
if not unzip_at_template_dir and os.path.exists(dest_dir):
shutil.rmtree(dest_dir)
shutil.move(unzip_path, dest_dir)
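    # Illustration of the version stripping above (hypothetical folder names):
    # the regex drops a trailing '-<digits...>' suffix so that only a single
    # version of an unzipped template is kept on disk.
    #
    #   re.sub(r'(.*)(-)(\d.*)', r'\1', 'my_template-1.2.3')  ->  'my_template'
    #   re.sub(r'(.*)(-)(\d.*)', r'\1', 'my_template')        ->  'my_template'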
def filter_sobject_handler(self, sobject):
search_type = sobject.get_base_search_type()
if search_type == 'sthpw/project':
project = Project.get_by_code(self.project_code)
if project:
sobject.set_value("id", project.get_id() )
# change the code of the project
sobject.set_value("code", self.project_code)
title = Common.get_display_title(self.project_code)
sobject.set_value("title", title)
if self.is_template:
sobject.set_value("is_template", True)
else:
sobject.set_value("is_template", False)
elif search_type == 'sthpw/schema':
sobject.set_value("code", self.project_code)
elif search_type == 'sthpw/notification':
sobject.set_value("project_code", self.project_code)
sobject.set_value("code", "")
elif search_type in ['sthpw/pipeline']:
sobject.set_value("project_code", self.project_code)
if self.template_project_code != self.project_code:
# get the old code
old_code = sobject.get_code()
if old_code.startswith("%s/" % self.template_project_code):
new_code = old_code.replace("%s/" % self.template_project_code, "%s/" % self.project_code)
else:
new_code = "%s/%s" % (self.project_code, old_code)
sobject.set_value("code", new_code)
elif search_type in ['sthpw/login_group']:
sobject.set_value("project_code", self.project_code)
if self.template_project_code != self.project_code:
# get the old login_group
for column in ['login_group', 'code']:
old_code = sobject.get_value(column)
if old_code.startswith("%s/" % self.template_project_code):
new_code = old_code.replace("%s/" % self.template_project_code, "%s/" % self.project_code)
else:
new_code = "%s/%s" % (self.project_code, old_code)
sobject.set_value(column, new_code)
# go through the access rules and replace project
access_rules = sobject.get_xml_value("access_rules")
nodes = access_rules.get_nodes("rules/rule")
for node in nodes:
project_code = Xml.get_attribute(node, "project")
if project_code and project_code != "*" and project_code == self.template_project_code:
Xml.set_attribute(node, "project", self.project_code)
sobject.set_value("access_rules", access_rules.to_string())
return sobject
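    # Illustration of the code remapping above, assuming a template project
    # 'tmpl' being installed as project 'proj' (hypothetical codes):
    #
    #   'tmpl/approval'  ->  'proj/approval'   (template prefix replaced)
    #   'approval'       ->  'proj/approval'   (project prefix added)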
def filter_line_handler(self, path, line):
'''NOT used now'''
return line
# this is only called if the project code is different from the
# template code
file_name = os.path.basename(path)
if file_name in ['sthpw_project.spt']:
# change codes to project code
if line.startswith('''insert.set_value('code','''):
line = '''insert.set_value('code', """%s""")\n''' % self.project_code
elif line.startswith('''insert.set_value('title','''):
title = Common.get_display_title(self.project_code)
line = '''insert.set_value('title', """%s""")\n''' % title
elif line.startswith('''insert.set_value('is_template','''):
if self.is_template:
line = '''insert.set_value('is_template', """true""")\n'''
else:
line = '''insert.set_value('is_template', """false""")\n'''
elif file_name in ['sthpw_schema.spt']:
if line.startswith('''insert.set_value('code','''):
line = '''insert.set_value('code', """%s""")\n''' % self.project_code
elif file_name in ['sthpw_pipeline.spt']:
if line.startswith('''insert.set_value('project_code','''):
line = '''insert.set_value('project_code', """%s""")\n''' % self.project_code
return line
class ProjectTemplateUpdaterCmd(Command):
def execute(self):
# force every search type and sobject to be unique
manifest_xml = ""
class ProjectTemplateCheckCmd(Command):
    '''This will check the integrity of a project to see if it is suitable
    for export as a distributable project template'''
def execute(self):
self.project_code = self.kwargs.get("project_code")
self.prefix = self.kwargs.get("prefix")
self.project = Project.get_by_code(self.project_code)
self.project_type = self.project.get_value("type")
self.check_project()
self.check_search_type()
def check_project(self):
# check that the project code starts with the prefix
if not self.project.get_code().startswith("%s_" % self.prefix):
raise TacticException("Project code [%s] does not start with prefix [%s]" % (self.project_code, self.prefix) )
# check that the project type is the same as the project code
        if self.project_code != self.project_type:
raise TacticException("Project code [%s] does not match the project_type [%s]" % (self.project_code, self.project_type) )
def check_search_type(self):
# all search objects in the namespace of <project_code> should
# start with the prefix
        search = Search("sthpw/search_type")
search.add_filter("namespace", self.project_type)
search_types = search.get_sobjects()
for search_type in search_types:
            if not search_type.get_value("search_type").startswith("%s_" % self.prefix):
                raise TacticException( "sType [%s] does not start with prefix [%s]" % (search_type.get_value("search_type"), self.prefix) )
if __name__ == '__main__':
from pyasm.security import Batch
Batch(project_code='admin')
#cmd = ProjectTemplateCreatorCmd(project_code='pg')
#Command.execute_cmd(cmd)
cmd = ProjectTemplateInstallerCmd(project_code='scrum')
Command.execute_cmd(cmd)
#cmd = ProjectTemplateCheckCmd(project_code='di', prefix='di')
#Command.execute_cmd(cmd)
|
Southpaw-TACTIC/TACTIC
|
src/tactic/command/project_template_cmd.py
|
Python
|
epl-1.0
| 22,701
|
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2005 Donald N. Allingham
# Copyright (C) 2008 Stefan Siegel
# Copyright (C) 2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Original version written by Alex Roitman, largely based on relationship.py
# by Don Allingham and on valuable input from Dr. Martin Senftleben
# Modified by Joachim Breitner to not use „Großcousine“, in accordance with
# http://de.wikipedia.org/wiki/Verwandtschaftsbeziehung
# Rewritten from scratch for Gramps 3 by Stefan Siegel,
# loosely based on rel_fr.py
#
# some changes for Austrian terms:
# siebte -> siebente, Adoptivkind/-eltern -> Wahlkind/ -eltern, Schwippschwager -> Schwiegerschwager
"""
German-Austrian specific classes for relationships.
"""
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
import re
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import Person
import gramps.gen.relationship
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
_ordinal = [ 'nullte',
'erste', 'zweite', 'dritte', 'vierte', 'fünfte', 'sechste',
'siebente', 'achte', 'neunte', 'zehnte', 'elfte', 'zwölfte',
]
_removed = [ '',
'', 'Groß', 'Urgroß',
'Alt', 'Altgroß', 'Alturgroß',
'Ober', 'Obergroß', 'Oberurgroß',
'Stamm', 'Stammgroß', 'Stammurgroß',
'Ahnen', 'Ahnengroß', 'Ahnenurgroß',
'Urahnen', 'Urahnengroß', 'Urahnenurgroß',
'Erz', 'Erzgroß', 'Erzurgroß',
'Erzahnen', 'Erzahnengroß', 'Erzahnenurgroß',
]
_lineal_up = {
'many': '%(p)sEltern%(s)s',
'unknown': '%(p)sElter%(s)s', # "Elter" sounds strange but is correct
'male': '%(p)sVater%(s)s',
'female': '%(p)sMutter%(s)s',
}
_lineal_down = {
'many': '%(p)sKinder%(s)s',
'unknown': '%(p)sKind%(s)s',
'male': '%(p)sSohn%(s)s',
'female': '%(p)sTochter%(s)s',
}
_collateral_up = {
'many': '%(p)sOnkel und %(p)sTanten%(s)s',
'unknown': '%(p)sOnkel oder %(p)sTante%(s)s',
'male': '%(p)sOnkel%(s)s',
'female': '%(p)sTante%(s)s',
}
_collateral_down = {
'many': '%(p)sNeffen und %(p)sNichten%(s)s',
'unknown': '%(p)sNeffe oder %(p)sNichte%(s)s',
'male': '%(p)sNeffe%(s)s',
'female': '%(p)sNichte%(s)s',
}
_collateral_same = {
'many': '%(p)sCousins und %(p)sCousinen%(s)s',
'unknown': '%(p)sCousin oder %(p)sCousine%(s)s',
'male': '%(p)sCousin%(s)s',
'female': '%(p)sCousine%(s)s',
}
_collateral_sib = {
'many': '%(p)sGeschwister%(s)s',
'unknown': '%(p)sGeschwisterkind%(s)s',
'male': '%(p)sBruder%(s)s',
'female': '%(p)sSchwester%(s)s',
}
_schwager = {
'many': '%(p)sSchwager%(s)s',
'unknown': '%(p)sSchwager%(s)s',
'male': '%(p)sSchwager%(s)s',
'female': '%(p)sSchwägerin%(s)s',
}
_schwiegerschwager = {
'many': '%(p)sSchwiegerschwager%(s)s',
'unknown': '%(p)sSchwiegerschwager%(s)s',
'male': '%(p)sSchwiegerschwager%(s)s',
'female': '%(p)sSchwiegerschwägerin%(s)s',
}
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class RelationshipCalculator(gramps.gen.relationship.RelationshipCalculator):
"""
RelationshipCalculator Class
"""
def __init__(self):
gramps.gen.relationship.RelationshipCalculator.__init__(self)
def _make_roman(self, num):
roman = ''
for v, r in [(1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
( 100, 'C'), ( 90, 'XC'), ( 50, 'L'), ( 40, 'XL'),
( 10, 'X'), ( 9, 'IX'), ( 5, 'V'), ( 4, 'IV'),
( 1, 'I')]:
            while num >= v:
num -= v
roman += r
return roman
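    # Illustration of the subtractive construction above (used for very
    # distant generations in _removed_text below):
    #
    #   self._make_roman(18)    ->  'XVIII'
    #   self._make_roman(1904)  ->  'MCMIV'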
def _fix_caps(self, string):
return re.sub(r'(?<=[^\s(/A-Z])[A-Z]', lambda m: m.group().lower(), string)
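    # Illustration: the lookbehind only lowercases an upper-case letter glued
    # to a preceding word character, so composed prefixes collapse into one
    # German compound while word-initial capitals survive:
    #
    #   self._fix_caps('GroßOnkel dritten Grades')  ->  'Großonkel dritten Grades'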
def _removed_text(self, degree, removed):
if (degree, removed) == (0, -2):
return 'Enkel'
elif (degree, removed) == (0, -3):
return 'Urenkel'
removed = abs(removed)
if removed < len(_removed):
return _removed[removed]
else:
return '(%s)' % self._make_roman(removed-2)
def _degree_text(self, degree, removed):
if removed == 0:
degree -= 1 # a cousin has same degree as his parent (uncle/aunt)
if degree <= 1:
return ''
if degree < len(_ordinal):
return ' %sn Grades' % _ordinal[degree]
else:
return ' %d. Grades' % degree
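    # Sketch of how these helpers combine in _get_relationship_string below,
    # assuming the usual convention that Ga/Gb count generations up to the
    # common ancestor:
    #
    #   great-uncle:   Ga=3, Gb=1 -> degree 1, removed 2
    #                  _removed_text gives 'Groß', _collateral_up gives 'Onkel'
    #                  -> 'Großonkel'
    #   second cousin: Ga=3, Gb=3 -> degree 3, removed 0
    #                  _collateral_same gives 'Cousin', _degree_text gives
    #                  ' zweiten Grades' -> 'Cousin zweiten Grades'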
def _gender_convert(self, gender):
if gender == Person.MALE:
return 'male'
elif gender == Person.FEMALE:
return 'female'
else:
return 'unknown'
def _get_relationship_string(self, Ga, Gb, gender,
reltocommon_a='', reltocommon_b='',
only_birth=True,
in_law_a=False, in_law_b=False):
common_ancestor_count = 0
if reltocommon_a == '':
reltocommon_a = self.REL_FAM_BIRTH
if reltocommon_b == '':
reltocommon_b = self.REL_FAM_BIRTH
if reltocommon_a[-1] in [self.REL_MOTHER, self.REL_FAM_BIRTH,
self.REL_FAM_BIRTH_MOTH_ONLY] and \
reltocommon_b[-1] in [self.REL_MOTHER, self.REL_FAM_BIRTH,
self.REL_FAM_BIRTH_MOTH_ONLY]:
common_ancestor_count += 1 # same female ancestor
if reltocommon_a[-1] in [self.REL_FATHER, self.REL_FAM_BIRTH,
self.REL_FAM_BIRTH_FATH_ONLY] and \
reltocommon_b[-1] in [self.REL_FATHER, self.REL_FAM_BIRTH,
self.REL_FAM_BIRTH_FATH_ONLY]:
common_ancestor_count += 1 # same male ancestor
degree = min(Ga, Gb)
removed = Ga-Gb
if degree == 0 and removed < 0:
# for descendants the "in-law" logic is reversed
(in_law_a, in_law_b) = (in_law_b, in_law_a)
rel_str = ''
pre = ''
post = ''
if in_law_b and degree == 0:
pre += 'Stief'
elif (not only_birth) or common_ancestor_count == 0:
pre += 'Stief-/Wahl'
if in_law_a and (degree, removed) != (1, 0):
# A "Schwiegerbruder" really is a "Schwager" (handled later)
pre += 'Schwieger'
if degree != 0 and common_ancestor_count == 1:
pre += 'Halb'
pre += self._removed_text(degree, removed)
post += self._degree_text(degree, removed)
if in_law_b and degree != 0 and (degree, removed) != (1, 0):
# A "Bruder (angeheiratet)" also is a "Schwager" (handled later)
post += ' (angeheiratet)'
if degree == 0:
# lineal relationship
if removed > 0:
rel_str = _lineal_up[gender]
elif removed < 0:
rel_str = _lineal_down[gender]
elif in_law_a or in_law_b:
rel_str = 'Partner'
else:
rel_str = 'Proband'
else:
# collateral relationship
if removed > 0:
rel_str = _collateral_up[gender]
elif removed < 0:
rel_str = _collateral_down[gender]
elif degree == 1:
if in_law_a or in_law_b:
if in_law_a and in_law_b:
rel_str = _schwiegerschwager[gender]
else:
rel_str = _schwager[gender]
else:
rel_str = _collateral_sib[gender]
else:
rel_str = _collateral_same[gender]
return self._fix_caps(rel_str % {'p': pre, 's': post})
def get_plural_relationship_string(self, Ga, Gb,
reltocommon_a='', reltocommon_b='',
only_birth=True,
in_law_a=False, in_law_b=False):
return self._get_relationship_string(Ga, Gb, 'many',
reltocommon_a, reltocommon_b,
only_birth, in_law_a, in_law_b)
def get_single_relationship_string(self, Ga, Gb, gender_a, gender_b,
reltocommon_a, reltocommon_b,
only_birth=True,
in_law_a=False, in_law_b=False):
return self._get_relationship_string(Ga, Gb,
self._gender_convert(gender_b),
reltocommon_a, reltocommon_b,
only_birth, in_law_a, in_law_b)
def get_sibling_relationship_string(self, sib_type, gender_a, gender_b,
in_law_a=False, in_law_b=False):
if sib_type in [self.NORM_SIB, self.UNKNOWN_SIB]:
# the NORM_SIB translation is generic and suitable for UNKNOWN_SIB
rel = self.REL_FAM_BIRTH
only_birth = True
elif sib_type == self.HALF_SIB_FATHER:
rel = self.REL_FAM_BIRTH_FATH_ONLY
only_birth = True
elif sib_type == self.HALF_SIB_MOTHER:
rel = self.REL_FAM_BIRTH_MOTH_ONLY
only_birth = True
elif sib_type == self.STEP_SIB:
rel = self.REL_FAM_NONBIRTH
only_birth = False
return self._get_relationship_string(1, 1,
self._gender_convert(gender_b),
rel, rel,
only_birth, in_law_a, in_law_b)
if __name__ == "__main__":
# Test function. Call it as follows from the command line (so as to find
# imported modules):
# export PYTHONPATH=/path/to/gramps/src
# python src/plugins/rel/rel_de.py
# (Above not needed here)
"""TRANSLATORS, copy this if statement at the bottom of your
rel_xx.py module, and test your work with:
python src/plugins/rel/rel_xx.py
"""
from gramps.gen.relationship import test
rc = RelationshipCalculator()
test(rc, True)
|
gramps-project/gramps
|
gramps/plugins/rel/rel_de_AT.py
|
Python
|
gpl-2.0
| 11,462
|
#!/usr/bin/env python
############################################################################
# prepare.py
# Copyright (C) 2015 Belledonne Communications, Grenoble France
#
############################################################################
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
############################################################################
import argparse
import os
import re
import shutil
import tempfile
import sys
from logging import basicConfig, error, info, warning, INFO
from distutils.spawn import find_executable
from subprocess import Popen, PIPE
sys.dont_write_bytecode = True
sys.path.insert(0, 'submodules/cmake-builder')
try:
import prepare
except Exception as e:
error(
"Could not find prepare module: {}, probably missing submodules/cmake-builder? Try running:\ngit submodule update --init --recursive".format(e))
exit(1)
class IOSTarget(prepare.Target):
def __init__(self, arch):
prepare.Target.__init__(self, 'ios-' + arch)
current_path = os.path.dirname(os.path.realpath(__file__))
self.config_file = 'configs/config-ios-' + arch + '.cmake'
self.toolchain_file = 'toolchains/toolchain-ios-' + arch + '.cmake'
self.output = 'liblinphone-sdk/' + arch + '-apple-darwin.ios'
self.additional_args = [
'-DLINPHONE_BUILDER_EXTERNAL_SOURCE_PATH=' +
current_path + '/submodules'
]
def clean(self):
if os.path.isdir('WORK'):
shutil.rmtree(
'WORK', ignore_errors=False, onerror=self.handle_remove_read_only)
if os.path.isdir('liblinphone-sdk'):
shutil.rmtree(
'liblinphone-sdk', ignore_errors=False, onerror=self.handle_remove_read_only)
class IOSi386Target(IOSTarget):
def __init__(self):
IOSTarget.__init__(self, 'i386')
class IOSx8664Target(IOSTarget):
def __init__(self):
IOSTarget.__init__(self, 'x86_64')
class IOSarmv7Target(IOSTarget):
def __init__(self):
IOSTarget.__init__(self, 'armv7')
class IOSarm64Target(IOSTarget):
def __init__(self):
IOSTarget.__init__(self, 'arm64')
targets = {
'i386': IOSi386Target(),
'x86_64': IOSx8664Target(),
'armv7': IOSarmv7Target(),
'arm64': IOSarm64Target()
}
archs_device = ['arm64', 'armv7']
archs_simu = ['i386', 'x86_64']
platforms = ['all', 'devices', 'simulators'] + archs_device + archs_simu
class PlatformListAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values:
for value in values:
if value not in platforms:
message = ("invalid platform: {0!r} (choose from {1})".format(
value, ', '.join([repr(platform) for platform in platforms])))
raise argparse.ArgumentError(self, message)
setattr(namespace, self.dest, values)
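# A minimal sketch of the action in use (hypothetical parser, values as they
# would come from the command line):
#
#   p = argparse.ArgumentParser()
#   p.add_argument('platform', nargs='*', action=PlatformListAction)
#   p.parse_args(['arm64', 'i386']).platform  ->  ['arm64', 'i386']
#   p.parse_args(['mips'])                    ->  exits with "invalid platform: 'mips' ..."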
def gpl_disclaimer(platforms):
cmakecache = 'WORK/ios-{arch}/cmake/CMakeCache.txt'.format(arch=platforms[0])
    cmakecache_content = open(cmakecache).read()
    gpl_third_parties_enabled = ("ENABLE_GPL_THIRD_PARTIES:BOOL=YES" in cmakecache_content
                                 or "ENABLE_GPL_THIRD_PARTIES:BOOL=ON" in cmakecache_content)
if gpl_third_parties_enabled:
warning("\n***************************************************************************"
"\n***************************************************************************"
"\n***** CAUTION, this liblinphone SDK is built using 3rd party GPL code *****"
"\n***** Even if you acquired a proprietary license from Belledonne *****"
"\n***** Communications, this SDK is GPL and GPL only. *****"
"\n***** To disable 3rd party gpl code, please use: *****"
"\n***** $ ./prepare.py -DENABLE_GPL_THIRD_PARTIES=NO *****"
"\n***************************************************************************"
"\n***************************************************************************")
else:
warning("\n***************************************************************************"
"\n***************************************************************************"
"\n***** Linphone SDK without 3rd party GPL software *****"
"\n***** If you acquired a proprietary license from Belledonne *****"
"\n***** Communications, this SDK can be used to create *****"
"\n***** a proprietary linphone-based application. *****"
"\n***************************************************************************"
"\n***************************************************************************")
def extract_from_xcode_project_with_regex(regex):
    matches = []
    with open('linphone.xcodeproj/project.pbxproj', 'r') as f:
        lines = f.readlines()
    for line in lines:
        m = regex.search(line)
        if m is not None:
            matches.append(m.group(1))
    return list(set(matches))
def extract_deployment_target():
regex = re.compile("IPHONEOS_DEPLOYMENT_TARGET = (.*);")
return extract_from_xcode_project_with_regex(regex)[0]
def extract_libs_list():
# name = libspeexdsp.a; path = "liblinphone-sdk/apple-darwin/lib/libspeexdsp.a"; sourceTree = "<group>"; };
    regex = re.compile(r"name = \"*(lib\S+)\.a(\")*; path = \"liblinphone-sdk/apple-darwin/")
return extract_from_xcode_project_with_regex(regex)
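# Illustration: applied to the sample pbxproj line quoted in the comment above,
# the regex captures the library base name without its '.a' extension:
#
#   regex.search('name = libspeexdsp.a; path = "liblinphone-sdk/apple-darwin/'
#                'lib/libspeexdsp.a"; sourceTree = "<group>"; };').group(1)
#   ->  'libspeexdsp'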
missing_dependencies = {}
def check_is_installed(binary, prog=None, warn=True):
if not find_executable(binary):
if warn:
missing_dependencies[binary] = prog
# error("Could not find {}. Please install {}.".format(binary, prog))
return False
return True
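# Sketch of the bookkeeping above (hypothetical binary/package names): a failed
# lookup returns False and is recorded so that check_tools() can print one
# consolidated install command at the end.
#
#   check_is_installed('definitely-not-a-binary', 'some-package')  ->  False
#   missing_dependencies  ->  {'definitely-not-a-binary': 'some-package'}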
def detect_package_manager():
if find_executable("brew"):
return "brew"
elif find_executable("port"):
return "sudo port"
else:
        error(
            "No package manager found. Please read the README or install brew using:\n\truby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)\"")
return "brew"
def check_tools():
package_manager_info = {"brew-pkg-config": "pkg-config",
"sudo port-pkg-config": "pkgconfig",
"brew-binary-path": "/usr/local/bin/",
"sudo port-binary-path": "/opt/local/bin/"
}
reterr = 0
if " " in os.path.dirname(os.path.realpath(__file__)):
error("Invalid location: linphone-iphone path should not contain any spaces.")
reterr = 1
for prog in ["autoconf", "automake", "doxygen", "java", "nasm", "cmake", "wget", "yasm", "optipng"]:
reterr |= not check_is_installed(prog, prog)
reterr |= not check_is_installed("pkg-config", package_manager_info[detect_package_manager() + "-pkg-config"])
reterr |= not check_is_installed("ginstall", "coreutils")
reterr |= not check_is_installed("intltoolize", "intltool")
reterr |= not check_is_installed("convert", "imagemagick")
if find_executable("nasm"):
nasm_output = Popen("nasm -f elf32".split(" "), stderr=PIPE, stdout=PIPE).stderr.read()
if "fatal: unrecognised output format" in nasm_output:
missing_dependencies["nasm"] = "nasm"
reterr = 1
if check_is_installed("libtoolize", warn=False):
if not check_is_installed("glibtoolize", "libtool"):
            glibtoolize_path = find_executable("glibtoolize")
reterr = 1
msg = "Please do a symbolic link from glibtoolize to libtoolize:\n\tln -s {} ${}"
error(msg.format(glibtoolize_path, glibtoolize_path.replace("glibtoolize", "libtoolize")))
# list all missing packages to install
if missing_dependencies:
error("The following binaries are missing: {}. Please install them using:\n\t{} install {}".format(
" ".join(missing_dependencies.keys()),
detect_package_manager(),
" ".join(missing_dependencies.values())))
devnull = open(os.devnull, 'wb')
# just ensure that JDK is installed - if not, it will automatically display a popup to user
p = Popen("java -version".split(" "), stderr=devnull, stdout=devnull)
p.wait()
if p.returncode != 0:
error("Please install Java JDK (not just JRE).")
reterr = 1
# needed by x264
if not find_executable("gas-preprocessor.pl"):
error("""Could not find gas-preprocessor.pl, please install it:
wget --no-check-certificate https://raw.github.com/yuvi/gas-preprocessor/master/gas-preprocessor.pl && \\
chmod +x gas-preprocessor.pl && \\
sudo mv gas-preprocessor.pl {}""".format(package_manager_info[detect_package_manager() + "-binary-path"]))
reterr = 1
if not os.path.isdir("submodules/linphone/mediastreamer2/src") or not os.path.isdir("submodules/linphone/oRTP/src"):
error("Missing some git submodules. Did you run:\n\tgit submodule update --init --recursive")
reterr = 1
p = Popen("xcrun --sdk iphoneos --show-sdk-path".split(" "), stdout=devnull, stderr=devnull)
p.wait()
if p.returncode != 0:
error("iOS SDK not found, please install Xcode from AppStore or equivalent.")
reterr = 1
else:
xcode_version = int(
Popen("xcodebuild -version".split(" "), stdout=PIPE).stdout.read().split("\n")[0].split(" ")[1].split(".")[0])
if xcode_version < 7:
sdk_platform_path = Popen(
"xcrun --sdk iphonesimulator --show-sdk-platform-path".split(" "), stdout=PIPE, stderr=devnull).stdout.read()[:-1]
sdk_strings_path = "{}/{}".format(sdk_platform_path, "Developer/usr/bin/strings")
if not os.path.isfile(sdk_strings_path):
strings_path = find_executable("strings")
error("strings binary missing, please run:\n\tsudo ln -s {} {}".format(strings_path, sdk_strings_path))
reterr = 1
return reterr
def install_git_hook():
git_hook_path = ".git{sep}hooks{sep}pre-commit".format(sep=os.sep)
if os.path.isdir(".git{sep}hooks".format(sep=os.sep)) and not os.path.isfile(git_hook_path):
info("Installing Git pre-commit hook")
shutil.copyfile(".git-pre-commit", git_hook_path)
        os.chmod(git_hook_path, 0o755)
def generate_makefile(platforms, generator):
packages = os.listdir('WORK/ios-' + platforms[0] + '/Build')
packages.sort()
arch_targets = ""
for arch in platforms:
arch_targets += """
{arch}: {arch}-build
{arch}-build:
\t@for package in $(packages); do \\
\t\t$(MAKE) {arch}-build-$$package; \\
\tdone
{arch}-clean:
\t@for package in $(packages); do \\
\t\t$(MAKE) {arch}-clean-$$package; \\
\tdone
{arch}-veryclean:
\t@for package in $(packages); do \\
\t\t$(MAKE) {arch}-veryclean-$$package; \\
\tdone
{arch}-build-dummy_libraries:
\t{generator} WORK/ios-{arch}/cmake EP_dummy_libraries
{arch}-build-%: package-in-list-%
\trm -f WORK/ios-{arch}/Stamp/EP_$*/EP_$*-update; \\
\t{generator} WORK/ios-{arch}/cmake EP_$*
{arch}-clean-%: package-in-list-%
\t{generator} WORK/ios-{arch}/Build/$* clean; \\
\trm -f WORK/ios-{arch}/Stamp/EP_$*/EP_$*-build; \\
\trm -f WORK/ios-{arch}/Stamp/EP_$*/EP_$*-install;
{arch}-veryclean-%: package-in-list-%
\ttest -f WORK/ios-{arch}/Build/$*/install_manifest.txt && \\
\tcat WORK/ios-{arch}/Build/$*/install_manifest.txt | xargs rm; \\
\trm -rf WORK/ios-{arch}/Build/$*/*; \\
\trm -f WORK/ios-{arch}/Stamp/EP_$*/*; \\
\techo "Run 'make {arch}-build-$*' to rebuild $* correctly.";
{arch}-veryclean-ffmpeg:
\t{generator} WORK/ios-{arch}/Build/ffmpeg uninstall; \\
\trm -rf WORK/ios-{arch}/Build/ffmpeg/*; \\
\trm -f WORK/ios-{arch}/Stamp/EP_ffmpeg/*; \\
\techo "Run 'make {arch}-build-ffmpeg' to rebuild ffmpeg correctly.";
{arch}-clean-openh264:
\tcd WORK/ios-{arch}/Build/openh264; \\
\t$(MAKE) -f ../../../../submodules/externals/openh264/Makefile clean; \\
\trm -f WORK/ios-{arch}/Stamp/EP_openh264/EP_openh264-build; \\
\trm -f WORK/ios-{arch}/Stamp/EP_openh264/EP_openh264-install;
{arch}-veryclean-openh264:
\trm -rf liblinphone-sdk/{arch}-apple-darwin.ios/include/wels; \\
\trm -f liblinphone-sdk/{arch}-apple-darwin.ios/lib/libopenh264.*; \\
\trm -rf WORK/ios-{arch}/Build/openh264/*; \\
\trm -f WORK/ios-{arch}/Stamp/EP_openh264/*; \\
\techo "Run 'make {arch}-build-openh264' to rebuild openh264 correctly.";
{arch}-veryclean-vpx:
\trm -rf liblinphone-sdk/{arch}-apple-darwin.ios/include/vpx; \\
\trm -f liblinphone-sdk/{arch}-apple-darwin.ios/lib/libvpx.*; \\
\trm -rf WORK/ios-{arch}/Build/vpx/*; \\
\trm -f WORK/ios-{arch}/Stamp/EP_vpx/*; \\
\techo "Run 'make {arch}-build-vpx' to rebuild vpx correctly.";
""".format(arch=arch, generator=generator)
multiarch = ""
for arch in platforms[1:]:
multiarch += \
"""\tif test -f "$${arch}_path"; then \\
\t\tall_paths=`echo $$all_paths $${arch}_path`; \\
\t\tall_archs="$$all_archs,{arch}" ; \\
\telse \\
\t\techo "WARNING: archive `basename $$archive` exists in {first_arch} tree but does not exists in {arch} tree: $${arch}_path."; \\
\tfi; \\
""".format(first_arch=platforms[0], arch=arch)
makefile = """
archs={archs}
packages={packages}
LINPHONE_IPHONE_VERSION=$(shell git describe --always)
.PHONY: all
.SILENT: sdk
all: build
package-in-list-%:
\tif ! grep -q " $* " <<< " $(packages) "; then \\
\t\techo "$* not in list of available packages: $(packages)"; \\
\t\texit 3; \\
\tfi
build-%: package-in-list-% $(addsuffix -build-%, $(archs))
\t@echo "Build of $* terminated"
clean-%: package-in-list-% $(addsuffix -clean, $(archs))
\t@echo "Clean of $* terminated"
veryclean-%: package-in-list-% $(addsuffix -veryclean, $(archs))
\t@echo "Veryclean of $* terminated"
clean: $(addprefix clean-,$(packages))
veryclean: $(addprefix veryclean-,$(packages))
sdk:
\tarchives=`find liblinphone-sdk/{first_arch}-apple-darwin.ios -name *.a` && \\
\trm -rf liblinphone-sdk/apple-darwin && \\
\tmkdir -p liblinphone-sdk/apple-darwin && \\
\tcp -rf liblinphone-sdk/{first_arch}-apple-darwin.ios/include liblinphone-sdk/apple-darwin/. && \\
\tcp -rf liblinphone-sdk/{first_arch}-apple-darwin.ios/share liblinphone-sdk/apple-darwin/. && \\
\tfor archive in $$archives ; do \\
\t\tarmv7_path=`echo $$archive | sed -e "s/{first_arch}/armv7/"`; \\
\t\tarm64_path=`echo $$archive | sed -e "s/{first_arch}/arm64/"`; \\
\t\ti386_path=`echo $$archive | sed -e "s/{first_arch}/i386/"`; \\
\t\tx86_64_path=`echo $$archive | sed -e "s/{first_arch}/x86_64/"`; \\
\t\tdestpath=`echo $$archive | sed -e "s/-debug//" | sed -e "s/{first_arch}-//" | sed -e "s/\.ios//"`; \\
\t\tall_paths=`echo $$archive`; \\
\t\tall_archs="{first_arch}"; \\
\t\tmkdir -p `dirname $$destpath`; \\
\t\t{multiarch} \\
\t\techo "[{archs}] Mixing `basename $$archive` in $$destpath"; \\
\t\tlipo -create $$all_paths -output $$destpath; \\
\tdone
build: $(addsuffix -build, $(archs))
\t$(MAKE) sdk
ipa: build
\txcodebuild -configuration Release \\
\t&& xcrun -sdk iphoneos PackageApplication -v build/Release-iphoneos/linphone.app -o $$PWD/linphone-iphone.ipa
zipsdk: sdk
\techo "Generating SDK zip file for version $(LINPHONE_IPHONE_VERSION)"
\tzip -r liblinphone-iphone-sdk-$(LINPHONE_IPHONE_VERSION).zip \\
\tliblinphone-sdk/apple-darwin \\
\tliblinphone-tutorials \\
\t-x liblinphone-tutorials/hello-world/build\* \\
\t-x liblinphone-tutorials/hello-world/hello-world.xcodeproj/*.pbxuser \\
\t-x liblinphone-tutorials/hello-world/hello-world.xcodeproj/*.mode1v3
pull-transifex:
\ttx pull -af
push-transifex:
\t./Tools/i18n_generate_strings_files.sh && \\
\ttx push -s -f --no-interactive
zipres:
\t@tar -czf ios_assets.tar.gz Resources iTunesArtwork
{arch_targets}
help-prepare-options:
\t@echo "prepare.py was previously executed with the following options:"
\t@echo " {options}"
help: help-prepare-options
\t@echo ""
\t@echo "(please read the README.md file first)"
\t@echo ""
\t@echo "Available architectures: {archs}"
\t@echo "Available packages: {packages}"
\t@echo ""
\t@echo "Available targets:"
\t@echo ""
\t@echo " * all or build: builds all architectures and creates the liblinphone SDK"
\t@echo " * sdk: creates the liblinphone SDK. Use this only after a full build"
\t@echo " * zipsdk: generates a ZIP archive of liblinphone-sdk/apple-darwin containing the SDK. Use this only after SDK is built."
\t@echo " * zipres: creates a tar.gz file with all the resources (images)"
\t@echo ""
\t@echo "=== Advanced usage ==="
\t@echo ""
\t@echo " * build-[package]: builds the package for all architectures"
\t@echo " * clean-[package]: cleans package compilation for all architectures"
\t@echo " * veryclean-[package]: cleans the package for all architectures"
\t@echo ""
\t@echo " * [{arch_opts}]-build-[package]: builds a package for the selected architecture"
\t@echo " * [{arch_opts}]-clean-[package]: cleans package compilation for the selected architecture"
\t@echo " * [{arch_opts}]-veryclean-[package]: cleans the package for the selected architecture"
\t@echo ""
""".format(archs=' '.join(platforms), arch_opts='|'.join(platforms),
first_arch=platforms[0], options=' '.join(sys.argv),
arch_targets=arch_targets, packages=' '.join(packages),
multiarch=multiarch, generator=generator)
f = open('Makefile', 'w')
f.write(makefile)
f.close()
gpl_disclaimer(platforms)
def main(argv=None):
basicConfig(format="%(levelname)s: %(message)s", level=INFO)
if argv is None:
argv = sys.argv
argparser = argparse.ArgumentParser(
description="Prepare build of Linphone and its dependencies.")
argparser.add_argument(
'-c', '-C', '--clean', help="Clean a previous build instead of preparing a build.", action='store_true')
argparser.add_argument(
'-d', '--debug', help="Prepare a debug build, eg. add debug symbols and use no optimizations.", action='store_true')
argparser.add_argument(
'-dv', '--debug-verbose', help="Activate ms_debug logs.", action='store_true')
argparser.add_argument(
'-f', '--force', help="Force preparation, even if working directory already exist.", action='store_true')
argparser.add_argument(
'--disable-gpl-third-parties', help="Disable GPL third parties such as FFMpeg, x264.", action='store_true')
argparser.add_argument(
'--enable-non-free-codecs', help="Enable non-free codecs such as OpenH264, MPEG4, etc.. Final application must comply with their respective license (see README.md).", action='store_true')
argparser.add_argument(
        '-G', '--generator', help="CMake build system generator (default: Unix Makefiles, use cmake -h to get the complete list).", default='Unix Makefiles', dest='generator')
argparser.add_argument(
'-L', '--list-cmake-variables', help="List non-advanced CMake cache variables.", action='store_true', dest='list_cmake_variables')
argparser.add_argument(
'-lf', '--list-features', help="List optional features and their default values.", action='store_true', dest='list_features')
argparser.add_argument(
'-t', '--tunnel', help="Enable Tunnel.", action='store_true')
argparser.add_argument('platform', nargs='*', action=PlatformListAction, default=[
'x86_64', 'devices'], help="The platform to build for (default is 'x86_64 devices'). Space separated architectures in list: {0}.".format(', '.join([repr(platform) for platform in platforms])))
args, additional_args = argparser.parse_known_args()
additional_args += ["-G", args.generator]
if check_tools() != 0:
return 1
additional_args += ["-DLINPHONE_IOS_DEPLOYMENT_TARGET=" + extract_deployment_target()]
additional_args += ["-DLINPHONE_BUILDER_DUMMY_LIBRARIES=" + ' '.join(extract_libs_list())]
if args.debug_verbose is True:
additional_args += ["-DENABLE_DEBUG_LOGS=YES"]
if args.enable_non_free_codecs is True:
additional_args += ["-DENABLE_NON_FREE_CODECS=YES"]
if args.disable_gpl_third_parties is True:
additional_args += ["-DENABLE_GPL_THIRD_PARTIES=NO"]
if args.tunnel or os.path.isdir("submodules/tunnel"):
if not os.path.isdir("submodules/tunnel"):
info("Tunnel wanted but not found yet, trying to clone it...")
p = Popen("git clone gitosis@git.linphone.org:tunnel.git submodules/tunnel".split(" "))
p.wait()
            if p.returncode != 0:
error("Could not clone tunnel. Please see http://www.belledonne-communications.com/voiptunnel.html")
return 1
warning("Tunnel enabled, disabling GPL third parties.")
additional_args += ["-DENABLE_TUNNEL=ON", "-DENABLE_GPL_THIRD_PARTIES=OFF"]
if args.list_features:
tmpdir = tempfile.mkdtemp(prefix="linphone-iphone")
tmptarget = IOSarm64Target()
tmptarget.abs_cmake_dir = tmpdir
option_regex = re.compile("ENABLE_(.*):(.*)=(.*)")
option_list = [""]
build_type = 'Debug' if args.debug else 'Release'
for line in Popen(tmptarget.cmake_command(build_type, False, True, additional_args),
cwd=tmpdir, shell=False, stdout=PIPE).stdout.readlines():
match = option_regex.match(line)
if match is not None:
option_list.append("ENABLE_{} (is currently {})".format(match.groups()[0], match.groups()[2]))
info("Here is the list of available features: {}".format("\n\t".join(option_list)))
info("To enable some feature, please use -DENABLE_SOMEOPTION=ON")
info("Similarly, to disable some feature, please use -DENABLE_SOMEOPTION=OFF")
shutil.rmtree(tmpdir)
return 0
selected_platforms_dup = []
for platform in args.platform:
if platform == 'all':
selected_platforms_dup += archs_device + archs_simu
elif platform == 'devices':
selected_platforms_dup += archs_device
elif platform == 'simulators':
selected_platforms_dup += archs_simu
else:
selected_platforms_dup += [platform]
# unify platforms but keep provided order
selected_platforms = []
for x in selected_platforms_dup:
if x not in selected_platforms:
selected_platforms.append(x)
for platform in selected_platforms:
target = targets[platform]
if args.clean:
target.clean()
else:
retcode = prepare.run(target, args.debug, False, args.list_cmake_variables, args.force, additional_args)
if retcode != 0:
if retcode == 51:
Popen("make help-prepare-options".split(" "))
retcode = 0
return retcode
if args.clean:
if os.path.isfile('Makefile'):
os.remove('Makefile')
elif selected_platforms:
install_git_hook()
        # only generate the makefile if we are using Ninja or Unix Makefiles
if args.generator == 'Ninja':
if not check_is_installed("ninja", "it"):
return 1
generate_makefile(selected_platforms, 'ninja -C')
elif args.generator == "Unix Makefiles":
generate_makefile(selected_platforms, '$(MAKE) -C')
elif args.generator == "Xcode":
print("You can now open Xcode project with: open WORK/cmake/Project.xcodeproj")
else:
print("Not generating meta-makefile for generator {}.".format(args.generator))
return 0
if __name__ == "__main__":
sys.exit(main())
|
lyx2014/linphone-iphone
|
prepare.py
|
Python
|
gpl-2.0
| 24,475
|
#!/usr/bin/env python
# This file is Copyright David Francos Cuartero, licensed under the GPL2 license.
from distutils.core import setup
setup(name='airgraph-ng',
version='1.1',
description='Aircrack-ng network grapher',
author='TheX1le',
console = [{"script": "airgraph-ng" }],
url='https://aircrack-ng.org',
license='GPL2',
classifiers=[
'Development Status :: 4 - Beta',
],
packages=['airgraphviz'],
scripts=['airodump-join', 'airgraph-ng'],
)
|
creaktive/aircrack-ng
|
scripts/airgraph-ng/setup.py
|
Python
|
gpl-2.0
| 524
|
from gui.battle_control.ChatCommandsController import ChatCommandsController
from Avatar import PlayerAvatar
from MarkersStorage import MarkersStorage
from markersUtils import showMarker
import BigWorld
from plugins.Engine.ModUtils import BattleUtils,MinimapUtils,FileUtils,HotKeysUtils,DecorateUtils
from warnings import catch_warnings
from debug_utils import LOG_ERROR, LOG_CURRENT_EXCEPTION, LOG_DEBUG, LOG_NOTE
from chat_shared import CHAT_COMMANDS
from plugins.Engine.Plugin import Plugin
from gui.app_loader import g_appLoader
from gui.shared import g_eventBus, events
from gui.app_loader.settings import APP_NAME_SPACE as _SPACE
class Focus(Plugin):
lastCallback = None
inBattle = False
myConf = {
'reloadConfigKey': 'KEY_NUMPAD1',
"pluginEnable":False,
"setVName":False,
"swf_path":"gui/scaleform",
"maxArrows":3,
"maxArrowTime":60,
"delIfUnspotted":True,
"delIfNotVisible":False,
"delIfDeath":True,
"colors":("red", "purple"),
"swf_file_name":"DirectionIndicator.swf",
"flash_class":"WGDirectionIndicatorFlash",
"flash_mc_name":"directionalIndicatorMc",
"flash_size":(680,680),
"heightMode":"PIXEL",
"widthMode":"PIXEL",
"relativeRadius":0.5,
"moveFocus":False,
"focus":False,
"scaleMode":"NoScale",
"backgroundAlpha":0.0
}
@classmethod
def run(cls):
super(Focus, Focus).run()
cls.addEventHandler(Focus.myConf['reloadConfigKey'],cls.reloadConfig)
saveOldFuncs()
injectNewFuncs()
@staticmethod
    def stopBattle(event):
if event.ns == _SPACE.SF_BATTLE:
Focus.inBattle = False
MarkersStorage.clear()
if Focus.lastCallback is not None:
try:
BigWorld.cancelCallback(Focus.lastCallback)
except:
pass
Focus.lastCallback = None
@staticmethod
def check():
if not Focus.inBattle:
return
MarkersStorage.updateMarkers(Focus.myConf)
Focus.lastCallback = BigWorld.callback(0.7,Focus.check)
@staticmethod
def new_handlePublicCommand(self, cmd):
old_handlePublicCommand(self, cmd)
if not Focus.inBattle:
Focus.inBattle = True
Focus.check()
receiverID = cmd.getFirstTargetID()
if receiverID and cmd.showMarkerForReceiver():
showMarker(receiverID,Focus.myConf)
def saveOldFuncs():
global old_handlePublicCommand
DecorateUtils.ensureGlobalVarNotExist('old_handlePublicCommand')
old_handlePublicCommand = ChatCommandsController._ChatCommandsController__handlePublicCommand
def injectNewFuncs():
ChatCommandsController._ChatCommandsController__handlePublicCommand = Focus.new_handlePublicCommand
add = g_eventBus.addListener
appEvent = events.AppLifeCycleEvent
add(appEvent.INITIALIZING, Focus.stopBattle)
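# Note on the injection above: Python name mangling rewrites a
# double-underscore attribute such as __handlePublicCommand inside
# ChatCommandsController to _ChatCommandsController__handlePublicCommand,
# which is why the hook reads and reassigns that mangled name. A minimal
# standalone sketch of the same pattern (hypothetical class):
#
#   class Victim(object):
#       def __hidden(self):
#           return 'original'
#
#   original = Victim._Victim__hidden
#   Victim._Victim__hidden = lambda self: 'patched ' + original(self)
#   Victim()._Victim__hidden()  ->  'patched original'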
|
jstar88/wotmods
|
files/uncompyled/wot_folder/res_mods/0.9.10/scripts/client/plugins/Focus_plugin/Focus.py
|
Python
|
gpl-2.0
| 3,230
|
from __future__ import absolute_import
from ..backend import KeyringBackend
from ..errors import PasswordDeleteError
from ..errors import PasswordSetError
from ..util import properties
try:
import dbus
except ImportError:
pass
class DBusKeyring(KeyringBackend):
"""KDE KWallet via D-Bus"""
folder = 'Python'
appid = 'Python program'
@properties.ClassProperty
@classmethod
def priority(cls):
if 'dbus' not in globals():
raise RuntimeError('python-dbus not installed')
try:
bus = dbus.SessionBus()
except dbus.DBusException as exc:
raise RuntimeError(exc.get_dbus_message())
try:
bus.get_object('org.kde.kwalletd5', '/modules/kwalletd5')
except dbus.DBusException:
raise RuntimeError('cannot connect to org.kde.kwalletd5')
return 4.9
def __init__(self, *arg, **kw):
super(DBusKeyring, self).__init__(*arg, **kw)
self.handle = -1
def connected(self):
if self.handle >= 0:
return True
bus = dbus.SessionBus()
wId = 0
try:
remote_obj = bus.get_object('org.kde.kwalletd5', '/modules/kwalletd5')
self.iface = dbus.Interface(remote_obj, 'org.kde.KWallet')
self.handle = self.iface.open(
self.iface.networkWallet(), wId, self.appid)
except dbus.DBusException:
self.handle = -1
if self.handle < 0:
return False
if not self.iface.hasFolder(self.handle, self.folder, self.appid):
self.iface.createFolder(self.handle, self.folder, self.appid)
return True
def get_password(self, service, username):
"""Get password of the username for the service
"""
key = username + '@' + service
if not self.connected():
# the user pressed "cancel" when prompted to unlock their keyring.
return None
if not self.iface.hasEntry(self.handle, self.folder, key, self.appid):
return None
return self.iface.readPassword(
self.handle, self.folder, key, self.appid)
def set_password(self, service, username, password):
"""Set password for the username of the service
"""
key = username + '@' + service
if not self.connected():
# the user pressed "cancel" when prompted to unlock their keyring.
raise PasswordSetError("Cancelled by user")
self.iface.writePassword(
self.handle, self.folder, key, password, self.appid)
def delete_password(self, service, username):
"""Delete the password for the username of the service.
"""
key = username + '@' + service
if not self.connected():
# the user pressed "cancel" when prompted to unlock their keyring.
raise PasswordDeleteError("Cancelled by user")
if not self.iface.hasEntry(self.handle, self.folder, key, self.appid):
raise PasswordDeleteError("Password not found")
self.iface.removeEntry(self.handle, self.folder, key, self.appid)
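# A minimal usage sketch, assuming a running kwalletd5 session (hypothetical
# service and user names): entries live in the 'Python' folder of the network
# wallet under the key '<username>@<service>'.
#
#   kr = DBusKeyring()
#   kr.set_password('example-service', 'alice', 's3cret')
#   kr.get_password('example-service', 'alice')  ->  's3cret'
#   kr.delete_password('example-service', 'alice')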
|
b-jesch/service.fritzbox.callmonitor
|
resources/lib/PhoneBooks/pyicloud/vendorlibs/keyring/backends/kwallet.py
|
Python
|
gpl-2.0
| 3,157
|
class Solution:
# @param A a list of integers
# @param m an integer, length of A
# @param B a list of integers
# @param n an integer, length of B
# @return nothing
def merge(self, A, m, B, n):
len_a = m - 1
len_b = n - 1
i = m + n - 1
while i >= 0 and len_b >= 0:
if len_a < 0 or A[len_a] <= B[len_b]:
A[i] = B[len_b]
len_b -= 1
else:
A[i] = A[len_a]
len_a -=1
i -= 1
return
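# Usage sketch: the merge is performed in place, so A must be allocated with
# room for B's n elements after its m valid ones (LeetCode calling convention).
if __name__ == '__main__':
    A = [1, 3, 5, 0, 0, 0]  # m = 3 valid elements, padded for n = 3 more
    B = [2, 4, 6]
    Solution().merge(A, 3, B, 3)
    assert A == [1, 2, 3, 4, 5, 6]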
|
lileeyao/acm
|
sorted/intermediate/merge_sorted_array.py
|
Python
|
gpl-2.0
| 562
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
MultipleInputPanel.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4 import QtCore, QtGui
from sextante.gui.MultipleInputDialog import MultipleInputDialog
class MultipleInputPanel(QtGui.QWidget):
def __init__(self, options, datatype = None, parent = None):
super(MultipleInputPanel, self).__init__(parent)
self.options = options
self.datatype = datatype
self.selectedoptions = []
self.horizontalLayout = QtGui.QHBoxLayout(self)
self.horizontalLayout.setSpacing(2)
self.horizontalLayout.setMargin(0)
self.label = QtGui.QLabel()
self.label.setText("0 elements selected")
self.label.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
self.horizontalLayout.addWidget(self.label)
self.pushButton = QtGui.QPushButton()
self.pushButton.setText("...")
self.pushButton.clicked.connect(self.showSelectionDialog)
self.horizontalLayout.addWidget(self.pushButton)
self.setLayout(self.horizontalLayout)
def setSelectedItems(self, selected):
#no checking is performed!
self.selectedoptions = selected
self.label.setText(str(len(self.selectedoptions)) + " elements selected")
def showSelectionDialog(self):
#=======================================================================
# #If there is a datatype, we use it to create the list of options
# if self.datatype is not None:
# if self.datatype == ParameterMultipleInput.TYPE_RASTER:
# options = QGisLayers.getRasterLayers()
# elif self.datatype == ParameterMultipleInput.TYPE_VECTOR_ANY:
# options = QGisLayers.getVectorLayers()
# else:
# options = QGisLayers.getVectorLayers(self.datatype)
# opts = []
# for opt in options:
# opts.append(opt.name())
# self.options = opts
#=======================================================================
dlg = MultipleInputDialog(self.options, self.selectedoptions)
dlg.exec_()
        if dlg.selectedoptions is not None:
self.selectedoptions = dlg.selectedoptions
self.label.setText(str(len(self.selectedoptions)) + " elements selected")
|
innotechsoftware/Quantum-GIS
|
python/plugins/sextante/gui/MultipleInputPanel.py
|
Python
|
gpl-2.0
| 3,348
|
# -*- coding: utf-8 -*-
import wx
from base import basePanel
class favsPanel(basePanel):
def __init__(self, parent, name):
super(favsPanel, self).__init__(parent, name)
self.type = "favourites_timeline"
|
codeofdusk/ProjectMagenta
|
src/wxUI/buffers/favourites.py
|
Python
|
gpl-2.0
| 208
|
# -*- coding: utf-8 -*-
"""This module tests events that are invoked by Cloud/Infra VMs."""
import fauxfactory
import pytest
from cfme import test_requirements
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.gce import GCEProvider
from cfme.control.explorer.policies import VMControlPolicy
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.kubevirt import KubeVirtProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils.providers import ProviderFilter
from cfme.utils.wait import wait_for
all_prov = ProviderFilter(classes=[InfraProvider, CloudProvider], required_fields=['provisioning'])
excluded = ProviderFilter(classes=[KubeVirtProvider], inverted=True)
pytestmark = [
pytest.mark.usefixtures('uses_infra_providers', 'uses_cloud_providers'),
pytest.mark.tier(2),
pytest.mark.provider(gen_func=providers, filters=[all_prov, excluded],
scope='module'),
test_requirements.events,
]
@pytest.fixture(scope="function")
def vm_crud(provider, setup_provider_modscope, small_template_modscope):
template = small_template_modscope
base_name = 'test-events-' if provider.one_of(GCEProvider) else 'test_events_'
vm_name = base_name + fauxfactory.gen_alpha(length=8).lower()
collection = provider.appliance.provider_based_collection(provider)
vm = collection.instantiate(vm_name, provider, template_name=template.name)
yield vm
vm.cleanup_on_provider()
@pytest.mark.rhv2
def test_vm_create(request, appliance, vm_crud, provider, register_event):
""" Test whether vm_create_complete event is emitted.
    Prerequisites:
* A provider that is set up and able to deploy VMs
Steps:
* Create a Control setup (action, policy, profile) that apply a tag on a VM when
``VM Create Complete`` event comes
* Deploy the VM outside of CFME (directly in the provider)
* Refresh provider relationships and wait for VM to appear
* Assert the tag appears.
Metadata:
test_flag: provision, events
Polarion:
assignee: jdupuy
casecomponent: Events
caseimportance: high
initialEstimate: 1/8h
"""
action = appliance.collections.actions.create(
fauxfactory.gen_alpha(),
"Tag",
dict(tag=("My Company Tags", "Environment", "Development")))
request.addfinalizer(action.delete)
policy = appliance.collections.policies.create(
VMControlPolicy,
fauxfactory.gen_alpha()
)
request.addfinalizer(policy.delete)
policy.assign_events("VM Create Complete")
@request.addfinalizer
def _cleanup():
policy.unassign_events("VM Create Complete")
policy.assign_actions_to_event("VM Create Complete", action)
profile = appliance.collections.policy_profiles.create(
fauxfactory.gen_alpha(), policies=[policy])
request.addfinalizer(profile.delete)
provider.assign_policy_profiles(profile.description)
request.addfinalizer(lambda: provider.unassign_policy_profiles(profile.description))
register_event(target_type='VmOrTemplate', target_name=vm_crud.name, event_type='vm_create')
vm_crud.create_on_provider(find_in_cfme=True)
def _check():
return any(tag.category.display_name == "Environment" and tag.display_name == "Development"
for tag in vm_crud.get_tags())
wait_for(_check, num_sec=300, delay=15, message="tags to appear")
|
Yadnyawalkya/integration_tests
|
cfme/tests/cloud_infra_common/test_events.py
|
Python
|
gpl-2.0
| 3,512
|
from urllib.parse import urlparse
import attr
import pytest
from cfme.fixtures import terminalreporter
from cfme.fixtures.pytest_store import store
from cfme.utils import conf
from cfme.utils.appliance import DummyAppliance
from cfme.utils.appliance import load_appliances_from_config
from cfme.utils.appliance import stack
from cfme.utils.path import log_path
PLUGIN_KEY = "appliance-holder"
def pytest_addoption(parser):
parser.addoption('--dummy-appliance', action='store_true')
parser.addoption('--dummy-appliance-version', default=None)
parser.addoption('--appliance-version', default=None)
parser.addoption('--num-dummies', default=1, type=int)
def appliances_from_cli(cli_appliances, appliance_version):
appliance_config = dict(appliances=[])
for appliance_data in cli_appliances:
parsed_url = urlparse(appliance_data['hostname'])
if not parsed_url.hostname:
raise ValueError(
f"Invalid appliance url: {appliance_data}"
)
appliance = appliance_data.copy()
appliance.update(dict(
hostname=parsed_url.hostname,
ui_protocol=parsed_url.scheme if parsed_url.scheme else "https",
ui_port=parsed_url.port if parsed_url.port else 443,
version=appliance_version
))
appliance_config['appliances'].append(appliance)
return load_appliances_from_config(appliance_config)
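# Illustration of the URL handling above (hypothetical hostnames): urlparse
# only fills .scheme/.hostname/.port for URL-shaped input, hence the fallbacks
# to https/443 and the ValueError for values with no parseable hostname.
#
#   urlparse('https://appliance.example.com:8443').hostname  ->  'appliance.example.com'
#   urlparse('https://appliance.example.com:8443').port      ->  8443
#   urlparse('appliance.example.com').hostname               ->  None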
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
if config.getoption('--help'):
return
reporter = terminalreporter.reporter()
if config.getoption('--dummy-appliance'):
appliances = [
DummyAppliance.from_config(config) for _ in range(config.getoption('--num-dummies'))
]
if not config.option.collectonly:
config.option.collectonly = True
reporter.write_line('Retrieved Dummy Appliance', red=True)
elif stack.top:
appliances = [stack.top]
elif config.option.appliances:
appliances = appliances_from_cli(config.option.appliances, config.option.appliance_version)
reporter.write_line('Retrieved these appliances from the --appliance parameters', red=True)
elif config.getoption('--use-sprout'):
from cfme.test_framework.sprout.plugin import mangle_in_sprout_appliances
mangle_in_sprout_appliances(config)
# TODO : handle direct sprout pass on?
appliances = appliances_from_cli(config.option.appliances, None)
reporter.write_line('Retrieved these appliances from the --sprout-* parameters', red=True)
else:
appliances = load_appliances_from_config(conf.env)
reporter.write_line('Retrieved these appliances from the conf.env', red=True)
if not stack.top:
for appliance in appliances:
reporter.write_line(f'* {appliance!r}', cyan=True)
appliance = appliances[0]
stack.push(appliance)
plugin = ApplianceHolderPlugin(appliance, appliances)
config.pluginmanager.register(plugin, PLUGIN_KEY)
if not any((isinstance(appliance, DummyAppliance), appliance.is_dev)):
config.hook.pytest_appliance_setup(config=config)
@pytest.hookimpl(trylast=True)
def pytest_unconfigure(config):
config.hook.pytest_appliance_teardown(config=config)
stack.pop()
@attr.s(eq=False)
class ApplianceHolderPlugin:
held_appliance = attr.ib()
appliances = attr.ib(default=attr.Factory(list))
@pytest.fixture(scope="session")
def appliance(self):
return self.held_appliance
def pytest_sessionstart(self):
if isinstance(self.held_appliance, DummyAppliance) or self.held_appliance.is_dev:
return
if store.parallelizer_role != 'slave':
with log_path.join('appliance_version').open('w') as appliance_version:
appliance_version.write(self.held_appliance.version.vstring)
|
nachandr/cfme_tests
|
cfme/test_framework/appliance.py
|
Python
|
gpl-2.0
| 3,908
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from UDPSocket import UDPSocket
from PortMonitor import PortMonitor
class UDPMonitor(PortMonitor, UDPSocket):
def __init__(self):
PortMonitor.__init__(self)
UDPSocket.__init__(self)
return
# version
__id__ = "$Id: UDPMonitor.py,v 1.1.1.1 2005/03/08 16:13:41 aivazis Exp $"
# End of file
|
bmi-forum/bmi-pyre
|
pythia-0.8/packages/pyre/pyre/ipc/UDPMonitor.py
|
Python
|
gpl-2.0
| 701
|
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Multiple Record Editor Engine.
Every action related to record modification is performed
by specific class (successor of some of the commands).
Every of these classes is designed to perform specific action.
The engine itself receives a list of this classes and after retrieving
the records, asks the commands to perform their changes. This way the
engine itself is independent of the actions for modification of the records.
When we need to perform a new action on the records, we define a new command
and take care to pass it to the engine.
***************************************************************************
Subfield commands represent the actions performed on the subfields
of the record. The interface of these commands is defined in their
base class.
"""
__revision__ = "$Id"
import subprocess
from invenio import search_engine
from invenio import bibrecord
from invenio import bibformat
from invenio.config import CFG_TMPSHAREDDIR, CFG_BIBEDITMULTI_LIMIT_INSTANT_PROCESSING,\
CFG_BIBEDITMULTI_LIMIT_DELAYED_PROCESSING,\
CFG_BIBEDITMULTI_LIMIT_DELAYED_PROCESSING_TIME
from time import strftime
from invenio.bibtask import task_low_level_submission
from invenio.webuser import collect_user_info, isUserSuperAdmin
from invenio.dbquery import run_sql
from invenio import xmlmarc2textmarc as xmlmarc2textmarc
from invenio import template
multiedit_templates = template.load('bibeditmulti')
# base command for subfields
class BaseSubfieldCommand:
"""Base class for commands manipulating subfields"""
def __init__(self, subfield, value = "", new_value = "", condition = "", condition_exact_match=True, condition_subfield = ""):
"""Initialization."""
self._subfield = subfield
self._value = value
self._new_value = new_value
self._condition = condition
self._condition_subfield = condition_subfield
self._condition_exact_match = condition_exact_match
self._modifications = 0
def process_field(self, record, tag, field_number):
"""Make changes to a record.
By default this method is empty.
        Each specific command provides its own implementation."""
pass
def _subfield_condition_match(self, subfield_value):
"""Check if the condition is met for the given subfield value
in order to act only on certain subfields
        @return: True if the condition matches, False otherwise
"""
if self._condition_exact_match:
# exact matching
if self._condition == subfield_value:
return True
else:
# partial matching
if self._condition in subfield_value:
return True
return False
def _perform_on_all_matching_subfields(self, record, tag, field_number, callback):
"""Perform an action on all subfields of a given field matching
the subfield represented by the current command.
e.g. change the value of all subfields 'a' in a given field
This method is necessary because in order to make changes in the
subfields of a given field you always have to iterate through all
        of them. That repeated iteration code is extracted into this method.
@param record: record structure representing record to be modified
@param tag: the tag used to identify the field
@param field_number: field number used to identify the field
@param callback: callback method that will be called to
perform an action on the subfield.
This callback should accept the following parameters:
record, tag, field_number, subfield_index
"""
        if tag not in record:
return
for field in record[tag]:
if field[4] == field_number:
subfield_index = 0
                for subfield in field[0]:
                    if self._condition != 'condition':
                        # a condition is set: act on matching subfields only
                        # if the field's condition subfield matches it
                        if subfield[0] == self._subfield:
                            for condition_subfield in field[0]:
                                if self._condition_subfield == condition_subfield[0]:
                                    if self._subfield_condition_match(condition_subfield[1]):
                                        self._add_subfield_modification()
                                        callback(record, tag, field_number, subfield_index)
                    elif subfield[0] == self._subfield:
                        self._add_subfield_modification()
                        callback(record, tag, field_number, subfield_index)
                    subfield_index += 1
def _add_subfield_modification(self):
"""Keep a record of the number of modifications made to subfields"""
self._modifications += 1
# specific commands for subfields
class AddSubfieldCommand(BaseSubfieldCommand):
"""Add subfield to a given field"""
def _perform_on_all_matching_subfields_add_subfield(self, record, tag, field_number, callback):
        if tag not in record:
return
for field in record[tag]:
if field[4] == field_number:
for subfield in field[0]:
if self._condition_subfield == subfield[0]:
if self._subfield_condition_match(subfield[1]):
self._add_subfield_modification()
callback(record, tag, field_number, None)
def process_field(self, record, tag, field_number):
"""@see: BaseSubfieldCommand.process_field"""
action = lambda record, tag, field_number, subfield_index: \
bibrecord.record_add_subfield_into(record, tag,
self._subfield, self._value,
None,
field_position_global=field_number)
if self._condition != 'condition':
self._perform_on_all_matching_subfields_add_subfield(record, tag,
field_number, action)
else:
self._add_subfield_modification()
action(record, tag, field_number, None)
class DeleteSubfieldCommand(BaseSubfieldCommand):
"""Delete subfield from a given field"""
def process_field(self, record, tag, field_number):
"""@see: BaseSubfieldCommand.process_field"""
action = lambda record, tag, field_number, subfield_index: \
bibrecord.record_delete_subfield_from(record, tag,
subfield_index,
field_position_global=field_number)
self._perform_on_all_matching_subfields(record, tag,
field_number, action)
class ReplaceSubfieldContentCommand(BaseSubfieldCommand):
"""Replace content of subfield in a given field"""
def process_field(self, record, tag, field_number):
"""@see: BaseSubfieldCommand.process_field"""
action = lambda record, tag, field_number, subfield_index: \
bibrecord.record_modify_subfield(record, tag,
self._subfield,
self._value,
subfield_index,
field_position_global=field_number)
self._perform_on_all_matching_subfields(record,
tag,
field_number,
action)
class ReplaceTextInSubfieldCommand(BaseSubfieldCommand):
"""Replace text in content of subfield of a given field"""
def process_field(self, record, tag, field_number):
"""@see: BaseSubfieldCommand.process_field"""
def replace_text(record, tag, field_number, subfield_index):
"""Method for replacing the text, performed on
all the matching fields."""
            # get the field value
            field_value = ""
            for field in record[tag]:
                if field[4] == field_number:
                    subfields = field[0]
                    (field_code, field_value) = subfields[subfield_index]
            # replace the text
            new_value = field_value.replace(self._value, self._new_value)
            # update the subfield only if the value actually changed
            if new_value != field_value:
                bibrecord.record_modify_subfield(record, tag,
                                                 self._subfield, new_value,
                                                 subfield_index,
                                                 field_position_global=field_number)
            else:
                # no modification occurred: revert the counter increment made
                # by _perform_on_all_matching_subfields
                self._modifications -= 1
self._perform_on_all_matching_subfields(record,
tag,
field_number,
replace_text)
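# Minimal usage sketch for subfield commands (hypothetical record id, tag and
# values, not part of the original module): a command is applied per field
# instance, identified by its global field position (field[4]):
#
#     record = search_engine.get_record(recid=1)
#     command = ReplaceTextInSubfieldCommand('a', 'old text', 'new text')
#     for field in record.get('245', []):
#         command.process_field(record, '245', field[4])
#
# Each command tracks its change count in _modifications, which the engine
# aggregates when reporting test-search results.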
"""***************************************************************************
Field commands represent the actions performed on the fields
of the record. The interface of these commands is defined in their
base class.
In general, changes related to a field's subfields are handled by subfield
commands, which are passed to the field command.
"""
# base command for fields
class BaseFieldCommand:
"""Base class for commands manipulating record fields"""
def __init__(self, tag, ind1, ind2, subfield_commands):
"""Initialization."""
self._tag = tag
self._ind1 = ind1
self._ind2 = ind2
self._subfield_commands = subfield_commands
self._modifications = 0
def process_record(self, record):
"""Make changes to a record.
By default this method is empty.
        Each specific command provides its own implementation."""
pass
def _apply_subfield_commands_to_field(self, record, field_number):
"""Applies all subfield commands to a given field"""
field_modified = False
for subfield_command in self._subfield_commands:
current_modifications = subfield_command._modifications
subfield_command.process_field(record, self._tag, field_number)
if subfield_command._modifications > current_modifications:
field_modified = True
if field_modified:
self._modifications += 1
# specific commands for fields
class AddFieldCommand(BaseFieldCommand):
"""Deletes given fields from a record"""
def process_record(self, record):
"""@see: BaseFieldCommand.process_record"""
# if the tag is empty, we don't make any changes
if self._tag == "" or self._tag == None:
return
field_number = bibrecord.record_add_field(record, self._tag,
self._ind1, self._ind2)
self._apply_subfield_commands_to_field(record, field_number)
class DeleteFieldCommand(BaseFieldCommand):
"""Deletes given fields from a record"""
def __init__(self, tag, ind1, ind2, subfield_commands, conditionSubfield="", condition="", condition_exact_match=True):
BaseFieldCommand.__init__(self, tag, ind1, ind2, subfield_commands)
self._conditionSubfield = conditionSubfield
self._condition = condition
self._condition_exact_match = condition_exact_match
def _delete_field_condition(self, record):
"""Checks if a subfield meets the condition for the
field to be deleted
"""
try:
for field in record[self._tag]:
for subfield in field[0]:
if subfield[0] == self._conditionSubfield:
if self._condition_exact_match:
if self._condition == subfield[1]:
bibrecord.record_delete_field(record, self._tag, self._ind1, self._ind2, field_position_global=field[4])
self._modifications += 1
break
else:
if self._condition in subfield[1]:
bibrecord.record_delete_field(record, self._tag, self._ind1, self._ind2, field_position_global=field[4])
self._modifications += 1
break
except KeyError:
pass
def process_record(self, record):
"""@see: BaseFieldCommand.process_record"""
if self._condition:
self._delete_field_condition(record)
else:
bibrecord.record_delete_field(record, self._tag, self._ind1, self._ind2)
self._modifications += 1
class UpdateFieldCommand(BaseFieldCommand):
"""Deletes given fields from a record"""
def process_record(self, record):
"""@see: BaseFieldCommand.process_record"""
# if the tag is empty, we don't make any changes
if self._tag == "" or self._tag == None:
return
matching_field_instances = \
bibrecord.record_get_field_instances(record, self._tag,
self._ind1, self._ind2)
for current_field in matching_field_instances:
self._apply_subfield_commands_to_field(record, current_field[4])
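# Sketch of composing field and subfield commands (assumed tag/indicator
# values, not part of the original module):
#
#     subfield_cmds = [ReplaceSubfieldContentCommand('a', 'New title')]
#     field_cmd = UpdateFieldCommand('245', ' ', ' ', subfield_cmds)
#     field_cmd.process_record(record)
#
# UpdateFieldCommand applies every subfield command to each field instance
# matching tag '245' with blank indicators.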
def perform_request_index(language):
"""Creates the page of MultiEdit
@param language: language of the page
"""
collections = ["Any collection"]
collections.extend([collection[0] for collection in run_sql('SELECT name FROM collection')])
return multiedit_templates.page_contents(language=language, collections=collections)
def get_scripts():
"""Returns JavaScripts that have to be
imported in the page"""
return multiedit_templates.scripts()
def get_css():
"""Returns the local CSS for the pages."""
return multiedit_templates.styles()
def perform_request_detailed_record(record_id, update_commands, output_format, language):
"""Returns
@param record_id: the identifier of the record
@param update_commands: list of commands used to update record contents
@param output_format: specifies the output format as expected from bibformat
@param language: language of the page
"""
response = {}
record_content = _get_formated_record(record_id=record_id,
output_format=output_format,
update_commands = update_commands,
language=language)
response['search_html'] = multiedit_templates.detailed_record(record_content, language)
return response
def perform_request_test_search(search_criteria, update_commands, output_format, page_to_display,
language, outputTags, collection="", compute_modifications = 0):
"""Returns the results of a test search.
@param search_criteria: search criteria used in the test search
@type search_criteria: string
@param update_commands: list of commands used to update record contents
@type update_commands: list of objects
@param output_format: specifies the output format as expected from bibformat
@type output_format: string (hm, hb, hd, xm, xn, hx)
@param page_to_display: the number of the page that should be displayed to the user
@type page_to_display: int
@param language: the language used to format the content
@param outputTags: list of tags to be displayed in search results
@type outputTags: list of strings
@param collection: collection to be filtered in the results
@type collection: string
    @param compute_modifications: if 0, do not compute modifications; otherwise compute them
@type compute_modifications: int
"""
RECORDS_PER_PAGE = 100
response = {}
if collection == "Any collection":
collection = ""
record_IDs = search_engine.perform_request_search(p=search_criteria, c=collection)
number_of_records = len(record_IDs)
if page_to_display < 1:
page_to_display = 1
last_page_number = number_of_records/RECORDS_PER_PAGE+1
if page_to_display > last_page_number:
page_to_display = last_page_number
first_record_to_display = RECORDS_PER_PAGE * (page_to_display - 1)
last_record_to_display = (RECORDS_PER_PAGE*page_to_display) - 1
if not compute_modifications:
record_IDs = record_IDs[first_record_to_display:last_record_to_display + 1]
records_content = []
record_modifications = 0
for record_id in record_IDs:
current_modifications = [current_command._modifications for current_command in update_commands]
formated_record = _get_formated_record(record_id=record_id,
output_format=output_format,
update_commands = update_commands,
language=language, outputTags=outputTags)
new_modifications = [current_command._modifications for current_command in update_commands]
if new_modifications > current_modifications:
record_modifications += 1
records_content.append((record_id, formated_record))
total_modifications = []
if compute_modifications:
field_modifications = 0
subfield_modifications = 0
for current_command in update_commands:
field_modifications += current_command._modifications
for subfield_command in current_command._subfield_commands:
subfield_modifications += subfield_command._modifications
if record_modifications:
total_modifications.append(record_modifications)
total_modifications.append(field_modifications)
total_modifications.append(subfield_modifications)
records_content = records_content[first_record_to_display:last_record_to_display + 1]
response['display_info_box'] = compute_modifications
response['info_html'] = multiedit_templates.info_box(language = language,
total_modifications = total_modifications)
response['search_html'] = multiedit_templates.search_results(records = records_content,
number_of_records = number_of_records,
current_page = page_to_display,
records_per_page = RECORDS_PER_PAGE,
language = language,
output_format=output_format)
return response
def perform_request_submit_changes(search_criteria, update_commands, language, upload_mode, tag_list, collection, req):
"""Submits changes for upload into database.
@param search_criteria: search criteria used in the test search
@param update_commands: list of commands used to update record contents
    @param language: the language used to format the content
    @param upload_mode: bibupload mode ("-c" correct or "-r" replace)
    @param tag_list: tags to be kept in the records when upload_mode is "-c"
    @param collection: collection used to filter the records
    @param req: request object, used to check the user's upload rights
    """
response = {}
status, file_path = _submit_changes_to_bibupload(search_criteria, update_commands, upload_mode, tag_list, collection, req)
response['search_html'] = multiedit_templates.changes_applied(status, file_path)
return response
def _get_record_diff(record_textmarc, updated_record_textmarc, outputTags, record_id):
"""
Use difflib library to compare the old record with the modified version and
return the output for Multiedit interface
@param record_textmarc: original record textmarc representation
@type record_textmarc: string
@param updated_record_textmarc: updated record textmarc representation
@type updated_record_textmarc: string
@param outputTags: tags to be filtered while printing output
@type outputTags: list
@return: content to be displayed on Multiedit interface for this record
@rtype: string
"""
import difflib
differ = difflib.Differ()
filter_tags = "All tags" not in outputTags and outputTags
result = ["<pre>"]
for line in differ.compare(record_textmarc.splitlines(), updated_record_textmarc.splitlines()):
if line[0] == ' ':
if not filter_tags or line.split()[0].replace('_', '') in outputTags:
result.append("%09d " % record_id + line.strip())
elif line[0] == '-':
# Mark as deleted
if not filter_tags or line.split()[1].replace('_', '') in outputTags:
result.append('<strong class="multiedit_field_deleted">' + "%09d " % record_id + line[2:].strip() + "</strong>")
elif line[0] == '+':
# Mark as added/modified
if not filter_tags or line.split()[1].replace('_', '') in outputTags:
result.append('<strong class="multiedit_field_modified">' + "%09d " % record_id + line[2:].strip() + "</strong>")
else:
continue
result.append("</pre>")
return '\n'.join(result)
def _get_formated_record(record_id, output_format, update_commands, language, outputTags=""):
"""Returns a record in a given format
@param record_id: the ID of record to format
@param output_format: an output format code (or short identifier for the output format)
@param update_commands: list of commands used to update record contents
@param language: the language to use to format the record
"""
if update_commands:
        # Modify the bibrecord object with the appropriate actions
updated_record = _get_updated_record(record_id, update_commands)
textmarc_options = {"aleph-marc":0, "correct-mode":1, "append-mode":0,
"delete-mode":0, "insert-mode":0, "replace-mode":0,
"text-marc":1}
old_record = search_engine.get_record(recid=record_id)
old_record_textmarc = xmlmarc2textmarc.create_marc_record(old_record, sysno="", options=textmarc_options)
if "hm" == output_format:
if update_commands:
updated_record_textmarc = xmlmarc2textmarc.create_marc_record(updated_record, sysno="", options=textmarc_options)
result = _get_record_diff(old_record_textmarc, updated_record_textmarc, outputTags, record_id)
else:
filter_tags = "All tags" not in outputTags and outputTags
result = ['<pre>']
for line in old_record_textmarc.splitlines()[:-1]:
if not filter_tags or line.split()[0].replace('_', '') in outputTags:
result.append("%09d " % record_id + line.strip())
result.append('</pre>')
result = '\n'.join(result)
else:
if update_commands:
# No coloring of modifications in this case
xml_record = bibrecord.record_xml_output(updated_record)
else:
xml_record = bibrecord.record_xml_output(old_record)
result = bibformat.format_record(recID=None,
of=output_format,
xml_record=xml_record,
ln=language)
return result
# FIXME: Remove this method as soon as the formatting for MARC is
# implemented in bibformat
def _create_marc(records_xml):
"""Creates MARC from MARCXML.
@param records_xml: MARCXML containing information about the records
@return: string containing information about the records
in MARC format
"""
aleph_marc_output = ""
records = bibrecord.create_records(records_xml)
for (record, status_code, list_of_errors) in records:
sysno = ""
options = {"aleph-marc":0, "correct-mode":1, "append-mode":0,
"delete-mode":0, "insert-mode":0, "replace-mode":0,
"text-marc":1}
aleph_record = xmlmarc2textmarc.create_marc_record(record,
sysno,
options)
aleph_marc_output += aleph_record
return aleph_marc_output
def _submit_changes_to_bibupload(search_criteria, update_commands, upload_mode, tag_list, collection, req):
"""This methods takes care of submitting the changes to the server
through bibupload.
@param search_criteria: the search criteria used for filtering the
records. The changes will be applied to all the records matching
the criteria
@param update_commands: the commands defining the changes. These
commands perform the necessary changes before the records are submitted
"""
if collection == "Any collection":
collection = ""
record_IDs = search_engine.perform_request_search(p=search_criteria, c=collection)
num_records = len(record_IDs)
updated_records = []
for current_id in record_IDs:
current_updated_record = _get_updated_record(current_id, update_commands)
updated_records.append(current_updated_record)
file_path = _get_file_path_for_bibupload()
_save_records_xml(updated_records, file_path, upload_mode, tag_list)
return _upload_file_with_bibupload(file_path, upload_mode, num_records, req)
def _get_updated_record(record_id, update_commands):
"""Applies all the changes specified by the commands
to record identified by record_id and returns resulting record
@param record_id: identifier of the record that will be updated
@param update_commands: list of commands used to update record contents
@return: updated record structure"""
record = search_engine.get_record(recid=record_id)
for current_command in update_commands:
current_command.process_record(record)
return record
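# e.g. previewing the effect of pending commands on a single record
# (illustrative, assuming record id 12):
#
#     record = _get_updated_record(12, update_commands)
#     marcxml = bibrecord.record_xml_output(record)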
def _upload_file_with_bibupload(file_path, upload_mode, num_records, req):
"""
Uploads file with bibupload
@param file_path: path to the file where the XML will be saved.
@param upload_mode: -c for correct or -r for replace
@return tuple formed by status of the upload:
0-changes to be made instantly
1-changes to be made only in limited hours
2-user is superadmin. Changes made in limited hours
3-no rights to upload
and the upload file path
"""
if num_records < CFG_BIBEDITMULTI_LIMIT_INSTANT_PROCESSING:
task_low_level_submission('bibupload', 'multiedit', '-P', '5', upload_mode, '%s' % file_path)
return (0, file_path)
elif num_records < CFG_BIBEDITMULTI_LIMIT_DELAYED_PROCESSING:
task_low_level_submission('bibupload', 'multiedit', '-P', '5', upload_mode, '-L', CFG_BIBEDITMULTI_LIMIT_DELAYED_PROCESSING_TIME,'%s' % file_path)
return (1, file_path)
else:
user_info = collect_user_info(req)
if isUserSuperAdmin(user_info):
task_low_level_submission('bibupload', 'multiedit', '-P', '5', upload_mode, '-L', CFG_BIBEDITMULTI_LIMIT_DELAYED_PROCESSING_TIME, '%s' % file_path)
return (2, file_path)
return (3, file_path)
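# Callers branch on the returned status code, e.g. (illustrative):
#
#     status, file_path = _upload_file_with_bibupload(file_path, '-c',
#                                                     num_records, req)
#     if status == 3:
#         pass  # user has no right to upload; report the failure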
def _get_file_path_for_bibupload():
"""Returns file path for saving a file for bibupload """
current_time = strftime("%Y%m%d%H%M%S")
return "%s/%s_%s%s" % (CFG_TMPSHAREDDIR, "multiedit", current_time, ".xml")
def _save_records_xml(records, file_path, upload_mode, tag_list):
"""Saves records in a file in XML format
@param records: list of records (record structures)
        @param file_path: path to the file where the XML will be saved
        @param upload_mode: bibupload mode; in correct mode ("-c") only the
        tags listed in tag_list are kept in the records
        @param tag_list: tags to preserve when upload_mode is "-c"
        """
output_file = None
try:
output_file = open(file_path, "w")
if upload_mode == "-c":
for record in records:
for tag in record.keys():
if tag not in tag_list:
                        del record[tag]
records_xml = bibrecord.print_recs(records)
output_file.write(records_xml)
finally:
        if output_file is not None:
output_file.close()
|
cul-it/Invenio
|
modules/bibedit/lib/bibeditmulti_engine.py
|
Python
|
gpl-2.0
| 28,975
|