code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
"""testApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from data import views
# URL routing table (Django 1.8-style url() patterns).
urlpatterns = [
    url(r'^$', views.default_view),                  # site root -> default landing view
    url(r'^api/data/$', views.DataList.as_view()),   # data API endpoint (class-based view)
    url(r'^admin/', include(admin.site.urls)),       # built-in Django admin
]
| Kos/rtable | testApp/urls.py | Python | isc | 867 |
# -*- coding: utf-8 -*-
from plugin_elrte_widget import ElrteWidget
from plugin_dialog import DIALOG
from plugin_uploadify_widget import (
uploadify_widget, IS_UPLOADIFY_IMAGE, IS_UPLOADIFY_LENGTH
)
# use disk_db for image storing
# `db` is the connection provided by the web2py framework; keep a handle to it
# before rebinding the name to an in-memory database below.
disk_db = db

# define a product table using memory db (demo products are transient)
db = DAL('sqlite:memory:')
db.define_table('product', Field('description', 'text'))

# define an image table using disk db so uploads persist between requests
image_table = disk_db.define_table('plugin_elrte_widget_image',
    Field('image', 'upload', autodelete=True, comment='<- upload an image (max file size=10k)'),
)
image_table.image.widget = uploadify_widget
# must be an actual image and between 1 byte and 10 KiB
image_table.image.requires = [IS_UPLOADIFY_LENGTH(10240, 1), IS_UPLOADIFY_IMAGE()]

# define a file table using disk db
file_table = disk_db.define_table('plugin_elrte_widget_file',
    Field('name'),
    Field('file', 'upload', autodelete=True, comment='<- upload a file(max file size=100k)'),
)
file_table.file.widget = uploadify_widget
file_table.file.requires = IS_UPLOADIFY_LENGTH(102400, 1)

# restrict records for demo: keep only the three newest rows per table
# (autodelete=True removes the stored files along with the rows)
for table in (image_table, file_table):
    if disk_db(table.id > 0).count() > 3:
        last = disk_db(table.id > 0).select(orderby=~table.id).first()
        disk_db(table.id <= last.id - 3).delete()
def index():
    """Demo page: a product form whose 'description' uses the ElrteWidget.

    Builds the image/file chooser dialogs (LOADed as ajax components) and
    wires them into elRTE's file-manager callback, then renders the form.
    """
    # set language: derive the editor locale from the Accept-Language header.
    # elRTE ships a 'jp' locale, hence the 'ja' -> 'jp' remap.
    try:
        lang = T.accepted_language.split('-')[0].replace('ja', 'jp')
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; we only want to guard against a
        # missing or malformed Accept-Language header.
        lang = 'en'
    image_chooser = DIALOG(title=T('Select an image'), close=T('close'), renderstyle=True,
        content=LOAD('plugin_elrte_widget', 'image_upload_or_choose', ajax=True))
    file_chooser = DIALOG(title=T('Select a file'), close=T('close'), renderstyle=True,
        content=LOAD('plugin_elrte_widget', 'file_upload_or_choose', ajax=True))
    # JS hook called when elRTE opens its file manager: shows the appropriate
    # chooser dialog and stashes the callback so the chooser components can
    # hand the selected URL/markup back (see *_upload_or_choose below).
    fm_open = """function(callback, kind){
if (kind == 'elfinder') {%s;} else {%s;}
jQuery.data(document.body, 'elrte_callback', callback)
}""" % (file_chooser.show(), image_chooser.show())
    cssfiles = [URL('static', 'css/base.css')]
    ################################ The core ######################################
    # Inject the elrte widget
    # You can specify the language for the editor, and include your image chooser.
    # In this demo, the image chooser uses the uploadify plugin.
    # If you want to edit contents with css applied, pass the css file urls for an argument.
    db.product.description.widget = ElrteWidget()
    db.product.description.widget.settings.lang = lang
    db.product.description.widget.settings.fm_open = fm_open
    db.product.description.widget.settings.cssfiles = cssfiles
    ################################################################################
    form = SQLFORM(db.product)
    if form.accepts(request.vars, session):
        session.flash = 'submitted %s' % form.vars
        redirect(URL('index'))
    return dict(form=form)
def image_upload_or_choose():
    """Ajax component: upload a new image or pick one of the last 3 uploads.

    Clicking a thumbnail passes its download URL to the elRTE callback
    stored on document.body (see index()) and closes the dialog.
    """
    form = SQLFORM(image_table, upload=URL('download'))
    info = ''
    if form.accepts(request.vars, session):
        info = 'submitted %s' % form.vars
    # three most recent uploads only (older rows are pruned at module level)
    records = disk_db(image_table.id > 0).select(orderby=~image_table.id, limitby=(0, 3))
    # download URL for a record's stored image file
    _get_src = lambda r: URL(request.controller, 'download', args=r.image)
    records = DIV([IMG(_src=_get_src(r),
        _onclick="""
jQuery.data(document.body, 'elrte_callback')('%s');jQuery('.dialog').hide(); return false;
""" % _get_src(r), _style='max-width:50px;max-height:50px;margin:5px;cursor:pointer;')
        for r in records])
    return BEAUTIFY(dict(form=form, info=info, records=records))
def file_upload_or_choose():
    """Ajax component: upload a new file or pick one of the last 3 uploads.

    Clicking a file link hands the file's anchor markup to the elRTE
    callback stored on document.body (see index()) and closes the dialog.
    """
    form = SQLFORM(file_table, upload=URL('download'))
    info = ''
    if form.accepts(request.vars, session):
        info = 'submitted %s' % form.vars

    # extension -> icon filename lookup table (replaces a long if/elif chain)
    # NOTE(review): 'api' below looks like a typo for 'avi' — kept as-is to
    # preserve behavior; confirm before changing.
    icon_by_ext = {}
    for exts, icon in (
            (('pdf',), 'icon_pdf.gif'),
            (('doc', 'docx', 'rst'), 'icon_doc.gif'),
            (('xls', 'xlsx'), 'icon_xls.gif'),
            (('ppt', 'pptx', 'pps'), 'icon_pps.gif'),
            (('jpg', 'gif', 'png', 'bmp', 'svg', 'eps'), 'icon_pic.gif'),
            (('swf', 'fla'), 'icon_flash.gif'),
            (('mp3', 'wav', 'ogg', 'wma', 'm4a'), 'icon_music.gif'),
            (('mov', 'wmv', 'mp4', 'api', 'mpg', 'flv'), 'icon_film.gif'),
            (('zip', 'rar', 'gzip', 'bzip', 'ace', 'gz'), 'icon_archive.gif')):
        for ext in exts:
            icon_by_ext[ext] = icon

    def _get_icon(v):
        # pick an icon based on the stored filename's extension
        ext = v.split('.')[-1]
        filename = icon_by_ext.get(ext, 'icon_txt.gif')
        return IMG(_src=URL('static', 'plugin_elrte_widget/custom/icons/%s' % filename),
                   _style='cursor:pointer;margin-right:5px;')

    # three most recent uploads only (older rows are pruned at module level)
    records = disk_db(file_table.id > 0).select(orderby=~file_table.id, limitby=(0, 3))
    records = DIV([DIV(A(_get_icon(r.file), r.name, _href='#', _onclick="""
jQuery.data(document.body, 'elrte_callback')('%s');jQuery('.dialog').hide(); return false;
""" % A(_get_icon(r.file), r.name, _href=URL(request.controller, 'download', args=r.file)).xml()),
        _style='margin-bottom:5px;') for r in records])
    return BEAUTIFY(dict(form=form, info=info, records=records))
def download():
    """Serve uploaded files from the disk database (web2py download action)."""
    return response.download(request, disk_db)
| chugle/myapp | applications/welcome/controllers/plugin_elrte_widget.py | Python | gpl-2.0 | 5,412 |
#
#
# Copyright 2011,2013 Luis Ariel Vega Soliz and contributors.
# ariel.vega@uremix.org
#
# This file is part of python-mobile.
#
# python-mobile is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-mobile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with UADH. If not, see <http://www.gnu.org/licenses/>.
#
#
'''
Created on 24/04/2013
@author: Luis Ariel Vega Soliz (ariel.vega@uremix.org)
@contact: Uremix Team (http://uremix.org)
'''
class Unlocker:
    """Abstract interface for device-unlocking back-ends.

    All methods are stubs (returning None); concrete unlocker
    implementations are expected to override them.
    """

    def unlock(self):
        # Perform the unlock operation on the device.
        pass

    def is_locked(self):
        # Report whether the device is currently locked.
        pass

    def get_unlockcode(self):
        # Return the code needed to unlock the device.
        pass

    def get_flashcode(self):
        # Return the code needed to flash/unbrand the device.
        pass
| arielvega/python-mobile | src/mobile/unlocker/__init__.py | Python | gpl-3.0 | 1,103 |
import sys
import traceback
import urlparse
import dataetc
from Array import Array
from CSSStyleDeclaration import CSSStyleDeclaration
from unknown import unknown
import config
import time
from HTTP.HttpHoneyClient import hc
import re
class DOMObject(object):
    """Emulated DOM node used by the honeyclient's page parser (Python 2).

    Attribute assignment is intercepted by __setattr__, which first
    dispatches to handle_<name> methods (src/id/name/innerHTML) and then,
    for event attributes, compiles the value into a JS function inside the
    owning window's JS context.
    """

    def __init__(self, window, tag, parser):
        self.tagName = tag
        self.__parser = parser
        # stored via __dict__ directly to bypass the __setattr__ machinery
        self.__dict__['__window'] = window
        if tag == 'body':
            self.__dict__['__window'].document.body = self
        if tag == 'select':
            self.options = Array()
        if tag == 'input':
            self.value = ''
        if tag == 'option':
            self.value = 0
        self.children = []
        self.childNodes = Array()
        self.style = CSSStyleDeclaration()
        self.__dict__['__window'].document.all.append(self)
        # assign an initial id to every dom node
        varname = 'domnode' + str(int(time.time()*10000000))
        self.__setattr__('id', varname)

    def handle_src(self, name, val):
        """React to a src attribute: fetch the URL; for iframes, parse the page."""
        url = self.__dict__['__window'].document.location.fix_url(val)
        if config.retrieval_all:
            hc.get(url, self.__dict__['__window'].document.location.href)
        scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
        if scheme not in ('http','file','https','ftp'):
            config.VERBOSE(config.VERBOSE_WARNING, "[WARNING] Got unknown scheme: %s in %s.%s ."%(url,self.tagName, name));
            # NOTE(review): onerror is fired here in the unknown-scheme branch;
            # original indentation was lost in extraction — confirm nesting
            # against the upstream phoneyc sources.
            if 'onerror' in self.__dict__:
                config.VERBOSE(config.VERBOSE_DEBUG, "[DEBUG] Calling onerror of %s."%(self.tagName));
                self.onerror()
        if self.tagName == "iframe":
            # an iframe's src spawns a nested window and its own page parser
            from Window import Window
            from PageParser import PageParser
            window = Window(self.__dict__['__window'].__dict__['__root'],
                            self.__dict__['__window'].document.location.fix_url(val),
                            self.__dict__['__window'].document.location.href)
            parser = PageParser(window, window.document, window.__dict__['__html'])
            parser.close()

    def handle_id(self, name, val):
        """Expose this node as a JS global named after its (sanitized) id."""
        self.__dict__[name] = val
        # ':' and '-' are legal in HTML ids but not in JS identifiers
        val = val.replace(':','_').replace('-','_')
        try:
            #if self.__dict__['__window'].__dict__['__cx'].execute('typeof ' + val + ' == "undefined"'):
            self.__dict__['__window'].__dict__['__cx'].add_global(val, self)
        except:
            #traceback.print_exc()
            pass
        self.__dict__['__window'].__dict__['__fl'][-1].__setattr__(val, self)

    def handle_name(self, name, val):
        # name attributes behave like ids for lookup purposes
        self.handle_id(name, val)

    def handle_innerHTML(self, name, val):
        """Replace this node's HTML in the parser buffer and fix node offsets."""
        val = str(val)
        if self.__parser:
            self.__parser.html = self.__parser.html[:self.begin] + val + self.__parser.html[self.end:]
            # dev: how much the document shrank (negative if it grew)
            dev = self.end - self.begin - len(val)
            # shift the recorded begin/end offsets of every node after the edit
            for i in self.__dict__['__window'].document.all:
                if i.begin:
                    if i.begin > self.end:
                        i.begin -= dev
                if i.end:
                    if i.end >= self.end:
                        i.end -= dev
            self.__parser.current -= dev
            return
        # no parser yet: parse the assigned HTML from scratch
        from PageParser import PageParser
        self.__parser = PageParser(self.__dict__['__window'], self.__dict__['__window'].document, val)

    def __setattr__(self, name, val):
        # run any attribute-specific handler first (handle_src, handle_id, ...)
        try:
            handler = getattr(self, "handle_%s" % (name, ))
            handler(name, val)
        except:
            pass
        #if it's an event, let it be a function
        if dataetc.isevent(name, self.tagName):
            # using 'this' in methods may cause additional problems.
            # i think i find a way to handle this, but there could
            # be some cases it cannot cover.
            val = str(val) + ';'
            cx = self.__dict__['__window'].__dict__['__cx']
            val = cx.patch_script(val)
            try:
                if 'id' in self.__dict__:
                    # rewrite standalone occurrences of `this` (start, middle,
                    # end of script) into the node's own id
                    vals = re.split('(?<=[^a-zA-Z0-9_])this(?=[^a-zA-Z0-9_])', val)
                    valstmp = re.split('^this(?=[^a-zA-Z0-9_])', vals[0])
                    if len(vals) > 1:
                        vals = valstmp + vals[1:]
                    valstmp = re.split('(?<=[^a-zA-Z0-9_])this$', vals[-1])
                    if len(vals) > 1:
                        vals = vals[:-1] + valstmp
                    val = self.id.join(vals)
                self.__dict__[name] = cx.execute('function(){' + val + '}')
            except:
                try:
                    # retry with escape sequences decoded
                    p = val.decode('string-escape')
                    self.__dict__[name] = cx.execute('function(){' + p + '}')
                except:
                    print val
                    #traceback.print_exc()
        # NOTE(review): this final store also runs for event attributes,
        # overwriting the compiled function with the raw string; original
        # indentation was lost in extraction — confirm placement upstream.
        self.__dict__[name] = val

    def focus(self):
        # fire the node's onfocus handler (if any) and mark it active
        if 'onfocus' in self.__dict__:
            self.onfocus()
        self.__dict__['__window'].document.activeElement = self

    def blur(self):
        if 'onblur' in self.__dict__:
            self.onblur()

    def __getattr__(self, name):
        # innerHTML is materialized on demand from the parser's buffer
        if name == 'innerHTML':
            return self.__parser.html[self.begin:self.end]
        # any other unknown attribute resolves to a catch-all `unknown` object
        return unknown()

    def appendChild(self, dom):
        if self.childNodes.length == 0:
            self.firstChild = dom
        self.childNodes.append(dom)
        self.lastChild = dom
        # direct __dict__ store: avoid triggering __setattr__ handlers
        dom.__dict__['parentNode'] = self
        self.children.append(dom)

    def removeChild(self, dom):
        self.childNodes.remove(dom)
        # keep firstChild/lastChild consistent with the remaining nodes
        if self.childNodes.length == 0:
            self.firstChild = None
            self.lastChild = None
        else:
            self.firstChild = self.childNodes[0]
            self.lastChild = self.childNodes[self.childNodes.length - 1]

    def setAttribute(self, attname, attval):
        # route through __setattr__ so handlers/event compilation fire
        self.__setattr__(attname, attval)

    def removeAttribute(self, attname):
        if self.__dict__[attname]:
            del self.__dict__[attname]
| amohanta/phoneyc-1 | DOM/DOMObject.py | Python | gpl-2.0 | 6,096 |
# -*- coding: utf-8 -*-
import datetime
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class JasonsDeliSpider(scrapy.Spider):
    """Scrape Jason's Deli restaurant locations into GeojsonPointItems."""
    download_delay = 0.2
    name = "jasonsdeli"
    allowed_domains = ["jasonsdeli.com"]
    start_urls = (
        'https://www.jasonsdeli.com/restaurants',
    )

    def parse_hours(self, elements):
        """Convert lines like 'Mon: 10:00 AM - 9:00 PM' to an opening-hours spec."""
        opening_hours = OpeningHours()
        for item in elements:
            day, open_time, close_time = re.search(
                r'([a-z]{3}):.([0-9:\sAPM]+)\s-\s([0-9:\sAPM]+)', item, flags=re.IGNORECASE).groups()
            opening_hours.add_range(day=day[0:2],
                                    open_time=datetime.datetime.strptime(open_time, '%I:%M %p').strftime('%H:%M'),
                                    close_time=datetime.datetime.strptime(close_time, '%I:%M %p').strftime('%H:%M'))
        return opening_hours.as_opening_hours()

    def parse_store(self, response):
        """Build a point item from a single store detail page."""
        # store slug is the last path component of the URL
        ref = re.search(r'.+/(.+)', response.url).group(1)
        # text nodes of the address <div>: the first holds the street line,
        # the last holds "City, ST ZIP"
        address = response.xpath('//div[@class="address"]/text()').extract()
        state_zip = address[-1].split(', ')[1].split(' ')
        properties = {
            # BUG FIX: extract_first() already returns a string; the previous
            # code indexed it with [0], truncating addr_full to its first
            # character before the split.
            'addr_full': address[0].split('\n')[0],
            'city': address[-1].split(',')[0],
            'state': state_zip[-2],
            'postcode': state_zip[-1],
            'ref': ref,
            'website': response.url,
            'phone': response.xpath('//a[@class="cnphone"]/text()').extract_first()
        }
        hours = self.parse_hours(response.xpath('//div[@class="loc-hours"]/p/text()').extract())
        try:
            # titles look like "Jason's Deli: Store Name"; fall back to the
            # whole title if there is no ": " separator
            bus_name = response.xpath('//div[@class="loc-title"]/text()').extract()[0].split(': ')[1]
        except IndexError:
            bus_name = response.xpath('//div[@class="loc-title"]/text()').extract_first()
        properties['name'] = bus_name
        if hours:
            properties['opening_hours'] = hours
        yield GeojsonPointItem(**properties)

    def parse(self, response):
        """Follow every store link on the restaurant index page."""
        urls = response.xpath('//span[@class="field-content"]/a/@href').extract()
        for url in urls:
            yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
| iandees/all-the-places | locations/spiders/jasonsdeli.py | Python | mit | 2,370 |
from __future__ import absolute_import, unicode_literals
import sys
from functools import partial
from billiard.einfo import ExceptionInfo
from django.http import HttpResponse
from django.test.testcases import TestCase as DjangoTestCase
from django.template import TemplateDoesNotExist
from anyjson import deserialize
from celery import current_app
from celery import states
from celery.task import task
from celery.utils import gen_unique_id, get_full_cls_name
from djcelery.views import task_webhook
from djcelery.tests.req import MockRequest
try:
from django.urls import reverse # Django 1.10+
except ImportError:
from django.core.urlresolvers import reverse
def reversestar(name, **kwargs):
    """Like django's `reverse`, but taking the URL kwargs as **kwargs."""
    return reverse(name, kwargs=kwargs)
class MyError(Exception):
    """Exception type used throughout these tests.

    repr() deliberately omits the object id: on Py2.4, repr(exc) includes
    it, so comparing the text of two "equal" exceptions would never match.
    """

    def __repr__(self):
        return '<%s: %r>' % (type(self).__name__, self.args)
class MyRetryTaskError(MyError):
    # Distinct subclass so retry-state tests can use a dedicated error type.
    pass
# URL helpers bound to the named routes under test.
task_is_successful = partial(reversestar, 'celery-is_task_successful')
task_status = partial(reversestar, 'celery-task_status')
task_apply = partial(reverse, 'celery-apply')
registered_tasks = partial(reverse, 'celery-tasks')

# Module-level side channel the eagerly-executed task writes to, so tests
# can observe that it actually ran.
scratch = {}


@task()
def mytask(x, y):
    # record the product so test_apply can assert on it
    ret = scratch['result'] = int(x) * int(y)
    return ret
def create_exception(name, base=Exception):
    """Dynamically create and return a new exception class.

    :param name: name of the new class.
    :param base: base class to derive from (default: ``Exception``).
    """
    bases = (base,)
    return type(name, bases, dict())
def catch_exception(exception):
    """Raise *exception*, run it through the result backend's exception
    preparation, and return ``(prepared_exception, traceback_text)``."""
    try:
        raise exception
    except exception.__class__ as exc:
        exc = current_app.backend.prepare_exception(exc)
        return exc, ExceptionInfo(sys.exc_info()).traceback
class ViewTestCase(DjangoTestCase):
    """Base test case with JSON helpers and shims for older unittest APIs."""

    def assertJSONEqual(self, json, py):
        # accept either a raw JSON string/bytes or a full HttpResponse
        json = isinstance(json, HttpResponse) and json.content or json
        try:
            self.assertEqual(deserialize(json.decode('utf-8')), py)
        except TypeError as exc:
            # include the offending payload in the failure message
            raise TypeError('{0}: {1}'.format(exc, json))

    def assertIn(self, expected, source, *args):
        # fallback for unittest versions that lack assertIn
        try:
            DjangoTestCase.assertIn(self, expected, source, *args)
        except AttributeError:
            self.assertTrue(expected in source)

    def assertDictContainsSubset(self, subset, dictionary, *args):
        # re-implemented locally: deprecated/removed in newer unittest
        for key, value in subset.items():
            self.assertIn(key, dictionary)
            self.assertEqual(dictionary[key], value)
class test_task_apply(ViewTestCase):
    """Tests for the `celery-apply` view (runs a task eagerly via HTTP GET)."""

    def test_apply(self):
        # run eagerly so the view executes the task inline
        current_app.conf.CELERY_ALWAYS_EAGER = True
        try:
            self.client.get(
                task_apply(kwargs={'task_name': mytask.name}) + '?x=4&y=4',
            )
            # mytask records its result in the module-level `scratch` dict
            self.assertEqual(scratch['result'], 16)
        finally:
            current_app.conf.CELERY_ALWAYS_EAGER = False

    def test_apply_raises_404_on_unregistered_task(self):
        current_app.conf.CELERY_ALWAYS_EAGER = True
        try:
            name = 'xxx.does.not.exist'
            action = partial(
                self.client.get,
                task_apply(kwargs={'task_name': name}) + '?x=4&y=4',
            )
            try:
                res = action()
            except TemplateDoesNotExist:
                pass  # pre Django 1.5
            else:
                self.assertEqual(res.status_code, 404)
        finally:
            current_app.conf.CELERY_ALWAYS_EAGER = False
class test_registered_tasks(ViewTestCase):
    """Tests for the `celery-tasks` view listing registered tasks."""

    def test_list_registered_tasks(self):
        json = self.client.get(registered_tasks())
        tasks = deserialize(json.content.decode('utf-8'))
        # the built-in cleanup task is always registered
        self.assertIn('celery.backend_cleanup', tasks['regular'])
class test_webhook_task(ViewTestCase):
    """Tests for the `task_webhook` decorator's JSON success/failure envelopes."""

    def test_successful_request(self):
        @task_webhook
        def add_webhook(request):
            x = int(request.GET['x'])
            y = int(request.GET['y'])
            return x + y

        request = MockRequest().get('/tasks/add', dict(x=10, y=10))
        response = add_webhook(request)
        self.assertDictContainsSubset(
            {'status': 'success', 'retval': 20},
            deserialize(response.content.decode('utf-8')))

    def test_failed_request(self):
        @task_webhook
        def error_webhook(request):
            x = int(request.GET['x'])
            y = int(request.GET['y'])
            raise MyError(x + y)

        request = MockRequest().get('/tasks/error', dict(x=10, y=10))
        response = error_webhook(request)
        # the 'reason' uses MyError's custom repr (see MyError above)
        self.assertDictContainsSubset(
            {'status': 'failure',
             'reason': '<MyError: (20,)>'},
            deserialize(response.content.decode('utf-8')))
class test_task_status(ViewTestCase):
    """Tests for the `celery-task_status` view for each result state."""

    def assertStatusForIs(self, status, res, traceback=None):
        """Store a result under a fresh uuid and check the view's JSON for it."""
        uuid = gen_unique_id()
        current_app.backend.store_result(uuid, res, status,
                                         traceback=traceback)
        json = self.client.get(task_status(task_id=uuid))
        expect = dict(id=uuid, status=status, result=res)
        if status in current_app.backend.EXCEPTION_STATES:
            # exception results are serialized differently: repr + class name
            instore = current_app.backend.get_result(uuid)
            self.assertEqual(str(instore.args[0]), str(res.args[0]))
            expect['result'] = repr(res)
            expect['exc'] = get_full_cls_name(res.__class__)
            expect['traceback'] = traceback
        self.assertJSONEqual(json, dict(task=expect))

    def test_success(self):
        self.assertStatusForIs(states.SUCCESS, 'The quick brown fox')

    def test_failure(self):
        exc, tb = catch_exception(MyError('foo'))
        self.assertStatusForIs(states.FAILURE, exc, tb)

    def test_retry(self):
        oexc, _ = catch_exception(MyError('Resource not available'))
        exc, tb = catch_exception(MyRetryTaskError(str(oexc), oexc))
        self.assertStatusForIs(states.RETRY, exc, tb)
class test_task_is_successful(ViewTestCase):
    """Tests for the `celery-is_task_successful` view across result states."""

    def assertStatusForIs(self, status, outcome, result=None):
        """Store a result with *status* and check the reported 'executed' flag."""
        uuid = gen_unique_id()
        result = result or gen_unique_id()
        current_app.backend.store_result(uuid, result, status)
        json = self.client.get(task_is_successful(task_id=uuid))
        self.assertJSONEqual(json, {'task': {'id': uuid,
                                             'executed': outcome}})

    def test_success(self):
        self.assertStatusForIs(states.SUCCESS, True)

    def test_pending(self):
        self.assertStatusForIs(states.PENDING, False)

    def test_failure(self):
        self.assertStatusForIs(states.FAILURE, False, KeyError('foo'))

    def test_retry(self):
        self.assertStatusForIs(states.RETRY, False, KeyError('foo'))
| sivaprakashniet/push_pull | p2p/lib/python2.7/site-packages/djcelery/tests/test_views.py | Python | bsd-3-clause | 6,651 |
import base64
import ipaddress
try:
import collections.abc as collections_abc # only works on python 3.3+
except ImportError:
import collections as collections_abc
from datetime import date, datetime
from dateutil import parser, tz
from six import string_types, iteritems, integer_types
from six.moves import map
from .query import Q
from .utils import DslBase, AttrDict, AttrList
from .exceptions import ValidationException
from .wrappers import Range
unicode = type(u'')
def construct_field(name_or_field, **params):
    """Coerce *name_or_field* into a ``Field`` instance.

    Accepts three forms:
      * a mapping like ``{"type": "text", "analyzer": "snowball"}``
      * an already-constructed ``Field`` instance (returned as-is)
      * a type-name string plus params, e.g. ``("text", analyzer="snowball")``
    Extra ``params`` are only allowed with the string form.
    """
    # {"type": "text", "analyzer": "snowball"}
    if isinstance(name_or_field, collections_abc.Mapping):
        if params:
            raise ValueError('construct_field() cannot accept parameters when passing in a dict.')
        params = name_or_field.copy()
        if 'type' not in params:
            # inner object can be implicitly defined
            if 'properties' in params:
                name = 'object'
            else:
                raise ValueError('construct_field() needs to have a "type" key.')
        else:
            name = params.pop('type')
        return Field.get_dsl_class(name)(**params)

    # Text()
    if isinstance(name_or_field, Field):
        if params:
            raise ValueError('construct_field() cannot accept parameters when passing in a construct_field object.')
        return name_or_field

    # "text", analyzer="snowball"
    return Field.get_dsl_class(name_or_field)(**params)
class Field(DslBase):
    """Base class for all mapping field types.

    Subclasses set ``name`` to the Elasticsearch field type and may set
    ``_coerce = True`` to opt in to (de)serialization of values.
    """
    _type_name = 'field'
    _type_shortcut = staticmethod(construct_field)
    # all fields can be multifields
    _param_defs = {'fields': {'type': 'field', 'hash': True}}
    name = None
    _coerce = False

    def __init__(self, multi=False, required=False, *args, **kwargs):
        """
        :arg bool multi: specifies whether field can contain array of values
        :arg bool required: specifies whether field is required
        """
        self._multi = multi
        self._required = required
        super(Field, self).__init__(*args, **kwargs)

    def __getitem__(self, subfield):
        # access a named multifield, e.g. field['raw']
        return self._params.get('fields', {})[subfield]

    def _serialize(self, data):
        # hook for subclasses: python value -> ES representation
        return data

    def _deserialize(self, data):
        # hook for subclasses: ES representation -> python value
        return data

    def _empty(self):
        # value used for a missing single-valued field
        return None

    def empty(self):
        if self._multi:
            return AttrList([])
        return self._empty()

    def serialize(self, data):
        # lists are serialized element-wise
        if isinstance(data, (list, AttrList, tuple)):
            return list(map(self._serialize, data))
        return self._serialize(data)

    def deserialize(self, data):
        # lists are deserialized element-wise; None entries pass through
        if isinstance(data, (list, AttrList, tuple)):
            data = [
                None if d is None else self._deserialize(d)
                for d in data
            ]
            return data
        if data is None:
            return None
        return self._deserialize(data)

    def clean(self, data):
        """Deserialize *data* and enforce the `required` constraint."""
        if data is not None:
            data = self.deserialize(data)
        if data in (None, [], {}) and self._required:
            raise ValidationException("Value required for this field.")
        return data

    def to_dict(self):
        # flatten {name: params} into params + {'type': name} for mappings
        d = super(Field, self).to_dict()
        name, value = d.popitem()
        value['type'] = name
        return value
class CustomField(Field):
    """Base for user-defined field types that delegate their mapping to
    a `builtin_type` (either a Field instance or a type-name string)."""
    name = 'custom'
    _coerce = True

    def to_dict(self):
        if isinstance(self.builtin_type, Field):
            return self.builtin_type.to_dict()
        d = super(CustomField, self).to_dict()
        d['type'] = self.builtin_type
        return d
class Object(Field):
    """Inner-object field whose sub-mapping is backed by an `InnerDoc` class."""
    name = 'object'
    _coerce = True

    def __init__(self, doc_class=None, dynamic=None, properties=None, **kwargs):
        """
        :arg document.InnerDoc doc_class: base doc class that handles mapping.
            If no `doc_class` is provided, new instance of `InnerDoc` will be created,
            populated with `properties` and used. Can not be provided together with `properties`
        :arg dynamic: whether new properties may be created dynamically.
            Valid values are `True`, `False`, `'strict'`.
            Can not be provided together with `doc_class`.
            See https://www.elastic.co/guide/en/elasticsearch/reference/current/dynamic.html
            for more details
        :arg dict properties: used to construct underlying mapping if no `doc_class` is provided.
            Can not be provided together with `doc_class`
        """
        if doc_class and (properties or dynamic is not None):
            raise ValidationException(
                'doc_class and properties/dynamic should not be provided together')
        if doc_class:
            self._doc_class = doc_class
        else:
            # FIXME import
            from .document import InnerDoc
            # no InnerDoc subclass, creating one instead...
            self._doc_class = type('InnerDoc', (InnerDoc, ), {})
            for name, field in iteritems(properties or {}):
                self._doc_class._doc_type.mapping.field(name, field)
            if dynamic is not None:
                self._doc_class._doc_type.mapping.meta('dynamic', dynamic)
        self._mapping = self._doc_class._doc_type.mapping
        super(Object, self).__init__(**kwargs)

    def __getitem__(self, name):
        return self._mapping[name]

    def __contains__(self, name):
        return name in self._mapping

    def _empty(self):
        # empty inner object is a wrapped empty document
        return self._wrap({})

    def _wrap(self, data):
        return self._doc_class.from_es(data, data_only=True)

    def empty(self):
        if self._multi:
            return AttrList([], self._wrap)
        return self._empty()

    def to_dict(self):
        # merge the sub-mapping with this field's own parameters
        d = self._mapping.to_dict()
        d.update(super(Object, self).to_dict())
        return d

    def _collect_fields(self):
        return self._mapping.properties._collect_fields()

    def _deserialize(self, data):
        # don't wrap already wrapped data
        if isinstance(data, self._doc_class):
            return data
        if isinstance(data, AttrDict):
            data = data._d_
        return self._wrap(data)

    def _serialize(self, data):
        if data is None:
            return None
        # somebody assigned raw dict to the field, we should tolerate that
        if isinstance(data, collections_abc.Mapping):
            return data
        return data.to_dict()

    def clean(self, data):
        """Clean the inner document(s) recursively via full_clean()."""
        data = super(Object, self).clean(data)
        if data is None:
            return None
        if isinstance(data, (list, AttrList)):
            for d in data:
                d.full_clean()
        else:
            data.full_clean()
        return data

    def update(self, other, update_only=False):
        if not isinstance(other, Object):
            # not an inner/nested object, no merge possible
            return
        self._mapping.update(other._mapping, update_only)
class Nested(Object):
    """Like `Object` but mapped as `nested`; defaults to multi-valued."""
    name = 'nested'

    def __init__(self, *args, **kwargs):
        # nested fields almost always hold lists of inner documents
        kwargs.setdefault('multi', True)
        super(Nested, self).__init__(*args, **kwargs)
class Date(Field):
    """Date field; deserializes strings (dateutil), epoch millis, and passes
    through datetime/date objects, optionally attaching a default timezone."""
    name = 'date'
    _coerce = True

    def __init__(self, default_timezone=None, *args, **kwargs):
        """
        :arg default_timezone: timezone that will be automatically used for tz-naive values
            May be instance of `datetime.tzinfo` or string containing TZ offset
        """
        self._default_timezone = default_timezone
        if isinstance(self._default_timezone, string_types):
            self._default_timezone = tz.gettz(self._default_timezone)
        super(Date, self).__init__(*args, **kwargs)

    def _deserialize(self, data):
        if isinstance(data, string_types):
            try:
                data = parser.parse(data)
            except Exception as e:
                raise ValidationException('Could not parse date from the value (%r)' % data, e)

        if isinstance(data, datetime):
            # attach the default timezone only to naive datetimes
            if self._default_timezone and data.tzinfo is None:
                data = data.replace(tzinfo=self._default_timezone)
            return data
        if isinstance(data, date):
            return data
        if isinstance(data, integer_types):
            # Divide by a float to preserve milliseconds on the datetime.
            return datetime.utcfromtimestamp(data / 1000.0)

        raise ValidationException('Could not parse date from the value (%r)' % data)
class Text(Field):
    """Full-text field; analyzers are DSL objects, declared in _param_defs
    so they serialize correctly into the mapping."""
    _param_defs = {
        'fields': {'type': 'field', 'hash': True},
        'analyzer': {'type': 'analyzer'},
        'search_analyzer': {'type': 'analyzer'},
        'search_quote_analyzer': {'type': 'analyzer'},
    }
    name = 'text'


class Keyword(Field):
    """Exact-value (not analyzed) string field."""
    _param_defs = {
        'fields': {'type': 'field', 'hash': True},
        'search_analyzer': {'type': 'analyzer'},
        'normalizer': {'type': 'normalizer'}
    }
    name = 'keyword'
class Boolean(Field):
    """Boolean field."""
    name = 'boolean'
    _coerce = True

    def _deserialize(self, data):
        # ES may return the string "false"; bool("false") would be True
        if data == "false":
            return False
        return bool(data)

    def clean(self, data):
        # Overridden from Field: False is falsy but is a valid, present
        # value, so only None counts as missing for the required check.
        if data is not None:
            data = self.deserialize(data)
        if data is None and self._required:
            raise ValidationException("Value required for this field.")
        return data
class Float(Field):
    """Single-precision float field; values are coerced via float()."""
    name = 'float'
    _coerce = True

    def _deserialize(self, data):
        return float(data)


class HalfFloat(Float):
    name = 'half_float'


class ScaledFloat(Float):
    name = 'scaled_float'

    def __init__(self, scaling_factor, *args, **kwargs):
        # scaling_factor is mandatory for scaled_float mappings
        super(ScaledFloat, self).__init__(scaling_factor=scaling_factor, *args, **kwargs)


class Double(Float):
    name = 'double'
class Integer(Field):
    """Integer field; values are coerced via int()."""
    name = 'integer'
    _coerce = True

    def _deserialize(self, data):
        return int(data)


class Byte(Integer):
    name = 'byte'


class Short(Integer):
    name = 'short'


class Long(Integer):
    name = 'long'
class Ip(Field):
    """IP address field; deserializes to `ipaddress` objects."""
    name = 'ip'
    _coerce = True

    def _deserialize(self, data):
        # the ipaddress library for pypy only accepts unicode.
        return ipaddress.ip_address(unicode(data))

    def _serialize(self, data):
        if data is None:
            return None
        return str(data)
class Binary(Field):
    """Binary field; stored base64-encoded, deserialized to bytes."""
    name = 'binary'
    _coerce = True

    def clean(self, data):
        # Binary fields are opaque, so there's not much cleaning
        # that can be done.
        return data

    def _deserialize(self, data):
        return base64.b64decode(data)

    def _serialize(self, data):
        if data is None:
            return None
        return base64.b64encode(data).decode()
class GeoPoint(Field):
    name = 'geo_point'


class GeoShape(Field):
    name = 'geo_shape'


class Completion(Field):
    """Completion-suggester field; analyzers serialize via _param_defs."""
    _param_defs = {
        'analyzer': {'type': 'analyzer'},
        'search_analyzer': {'type': 'analyzer'},
    }
    name = 'completion'


class Percolator(Field):
    """Field that stores a query; (de)serialized through the Q shortcut."""
    name = 'percolator'
    _coerce = True

    def _deserialize(self, data):
        return Q(data)

    def _serialize(self, data):
        if data is None:
            return None
        return data.to_dict()
class RangeField(Field):
    """Base for range fields; endpoint values are (de)serialized through
    the subclass's `_core_field` and wrapped in a `Range` object."""
    _coerce = True
    _core_field = None

    def _deserialize(self, data):
        if isinstance(data, Range):
            return data
        # coerce each endpoint (gt/gte/lt/lte) via the core field
        data = dict((k, self._core_field.deserialize(v)) for k, v in iteritems(data))
        return Range(data)

    def _serialize(self, data):
        if data is None:
            return None
        if not isinstance(data, collections_abc.Mapping):
            data = data.to_dict()
        return dict((k, self._core_field.serialize(v)) for k, v in iteritems(data))
class IntegerRange(RangeField):
    name = 'integer_range'
    _core_field = Integer()


class FloatRange(RangeField):
    name = 'float_range'
    _core_field = Float()


class LongRange(RangeField):
    name = 'long_range'
    _core_field = Long()


class DoubleRange(RangeField):
    # BUG FIX: was 'double_ranged'. Elasticsearch's mapping type is
    # 'double_range'; the typo produced invalid mappings and broke
    # construct_field({'type': 'double_range'}) lookups.
    name = 'double_range'
    _core_field = Double()


class DateRange(RangeField):
    name = 'date_range'
    _core_field = Date()
class IpRange(Field):
    # not a RangeField since ip_range supports CIDR ranges
    name = 'ip_range'


class Join(Field):
    # parent/join relation field
    name = 'join'


class TokenCount(Field):
    name = 'token_count'


class Murmur3(Field):
    name = 'murmur3'
| 3lnc/elasticsearch-dsl-py | elasticsearch_dsl/field.py | Python | apache-2.0 | 12,401 |
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Relocator for user-provided relocation directives
"""
class MyRelocator(object):
    """Example user-provided relocator returning fixed relocation directives."""

    def __init__(self):
        """Initialize the relocator; this example keeps no state."""

    def setController(self, controller):
        """Receive the controller reference; unused by this example."""

    def getRelocations(self, gvt, activities, horizon):
        """Fetch the relocations that are pending for the current GVT.

        :param gvt: current GVT
        :param activities: the activities being passed on the GVT ring
        :returns: dictionary mapping model id -> destination node
        """
        # Example directive: move model 1 to node 2 and model 3 to node 0.
        # Remaps to a model's current location are allowed; the actual
        # relocator simply discards them.
        return {1: 2, 3: 0}

    def lastStateOnly(self):
        """Should only the activity of the last state be used, or the sum
        over the whole horizon?

        Returning False selects 'all states': merge all information within
        the horizon, as needed for activity tracking. Returning True would
        give an abstracted view at the single timestep equal to the GVT.
        """
        return False
| kdheepak89/pypdevs | pypdevs/templates/relocator.py | Python | apache-2.0 | 2,139 |
import pythoncom
import win32com.server.util
import win32com.test.util
import unittest
from pywin32_testutil import str2bytes
class Persists:
    """Python IPersistStreamInit implementation used to exercise stream COM wrapping."""
    _public_methods_ = [ 'GetClassID', 'IsDirty', 'Load', 'Save',
                         'GetSizeMax', 'InitNew' ]
    _com_interfaces_ = [ pythoncom.IID_IPersistStreamInit ]

    def __init__(self):
        self.data = str2bytes("abcdefg")
        self.dirty = 1

    def GetClassID(self):
        # this object has no CLSID of its own
        return pythoncom.IID_NULL

    def IsDirty(self):
        return self.dirty

    def Load(self, stream):
        # 26 == length of the alphabet payload used by the test case
        self.data = stream.Read(26)

    def Save(self, stream, clearDirty):
        stream.Write(self.data)
        if clearDirty:
            self.dirty = 0

    def GetSizeMax(self):
        return 1024

    def InitNew(self):
        pass
class Stream:
    """In-memory byte buffer exposing just enough of the COM IStream API."""
    _public_methods_ = ['Read', 'Write', 'Seek']
    _com_interfaces_ = [pythoncom.IID_IStream]

    def __init__(self, data):
        self.data = data
        self.index = 0

    def Read(self, amount):
        """Return up to *amount* bytes starting at the current position."""
        start = self.index
        self.index = start + amount
        return self.data[start:self.index]

    def Write(self, data):
        """Replace the buffer contents with *data* and rewind to the start."""
        self.data = data
        self.index = 0
        return len(data)

    def Seek(self, dist, origin):
        """Move the stream pointer, clamping it inside the buffer bounds."""
        if origin == pythoncom.STREAM_SEEK_SET:
            target = dist
        elif origin == pythoncom.STREAM_SEEK_CUR:
            target = self.index + dist
        elif origin == pythoncom.STREAM_SEEK_END:
            target = len(self.data) + dist
        else:
            raise ValueError('Unknown Seek type: ' + str(origin))
        # Clamp into [0, len(data)] exactly as the original two-branch code did.
        self.index = max(0, min(target, len(self.data)))
        return self.index
class BadStream(Stream):
    """ PyGStream::Read could formerly overflow buffer if the python implementation
        returned more data than requested.
    """
    # Deliberately misbehaving Read: returns one byte MORE than requested so
    # tests can verify the C gateway rejects oversized replies instead of
    # overflowing its buffer.
    def Read(self, amount):
        return str2bytes('x')*(amount+1)
class StreamTest(win32com.test.util.TestCase):
    """Exercise the IStream/IPersistStreamInit gateways between Python and COM."""

    def _readWrite(self, data, write_stream, read_stream=None):
        """Write *data* via write_stream and check it reads back via read_stream."""
        if read_stream is None:
            read_stream = write_stream
        write_stream.Write(data)
        read_stream.Seek(0, pythoncom.STREAM_SEEK_SET)
        got = read_stream.Read(len(data))
        self.assertEqual(data, got)
        # Also verify a partial read starting at offset 1.
        read_stream.Seek(1, pythoncom.STREAM_SEEK_SET)
        got = read_stream.Read(len(data)-2)
        self.assertEqual(data[1:-1], got)

    def testit(self):
        """Round-trip data through every mix of raw and COM-wrapped objects."""
        mydata = str2bytes('abcdefghijklmnopqrstuvwxyz')
        # First test the objects just as Python objects...
        s = Stream(mydata)
        p = Persists()
        p.Load(s)
        p.Save(s, 0)
        self.assertEqual(s.data, mydata)
        # Wrap the Python objects as COM objects, and make the calls as if
        # they were non-Python COM objects.
        s2 = win32com.server.util.wrap(s, pythoncom.IID_IStream)
        p2 = win32com.server.util.wrap(p, pythoncom.IID_IPersistStreamInit)
        self._readWrite(mydata, s, s)
        self._readWrite(mydata, s, s2)
        self._readWrite(mydata, s2, s)
        self._readWrite(mydata, s2, s2)
        # Embedded NUL bytes must survive the gateway round-trip.
        self._readWrite(str2bytes("string with\0a NULL"), s2, s2)
        # reset the stream
        s.Write(mydata)
        p2.Load(s2)
        p2.Save(s2, 0)
        self.assertEqual(s.data, mydata)

    def testseek(self):
        """Seek offsets larger than 32 bits must not crash the gateway."""
        s = Stream(str2bytes('yo'))
        s = win32com.server.util.wrap(s, pythoncom.IID_IStream)
        # we used to die in py3k passing a value > 32bits
        s.Seek(0x100000000, pythoncom.STREAM_SEEK_SET)

    def testerrors(self):
        """An oversized Read reply must surface as a COM error, not overflow."""
        # setup a test logger to capture tracebacks etc.
        records, old_log = win32com.test.util.setup_test_logger()
        ## check for buffer overflow in Read method
        badstream = BadStream('Check for buffer overflow')
        badstream2 = win32com.server.util.wrap(badstream, pythoncom.IID_IStream)
        self.assertRaises(pythoncom.com_error, badstream2.Read, 10)
        win32com.test.util.restore_test_logger(old_log)
        # expecting 2 pythoncom errors to have been raised by the gateways.
        self.assertEqual(len(records), 2)
        # BUG FIX: failUnless is a deprecated alias (removed in Python 3.12);
        # use its modern spelling assertTrue.
        self.assertTrue(records[0].msg.startswith('pythoncom error'))
        self.assertTrue(records[1].msg.startswith('pythoncom error'))
if __name__=='__main__':
    # Allow running the stream gateway tests directly as a script.
    unittest.main()
| zhanqxun/cv_fish | win32com/test/testStreams.py | Python | apache-2.0 | 4,521 |
"""Test how the ufuncs in special handle nan inputs.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_equal, assert_
import pytest
import scipy.special as sc
from scipy._lib._numpy_compat import suppress_warnings
KNOWNFAILURES = {}
POSTPROCESSING = {}
def _get_ufuncs():
ufuncs = []
ufunc_names = []
for name in sorted(sc.__dict__):
obj = sc.__dict__[name]
if not isinstance(obj, np.ufunc):
continue
msg = KNOWNFAILURES.get(obj)
if msg is None:
ufuncs.append(obj)
ufunc_names.append(name)
else:
fail = pytest.mark.xfail(run=False, reason=msg)
ufuncs.append(pytest.param(obj, marks=fail))
ufunc_names.append(name)
return ufuncs, ufunc_names
UFUNCS, UFUNC_NAMES = _get_ufuncs()
@pytest.mark.parametrize("func", UFUNCS, ids=UFUNC_NAMES)
def test_nan_inputs(func):
    """Every special-function ufunc should map all-NaN inputs to NaN."""
    nan_args = (np.nan,) * func.nin
    with suppress_warnings() as sup:
        # Ignore warnings about unsafe casts from legacy wrappers
        sup.filter(RuntimeWarning,
                   "floating point number truncated to an integer")
        try:
            res = func(*nan_args)
        except TypeError:
            # One of the arguments doesn't take real inputs
            return
    if func in POSTPROCESSING:
        res = POSTPROCESSING[func](*res)
    msg = "got {} instead of nan".format(res)
    assert_array_equal(np.isnan(res), True, err_msg=msg)
def test_legacy_cast():
    """bdtrc truncates its first argument; a NaN input must still yield NaN."""
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning,
                   "floating point number truncated to an integer")
        result = sc.bdtrc(np.nan, 1, 0.5)
        assert_(np.isnan(result))
| lhilt/scipy | scipy/special/tests/test_nan_inputs.py | Python | bsd-3-clause | 1,775 |
from ctypes import Structure, c_int16, c_uint16
class Filter(Structure):
    """ctypes structure representing a fixture's collision filter."""

    _fields_ = [
        ("categoryBits", c_uint16),
        ("maskBits", c_uint16),
        ("groupIndex", c_int16),
    ]

    def __init__(self, categoryBits=0x1, maskBits=0xFFFF, groupIndex=0):
        """Populate the structure, applying the conventional defaults."""
        Structure.__init__(self, categoryBits=categoryBits,
                           maskBits=maskBits, groupIndex=groupIndex)
"""Base Entity for all TelldusLive entities."""
from datetime import datetime
import logging
from homeassistant.const import ATTR_BATTERY_LEVEL, DEVICE_DEFAULT_NAME
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import SIGNAL_UPDATE_ENTITY
_LOGGER = logging.getLogger(__name__)
ATTR_LAST_UPDATED = 'time_last_updated'
class TelldusLiveEntity(Entity):
    """Base class for all Telldus Live entities."""
    def __init__(self, client, device_id):
        """Initialize the entity."""
        # Only the id and client are stored; the device object itself is
        # always fetched live from the client (see the `device` property).
        self._id = device_id
        self._client = client
        self._name = self.device.name
        self._async_unsub_dispatcher_connect = None
    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        _LOGGER.debug('Created device %s', self)
        # Subscribe to the session-wide update signal, keeping the returned
        # unsubscribe callable so we can disconnect cleanly on removal.
        self._async_unsub_dispatcher_connect = async_dispatcher_connect(
            self.hass, SIGNAL_UPDATE_ENTITY, self._update_callback)
    async def async_will_remove_from_hass(self):
        """Disconnect dispatcher listener when removed."""
        if self._async_unsub_dispatcher_connect:
            self._async_unsub_dispatcher_connect()
    @callback
    def _update_callback(self):
        """Handle an update signal: refresh the cached name, schedule update."""
        if self.device.name:
            self._name = self.device.name
        self.async_schedule_update_ha_state()
    @property
    def device_id(self):
        """Return the id of the device."""
        return self._id
    @property
    def device(self):
        """Return the representation of the device."""
        return self._client.device(self.device_id)
    @property
    def _state(self):
        """Return the state of the device."""
        return self.device.state
    @property
    def should_poll(self):
        """Return the polling state."""
        # Updates are pushed via the dispatcher signal, so HA never polls.
        return False
    @property
    def assumed_state(self):
        """Return true if unable to access real state of entity."""
        return True
    @property
    def name(self):
        """Return name of device."""
        return self._name or DEVICE_DEFAULT_NAME
    @property
    def available(self):
        """Return true if device is not offline."""
        return self._client.is_available(self.device_id)
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        attrs = {}
        if self._battery_level:
            attrs[ATTR_BATTERY_LEVEL] = self._battery_level
        if self._last_updated:
            attrs[ATTR_LAST_UPDATED] = self._last_updated
        return attrs
    @property
    def _battery_level(self):
        """Return the battery level of a device."""
        from tellduslive import (BATTERY_LOW,
                                 BATTERY_UNKNOWN,
                                 BATTERY_OK)
        # Telldus reports sentinel constants; map them onto the percentage
        # scale HA expects (LOW -> 1, OK -> 100, UNKNOWN -> None), and pass
        # through any other value as a raw percentage.
        if self.device.battery == BATTERY_LOW:
            return 1
        if self.device.battery == BATTERY_UNKNOWN:
            return None
        if self.device.battery == BATTERY_OK:
            return 100
        return self.device.battery  # Percentage
    @property
    def _last_updated(self):
        """Return the last update of a device."""
        return str(datetime.fromtimestamp(self.device.lastUpdated)) \
            if self.device.lastUpdated else None
    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        return self._id
    @property
    def device_info(self):
        """Return device info."""
        device = self._client.device_info(self.device.device_id)
        device_info = {
            'identifiers': {('tellduslive', self.device.device_id)},
            'name': self.device.name,
        }
        model = device.get('model')
        if model is not None:
            device_info['model'] = model.title()
        protocol = device.get('protocol')
        if protocol is not None:
            device_info['manufacturer'] = protocol.title()
        client = device.get('client')
        if client is not None:
            # NOTE(review): newer HA releases renamed this key 'via_device';
            # confirm the targeted core version still accepts 'via_hub'.
            device_info['via_hub'] = ('tellduslive', client)
        return device_info
| HydrelioxGitHub/home-assistant | homeassistant/components/tellduslive/entry.py | Python | apache-2.0 | 4,178 |
from blinker import Signal
# Global blinker signals that subscribers can hook into.
# NOTE(review): semantics inferred from names only -- confirm against the
# call sites that send() these signals.
on_init = Signal()
on_session = Signal()
on_parse = Signal()
on_meta = Signal()
on_wait = Signal()
| pudo/krauler | krauler/signals.py | Python | mit | 127 |
from __future__ import division, print_function
import os, json
from glob import glob
import numpy as np
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
import keras
from keras import backend as K
from keras.models import Sequential, Model
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers import Input, Activation, merge
from keras.optimizers import RMSprop
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D # Conv2D: Keras2
import keras.preprocessing.image as image
from keras.utils.data_utils import get_file
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.applications.resnet50 import identity_block, conv_block
class Resnet50():
    """The Resnet 50 Imagenet model.

    Wraps a Keras ResNet-50 graph with VGG-style mean subtraction, pretrained
    weight downloading, and helpers for batching, fine-tuning, fitting and
    testing.
    """

    def __init__(self, size=(224,224), include_top=True):
        self.FILE_PATH = 'http://files.fast.ai/models/'
        # Per-channel means used for input normalisation (channels-first).
        self.vgg_mean = np.array([123.68, 116.779, 103.939]).reshape((3,1,1))
        self.create(size, include_top)
        self.get_classes()

    def get_classes(self):
        """Download (and cache) the ImageNet class index; store the labels."""
        fname = 'imagenet_class_index.json'
        fpath = get_file(fname, self.FILE_PATH+fname, cache_subdir='models')
        with open(fpath) as f:
            class_dict = json.load(f)
        self.classes = [class_dict[str(i)][1] for i in range(len(class_dict))]

    def predict(self, imgs, details=False):
        """Return (top probabilities, class indexes, class names) per image."""
        all_preds = self.model.predict(imgs)
        idxs = np.argmax(all_preds, axis=1)
        preds = [all_preds[i, idxs[i]] for i in range(len(idxs))]
        classes = [self.classes[idx] for idx in idxs]
        return np.array(preds), idxs, classes

    def vgg_preprocess(self, x):
        """Subtract the channel means and reverse the channel axis."""
        x = x - self.vgg_mean
        return x[:, ::-1]  # reverse axis bgr->rgb

    def create(self, size, include_top):
        """Build the ResNet-50 graph and load the matching pretrained weights.

        :param size: spatial input size (height, width)
        :param include_top: include the final avg-pool/softmax classifier
        """
        input_shape = (3,)+size
        img_input = Input(shape=input_shape)
        bn_axis = 1
        x = Lambda(self.vgg_preprocess)(img_input)
        x = ZeroPadding2D((3, 3))(x)
        x = Conv2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)  # Keras2
        x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)
        # Stage 2-5: standard ResNet conv/identity block stacks.
        x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
        x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
        for n in ['b','c','d']: x = identity_block(x, 3, [128, 128, 512], stage=3, block=n)
        x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
        for n in ['b','c','d', 'e', 'f']: x = identity_block(x, 3, [256, 256, 1024], stage=4, block=n)
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
        if include_top:
            x = AveragePooling2D((7, 7), name='avg_pool')(x)
            x = Flatten()(x)
            x = Dense(1000, activation='softmax', name='fc1000')(x)
            fname = 'resnet50.h5'
        else:
            fname = 'resnet_nt.h5'
        self.img_input = img_input
        self.model = Model(self.img_input, x)
        convert_all_kernels_in_model(self.model)
        self.model.load_weights(get_file(fname, self.FILE_PATH+fname, cache_subdir='models'))

    def get_batches(self, path, gen=image.ImageDataGenerator(), class_mode='categorical', shuffle=True, batch_size=8):
        """Return a directory iterator yielding 224x224 image batches."""
        return gen.flow_from_directory(path, target_size=(224,224),
                class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)

    def finetune(self, batches):
        """Replace the final softmax with one sized for *batches*' classes."""
        model = self.model
        model.layers.pop()
        # Freeze the pretrained backbone; only the new head will train.
        for layer in model.layers: layer.trainable=False
        m = Dense(batches.num_classes, activation='softmax')(model.layers[-1].output)  # Keras 2.1
        self.model = Model(model.input, m)
        # BUG FIX: the closing parenthesis of compile() was missing, which
        # made this module fail to parse.
        self.model.compile(optimizer=RMSprop(lr=0.1),
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])  # Keras2

    def fit(self, batches, val_batches, batch_size, nb_epoch=1):
        """Train via fit_generator using Keras 2's step-based arguments."""
        # Keras 1
        # self.model.fit_generator(batches, samples_per_epoch=batches.nb_sample, nb_epoch=nb_epoch,
        #        validation_data=val_batches, nb_val_samples=val_batches.nb_sample)
        # Keras 2
        self.model.fit_generator(batches, steps_per_epoch=int(np.ceil(batches.samples/batch_size)), epochs=nb_epoch,
                validation_data=val_batches, validation_steps=int(np.ceil(val_batches.samples/batch_size)))

    # Keras2
    def test(self, path, batch_size=8):
        """Predict over all (unlabelled) images under *path*."""
        test_batches = self.get_batches(path, shuffle=False, batch_size=batch_size, class_mode=None)
        return test_batches, self.model.predict_generator(test_batches, int(np.ceil(test_batches.samples/batch_size)))
import os
import sys
import uuid
import logging
import datetime
import traceback
from PyQt5 import QtCore
import PyQt5.QtWidgets as QtWidgets
import PyQt5.Qt as Qt
import numpy as np
from pandas import Series, DatetimeIndex
from matplotlib.axes import Axes
from matplotlib.patches import Rectangle
from matplotlib.dates import date2num
os.chdir('..')
import dgp.lib.project as project
# from dgp.gui.plotting.plotter2 import FlightLinePlot
class MockDataChannel:
    """Minimal stand-in for a data channel: a series plus label and uid."""

    def __init__(self, series, label):
        self._series = series
        self.label = label
        self.uid = str(uuid.uuid4())

    def series(self):
        """Return the wrapped series object."""
        return self._series

    def plot(self, *args):
        """No-op; real channels draw themselves here."""
        pass
class PlotExample(QtWidgets.QMainWindow):
    """Ad-hoc window for exercising FlightLinePlot interactively.

    NOTE(review): the ``FlightLinePlot`` import is commented out at module
    level, so instantiating this class currently raises NameError until it
    is restored.
    """
    def __init__(self):
        super().__init__()
        self.setWindowTitle('Plotter Testing')
        self.setBaseSize(Qt.QSize(600, 600))
        self._flight = project.Flight(None, 'test')
        self._plot = FlightLinePlot(self._flight, parent=self)
        self._plot.set_mode(grab=True)
        print("Plot: ", self._plot)
        # self.plot.figure.canvas.mpl_connect('pick_event', lambda x: print(
        #     "Pick event handled"))
        # self.plot.mgr = StackedAxesManager(self.plot.figure, rows=2)
        # self._toolbar = NavToolbar(self.plot, parent=self)
        # self._toolbar.actions()[0] = QtWidgets.QAction("Reset View")
        # self._toolbar.actions()[0].triggered.connect(lambda x: print(
        #     "Action 0 triggered"))
        self.tb = self._plot.get_toolbar()
        # Stack the plot above its toolbar in the central widget.
        plot_layout = QtWidgets.QVBoxLayout()
        plot_layout.addWidget(self._plot)
        plot_layout.addWidget(self.tb)
        c_widget = QtWidgets.QWidget()
        c_widget.setLayout(plot_layout)
        self.setCentralWidget(c_widget)
        plot_layout.addWidget(QtWidgets.QPushButton("Reset"))
        # toolbar = self.plot.get_toolbar(self)
        self.show()
    # NOTE(review): this method reads ``self.plot``, but __init__ only
    # defines ``self._plot`` -- it would raise AttributeError if called
    # (its only call in __main__ is commented out).
    def plot_sin(self):
        """Plot a sine wave plus an inset-axes/rectangle proof of concept."""
        idx = DatetimeIndex(freq='5S', start=datetime.datetime.now(),
                            periods=1000)
        ser = Series([np.sin(x)*3 for x in np.arange(0, 100, 0.1)], index=idx)
        self.plot.mgr.add_series(ser)
        self.plot.mgr.add_series(-ser)
        ins_0 = self.plot.mgr.add_inset_axes(0)  # type: Axes
        ins_0.plot(ser.index, ser.values)
        x0, x1 = ins_0.get_xlim()
        width = (x1 - x0) * .5
        y0, y1 = ins_0.get_ylim()
        height = (y1 - y0) * .5
        # Draw rectangle patch on inset axes - proof of concept to add inset
        # locator when zoomed in on large data set.
        ax0 = self.plot.mgr[0][0]  # type: Axes
        rect = Rectangle((date2num(idx[0]), 0), width, height,
                         edgecolor='black',
                         linewidth=2, alpha=.5, fill='red')
        rect.set_picker(True)
        patch = ins_0.add_patch(rect)  # type: Rectangle
        # Future idea: Add click+drag to view patch to pan in main plot
        def update_rect(ax: Axes):
            # Resize the inset rectangle to mirror the main axes' view limits.
            x0, x1 = ax.get_xlim()
            y0, y1 = ax.get_ylim()
            patch.set_x(x0)
            patch.set_y(y0)
            height = y1 - y0
            width = x1 - x0
            patch.set_width(width)
            patch.set_height(height)
            ax.draw_artist(patch)
            self.plot.draw()
        ax0.callbacks.connect('xlim_changed', update_rect)
        ax0.callbacks.connect('ylim_changed', update_rect)
        self.plot.draw()
        ins_1 = self.plot.mgr.add_inset_axes(1)
def excepthook(type_, value, traceback_):
    """This allows IDE to properly display unhandled exceptions which are
    otherwise silently ignored as the application is terminated.
    Override default excepthook with
    >>> sys.excepthook = excepthook
    See: http://pyqt.sourceforge.net/Docs/PyQt5/incompatibilities.html
    """
    traceback.print_exception(type_, value, traceback_)
    # qFatal aborts the Qt application once the traceback has been printed.
    QtCore.qFatal('')
if __name__ == '__main__':
    # Install the excepthook before the QApplication starts so unhandled
    # exceptions during startup are reported instead of silently swallowed.
    sys.excepthook = excepthook
    app = QtWidgets.QApplication(sys.argv)
    # Route all log output to stdout at DEBUG level for interactive testing.
    _log = logging.getLogger()
    _log.addHandler(logging.StreamHandler(sys.stdout))
    _log.setLevel(logging.DEBUG)
    window = PlotExample()
    # window.plot_sin()
    sys.exit(app.exec_())
| DynamicGravitySystems/DGP | examples/plot2_prototype.py | Python | apache-2.0 | 4,218 |
import os
from distutils import util
from distutils.core import Command
from distutils.filelist import FileList
class InstallMisc(Command):
    """
    Common base class for installing some files in a subdirectory.
    Currently used by install_data and install_localstate.
    """
    # NOTE(review): this class relies on an extended FileList carrying
    # .sources/.excludes/.recursive/.dest attributes -- confirm against the
    # project's FileList subclass, not the plain distutils one.
    user_options = [
        ('force', 'f', "force installation (overwrite existing files)"),
        ]
    def initialize_options(self):
        """Set every option to its 'undefined' default (distutils protocol)."""
        self.install_dir = None
        self.force = None
        self.allfiles = None
        self.filelists = None
        return
    def finalize_options (self):
        """Inherit install_dir (per command name) and force from 'install'."""
        self.set_undefined_options('install',
            (self.get_command_name(), 'install_dir'),
            ('force', 'force'))
        return
    def _get_distribution_filelists(self):
        """Return the FileList objects to install; subclasses must override."""
        raise NotImplementedError('subclass %s must override' % self.__class__)
    def run(self):
        """Copy every file of every filelist into its destination directory."""
        for filelist in self.get_filelists():
            sources = filelist.files
            outputs = self.get_filelist_outputs(filelist)
            # If there are no files listed, the user must be
            # trying to create an empty directory.
            if not sources:
                assert len(outputs) == 1
                self.mkpath(outputs[0])
            else:
                assert len(sources) == len(outputs)
                for src, dst in zip(sources, outputs):
                    self.mkpath(os.path.dirname(dst))
                    self.copy_file(src, dst)
        return
    def process_filelist(self, filelist):
        """Expand the filelist's include/exclude patterns against the tree.

        Patterns that match nothing produce a warning; unmatched includes
        are dropped from .sources.  The resulting file list is sorted and
        de-duplicated in place.
        """
        assert isinstance(filelist, FileList)
        filelist.set_allfiles(self.distribution.get_allfiles())
        for source in tuple(filelist.sources):
            pattern = util.convert_path(source)
            if filelist.recursive:
                found = filelist.include_pattern(None, prefix=pattern)
            else:
                found = filelist.include_pattern(pattern, anchor=True)
            if not found:
                self.warn("no files found matching '%s'" % source)
                filelist.sources.remove(source)
        for exclude in filelist.excludes:
            pattern = util.convert_path(exclude)
            if filelist.recursive:
                found = filelist.exclude_pattern(None, prefix=pattern)
            else:
                found = filelist.exclude_pattern(pattern, anchor=True)
            if not found:
                self.warn("no previously included files found"
                          " matching '%s'" % exclude)
        filelist.sort()
        filelist.remove_duplicates()
        return filelist
    def get_filelists(self):
        """Lazily build and cache the processed filelists."""
        if self.filelists is None:
            # Convert the list of filespecs into a list of actual files
            self.filelists = self._get_distribution_filelists()
            for filelist in self.filelists:
                self.process_filelist(filelist)
        return self.filelists
    def get_filelist_outputs(self, filelist):
        """Map a filelist's matched files to their installed destinations."""
        outputs = []
        destdir = util.convert_path(filelist.dest)
        destdir = os.path.join(self.install_dir, destdir)
        if not filelist.sources:
            # If there are no files listed, the user must be
            # trying to create an empty directory, so add the
            # directory to the list of output files.
            outputs.append(destdir)
        elif filelist.recursive:
            # Preserve the directory structure below each source prefix.
            for pattern in filelist.sources:
                pattern = util.convert_path(pattern)
                for filename in filelist.files:
                    assert filename.startswith(pattern)
                    source = filename[len(pattern):]
                    assert source.startswith(os.sep)
                    outputs.append(destdir + source)
        else:
            # all files in the filelist are copied directly into the
            # destination directory
            for filename in filelist.files:
                source = os.path.basename(filename)
                outputs.append(os.path.join(destdir, source))
        return outputs
    # -- Reporting methods ---------------------------------------------
    def get_source_files(self):
        """Return every source file covered by the filelists (for sdist)."""
        sources = []
        for filelist in self.get_filelists():
            sources.extend(filelist.files)
        return sources
    def get_inputs(self):
        """Return the files this command would read."""
        inputs = []
        for filelist in self.get_filelists():
            inputs.extend(filelist.files)
        return inputs
    def get_outputs(self):
        """Return the files (or empty directories) this command would create."""
        outputs = []
        for filelist in self.get_filelists():
            outputs.extend(self.get_filelist_outputs(filelist))
        return outputs
| Pikecillo/genna | external/4Suite-XML-1.0.2/Ft/Lib/DistExt/InstallMisc.py | Python | gpl-2.0 | 4,645 |
import numpy
import six
from chainer import backend
from chainer import function_node
import chainer.functions
import chainer.utils
from chainer.utils import type_check
import chainerx
class SelectorBase(function_node.FunctionNode):
    """Select an array element from a given axis or set of axes."""
    def __init__(self, axis=None, keepdims=False):
        # Normalise axis to None or a tuple of ints; reject duplicates so the
        # backward reshape does not collapse the same dimension twice.
        self.keepdims = keepdims
        if axis is None:
            self.axis = None
        elif isinstance(axis, six.integer_types):
            self.axis = (axis,)
        elif isinstance(axis, tuple) and all(
                isinstance(a, six.integer_types) for a in axis):
            if len(set(axis)) != len(axis):
                raise ValueError('duplicate value in axis: ({})'.format(
                    ', '.join(map(str, axis))))
            self.axis = axis
        else:
            raise TypeError('None, int or tuple of int are required')
    def _fwd(self, x, xp):
        # Subclasses provide the actual reduction (amax/amin).
        raise NotImplementedError('_fwd should be implemented in sub-class.')
    def check_type_forward(self, in_types):
        # Input must be a single floating-point array, and every requested
        # axis (positive or negative) must be within its rank.
        type_check._argname(in_types, ('x',))
        type_check.expect(in_types[0].dtype.kind == 'f')
        if self.axis is not None:
            for axis in self.axis:
                if axis >= 0:
                    type_check.expect(
                        axis < in_types[0].ndim,
                    )
                else:
                    type_check.expect(
                        -axis - 1 < in_types[0].ndim,
                    )
    def forward(self, x):
        # Retain input and output: backward needs both to locate the
        # positions where the extremum was attained.
        self.retain_inputs((0,))
        self.retain_outputs((0,))
        xp = backend.get_array_module(*x)
        return xp.asarray(self._fwd(x[0], xp)),
    def backward(self, indexes, gy):
        x = self.get_retained_inputs()[0]
        y = self.get_retained_outputs()[0]
        if self.axis is None:
            axis = range(x.ndim)
        else:
            axis = [ax % x.ndim for ax in self.axis]
        # Add broadcastable dimensions to y and gy
        # for each one that was reduced in the forward operation
        shape = [s if ax not in axis else 1 for ax, s in enumerate(x.shape)]
        gy = gy[0].reshape(shape)
        y = y.reshape(shape)
        # Compute the gradient
        # Gradient flows only to the positions equal to the selected
        # extremum; in case of ties each tied element receives the full
        # upstream gradient.
        cond = (x.data == y.data)
        gy = chainer.functions.broadcast_to(gy, cond.shape)
        return gy * cond,
class Max(SelectorBase):
    """Max-reduction node; delegates to amax on the active backend."""
    def forward_chainerx(self, x):
        return chainerx.amax(x[0], axis=self.axis, keepdims=self.keepdims),
    def _fwd(self, x, xp):
        return xp.amax(x, axis=self.axis, keepdims=self.keepdims)


class Min(SelectorBase):
    """Min-reduction node; delegates to amin on the active backend."""
    def forward_chainerx(self, x):
        return chainerx.amin(x[0], axis=self.axis, keepdims=self.keepdims),
    def _fwd(self, x, xp):
        return xp.amin(x, axis=self.axis, keepdims=self.keepdims)
class IndexSelectorBase(function_node.FunctionNode):
    """Select index of an array element from a given axis."""
    def __init__(self, axis=None):
        # Unlike SelectorBase, only a single axis (or None) is supported.
        if axis is None:
            self.axis = None
        elif isinstance(axis, six.integer_types):
            self.axis = axis
        else:
            raise TypeError('None or int are required')
    def _fwd(self, x, xp):
        # Subclasses provide the actual index computation (argmin/argmax).
        raise NotImplementedError('_fwd should be implemented in sub-class.')
    def check_type_forward(self, in_types):
        type_check.expect(
            in_types.size() == 1,
            in_types[0].dtype.kind == 'f'
        )
        if self.axis is not None:
            if self.axis >= 0:
                type_check.expect(
                    self.axis < in_types[0].ndim,
                )
            else:
                type_check.expect(
                    -self.axis - 1 < in_types[0].ndim,
                )
    def forward(self, x):
        xp = backend.get_array_module(*x)
        return xp.asarray(self._fwd(x[0], xp)),
    def backward(self, indexes, grad_outputs):
        # Index selection is piecewise constant, so no gradient flows back.
        return None,
class ArgMin(IndexSelectorBase):
    """Index-of-minimum node; result is cast to int32 for consistency."""
    def forward_chainerx(self, x):
        return chainerx.argmin(x[0], axis=self.axis).astype(numpy.int32),
    def _fwd(self, x, xp):
        return xp.argmin(x, axis=self.axis).astype(numpy.int32)


class ArgMax(IndexSelectorBase):
    """Index-of-maximum node; result is cast to int32 for consistency."""
    def forward_chainerx(self, x):
        return chainerx.argmax(x[0], axis=self.axis).astype(numpy.int32),
    def _fwd(self, x, xp):
        return xp.argmax(x, axis=self.axis).astype(numpy.int32)
def max(x, axis=None, keepdims=False):
    """Maximum of array elements over a given axis.
    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Array to be maximized.
        axis (None, int, or tuple of int): Axis over which a max is performed.
            The default (axis = None) is perform a max over all the dimensions
            of the input array.
        keepdims (bool): If ``True``, the reduced axes are retained in the
            result as dimensions of size one.
    Returns:
        ~chainer.Variable: Output variable.
    """
    return Max(axis, keepdims).apply((x,))[0]
def min(x, axis=None, keepdims=False):
    """Minimum of array elements over a given axis.
    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Array to be minimized.
        axis (None, int, or tuple of int): Axis over which a min is performed.
            The default (axis = None) is perform a min over all the dimensions
            of the input array.
        keepdims (bool): If ``True``, the reduced axes are retained in the
            result as dimensions of size one.
    Returns:
        ~chainer.Variable: Output variable.
    """
    return Min(axis, keepdims).apply((x,))[0]
def argmax(x, axis=None):
    """Returns index which holds maximum of array elements over a given axis.
    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Array to find maximum elements.
        axis (None or int): Axis over which a max is performed.
            The default (axis = None) is perform a max over all the dimensions
            of the input array.
    Returns:
        ~chainer.Variable: Output variable.
    """
    return ArgMax(axis).apply((x,))[0]
def argmin(x, axis=None):
    """Returns index which holds minimum of array elements over a given axis.
    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Array to find minimum elements.
        axis (None or int): Axis over which a min is performed.
            The default (axis = None) is perform a min over all the dimensions
            of the input array.
    Returns:
        ~chainer.Variable: Output variable.
    """
    return ArgMin(axis).apply((x,))[0]
| okuta/chainer | chainer/functions/math/minmax.py | Python | mit | 6,399 |
#
# threads.py: anaconda thread management
#
# Copyright (C) 2012
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Chris Lumens <clumens@redhat.com>
#
import logging
log = logging.getLogger("anaconda")
import threading
class ThreadManager(object):
    """A singleton class for managing threads and processes.
    Notes:
    THE INSTANCE HAS TO BE CREATED IN THE MAIN THREAD!
    This manager makes one assumption that contradicts python's
    threading module documentation. In this class, we assume that thread
    names are unique and meaningful. This is an okay assumption for us
    to make given that anaconda is only ever going to have a handful of
    special purpose threads.
    """
    def __init__(self):
        self._objs = {}
        self._errors = {}
        # Captured at construction time; initThreading() must therefore run
        # in the main thread for in_main_thread() to be meaningful.
        self._main_thread = threading.current_thread()
    def __call__(self):
        return self
    def add(self, obj):
        """Given a Thread or Process object, add it to the list of known objects
        and start it. It is assumed that obj.name is unique and descriptive.
        """
        if obj.name in self._objs:
            raise KeyError("Cannot add thread '%s', a thread with the same name already running" % obj.name)
        self._objs[obj.name] = obj
        self._errors[obj.name] = None
        obj.start()
    def remove(self, name):
        """Removes a thread from the list of known objects. This should only
        be called when a thread exits, or there will be no way to get a
        handle on it.
        """
        self._objs.pop(name)
    def exists(self, name):
        """Determine if a thread or process exists with the given name."""
        return name in self._objs
    def get(self, name):
        """Given an object name, see if it exists and return the object.
        Return None if no such object exists. Additionally, this method
        will re-raise any uncaught exception in the thread.
        """
        obj = self._objs.get(name)
        if obj:
            self.raise_error(name)
        return obj
    def wait(self, name):
        """Wait for the thread to exit and if the thread exited with an error
        re-raise it here.
        """
        if self.exists(name):
            self.get(name).join()
        # The thread removed itself from _objs on exit, but its error entry
        # survives, so a failure can still propagate here.
        self.raise_error(name)
    def wait_all(self):
        """Wait for all threads to exit and if there was an error re-raise it.
        """
        # NOTE(review): relies on Python 2's dict.keys() returning a list
        # copy, since threads remove themselves from _objs while we iterate.
        for name in self._objs.keys():
            if self.get(name) == threading.current_thread():
                continue
            log.debug("Waiting for thread %s to exit", name)
            self.wait(name)
    def set_error(self, name, *exc_info):
        """Set the error data for a thread
        The exception data is expected to be the tuple from sys.exc_info()
        """
        self._errors[name] = exc_info
    def get_error(self, name):
        """Get the error data for a thread using its name
        """
        return self._errors.get(name)
    def any_errors(self):
        """Return True of there have been any errors in any threads
        """
        return any(self._errors.values())
    def raise_error(self, name):
        """If a thread has failed due to an exception, raise it into the main
        thread.
        """
        if self._errors.get(name):
            # Python 2 three-argument raise: re-raises the stored exception
            # with its original traceback.
            raise self._errors[name][0], self._errors[name][1], self._errors[name][2]
    def in_main_thread(self):
        """Return True if it is run in the main thread."""
        cur_thread = threading.current_thread()
        return cur_thread is self._main_thread
    @property
    def running(self):
        """ Return the number of running threads.
            :returns: number of running threads
            :rtype: int
        """
        return len(self._objs)
    @property
    def names(self):
        """ Return the names of the running threads.
            :returns: list of thread names
            :rtype: list of strings
        """
        return self._objs.keys()
class AnacondaThread(threading.Thread):
    """Daemonic Thread subclass wired into the anaconda thread manager.

    Compared with a plain threading.Thread it:
      (1) routes uncaught exceptions into our exception-handling code instead
          of letting the thread die silently,
      (2) unregisters itself from the thread manager when it finishes, and
      (3) is always daemonic, so killing the main process also ends it.
    """

    def __init__(self, *args, **kwargs):
        super(AnacondaThread, self).__init__(*args, **kwargs)
        self.daemon = True

    def run(self, *args, **kwargs):
        # http://bugs.python.org/issue1230540#msg25696
        import sys
        log.info("Running Thread: %s (%s)", self.name, self.ident)
        try:
            super(AnacondaThread, self).run(*args, **kwargs)
        except KeyboardInterrupt:
            raise
        except:
            # Record the failure for raise_error() and invoke the global hook.
            threadMgr.set_error(self.name, *sys.exc_info())
            sys.excepthook(*sys.exc_info())
        finally:
            threadMgr.remove(self.name)
            log.info("Thread Done: %s (%s)", self.name, self.ident)
def initThreading():
    """Set up threading for anaconda's use. This method must be called before
    any GTK or threading code is called, or else threads will only run when
    an event is triggered in the GTK main loop. And IT HAS TO BE CALLED IN
    THE MAIN THREAD.
    """
    global threadMgr
    threadMgr = ThreadManager()

# Module-level singleton; remains None until initThreading() is called.
threadMgr = None
| Sabayon/anaconda | pyanaconda/threads.py | Python | gpl-2.0 | 6,224 |
from misago.markup import checksums
def is_post_valid(post):
    """True when the stored checksum matches one recomputed from the post."""
    return post.checksum == make_post_checksum(post)


def make_post_checksum(post):
    """Derive a checksum for a post, seeded by its id and poster IP."""
    seeds = [unicode(v) for v in (post.id, post.poster_ip)]
    return checksums.make_checksum(post.parsed, seeds)


def update_post_checksum(post):
    """Recompute and store the post's checksum, returning the new value."""
    post.checksum = make_post_checksum(post)
    return post.checksum
def is_report_valid(report):
    """True when the stored checksum matches one recomputed from the report."""
    return report.checksum == make_report_checksum(report)


def make_report_checksum(report):
    """Derive a checksum for a report, seeded by its id and reporter IP."""
    seeds = [unicode(v) for v in (report.id, report.reported_by_ip)]
    return checksums.make_checksum(report.message, seeds)


def update_report_checksum(report):
    """Recompute and store the report's checksum, returning the new value."""
    report.checksum = make_report_checksum(report)
    return report.checksum
def is_event_valid(event):
valid_checksum = make_event_checksum(event)
return event.checksum == valid_checksum
def make_event_checksum(event):
event_seeds = [unicode(v) for v in (event.id, event.occured_on)]
return checksums.make_checksum(event.message, event_seeds)
def update_event_checksum(event):
    """Recompute and store the event's checksum; return the new value."""
    checksum = make_event_checksum(event)
    event.checksum = checksum
    return checksum
| leture/Misago | misago/threads/checksums.py | Python | gpl-2.0 | 1,229 |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors: Pavlo Svirin <pavlo.svirin@gmail.com>
import unittest
import os
# from pilot.control.job import get_fake_job
# from pilot.info import JobData
from pilot.info.filespec import FileSpec
from pilot.util.tracereport import TraceReport
def check_env():
    """
    Function to check whether rucio copytool is loaded correctly.
    To be used to decide whether to skip some test functions.

    The previous implementation always returned False, unconditionally
    skipping every test below; now the rucio client is actually probed.

    :returns True: if rucio copytool is available. Otherwise False.
    """
    try:
        import rucio  # noqa: F401  pylint: disable=unused-import
    except Exception:  # ImportError or a broken installation -- treat as absent
        return False
    return True
@unittest.skipIf(not check_env(), "No Rucio copytool")
class TestCopytoolRucio(unittest.TestCase):
    """
    Unit tests for rucio copytool.
    """

    def setUp(self):
        """Create a small local file and the FileSpec that describes it."""
        # Context manager guarantees the handle is closed even on error
        # (the old code relied on an explicit close()).
        with open('test.txt', 'w') as test_file:
            test_file.write('For test purposes only.')
        fspec_out = FileSpec()
        fspec_out.lfn = 'test.txt'
        fspec_out.scope = 'user.tjavurek'
        # adler32 of the fixed file content above.
        fspec_out.checksum = {'adler32': '682c08b9'}
        # os.path.join is portable, unlike manual '/' concatenation.
        fspec_out.pfn = os.path.join(os.getcwd(), 'test.txt')
        fspec_out.ddmendpoint = 'UNI-FREIBURG_SCRATCHDISK'
        self.outdata = [fspec_out]

    def test_copy_out_rucio(self):
        """Upload the test file with rucio's copy_out, then clean up."""
        from pilot.copytool.rucio import copy_out
        trace_report = TraceReport()
        trace_report.update(eventType='unit test')
        copy_out(self.outdata, trace_report=trace_report)
        os.remove(self.outdata[0].pfn)
os.remove(self.outdata[0].pfn)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| PalNilsson/pilot2 | pilot/test/test_copytools_rucio.py | Python | apache-2.0 | 1,674 |
# mako/_ast_util.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import * # noqa
from mako.compat import arg_stringname
# Symbol tables mapping AST operator node types to their source spellings;
# used by SourceGenerator when rendering expressions back to text.
BOOLOP_SYMBOLS = {
    And: 'and',
    Or: 'or'
}
# Binary arithmetic/bitwise operators.
BINOP_SYMBOLS = {
    Add: '+',
    Sub: '-',
    Mult: '*',
    Div: '/',
    FloorDiv: '//',
    Mod: '%',
    LShift: '<<',
    RShift: '>>',
    BitOr: '|',
    BitAnd: '&',
    BitXor: '^'
}
# Comparison operators, including identity and membership tests.
CMPOP_SYMBOLS = {
    Eq: '==',
    Gt: '>',
    GtE: '>=',
    In: 'in',
    Is: 'is',
    IsNot: 'is not',
    Lt: '<',
    LtE: '<=',
    NotEq: '!=',
    NotIn: 'not in'
}
# Unary prefix operators.
UNARYOP_SYMBOLS = {
    Invert: '~',
    Not: 'not',
    UAdd: '+',
    USub: '-'
}
# Merged view over all of the tables above, keyed by node type.
ALL_SYMBOLS = {}
ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
ALL_SYMBOLS.update(BINOP_SYMBOLS)
ALL_SYMBOLS.update(CMPOP_SYMBOLS)
ALL_SYMBOLS.update(UNARYOP_SYMBOLS)
def parse(expr, filename='<unknown>', mode='exec'):
    """Parse *expr* and return the resulting AST node (no execution)."""
    tree = compile(expr, filename, mode, PyCF_ONLY_AST)
    return tree
def to_source(node, indent_with=' ' * 4):
    """
    Render *node* back into Python source text.

    Mainly useful for debugging, in particular with custom ASTs that were
    not produced by the parser.  The rendered source may be evaluable even
    when the tree itself cannot be compiled, since the AST carries extra
    information that is dropped during rendering.

    Each indentation level is emitted as *indent_with*; the default of four
    spaces follows PEP 8 but can be changed to match a project styleguide.
    """
    gen = SourceGenerator(indent_with)
    gen.visit(node)
    return ''.join(gen.result)
def dump(node):
    """Return a very verbose, debug-oriented representation of *node*."""
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)

    def _render(item):
        # AST nodes show their class plus every field, recursively.
        if isinstance(item, AST):
            parts = ', '.join('%s=%s' % (name, _render(value))
                              for name, value in iter_fields(item))
            return '%s(%s)' % (item.__class__.__name__, parts)
        if isinstance(item, list):
            return '[%s]' % ', '.join(_render(element) for element in item)
        return repr(item)

    return _render(node)
def copy_location(new_node, old_node):
    """
    Transfer the source location hint (``lineno`` and ``col_offset``) from
    *old_node* to *new_node* where both nodes support it, then return
    *new_node*.
    """
    for attr in ('lineno', 'col_offset'):
        supported = (attr in old_node._attributes
                     and attr in new_node._attributes)
        if supported and hasattr(old_node, attr):
            setattr(new_node, attr, getattr(old_node, attr))
    return new_node
def fix_missing_locations(node):
    """
    Some nodes require a line number and the column offset. Without that
    information the compiler will abort the compilation. Because it can be
    a dull task to add appropriate line numbers and column offsets when
    adding new nodes this function can help. It copies the line number and
    column offset of the parent node to the child nodes without this
    information.
    Unlike `copy_location` this works recursive and won't touch nodes that
    already have a location information.
    """
    def _fix(node, lineno, col_offset):
        # Fill in a missing location from the parent; a node that already
        # carries one becomes the reference point for its own children.
        if 'lineno' in node._attributes:
            if not hasattr(node, 'lineno'):
                node.lineno = lineno
            else:
                lineno = node.lineno
        if 'col_offset' in node._attributes:
            if not hasattr(node, 'col_offset'):
                node.col_offset = col_offset
            else:
                col_offset = node.col_offset
        for child in iter_child_nodes(node):
            _fix(child, lineno, col_offset)
    # The root defaults to line 1, column 0.
    _fix(node, 1, 0)
    return node
def increment_lineno(node, n=1):
    """
    Increment the line numbers of *node* and all of its descendants by *n*
    if they have line number attributes.  This is useful to "move code" to
    a different location in a file.

    Bug fix: the old loop iterated ``zip((node,), walk(node))``, which
    yields exactly one *tuple* (the 1-tuple exhausts the zip) and then
    crashed on ``tuple._attributes`` -- it never renumbered anything.
    ``walk`` already yields *node* itself first, so iterating it directly
    covers the whole tree.
    """
    for child in walk(node):
        if 'lineno' in child._attributes:
            child.lineno = getattr(child, 'lineno', 0) + n
def iter_fields(node):
    """Yield ``(name, value)`` pairs for every field actually set on *node*."""
    # CPython 2.5 compat: some nodes lack ``_fields`` entirely.
    for name in getattr(node, '_fields', None) or ():
        try:
            value = getattr(node, name)
        except AttributeError:
            continue
        yield name, value
def get_fields(node):
    """Return the existing fields of *node* as a dict (see `iter_fields`)."""
    return {name: value for name, value in iter_fields(node)}
def iter_child_nodes(node):
    """Yield every direct child node of *node*."""
    for _, value in iter_fields(node):
        if isinstance(value, AST):
            yield value
            continue
        if isinstance(value, list):
            for element in value:
                if isinstance(element, AST):
                    yield element
def get_child_nodes(node):
    """Return the direct children of *node* as a list (see `iter_child_nodes`)."""
    return [child for child in iter_child_nodes(node)]
def get_compile_mode(node):
    """
    Return the ``compile()`` mode for a given AST root node.  If the node is
    not a `mod` node (`Expression`, `Module` etc.) a `TypeError` is thrown.

    Bug fix: the fallback used to be ``'expr'``, which ``compile()`` rejects;
    valid modes are only 'exec', 'eval' and 'single', and a plain `Module`
    root must be compiled in 'exec' mode.
    """
    if not isinstance(node, mod):
        raise TypeError('expected mod node, got %r' % node.__class__.__name__)
    return {
        Expression: 'eval',
        Interactive: 'single'
    }.get(node.__class__, 'exec')
def get_docstring(node):
    """
    Return the docstring for the given node or `None` if no docstring can be
    found.  If the node provided does not accept docstrings a `TypeError`
    will be raised.

    Bug fix: a docstring is a `Str` wrapped in an `Expr` statement, so the
    old check ``isinstance(node.body[0], Str)`` could never match and this
    function always returned `None`.  Unwrap the `Expr` first.
    """
    if not isinstance(node, (FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    if node.body and isinstance(node.body[0], Expr) and \
            isinstance(node.body[0].value, Str):
        return node.body[0].value.s
def walk(node):
    """
    Breadth-first iteration over *node* and every descendant.  Handy when you
    only want to modify nodes in place and do not care about the context or
    the order in which they are returned.
    """
    from collections import deque
    pending = deque((node,))
    while pending:
        current = pending.popleft()
        pending.extend(iter_child_nodes(current))
        yield current
class NodeVisitor(object):
    """
    Walk an abstract syntax tree, dispatching every node to a visitor method
    whose return value is passed back through `visit`.

    Dispatch looks for a method named ``'visit_' + ClassName`` (a
    `TryFinally` node goes to ``visit_TryFinally``); override `get_visitor`
    to change that scheme.  When no specific method exists (`get_visitor`
    returns `None`), `generic_visit` recurses into the node's children.

    This class is for read-only traversal; to modify nodes while walking,
    use the dedicated `NodeTransformer` instead.
    """

    def get_visitor(self, node):
        """
        Return the visitor method for *node*, or `None` when there is no
        specific one and the generic visit function should be used.
        """
        return getattr(self, 'visit_' + node.__class__.__name__, None)

    def visit(self, node):
        """Dispatch *node* to its visitor method and return the result."""
        visitor = self.get_visitor(node)
        if visitor is None:
            return self.generic_visit(node)
        return visitor(node)

    def generic_visit(self, node):
        """Fallback visitor: recurse into every child node of *node*."""
        for _, value in iter_fields(node):
            if isinstance(value, AST):
                self.visit(value)
            elif isinstance(value, list):
                for element in value:
                    if isinstance(element, AST):
                        self.visit(element)
class NodeTransformer(NodeVisitor):
    """
    Walks the abstract syntax tree and allows modifications of nodes.
    The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node. If the return
    value of the visitor function is `None` the node will be removed
    from the previous location otherwise it's replaced with the return
    value. The return value may be the original node in which case no
    replacement takes place.
    Here an example transformer that rewrites all `foo` to `data['foo']`::
        class RewriteName(NodeTransformer):
            def visit_Name(self, node):
                return copy_location(Subscript(
                    value=Name(id='data', ctx=Load()),
                    slice=Index(value=Str(s=node.id)),
                    ctx=node.ctx
                ), node)
    Keep in mind that if the node you're operating on has child nodes
    you must either transform the child nodes yourself or call the generic
    visit function for the node first.
    Nodes that were part of a collection of statements (that applies to
    all statement nodes) may also return a list of nodes rather than just
    a single node.
    Usually you use the transformer like this::
        node = YourTransformer().visit(node)
    """
    def generic_visit(self, node):
        for field, old_value in iter_fields(node):
            # NOTE(review): this re-fetch shadows the value iter_fields just
            # yielded -- redundant but harmless.
            old_value = getattr(node, field, None)
            if isinstance(old_value, list):
                new_values = []
                for value in old_value:
                    if isinstance(value, AST):
                        value = self.visit(value)
                        # A visitor may delete a node (None), replace it
                        # (AST), or splice in several (any other iterable).
                        if value is None:
                            continue
                        elif not isinstance(value, AST):
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # Mutate in place so the parent keeps its list reference.
                old_value[:] = new_values
            elif isinstance(old_value, AST):
                new_node = self.visit(old_value)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
class SourceGenerator(NodeVisitor):
"""
This visitor is able to transform a well formed syntax tree into python
sourcecode. For more details have a look at the docstring of the
`node_to_source` function.
"""
def __init__(self, indent_with):
self.result = []
self.indent_with = indent_with
self.indentation = 0
self.new_lines = 0
def write(self, x):
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(x)
def newline(self, n=1):
self.new_lines = max(self.new_lines, n)
def body(self, statements):
self.new_line = True
self.indentation += 1
for stmt in statements:
self.visit(stmt)
self.indentation -= 1
def body_or_else(self, node):
self.body(node.body)
if node.orelse:
self.newline()
self.write('else:')
self.body(node.orelse)
def signature(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
padding = [None] * (len(node.args) - len(node.defaults))
for arg, default in zip(node.args, padding + node.defaults):
write_comma()
self.visit(arg)
if default is not None:
self.write('=')
self.visit(default)
if node.vararg is not None:
write_comma()
self.write('*' + arg_stringname(node.vararg))
if node.kwarg is not None:
write_comma()
self.write('**' + arg_stringname(node.kwarg))
def decorators(self, node):
for decorator in node.decorator_list:
self.newline()
self.write('@')
self.visit(decorator)
# Statements
def visit_Assign(self, node):
self.newline()
for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
self.write(' = ')
self.visit(node.value)
def visit_AugAssign(self, node):
self.newline()
self.visit(node.target)
self.write(BINOP_SYMBOLS[type(node.op)] + '=')
self.visit(node.value)
def visit_ImportFrom(self, node):
self.newline()
self.write('from %s%s import ' % ('.' * node.level, node.module))
for idx, item in enumerate(node.names):
if idx:
self.write(', ')
self.write(item)
def visit_Import(self, node):
self.newline()
for item in node.names:
self.write('import ')
self.visit(item)
def visit_Expr(self, node):
self.newline()
self.generic_visit(node)
def visit_FunctionDef(self, node):
self.newline(n=2)
self.decorators(node)
self.newline()
self.write('def %s(' % node.name)
self.signature(node.args)
self.write('):')
self.body(node.body)
def visit_ClassDef(self, node):
have_args = []
def paren_or_comma():
if have_args:
self.write(', ')
else:
have_args.append(True)
self.write('(')
self.newline(n=3)
self.decorators(node)
self.newline()
self.write('class %s' % node.name)
for base in node.bases:
paren_or_comma()
self.visit(base)
# XXX: the if here is used to keep this module compatible
# with python 2.6.
if hasattr(node, 'keywords'):
for keyword in node.keywords:
paren_or_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if getattr(node, "starargs", None):
paren_or_comma()
self.write('*')
self.visit(node.starargs)
if getattr(node, "kwargs", None):
paren_or_comma()
self.write('**')
self.visit(node.kwargs)
self.write(have_args and '):' or ':')
self.body(node.body)
def visit_If(self, node):
self.newline()
self.write('if ')
self.visit(node.test)
self.write(':')
self.body(node.body)
while True:
else_ = node.orelse
if len(else_) == 1 and isinstance(else_[0], If):
node = else_[0]
self.newline()
self.write('elif ')
self.visit(node.test)
self.write(':')
self.body(node.body)
else:
self.newline()
self.write('else:')
self.body(else_)
break
def visit_For(self, node):
self.newline()
self.write('for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
self.write(':')
self.body_or_else(node)
def visit_While(self, node):
self.newline()
self.write('while ')
self.visit(node.test)
self.write(':')
self.body_or_else(node)
def visit_With(self, node):
self.newline()
self.write('with ')
self.visit(node.context_expr)
if node.optional_vars is not None:
self.write(' as ')
self.visit(node.optional_vars)
self.write(':')
self.body(node.body)
def visit_Pass(self, node):
self.newline()
self.write('pass')
def visit_Print(self, node):
# XXX: python 2.6 only
self.newline()
self.write('print ')
want_comma = False
if node.dest is not None:
self.write(' >> ')
self.visit(node.dest)
want_comma = True
for value in node.values:
if want_comma:
self.write(', ')
self.visit(value)
want_comma = True
if not node.nl:
self.write(',')
def visit_Delete(self, node):
self.newline()
self.write('del ')
for idx, target in enumerate(node):
if idx:
self.write(', ')
self.visit(target)
def visit_TryExcept(self, node):
self.newline()
self.write('try:')
self.body(node.body)
for handler in node.handlers:
self.visit(handler)
def visit_TryFinally(self, node):
self.newline()
self.write('try:')
self.body(node.body)
self.newline()
self.write('finally:')
self.body(node.finalbody)
def visit_Global(self, node):
self.newline()
self.write('global ' + ', '.join(node.names))
def visit_Nonlocal(self, node):
self.newline()
self.write('nonlocal ' + ', '.join(node.names))
def visit_Return(self, node):
self.newline()
self.write('return ')
self.visit(node.value)
def visit_Break(self, node):
self.newline()
self.write('break')
def visit_Continue(self, node):
self.newline()
self.write('continue')
def visit_Raise(self, node):
# XXX: Python 2.6 / 3.0 compatibility
self.newline()
self.write('raise')
if hasattr(node, 'exc') and node.exc is not None:
self.write(' ')
self.visit(node.exc)
if node.cause is not None:
self.write(' from ')
self.visit(node.cause)
elif hasattr(node, 'type') and node.type is not None:
self.visit(node.type)
if node.inst is not None:
self.write(', ')
self.visit(node.inst)
if node.tback is not None:
self.write(', ')
self.visit(node.tback)
# Expressions
def visit_Attribute(self, node):
self.visit(node.value)
self.write('.' + node.attr)
def visit_Call(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
self.visit(node.func)
self.write('(')
for arg in node.args:
write_comma()
self.visit(arg)
for keyword in node.keywords:
write_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if getattr(node, "starargs", None):
write_comma()
self.write('*')
self.visit(node.starargs)
if getattr(node, "kwargs", None):
write_comma()
self.write('**')
self.visit(node.kwargs)
self.write(')')
def visit_Name(self, node):
self.write(node.id)
def visit_NameConstant(self, node):
self.write(str(node.value))
def visit_arg(self, node):
self.write(node.arg)
def visit_Str(self, node):
self.write(repr(node.s))
def visit_Bytes(self, node):
self.write(repr(node.s))
def visit_Num(self, node):
self.write(repr(node.n))
def visit_Tuple(self, node):
self.write('(')
idx = -1
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(idx and ')' or ',)')
def sequence_visit(left, right):
def visit(self, node):
self.write(left)
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(right)
return visit
visit_List = sequence_visit('[', ']')
visit_Set = sequence_visit('{', '}')
del sequence_visit
def visit_Dict(self, node):
self.write('{')
for idx, (key, value) in enumerate(zip(node.keys, node.values)):
if idx:
self.write(', ')
self.visit(key)
self.write(': ')
self.visit(value)
self.write('}')
def visit_BinOp(self, node):
self.write('(')
self.visit(node.left)
self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
self.visit(node.right)
self.write(')')
def visit_BoolOp(self, node):
self.write('(')
for idx, value in enumerate(node.values):
if idx:
self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
self.visit(value)
self.write(')')
def visit_Compare(self, node):
self.write('(')
self.visit(node.left)
for op, right in zip(node.ops, node.comparators):
self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
self.visit(right)
self.write(')')
def visit_UnaryOp(self, node):
self.write('(')
op = UNARYOP_SYMBOLS[type(node.op)]
self.write(op)
if op == 'not':
self.write(' ')
self.visit(node.operand)
self.write(')')
def visit_Subscript(self, node):
self.visit(node.value)
self.write('[')
self.visit(node.slice)
self.write(']')
def visit_Slice(self, node):
if node.lower is not None:
self.visit(node.lower)
self.write(':')
if node.upper is not None:
self.visit(node.upper)
if node.step is not None:
self.write(':')
if not (isinstance(node.step, Name) and node.step.id == 'None'):
self.visit(node.step)
def visit_ExtSlice(self, node):
for idx, item in node.dims:
if idx:
self.write(', ')
self.visit(item)
def visit_Yield(self, node):
self.write('yield ')
self.visit(node.value)
def visit_Lambda(self, node):
self.write('lambda ')
self.signature(node.args)
self.write(': ')
self.visit(node.body)
def visit_Ellipsis(self, node):
self.write('Ellipsis')
def generator_visit(left, right):
def visit(self, node):
self.write(left)
self.visit(node.elt)
for comprehension in node.generators:
self.visit(comprehension)
self.write(right)
return visit
visit_ListComp = generator_visit('[', ']')
visit_GeneratorExp = generator_visit('(', ')')
visit_SetComp = generator_visit('{', '}')
del generator_visit
def visit_DictComp(self, node):
self.write('{')
self.visit(node.key)
self.write(': ')
self.visit(node.value)
for comprehension in node.generators:
self.visit(comprehension)
self.write('}')
def visit_IfExp(self, node):
self.visit(node.body)
self.write(' if ')
self.visit(node.test)
self.write(' else ')
self.visit(node.orelse)
def visit_Starred(self, node):
self.write('*')
self.visit(node.value)
def visit_Repr(self, node):
# XXX: python 2.6 only
self.write('`')
self.visit(node.value)
self.write('`')
# Helper Nodes
def visit_alias(self, node):
self.write(node.name)
if node.asname is not None:
self.write(' as ' + node.asname)
def visit_comprehension(self, node):
self.write(' for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
if node.ifs:
for if_ in node.ifs:
self.write(' if ')
self.visit(if_)
def visit_excepthandler(self, node):
self.newline()
self.write('except')
if node.type is not None:
self.write(' ')
self.visit(node.type)
if node.name is not None:
self.write(' as ')
self.visit(node.name)
self.write(':')
self.body(node.body)
| Widiot/simpleblog | venv/lib/python3.5/site-packages/mako/_ast_util.py | Python | mit | 25,691 |
# python
# This file is generated by a program (mib2py). Any edits will be lost.
from pycopia.aid import Enum
import pycopia.SMI.Basetypes
Range = pycopia.SMI.Basetypes.Range
Ranges = pycopia.SMI.Basetypes.Ranges
from pycopia.SMI.Objects import ColumnObject, MacroObject, NotificationObject, RowObject, ScalarObject, NodeObject, ModuleObject, GroupObject
# imports
from SNMPv2_SMI import MODULE_IDENTITY, OBJECT_TYPE, Counter32, Integer32, NOTIFICATION_TYPE
from SNMPv2_CONF import MODULE_COMPLIANCE, OBJECT_GROUP, NOTIFICATION_GROUP
from RFC1213_MIB import transmission
from SNMPv2_TC import TEXTUAL_CONVENTION, RowStatus, TimeStamp
from IF_MIB import InterfaceIndex
# NOTE: this module is generated by mib2py from FRAME-RELAY-DTE-MIB;
# hand edits will be lost on regeneration (see file header).
class FRAME_RELAY_DTE_MIB(ModuleObject):
    # Filesystem location of the MIB definition this module was built from.
    path = '/usr/share/snmp/mibs/ietf/FRAME-RELAY-DTE-MIB'
    conformance = 5
    name = 'FRAME-RELAY-DTE-MIB'
    language = 2
    description = 'The MIB module to describe the use of a Frame Relay\ninterface by a DTE.'
# nodes
class frameRelayDTE(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32])
name = 'frameRelayDTE'
class frameRelayTraps(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 0])
name = 'frameRelayTraps'
class frameRelayTrapControl(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 4])
name = 'frameRelayTrapControl'
class frConformance(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 6])
name = 'frConformance'
class frGroups(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 6, 1])
name = 'frGroups'
class frCompliances(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 6, 2])
name = 'frCompliances'
# macros
# types
# Data Link Connection Identifier: the frame-relay virtual-circuit number.
class DLCI(pycopia.SMI.Basetypes.Integer32):
    status = 1
    # Valid DLCI range as declared in the MIB textual convention.
    ranges = Ranges(Range(0, 8388607))
# scalars
# Scalar object: global switch controlling frame-relay trap generation.
class frTrapState(ScalarObject):
    status = 1
    access = 5
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 4, 1])
    syntaxobject = pycopia.SMI.Basetypes.Enumeration
    enumerations = [Enum(1, 'enabled'), Enum(2, 'disabled')]
class frTrapMaxRate(ScalarObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 4, 2])
syntaxobject = pycopia.SMI.Basetypes.Integer32
# columns
class frDlcmiIfIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 1, 1, 1])
syntaxobject = InterfaceIndex
class frDlcmiState(ColumnObject):
status = 1
access = 5
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 1, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.Enumeration
enumerations = [Enum(1, 'noLmiConfigured'), Enum(2, 'lmiRev1'), Enum(3, 'ansiT1617D'), Enum(4, 'ansiT1617B'), Enum(5, 'itut933A'), Enum(6, 'ansiT1617D1994')]
class frDlcmiAddress(ColumnObject):
status = 1
access = 5
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 1, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.Enumeration
enumerations = [Enum(1, 'q921'), Enum(2, 'q922March90'), Enum(3, 'q922November90'), Enum(4, 'q922')]
class frDlcmiAddressLen(ColumnObject):
status = 1
access = 5
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 1, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.Enumeration
enumerations = [Enum(2, 'twoOctets'), Enum(3, 'threeOctets'), Enum(4, 'fourOctets')]
class frDlcmiPollingInterval(ColumnObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 1, 1, 5])
syntaxobject = pycopia.SMI.Basetypes.Integer32
access = 5
units = 'seconds'
class frDlcmiFullEnquiryInterval(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 1, 1, 6])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class frDlcmiErrorThreshold(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 1, 1, 7])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class frDlcmiMonitoredEvents(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 1, 1, 8])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class frDlcmiMaxSupportedVCs(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 1, 1, 9])
syntaxobject = DLCI
class frDlcmiMulticast(ColumnObject):
status = 1
access = 5
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 1, 1, 10])
syntaxobject = pycopia.SMI.Basetypes.Enumeration
enumerations = [Enum(1, 'nonBroadcast'), Enum(2, 'broadcast')]
class frDlcmiStatus(ColumnObject):
status = 1
access = 4
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 1, 1, 11])
syntaxobject = pycopia.SMI.Basetypes.Enumeration
enumerations = [Enum(1, 'running'), Enum(2, 'fault'), Enum(3, 'initializing')]
class frDlcmiRowStatus(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 1, 1, 12])
syntaxobject = pycopia.SMI.Basetypes.RowStatus
class frCircuitIfIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 1])
syntaxobject = InterfaceIndex
class frCircuitDlci(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 2])
syntaxobject = DLCI
class frCircuitState(ColumnObject):
status = 1
access = 5
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.Enumeration
enumerations = [Enum(1, 'invalid'), Enum(2, 'active'), Enum(3, 'inactive')]
class frCircuitReceivedFECNs(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class frCircuitReceivedBECNs(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 5])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class frCircuitSentFrames(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 6])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class frCircuitSentOctets(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 7])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class frCircuitReceivedFrames(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 8])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class frCircuitReceivedOctets(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 9])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class frCircuitCreationTime(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 10])
syntaxobject = pycopia.SMI.Basetypes.TimeStamp
class frCircuitLastTimeChange(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 11])
syntaxobject = pycopia.SMI.Basetypes.TimeStamp
class frCircuitCommittedBurst(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 12])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class frCircuitExcessBurst(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 13])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class frCircuitThroughput(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 14])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class frCircuitMulticast(ColumnObject):
status = 1
access = 5
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 15])
syntaxobject = pycopia.SMI.Basetypes.Enumeration
enumerations = [Enum(1, 'unicast'), Enum(2, 'oneWay'), Enum(3, 'twoWay'), Enum(4, 'nWay')]
class frCircuitType(ColumnObject):
status = 1
access = 4
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 16])
syntaxobject = pycopia.SMI.Basetypes.Enumeration
enumerations = [Enum(1, 'static'), Enum(2, 'dynamic')]
class frCircuitDiscards(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 17])
syntaxobject = pycopia.SMI.Basetypes.Counter32
# Columnar MIB objects generated from FRAME-RELAY-DTE-MIB (RFC 2115).
# Each class binds one column OID to its SMI syntax type; ``access`` and
# ``status`` are numeric SMI access/status codes emitted by the MIB compiler.
class frCircuitReceivedDEs(ColumnObject):
    access = 4
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 18])
    syntaxobject = pycopia.SMI.Basetypes.Counter32
class frCircuitSentDEs(ColumnObject):
    access = 4
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 19])
    syntaxobject = pycopia.SMI.Basetypes.Counter32
class frCircuitLogicalIfIndex(ColumnObject):
    access = 5
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 20])
    syntaxobject = InterfaceIndex
class frCircuitRowStatus(ColumnObject):
    access = 5
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1, 21])
    syntaxobject = pycopia.SMI.Basetypes.RowStatus
# Columns of the frame-relay error table (frErrTable).
class frErrIfIndex(ColumnObject):
    access = 4
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 3, 1, 1])
    syntaxobject = InterfaceIndex
class frErrType(ColumnObject):
    status = 1
    access = 4
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 3, 1, 2])
    syntaxobject = pycopia.SMI.Basetypes.Enumeration
    enumerations = [Enum(1, 'unknownError'), Enum(2, 'receiveShort'), Enum(3, 'receiveLong'), Enum(4, 'illegalAddress'), Enum(5, 'unknownAddress'), Enum(6, 'dlcmiProtoErr'), Enum(7, 'dlcmiUnknownIE'), Enum(8, 'dlcmiSequenceErr'), Enum(9, 'dlcmiUnknownRpt'), Enum(10, 'noErrorSinceReset')]
class frErrData(ColumnObject):
    access = 4
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 3, 1, 3])
    syntaxobject = pycopia.SMI.Basetypes.OctetString
class frErrTime(ColumnObject):
    access = 4
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 3, 1, 4])
    syntaxobject = pycopia.SMI.Basetypes.TimeStamp
class frErrFaults(ColumnObject):
    access = 4
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 3, 1, 5])
    syntaxobject = pycopia.SMI.Basetypes.Counter32
class frErrFaultTime(ColumnObject):
    access = 4
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 3, 1, 6])
    syntaxobject = pycopia.SMI.Basetypes.TimeStamp
# rows
# Row objects tie the column classes above into conceptual SNMP table rows;
# ``index`` lists the INDEX columns, ``create``/``rowstatus`` mark tables
# whose rows can be created via SNMP SET.
class frDlcmiEntry(RowObject):
    status = 1
    index = pycopia.SMI.Objects.IndexObjects([frDlcmiIfIndex], False)
    create = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 1, 1])
    access = 2
    rowstatus = frDlcmiRowStatus
    columns = {'frDlcmiIfIndex': frDlcmiIfIndex, 'frDlcmiState': frDlcmiState, 'frDlcmiAddress': frDlcmiAddress, 'frDlcmiAddressLen': frDlcmiAddressLen, 'frDlcmiPollingInterval': frDlcmiPollingInterval, 'frDlcmiFullEnquiryInterval': frDlcmiFullEnquiryInterval, 'frDlcmiErrorThreshold': frDlcmiErrorThreshold, 'frDlcmiMonitoredEvents': frDlcmiMonitoredEvents, 'frDlcmiMaxSupportedVCs': frDlcmiMaxSupportedVCs, 'frDlcmiMulticast': frDlcmiMulticast, 'frDlcmiStatus': frDlcmiStatus, 'frDlcmiRowStatus': frDlcmiRowStatus}
class frCircuitEntry(RowObject):
    status = 1
    index = pycopia.SMI.Objects.IndexObjects([frCircuitIfIndex, frCircuitDlci], False)
    create = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 2, 1])
    access = 2
    rowstatus = frCircuitRowStatus
    columns = {'frCircuitIfIndex': frCircuitIfIndex, 'frCircuitDlci': frCircuitDlci, 'frCircuitState': frCircuitState, 'frCircuitReceivedFECNs': frCircuitReceivedFECNs, 'frCircuitReceivedBECNs': frCircuitReceivedBECNs, 'frCircuitSentFrames': frCircuitSentFrames, 'frCircuitSentOctets': frCircuitSentOctets, 'frCircuitReceivedFrames': frCircuitReceivedFrames, 'frCircuitReceivedOctets': frCircuitReceivedOctets, 'frCircuitCreationTime': frCircuitCreationTime, 'frCircuitLastTimeChange': frCircuitLastTimeChange, 'frCircuitCommittedBurst': frCircuitCommittedBurst, 'frCircuitExcessBurst': frCircuitExcessBurst, 'frCircuitThroughput': frCircuitThroughput, 'frCircuitMulticast': frCircuitMulticast, 'frCircuitType': frCircuitType, 'frCircuitDiscards': frCircuitDiscards, 'frCircuitReceivedDEs': frCircuitReceivedDEs, 'frCircuitSentDEs': frCircuitSentDEs, 'frCircuitLogicalIfIndex': frCircuitLogicalIfIndex, 'frCircuitRowStatus': frCircuitRowStatus}
class frErrEntry(RowObject):
    status = 1
    index = pycopia.SMI.Objects.IndexObjects([frErrIfIndex], False)
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 3, 1])
    access = 2
    columns = {'frErrIfIndex': frErrIfIndex, 'frErrType': frErrType, 'frErrData': frErrData, 'frErrTime': frErrTime, 'frErrFaults': frErrFaults, 'frErrFaultTime': frErrFaultTime}
# notifications (traps)
class frDLCIStatusChange(NotificationObject):
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 0, 1])
# groups
# Conformance groups: each lists the objects an agent must implement to
# claim compliance with that group of the MIB.
class frPortGroup(GroupObject):
    access = 2
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 6, 1, 1])
    group = [frDlcmiIfIndex, frDlcmiState, frDlcmiAddress, frDlcmiAddressLen, frDlcmiPollingInterval, frDlcmiFullEnquiryInterval, frDlcmiErrorThreshold, frDlcmiMonitoredEvents, frDlcmiMaxSupportedVCs, frDlcmiMulticast, frDlcmiStatus, frDlcmiRowStatus]
class frCircuitGroup(GroupObject):
    access = 2
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 6, 1, 2])
    group = [frCircuitIfIndex, frCircuitDlci, frCircuitState, frCircuitReceivedFECNs, frCircuitReceivedBECNs, frCircuitSentFrames, frCircuitSentOctets, frCircuitReceivedFrames, frCircuitReceivedOctets, frCircuitCreationTime, frCircuitLastTimeChange, frCircuitCommittedBurst, frCircuitExcessBurst, frCircuitThroughput, frCircuitMulticast, frCircuitType, frCircuitDiscards, frCircuitReceivedDEs, frCircuitSentDEs, frCircuitLogicalIfIndex, frCircuitRowStatus]
class frTrapGroup(GroupObject):
    access = 2
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 6, 1, 3])
    group = [frTrapState, frTrapMaxRate]
class frErrGroup(GroupObject):
    access = 2
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 6, 1, 4])
    group = [frErrIfIndex, frErrType, frErrData, frErrTime, frErrFaults, frErrFaultTime]
class frNotificationGroup(GroupObject):
    access = 2
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 6, 1, 5])
    group = [frDLCIStatusChange]
# Deprecated/older revisions of the groups above (suffix "0").
class frPortGroup0(GroupObject):
    access = 2
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 6, 1, 6])
    group = [frDlcmiIfIndex, frDlcmiState, frDlcmiAddress, frDlcmiAddressLen, frDlcmiPollingInterval, frDlcmiFullEnquiryInterval, frDlcmiErrorThreshold, frDlcmiMonitoredEvents, frDlcmiMaxSupportedVCs, frDlcmiMulticast]
class frCircuitGroup0(GroupObject):
    access = 2
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 6, 1, 7])
    group = [frCircuitIfIndex, frCircuitDlci, frCircuitState, frCircuitReceivedFECNs, frCircuitReceivedBECNs, frCircuitSentFrames, frCircuitSentOctets, frCircuitReceivedFrames, frCircuitReceivedOctets, frCircuitCreationTime, frCircuitLastTimeChange, frCircuitCommittedBurst, frCircuitExcessBurst, frCircuitThroughput]
class frErrGroup0(GroupObject):
    access = 2
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 6, 1, 8])
    group = [frErrIfIndex, frErrType, frErrData, frErrTime]
class frTrapGroup0(GroupObject):
    access = 2
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 2, 1, 10, 32, 6, 1, 9])
    group = [frTrapState]
# capabilities
# special additions
# Add to master OIDMAP.
# Registers every object defined in this module in the global OID map so
# lookups by OID resolve to these classes.
from pycopia import SMI
SMI.update_oidmap(__name__)
| xiangke/pycopia | mibs/pycopia/mibs/FRAME_RELAY_DTE_MIB.py | Python | lgpl-2.1 | 16,317 |
#! /usr/bin/python2.7
# coding=utf-8
########################################################
import matplotlib.pyplot as plt
import numpy as np
from numpy import *
import pylab
import os
import interpFunctions # Contient la fonction der_auto pour caler automatiquement
from interpFunctions import * # la droite de pente nulle de la dérivée à l'IARF
########################################################
# --- Banner and data acquisition (Python 2 script; user-facing text is
# --- French and must stay as-is, comments translated to English). ---
print ''
print '################################################################'
print '################################################################'
print '###                                                          ###'
print '###   Outil d analyse des essais en bout de tige pour les    ###'
print '###   fluides compressibles et incompressibles. Methodes     ###'
print '###   tirees de Ezekwe - 2010 - Petroleum reservoir engin-   ###'
print '###   eering practice - Prentice Hall                        ###'
print '###                                                          ###'
print '################################################################'
print '################################################################'
print ''
print 'Récuperation des données de pression et temps'
print''
#
# Prompt for the input units, then convert everything to field units
# (pressure in psi, time in minutes).
#
# Pressure
#
q_p=float(raw_input('Unité de pression? psi = 0, kPa = 1, MPa = 2, Pa = 3\n'))
if q_p==2:
    unit_p='MPa'
    print 'Conversion MPa en psi'
    print ''
    p_MPa=loadtxt("pression.txt")
    p=p_MPa/0.00689475729
elif q_p==1:
    unit_p='kPa'
    print 'Conversion kPa en psi'
    print ''
    p_kPa=loadtxt("pression.txt")
    p=p_kPa/6.89475729
elif q_p==3:
    unit_p='Pa'
    print 'Conversion Pa en psi'
    print ''
    p_Pa=loadtxt("pression.txt")
    p=p_Pa/6894.75729
else:
    unit_p='psi'
    p = loadtxt("pression.txt")
print 'Pressions',p,'psi'
print ''
#
# Test time samples
#
q_t=float(raw_input('Unité de temps? s = 0, min = 1, h = 2\n'))
if q_t==0:
    unit_t='sec'
    print 'Conversion sec en min'
    print ''
    t=loadtxt("temps.txt")/60
elif q_t==1:
    unit_t='min'
    t = loadtxt("temps.txt")
else:
    unit_t='heure'
    print 'Conversion h en min'
    print ''
    t=60*loadtxt("temps.txt")
if t[0]==0:
    t=t[1:] # exclude t=0 because
    p=p[1:] # a log of time is computed right after
print 'Temps',t,'min'
print ''
#
# Production time
#
tp=float(raw_input('Temps de production de fluides? (en min)\n'))
print ''
#############################################################
###                                                       ###
###        Pressure-derivative computation                ###
###                                                       ###
#############################################################
# agarwal() returns the equivalent (Agarwal) time and its log; deriv()
# the smoothed pressure derivative; der_auto() auto-fits the zero-slope
# line of the derivative at the IARF (infinite-acting radial flow).
te,lt=agarwal(t,tp)
pf, dp, dt =deriv(lt,te,p)
#
# Log-log (Bourdet) plot
#
popp='Pression'
plot_bourd(te,pf,dt,dp,popp)
hap='n'
while hap=='n':
    pf, dp, dt = deriv(lt,te,p)
    x_IARF, reg_dp, ind_IARF_deb, ind_IARF_fin, y_dp_0 = der_auto(dt,dp)
    ### Plotty time!
    plot_bourd_s(te,pf,dt,dp,x_IARF,reg_dp,popp)
    hap=raw_input('Alors heureux? (n = retrace Bourdet plot)')
print''
# y_dp_0=float(raw_input('Valeur de dp relevee avec pente de 0?'))
#############################################
###                                       ###
###            Horner plot               ###
###                                      ###
#############################################
#
# Horner time ratio
#
tH=(tp+t)/t
#
# Slope of the IARF straight line on the Horner plot
#
slope = penteH(p,tH,ind_IARF_deb,ind_IARF_fin)
#
# Extrapolated initial pressure p0
#
p0=slope*log10(tH[ind_IARF_fin]/1)+p[ind_IARF_fin]
print ''
print 'Pente de Horner', slope, 'psi/cycle log'
print ''
print 'Pseudo-pression initiale du réservoir', p0, 'psi'
print ''
#
# Straight line corresponding to the IARF slope
#
tH_pente=1
tH_pente=np.append(tH_pente,tH)
p_pente=-slope*log10(tH_pente)+p0
#
# Horner plot of the pseudo-pressure
#
hap='n'
while hap=='n':
    ylab='pression (psi)'
    plot_horner(tH,p,tH_pente,p_pente,ylab,popp)
    hap=raw_input('Alors heureux? (n = retrace Horner plot)\n')
mano=raw_input('Recalcul manuel de la pente ? o = oui, retrace Horner plot.\n')
if mano=='o':
    hap='n'
    while hap=='n':
        p0, slope, tH_pente, p_pente = penteM(tH,popp)
        ylab='pression (psi)'
        plot_horner(tH,p,tH_pente,p_pente,ylab,popp)
        hap=raw_input('Alors heureux? (n = retrace Horner plot)\n')
###################################################################
###                                                             ###
###   Flow-rate data retrieval and choice of the                ###
###   interpretation method                                     ###
###                                                             ###
###################################################################
Vprod=raw_input('Volume produit connu directement? o = oui\n')
if Vprod=='o':
    v_flu=float(raw_input('Volume produit? (m3)\n'))
else:
    unit_h=float(raw_input('Unité de hauteur de fluides et longueur des tiges? Mètres= 0 et pieds = 1\n'))
    print''
    h_flu=float(raw_input('Hauteur des fluides?\n'))
    print''
    l_MT=float(raw_input('Longueur des masses tiges?\n'))
    print''
    d_MT_FU=float(raw_input('Diamètre interne des masses tiges (en pouces) ?\n'))
    print''
    d_T_FU=float(raw_input('Diamètre interne des tiges (en pouces) ?\n'))
    print''
    # Internal cross-section areas of drill collars (MT) and pipes (T), m2.
    A_MT=(d_MT_FU*0.0254/2)**2*np.pi
    A_T=(d_T_FU*0.0254/2)**2*np.pi
    print"Aire interne des masses tiges",A_MT,'m2'
    print''
    print"Aire interne des tiges",A_T,'m2'
    print''
    #
    # Convert lengths from feet to metres
    #
    if unit_h==1:
        h_flu=h_flu*0.3048
        l_MT=l_MT*0.3048
    #
    # Produced volume inside the drill string, SI units
    #
    if h_flu>l_MT:
        v_flu=l_MT*A_MT+(h_flu-l_MT)*A_T
    else:
        v_flu=h_flu*A_MT
# NOTE(review): v_flu_FU and q must be computed on BOTH input paths
# (direct volume and fluid-height); the original dump's indentation is
# ambiguous here — confirm against the upstream file.
v_flu_FU=v_flu*6.2898
#
# Flow rate from produced volume and production time (bbl/day)
#
q=v_flu_FU/(tp/1440)
###################################################################
###                                                             ###
###              Fluid-property calculations                    ###
###                                                             ###
###################################################################
T_USI, T_FU =Temp()
S=float(raw_input('Salinité (en pourcentage de masse) ?\n'))
print''
#
# Formation-water density in lb/ft**3
#
rau_w=62.368+0.438603*S+1.60074e-3*S**2
#
# Formation-water viscosity after McCain - 1991
#
mu_w1=(109.574-8.40564*S+0.313314*S**2+8.72213e-3*S**3)*T_FU**(-(1.12166-2.63951e-2*S+6.79461e-4*S**2+5.47119e-5*S**3-1.55586e-6*S**4))
mu_w=mu_w1*(0.9994+4.0295e-5*p0+3.1062e-9*p0**2)
#
# Formation Volume Factor of the formation waters after McCain - 1991
#
B_w=(1+(-1.0001e-2+1.3391e-4*T_FU+5.50654e-7*T_FU**2))*(1+(-1.95301e-9*p0*T_FU-1.72834e-13*p0**2*T_FU-3.58922e-7*p0-2.25341e-10*p0**2))
rau_w=rau_w*B_w
unit_z=raw_input("Unité de profondeur des obturateurs et d'élévation du KB? (m)ètres ou (f)eet\n")
print''
z_inf=float(raw_input('Profondeur du bas de la zone investiguée ?\n'))
print ''
z_sup=float(raw_input('Profondeur du haut de la zone investiguée ?\n'))
print ''
if unit_z=='m':
z_inf_USI=z_inf
z_inf=z_inf/0.3048
z_sup_USI=z_sup
z_sup=z_sup/0.3048
if unit_z=='f':
z_inf_USI=z_inf*0.3048
z_sup_USI=z_sup*0.3048
h=z_inf-z_sup
print'Épaisseur de la formation testée:',h,'pieds'
print''
h_USI=h*0.3048
#
# Calcul de la perméabilité d'après la pente de la droite de Horner Ezekwe - 11.30
#
m=slope
k=162.6*q*B_w*mu_w/m/h
#
# Calcul de la perméabilité d'après la hauteur de la dérivée à l'IARF
#
k_Bourdet=162.6*q*B_w*mu_w*0.5/h/y_dp_0
print ''
print "Perméabilité de la formation d'après la méthode de Horner k =",k,'md'
print ''
print "Perméabilité de la formation, d'après la dérivée, k =",k_Bourdet,'md'
print ''
#######################################
### ###
### Rendu sexy ###
### ###
#######################################
#
# Plots de Horner et Bourdet avec les k et p0
#
pouits=raw_input("Cote du puits?")
print ''
num=raw_input("Numéro d'essai?")
print ''
typ=raw_input("ISI ou FSI?")
print ''
Essai=pouits+'DST'+num+typ
hkb=float(raw_input('Élévation du Kelly Bushing (même unité que z obturateurs)'))
print''
if unit_z=='f':
hkb=hkb*0.3048
plot_horner_f(tH,p,tH_pente,p_pente,ylab,popp,k,p0,Essai)
plot_bourd_f(te,pf,dt,dp,x_IARF,reg_dp,popp,k_Bourdet,Essai)
#
# Définition d'une matrice de résultats inéressants modifiable
#
if Vprod=='o':
results=array([Essai,'k Horner (md)',k,'k Bourdet (md)',k_Bourdet,'pression intiale (psi)',p0,'Température °C',T_USI,'Température °F',T_FU,'Débit (bbl/j)',q,'Bas de la zone investiguée (m)',z_inf_USI,'Haut de la zone investiguée (m)',z_sup_USI,'Épaisseur (m)',h_USI,'Élévation du KB (m)',hkb,'FVF (-)',B_w,'Viscosité des fluides (cp)',mu_w,'Temps de production (min)',tp,'Ordonnée de la droite de pente nulle (psi)',y_dp_0,'Pente de la droite de Horner de pp (psi/cycle)',slope])
else:
results=array([Essai,'k Horner (md)',k,'k Bourdet (md)',k_Bourdet,'pression intiale (psi)',p0,'Température °C',T_USI,'Température °F',T_FU,'Débit (bbl/j)',q,'Bas de la zone investiguée (m)',z_inf_USI,'Haut de la zone investiguée (m)',z_sup_USI,'Épaisseur (m)',h_USI,'Élévation du KB (m)',hkb,'FVF (-)',B_w,'Viscosité des fluides (cp)',mu_w,'Temps de production (min)',tp,'Ordonnée de la droite de pente nulle (psi)',y_dp_0,'Pente de la droite de Horner de pp (psi/cycle)',slope,'Hauteur de fluide récupérée (m)',h_flu,'Diamètre interne des masses tiges (in)',d_MT_FU,'Diamètre interne des tiges (in)',d_T_FU,'Longueur des masses tiges (m)',l_MT])
#
# Enregistrement des résultats dans un fichier *.csv
#
results.tofile(file='resultatsfi.csv', sep='\n')
#
# Ouverture d'un éditeur de texte pour visualiser/copier les résultats
#
#os.system('leafpad resultatsfi.csv')
#
# Fun avec LaTeX
#
l1='\\documentclass[10pt]{article} \n\\usepackage[utf8x]{inputenc} \n\\usepackage[frenchb,english]{babel} \n\\usepackage[T1]{fontenc} \n\\usepackage{lmodern} \n \\usepackage{graphicx} \n\n\\begin{document} \n\n\\begin{table}[!h]'
l2='%s %s %s' % ("\\caption{Résultats de l'essai",Essai,'}')
l3='\\begin{center}\n\\begin{tabular}{|c|c|c|}\\hline'
l4='\\bfseries Perméabilité Horner & %.4f & md\\\\ ' % k
l5='\\bfseries Perméabilité Bourdet & %.4f & md\\\\ ' % k_Bourdet
l6='\\bfseries Pression intiale & %.2f & psi\\\\ ' % p0
l7='\\bfseries Température & %.1f & ° C\\\\ ' % T_USI
l8='\\bfseries Température & %.1f & ° F\\\\ ' % T_FU
l9='\\bfseries Bas de la zone investiguée & %s & m\\\\ ' % z_inf_USI
l10='\\bfseries Haut de la zone investiguée & %s & m\\\\ ' % z_sup_USI
l11='\\bfseries Épaisseur & %s & m\\\\ ' % h_USI
l12='\\bfseries Élévation du KB & %s & m\\\\ ' % hkb
l13='\\bfseries Temps de production & %.2f & min\\\\ ' % tp
l14='\\bfseries Débits & %.2f & bbl/j\\\\ ' % q
l15='\\bfseries FVF & %.2f & -\\\\ ' % B_w
l16='\\bfseries Viscosité & %.3f & cp\\\\ ' % mu_w
l17='\\bfseries Ordonnée de la droite de pente nulle & %.1f & psi$^2$/cp\\\\' % y_dp_0
l18='\\bfseries Pente de la droite de Horner & %.1f & psi/cycle log\\\\' % slope
llastab='\\hline\n\\end{tabular}\n\\end{center}\n\\end{table}\n'
lfig1="\\begin{figure}[!h]\\centering\\includegraphics[scale=0.55]{%sHp.eps}\\caption{Horner plot de l'essai %s}\\end{figure}" % (Essai, Essai)
lfig2="\\begin{figure}[!h]\\centering\\includegraphics[scale=0.55]{%sBp.eps}\\caption{Derivative plot de l'essai %s}\\end{figure}" % (Essai, Essai)
llast='\n\\end{document}'
if Vprod !='o':
l19='\\bfseries Hauteur de fluides récupérés & %s & m\\\\ ' % h_flu
l20='\\bfseries Longueur des masses tiges & %s & m\\\\ ' % l_MT
l21='\\bfseries Diamètre interne des MT & %s & in\\\\ ' % d_MT_FU
l22='\\bfseries Diamètre interne des tiges & %s & in\\\\ ' % d_T_FU
crlatex=array([l1,l2,l3,l4,l5,l6,l7,l8,l9,l10,l11,l12,l13,l14,l15,l16,l17,l18,l19,l20,l21,l22,llastab,lfig1,lfig2,llast])
else:
crlatex=array([l1,l2,l3,l4,l5,l6,l7,l8,l9,l10,l11,l12,l13,l14,l15,l16,l17,l18,llastab,lfig1,lfig2,llast])
crlatex.tofile(file='woupfi.tex', sep='\n')
#
# Partie de compilation du fichier tex, affichage du dvi et effacage des déchets
# sert surtout à se faire mousser
#
os.system('latex woupfi.tex')
os.system('xdvi woupfi.dvi')
os.system('rm woupfi.log')
os.system('rm woupfi.aux')
#
# Message de fin
#
print''
print" -"
print" -"
print" -"
print " - - - - - Tout est bien qui fini bien. - - - - - "
print" -"
print" -"
print" -"
print ''
| ogirou/ODSTA | ODSTAIF.py | Python | mit | 13,600 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-16 00:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.6: adds a nullable FK 'sesion' on Turno,
    # linking each appointment to the treatment session performed during it.
    # Nullable because rows created before this migration have no session.
    # Do not edit applied migrations by hand.
    dependencies = [
        ('tratamientos', '0003_auto_20160515_2028'),
        ('turnos', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='turno',
            name='sesion',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tratamientos.Sesion', verbose_name='sesión realizada'),
        ),
    ]
| mava-ar/sgk | src/turnos/migrations/0002_turno_sesion.py | Python | apache-2.0 | 623 |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import os
import sys
import shutil
import subprocess
from fnmatch import fnmatchcase
from distutils.util import convert_path
# Do not EVER use setuptools, it makes cythonization fail
# Distribute fixes that
from distutils.core import setup, Extension
import numpy
# import numba
import gen_type_conversion
from Cython.Distutils import build_ext
from Cython.Distutils.extension import Extension as CythonExtension
if sys.version_info[:2] < (2, 6):
raise Exception('numba requires Python 2.6 or greater.')
import versioneer
#------------------------------------------------------------------------
# Setup constants and arguments
#------------------------------------------------------------------------
# Configure versioneer (version strings derived from git tags) and wire
# the Cython build_ext command into the distutils command table.
versioneer.versionfile_source = 'numba/_version.py'
versioneer.versionfile_build = 'numba/_version.py'
versioneer.tag_prefix = ''
versioneer.parentdir_prefix = 'numba-'
cmdclass = versioneer.get_cmdclass()
cmdclass['build_ext'] = build_ext
setup_args = {
    'long_description': open('README.md').read(),
}
# Paths used to vendor the pyextensibletype dependency into the package.
numba_root = os.path.dirname(os.path.abspath(__file__))
deps_root = os.path.join(numba_root, 'deps')
pyext_root = os.path.join(deps_root, 'pyextensibletype')
pyext_dst = os.path.join(numba_root, "numba", "pyextensibletype")
def get_include():
    """Use numba.get_include() instead (make numba importable without
    building it first)

    Returns the in-tree C header directory used below as an include path.
    """
    return os.path.join(numba_root, "numba", "include")
numba_include_dir = get_include()
#------------------------------------------------------------------------
# Package finding
#------------------------------------------------------------------------
def find_packages(where='.', exclude=()):
    """Breadth-first scan for Python packages under *where*.

    A directory counts as a package when its name has no dot and it
    contains an ``__init__.py``.  Dotted package names are returned.
    ``exclude`` is a tuple of fnmatch patterns; on Python 3 the
    ``*py2only*`` pattern is excluded automatically.
    """
    out = []
    stack=[(convert_path(where), '')]
    while stack:
        # FIFO pop keeps parent packages listed before their children.
        where, prefix = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where,name)
            if ('.' not in name and os.path.isdir(fn) and
                os.path.isfile(os.path.join(fn, '__init__.py'))
               ):
                out.append(prefix+name)
                stack.append((fn, prefix+name+'.'))
    if sys.version_info[0] == 3:
        exclude = exclude + ('*py2only*', )
    for pat in list(exclude) + ['ez_setup', 'distribute_setup']:
        out = [item for item in out if not fnmatchcase(item, pat)]
    return out
#------------------------------------------------------------------------
# 2to3
#------------------------------------------------------------------------
def run_2to3():
    """Install a restricted set of lib2to3 fixers into the build_py command.

    Only called when installing under Python 3 (see below); sources are
    converted at build time rather than shipped pre-converted.
    """
    import lib2to3.refactor
    from distutils.command.build_py import build_py_2to3 as build_py
    print("Installing 2to3 fixers")
    # need to convert sources to Py3 on installation
    fixes = 'dict imports imports2 unicode ' \
            'xrange itertools itertools_imports long types'.split()
    fixes = ['lib2to3.fixes.fix_' + fix
             for fix in fixes]
    build_py.fixer_names = fixes
    # Mutates the module-level distutils command table.
    cmdclass["build_py"] = build_py
    # cmdclass["build"] = build_py
    # Distribute options
    # setup_args["use_2to3"] = True
#------------------------------------------------------------------------
# pyextensibletype
#------------------------------------------------------------------------
def cleanup_pyextensibletype():
    """Remove any previously vendored copy of pyextensibletype."""
    if os.path.exists(pyext_dst):
        shutil.rmtree(pyext_dst)
def register_pyextensibletype():
    """Vendor deps/pyextensibletype into numba/ and return its Extensions.

    Touches __init__.py files so the deps tree is importable, copies the
    sources under the package, then asks the vendored setupconfig for the
    distutils Extension objects to build.
    """
    with open(os.path.join(deps_root, '__init__.py'), 'w'):
        pass
    with open(os.path.join(pyext_root, '__init__.py'), 'w'):
        pass
    shutil.copytree(pyext_root, pyext_dst)
    from deps.pyextensibletype import setupconfig
    exts = setupconfig.get_extensions(pyext_dst, "numba.pyextensibletype")
    return exts
#------------------------------------------------------------------------
# Generate code for build
#------------------------------------------------------------------------
# Only generate code and vendor pyextensibletype when an actual build or
# install command was requested on the command line.
build = set(sys.argv) & set(['build', 'build_ext', 'install',
                             'bdist_wininst'])
cleanup_pyextensibletype()
if build:
    gen_type_conversion.run()
    # TODO: Finish and release pyextensibletype
    extensibletype_extensions = register_pyextensibletype()
else:
    extensibletype_extensions = []
extensibletype_include = "numba/pyextensibletype/include"
if sys.version_info[0] >= 3:
    run_2to3()
#------------------------------------------------------------------------
# setup
#------------------------------------------------------------------------
# Single distutils entry point: package metadata, data files, and the C /
# Cython extension modules (vectorize internals, external utilities, and
# the numba wrapper types).
setup(
    name="numba",
    version=versioneer.get_version(),
    author="Continuum Analytics, Inc.",
    author_email="numba-users@continuum.io",
    url="http://numba.github.com",
    license="BSD",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        # "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        # "Programming Language :: Python :: 3.2",
        "Topic :: Utilities",
    ],
    description="compiling Python code using LLVM",
    packages=find_packages(exclude=('*deps*',)),
    entry_points = {
        'console_scripts': [
            'pycc = numba.pycc:main',
        ]
    },
    package_data={
        '': ['*.md'],
        'numba.minivect': ['include/*'],
        'numba.asdl.common': ['*.asdl'],
        'numba.asdl.py2_7': ['*.asdl'],
        'numba.asdl.py3_2': ['*.asdl'],
        'numba.asdl.py3_3': ['*.asdl'],
        'numba.external.utilities': ['*.c', '*.h'],
        'numba': ['*.c', '*.h', 'include/*', '*.pxd'],
        'numba.vectorize': ['*.h'],
    },
    ext_modules=extensibletype_extensions + [
        Extension(
            name="numba.vectorize._internal",
            sources=["numba/vectorize/_internal.c",
                     "numba/vectorize/_ufunc.c",
                     "numba/vectorize/_gufunc.c"],
            include_dirs=[numpy.get_include(), "numba/minivect/include/"],
            depends=["numba/vectorize/_internal.h",
                     "numba/minivect/include/miniutils.h"]),
        Extension(
            name="numba.external.utilities.utilities",
            sources=["numba/external/utilities/utilities.c"],
            include_dirs=[numba_include_dir, extensibletype_include],
            depends=["numba/external/utilities/type_conversion.c",
                     "numba/external/utilities/virtuallookup.c",
                     "numba/external/utilities/generated_conversions.c",
                     "numba/external/utilities/generated_conversions.h"]),
        CythonExtension(
            name="numba.pyconsts",
            sources=["numba/pyconsts.pyx"],
            depends=["numba/_pyconsts.pxd"],
            include_dirs=[numba_include_dir]),
        CythonExtension(
            name="numba.exttypes.extension_types",
            sources=["numba/exttypes/extension_types.pyx"],
            cython_gdb=True),
        CythonExtension(
            name="numba.numbawrapper",
            sources=["numba/numbawrapper.pyx", "numba/numbafunction.c"],
            depends=["numba/numbafunction.h"],
            include_dirs=[numba_include_dir,
                          numpy.get_include()],
            cython_gdb=True),
    ],
    cmdclass=cmdclass,
    **setup_args
)
| shiquanwang/numba | setup.py | Python | bsd-2-clause | 7,359 |
#!/usr/bin/env python
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2018 NIWA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utility functions for printing and manipulating PARSEC NESTED DICTS.
The copy and override functions below assume values are either dicts
(nesting) or shallow collections of simple types.
"""
import sys
from copy import copy
from parsec.OrderedDict import OrderedDictWithDefaults
def listjoin(lst, none_str=''):
    """Return a comma-separated string built from *lst*.

    An element is quoted (via repr) when its string form contains a
    comment or list-delimiter character.  ``None`` elements (and an
    empty/None list) render as *none_str*.  Multi-line values are not
    handled here.
    """
    if not lst:
        return none_str
    specials = ',#"\''
    rendered = []
    for element in lst:
        if element is None:
            rendered.append(none_str)
            continue
        text = str(element)
        if any(ch in text for ch in specials):
            # Quote elements containing delimiters/comment chars.
            rendered.append(repr(element))
        else:
            rendered.append(text)
    return ', '.join(rendered)
def printcfg(cfg, level=0, indent=0, prefix='', none_str='',
             handle=sys.stdout):
    """Pretty-print a parsec config item or section (nested dict).

    As returned by parse.config.get().

    Iterative depth-first traversal using an explicit LIFO stack; each
    stack entry is (key, value, heading-bracket depth, indent depth).
    """
    stack = [("", cfg, level, indent)]
    while stack:
        key_i, cfg_i, level_i, indent_i = stack.pop()
        spacer = " " * 4 * (indent_i - 1)
        if isinstance(cfg_i, dict):
            if not cfg_i and none_str is None:
                # Don't print empty sections if none_str is None. This does not
                # handle sections with no items printed because the values of
                # all items are empty or None.
                continue
            if key_i and level_i:
                # Print heading
                handle.write("%s%s%s%s%s\n" % (
                    prefix, spacer, '[' * level_i, str(key_i), ']' * level_i))
            # Nested sections are printed after normal settings
            subsections = []
            values = []
            for key, item in cfg_i.items():
                if isinstance(item, dict):
                    subsections.append((key, item, level_i + 1, indent_i + 1))
                else:
                    values.append((key, item, level_i + 1, indent_i + 1))
            # Pushed in reverse so pop() restores the original order, with
            # all plain values popped before any subsection.
            stack += reversed(subsections)
            stack += reversed(values)
        else:
            key = ""
            if key_i:
                key = "%s = " % key_i
            if cfg_i is None:
                value = none_str
            elif isinstance(cfg_i, list):
                value = listjoin(cfg_i, none_str)
            elif "\n" in str(cfg_i) and key:
                # Multi-line value: emit as a triple-quoted block.
                value = '"""\n'
                for line in str(cfg_i).splitlines(True):
                    value += spacer + " " * 4 + line
                value += '\n' + spacer + '"""'
            else:
                value = str(cfg_i)
            if value is not None:
                handle.write("%s%s%s%s\n" % (prefix, spacer, key, value))
def replicate(target, source):
    """Replicate source *into* target.

    Source elements need not exist in target already, so source overrides
    common elements in target and otherwise adds elements to it.

    Recurses into nested dicts; lists are shallow-copied so the target
    never aliases a list owned by the source.  Mutually recursive with
    pdeepcopy() for copying the parallel ``defaults_`` mappings.
    """
    if not source:
        return
    if hasattr(source, "defaults_"):
        target.defaults_ = pdeepcopy(source.defaults_)
    for key, val in source.items():
        if isinstance(val, dict):
            if key not in target:
                target[key] = OrderedDictWithDefaults()
            if hasattr(val, 'defaults_'):
                target[key].defaults_ = pdeepcopy(val.defaults_)
            replicate(target[key], val)
        elif isinstance(val, list):
            # Copy, don't alias, list values.
            target[key] = val[:]
        else:
            target[key] = val
def pdeepcopy(source):
    """Return a deep copy of the pdict *source* as a fresh
    OrderedDictWithDefaults, built by replicating *source* into it."""
    duplicate = OrderedDictWithDefaults()
    replicate(duplicate, source)
    return duplicate
def poverride(target, sparse, prepend=False):
    """Override or add items in a target pdict.

    Target sub-dicts must already exist. For keys that already exist in the
    target, the value is overridden in-place. New keys can be prepended in the
    target (Cylc use case: broadcast environment variables should be defined
    first in the user environment section, to allow use in subsequent variable
    definitions).
    """
    if not sparse:
        # Nothing to override.  (The old code rebound the *local* name
        # ``target`` to a new dict here, which could never affect the
        # caller's dict; that dead assignment has been removed.)
        return
    for key, val in sparse.items():
        if isinstance(val, dict):
            # Recurse into an existing target sub-dict.
            poverride(target[key], val, prepend)
        else:
            if prepend and (key not in target):
                # Prepend new items in the target ordered dict.
                setitem = target.prepend
            else:
                # Override in-place in the target ordered dict.
                setitem = target.__setitem__
            if isinstance(val, list):
                # Copy, don't alias, list values.
                setitem(key, val[:])
            else:
                setitem(key, val)
def m_override(target, sparse):
    """Override items in a target pdict.

    Target keys must already exist unless there is a "__MANY__" placeholder in
    the right position.

    Iterative breadth-first merge; ``many_defaults`` carries the
    __MANY__ template applicable at each level, and the collected
    (dest, defaults) pairs are attached as ``defaults_`` at the end.
    """
    if not sparse:
        # NOTE(review): rebinding the local name ``target`` below has no
        # effect on the caller's dict — this branch is effectively just
        # an early return; confirm intent before changing.
        target = OrderedDictWithDefaults()
        return
    stack = [(sparse, target, [], OrderedDictWithDefaults())]
    defaults_list = []
    while stack:
        # FIFO pop: parents are processed before their children.
        source, dest, keylist, many_defaults = stack.pop(0)
        if many_defaults:
            defaults_list.append((dest, many_defaults))
        for key, val in source.items():
            if isinstance(val, dict):
                if key in many_defaults:
                    child_many_defaults = many_defaults[key]
                else:
                    child_many_defaults = OrderedDictWithDefaults()
                if key not in dest:
                    if '__MANY__' in dest:
                        dest[key] = OrderedDictWithDefaults()
                        child_many_defaults = dest['__MANY__']
                    elif '__MANY__' in many_defaults:
                        # A 'sub-many' dict - would it ever exist in real life?
                        dest[key] = OrderedDictWithDefaults()
                        child_many_defaults = many_defaults['__MANY__']
                    elif key in many_defaults:
                        dest[key] = OrderedDictWithDefaults()
                    else:
                        # TODO - validation prevents this, but handle properly
                        # for completeness.
                        raise Exception(
                            "parsec dict override: no __MANY__ placeholder" +
                            "%s" % (keylist + [key])
                        )
                stack.append(
                    (val, dest[key], keylist + [key], child_many_defaults))
            else:
                if key not in dest:
                    if ('__MANY__' in dest or key in many_defaults or
                            '__MANY__' in many_defaults):
                        # Lists are shallow-copied so dest never aliases
                        # a list owned by the sparse source.
                        if isinstance(val, list):
                            dest[key] = val[:]
                        else:
                            dest[key] = val
                    else:
                        # TODO - validation prevents this, but handle properly
                        # for completeness.
                        raise Exception(
                            "parsec dict override: no __MANY__ placeholder" +
                            "%s" % (keylist + [key])
                        )
                if isinstance(val, list):
                    dest[key] = val[:]
                else:
                    dest[key] = val
    for dest_dict, defaults in defaults_list:
        dest_dict.defaults_ = defaults
def un_many(cfig):
    """Remove any '__MANY__' items from a nested dict, in-place.

    Falls back to the parallel ``defaults_`` mapping (if the dict type
    keeps one) when the key is not in the dict proper.
    """
    if not cfig:
        return
    # Snapshot the items: deleting keys while iterating a live dict view
    # raises RuntimeError on Python 3 (Python 2's .items() already
    # returned a list, so behaviour is unchanged there).
    for key, val in list(cfig.items()):
        if key == '__MANY__':
            try:
                del cfig[key]
            except KeyError:
                if hasattr(cfig, 'defaults_') and key in cfig.defaults_:
                    del cfig.defaults_[key]
                else:
                    raise
        elif isinstance(val, dict):
            un_many(cfig[key])
def itemstr(parents=None, item=None, value=None):
"""
Pretty-print an item from list of sections, item name, and value
E.g.: ([sec1, sec2], item, value) to '[sec1][sec2]item = value'.
"""
if parents:
keys = copy(parents)
if value and not item:
# last parent is the item
item = keys[-1]
keys.remove(item)
text = '[' + ']['.join(keys) + ']'
else:
text = ''
if item:
text += str(item)
if value:
text += " = " + str(value)
if not text:
text = str(value)
return text
if __name__ == "__main__":
print 'Item strings:'
print ' ', itemstr(['sec1', 'sec2'], 'item', 'value')
print ' ', itemstr(['sec1', 'sec2'], 'item')
print ' ', itemstr(['sec1', 'sec2'])
print ' ', itemstr(['sec1'])
print ' ', itemstr(item='item', value='value')
print ' ', itemstr(item='item')
print ' ', itemstr(value='value')
# error or useful?
print ' ', itemstr(parents=['sec1', 'sec2'], value='value')
print 'Configs:'
printcfg('foo', prefix=' > ')
printcfg(['foo', 'bar'], prefix=' > ')
printcfg({}, prefix=' > ')
printcfg({'foo': 1}, prefix=' > ')
printcfg({'foo': None}, prefix=' > ')
printcfg({'foo': None}, none_str='(none)', prefix=' > ')
printcfg({'foo': {'bar': 1}}, prefix=' > ')
printcfg({'foo': {'bar': None}}, prefix=' > ')
printcfg({'foo': {'bar': None}}, none_str='(none)', prefix=' > ')
printcfg({'foo': {'bar': 1, 'baz': 2, 'qux': {'boo': None}}},
none_str='(none)', prefix=' > ')
| arjclark/cylc | lib/parsec/util.py | Python | gpl-3.0 | 10,442 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from types import *
def typer(x,y):
    """Compare x and y, printing the result in Russian; if either
    argument is a (byte) string, report that a string was received
    instead of comparing."""
    # Python 2: types.StringType is `str`; a unicode argument would NOT
    # match this check and would fall through to the comparison below.
    if type(x) is StringType or type(y) is StringType :
        print u'получена строка'  # "received a string"
    else:
        if x > y:
            print u'больше'  # "greater"
        elif x < y:
            print u'меньше'  # "less"
        else:
            print u'равно'  # "equal"
# Demo calls: any str argument triggers the "string received" branch;
# the remaining calls exercise greater / less / equal.
typer("12", 4)
typer("12","4")
typer(12, 4)
typer(4, 45)
typer(4, 4)
"""SCons.Tool.link
Tool-specific initialization for the generic Posix linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/link.py 5023 2010/06/14 22:05:46 scons"
import SCons.Defaults
import SCons.Tool
import SCons.Util
import SCons.Warnings
from SCons.Tool.FortranCommon import isfortran
cplusplus = __import__('c++', globals(), locals(), [])
issued_mixed_link_warning = False
def smart_link(source, target, env, for_signature):
    """Select the link driver for the languages present in *source*.

    Returns the construction-variable reference to use as the linker:
    '$CXX' for C++ (and for mixed C++/Fortran, with a one-time warning),
    '$FORTRAN' for Fortran-only sources, otherwise '$CC'.
    """
    links_cxx = cplusplus.iscplusplus(source)
    links_fortran = isfortran(env, source)
    if links_cxx and links_fortran:
        global issued_mixed_link_warning
        if not issued_mixed_link_warning:
            # Warn only once per process about the fragile mixed link.
            msg = ("Using $CXX to link Fortran and C++ code together.\n\t"
                   "This may generate a buggy executable if the '%s'\n\t"
                   "compiler does not know how to deal with Fortran runtimes.")
            SCons.Warnings.warn(SCons.Warnings.FortranCxxMixWarning,
                                msg % env.subst('$CXX'))
            issued_mixed_link_warning = True
        return '$CXX'
    if links_fortran:
        return '$FORTRAN'
    if links_cxx:
        return '$CXX'
    return '$CC'
def shlib_emitter(target, source, env):
    """Mark every target node as shared; pass targets and sources through."""
    for node in target:
        node.attributes.shared = 1
    return (target, source)
def generate(env):
    """Add Builders and construction variables for gnulink to an Environment.

    Creates the shared-library, program and loadable-module builders,
    routes $LINK through $SMARTLINK (smart_link above picks $CC/$CXX/
    $FORTRAN per source language), and defines the library/link-line
    construction variables shared by the Posix linker tools.
    """
    SCons.Tool.createSharedLibBuilder(env)
    SCons.Tool.createProgBuilder(env)
    # Shared libraries use the normal link driver plus -shared.
    env['SHLINK'] = '$LINK'
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared')
    env['SHLINKCOM'] = '$SHLINK -o $TARGET $SHLINKFLAGS $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
    # don't set up the emitter, cause AppendUnique will generate a list
    # starting with None :-(
    env.Append(SHLIBEMITTER = [shlib_emitter])
    env['SMARTLINK'] = smart_link
    env['LINK'] = "$SMARTLINK"
    env['LINKFLAGS'] = SCons.Util.CLVar('')
    env['LINKCOM'] = '$LINK -o $TARGET $LINKFLAGS $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
    env['LIBDIRPREFIX']='-L'
    env['LIBDIRSUFFIX']=''
    # _stripixes removes lib prefixes/suffixes before applying -l.
    env['_LIBFLAGS']='${_stripixes(LIBLINKPREFIX, LIBS, LIBLINKSUFFIX, LIBPREFIXES, LIBSUFFIXES, __env__)}'
    env['LIBLINKPREFIX']='-l'
    env['LIBLINKSUFFIX']=''
    # Platform-specific shared-library suffixes.
    if env['PLATFORM'] == 'hpux':
        env['SHLIBSUFFIX'] = '.sl'
    elif env['PLATFORM'] == 'aix':
        env['SHLIBSUFFIX'] = '.a'
    # For most platforms, a loadable module is the same as a shared
    # library.  Platforms which are different can override these, but
    # setting them the same means that LoadableModule works everywhere.
    SCons.Tool.createLoadableModuleBuilder(env)
    env['LDMODULE'] = '$SHLINK'
    # don't set up the emitter, cause AppendUnique will generate a list
    # starting with None :-(
    env.Append(LDMODULEEMITTER='$SHLIBEMITTER')
    env['LDMODULEPREFIX'] = '$SHLIBPREFIX'
    env['LDMODULESUFFIX'] = '$SHLIBSUFFIX'
    env['LDMODULEFLAGS'] = '$SHLINKFLAGS'
    env['LDMODULECOM'] = '$LDMODULE -o $TARGET $LDMODULEFLAGS $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
def exists(env):
    """Always return None: this module is common logic shared by the
    concrete linker tools, not a standalone Tool in its own right."""
    return None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| kerwinxu/barcodeManager | zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/link.py | Python | bsd-2-clause | 4,734 |
def gen_next(v, x, y):
    """Advance one step in the code sequence (Advent of Code 2015, day 25).

    v is the current code; (x, y) is the current 0-based (column-ish,
    row-ish) position in the anti-diagonal enumeration of the grid.
    Returns [new_code, new_x, new_y].
    """
    # Renamed from `next`, which shadowed the builtin.
    code = (v * 252533) % 33554393
    if x == 0:
        # Reached the top of a diagonal: restart at the next one.
        x = y + 1
        y = 0
    else:
        # Step up-right along the current anti-diagonal.
        x -= 1
        y += 1
    return [code, x, y]
# 10x10 scratch grid -- never read back (the populating line in the loop
# below is commented out).
a = [[0 for i in range(10)] for j in range(10)]
# Starting code and position from the puzzle statement.
[v, x, y] = [20151125, 0, 0]
# Target cell (1-based column/row from the puzzle input) ...
dx = 3010
dy = 3019
# ... converted to the 0-based coordinates used by gen_next().
dx -= 1
dy -= 1
# Walk the anti-diagonal enumeration until the target (x, y) is reached.
while True:
    [v, x, y] = gen_next(v, x, y)
    #a[x][y] = v
    if x == dx and y == dy:
        print(v)
        break
print('res =', v)
| imylyanyk/AdventOfCode | day25.py | Python | mit | 429 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Logictech MouseMan serial protocol.
http://www.softnco.demon.co.uk/SerialMouse.txt
"""
from twisted.internet import protocol
class MouseMan(protocol.Protocol):
    """
    Parser for Logitech MouseMan serial mouse protocol (compatible
    with Microsoft Serial Mouse).

    Implemented as a byte-at-a-time state machine: 'initial' waits for
    a sync byte (bit 6 set) carrying the button bits and the high bits
    of the deltas; 'horiz' and 'vert' collect the X/Y movement bytes;
    'maybemiddle' absorbs the optional fourth byte that Logitech adds
    (middle button) to the 3-byte Microsoft packet.  Subclasses override
    the down_*/up_*/move callbacks.
    """
    state = 'initial'
    # Current button states (truthy bit values) and previous snapshot
    # values used for edge detection in snapshot().
    leftbutton=None
    rightbutton=None
    middlebutton=None
    leftold=None
    rightold=None
    middleold=None
    # Signed movement deltas decoded from the current packet.
    horiz=None
    vert=None
    horizold=None
    vertold=None
    def down_left(self):
        # Override: left button pressed.
        pass
    def up_left(self):
        # Override: left button released.
        pass
    def down_middle(self):
        # Override: middle button pressed.
        pass
    def up_middle(self):
        # Override: middle button released.
        pass
    def down_right(self):
        # Override: right button pressed.
        pass
    def up_right(self):
        # Override: right button released.
        pass
    def move(self, x, y):
        # Override: mouse moved by (x, y) since the last packet.
        pass
    # NOTE(review): horiz/vert are re-declared here, shadowing the identical
    # class attributes above -- redundant but harmless.
    horiz=None
    vert=None
    def state_initial(self, byte):
        # The first byte of a packet has bit 6 set; bits 5/4 carry the
        # left/right button state, bits 3..0 the high bits of Y/X.
        if byte & 1<<6:
            self.word1=byte
            self.leftbutton = byte & 1<<5
            self.rightbutton = byte & 1<<4
            return 'horiz'
        else:
            # Not a sync byte: stay in 'initial' until one arrives.
            return 'initial'
    def state_horiz(self, byte):
        if byte & 1<<6:
            # A new sync byte mid-packet: resynchronize.
            return self.state_initial(byte)
        else:
            # X delta = high 2 bits from word1 plus 6 low bits here,
            # sign-extended from 8 bits.
            x=(self.word1 & 0x03)<<6 | (byte & 0x3f)
            if x>=128:
                x=-256+x
            self.horiz = x
            return 'vert'
    def state_vert(self, byte):
        if byte & 1<<6:
            # short packet
            return self.state_initial(byte)
        else:
            # Y delta = bits 3..2 of word1 plus 6 low bits here,
            # sign-extended from 8 bits.
            x = (self.word1 & 0x0c)<<4 | (byte & 0x3f)
            if x>=128:
                x=-256+x
            self.vert = x
            self.snapshot()
            return 'maybemiddle'
    def state_maybemiddle(self, byte):
        if byte & 1<<6:
            # No fourth byte: this is already the next packet's sync byte.
            self.snapshot()
            return self.state_initial(byte)
        else:
            # Optional Logitech extension byte: bit 5 = middle button.
            self.middlebutton=byte & 1<<5
            self.snapshot()
            return 'initial'
    def snapshot(self):
        # Fire edge-triggered button callbacks and a move callback based
        # on the difference from the previously recorded state.
        if self.leftbutton and not self.leftold:
            self.down_left()
            self.leftold=1
        if not self.leftbutton and self.leftold:
            self.up_left()
            self.leftold=0
        if self.middlebutton and not self.middleold:
            self.down_middle()
            self.middleold=1
        if not self.middlebutton and self.middleold:
            self.up_middle()
            self.middleold=0
        if self.rightbutton and not self.rightold:
            self.down_right()
            self.rightold=1
        if not self.rightbutton and self.rightold:
            self.up_right()
            self.rightold=0
        if self.horiz or self.vert:
            self.move(self.horiz, self.vert)
    def dataReceived(self, data):
        # Feed each received byte through the current state handler;
        # each handler returns the name of the next state.
        for c in data:
            byte = ord(c)
            self.state = getattr(self, 'state_'+self.state)(byte)
| sorenh/cc | vendor/Twisted-10.0.0/twisted/protocols/mice/mouseman.py | Python | apache-2.0 | 2,882 |
'''
Copyright (C) 2016 Quinn D Granfor <spootdev@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import logging # pylint: disable=W0611
import bz2
from twisted.protocols.basic import Int32StringReceiver
from common import common_file
class NetworkEvents(Int32StringReceiver):
    """
    Process the network events for the server.

    One instance exists per client connection; validated clients are
    registered in the shared ``users`` mapping keyed by user name.
    """
    # __init__ is called for every new connection
    def __init__(self, users):
        self.MAX_LENGTH = 32000000  # pylint: disable=C0103
        self.cpu_use_table = {}
        # server info
        self.users = users
        self.user_host_name = None
        self.user_ip_addy = None
        self.user_user_name = None
        self.user_verified = 0

    def connectionMade(self):
        """
        Network connection made from client so ask for ident
        """
        logging.info('Got Connection')
        self.sendString('IDENT')

    def connectionLost(self, reason):
        """
        Network connection dropped so remove client
        """
        logging.info('Lost Connection')
        # dict.has_key() is deprecated (removed in Python 3); `in` is the
        # equivalent membership test and also handles an unset (None) name.
        if self.user_user_name in self.users:
            del self.users[self.user_user_name]

    def stringReceived(self, data):
        """
        Message received from client
        """
        msg = None
        # NOTE(review): splitting on ' ' assumes the payload contains no
        # spaces, but KODI_LOG/DEBUG_LOG carry raw bz2 data -- confirm the
        # client encodes/escapes the payload before sending.
        message_words = data.split(' ')
        logging.info('GOT Data: %s', data)
        logging.info('Message: %s', message_words[0])
        if message_words[0] == "VALIDATE":
            # have to create the self.player data so network knows how to send data back
            self.user_host_name = message_words[1]
            self.user_ip_addy = str(self.transport.getPeer()).split('\'')[1]
            self.user_user_name = message_words[1]
            self.users[message_words[1]] = self
            logging.info("user: %s %s", self.user_host_name, self.user_ip_addy)
        # user commands
        elif message_words[0] == "LOGIN":
            pass
        elif message_words[0] == "KODI_LOG":
            common_file.com_file_save_data(
                './log_debug/Kodi', bz2.decompress(message_words[1]), False, True, '.log')
        elif message_words[0] == "DEBUG_LOG":
            common_file.com_file_save_data(
                './log_debug/Debug', bz2.decompress(message_words[1]), False, True, '.log')
        else:
            logging.error("UNKNOWN TYPE: %s", message_words[0])
            msg = "UNKNOWN_TYPE"
        if msg is not None:
            logging.info("should be sending data")
            self.send_single_user(msg)

    def send_single_user(self, message):
        """
        Send message to single user (this connection's own protocol).
        """
        for user_host_name, protocol in self.users.iteritems():  # pylint: disable=W0612
            if protocol == self:
                logging.info('send single: %s', message)
                protocol.sendString(message.encode("utf8"))
                break

    def send_all_users(self, message):
        """
        Send message to all verified users.
        """
        for user_host_name, protocol in self.users.iteritems():
            if self.users[user_host_name].user_verified == 1:
                logging.info('send all: %s', message)
                protocol.sendString(message.encode("utf8"))
| MediaKraken/mkarchive | network_base_string_weblog.py | Python | gpl-2.0 | 4,025 |
"""
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seem to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information theoretic based evaluation scores: as they are only based
on cluster assignments rather than distances, hence not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
from pymongo import MongoClient
from operator import itemgetter
import jieba
client = MongoClient()

# Pull every news document from MongoDB, skipping records that cannot
# be read.
data = []
i = 0
for x in client.News.News.find():
    try:
        data.append(x)
        i += 1
    # BUG FIX: the original `except KeyError, TypeError:` is Python 2
    # syntax for "catch KeyError and bind it to the name TypeError" --
    # it never caught TypeError at all.  Catch both explicitly.
    except (KeyError, TypeError):
        continue

# Traditional-Chinese dictionary for jieba segmentation.
jieba.set_dictionary('dict.txt.big')

article_num = len(data)
words_table = []
for i in xrange(0, article_num):
    x = data[i]
    try:
        # Segment the article and join the tokens with spaces so the
        # vectorizer below can treat them as whitespace-separated words.
        words = [word for word in jieba.cut(x['content'], cut_all=False)]
        # reduce() on an empty sequence used to raise and skip the
        # article; preserve that by only appending non-empty results.
        if words:
            words_table.append(' '.join(words))
    except Exception:
        # Articles with missing or non-string 'content' are skipped.
        continue
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')

# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
              dest="n_components", type="int",
              help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
              action="store_false", dest="minibatch", default=True,
              help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
              action="store_false", dest="use_idf", default=True,
              help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
              action="store_true", default=False,
              help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=1000000,
              help="Maximum number of features (dimensions)"
              " to extract from text.")
op.add_option("--verbose",
              action="store_true", dest="verbose", default=False,
              help="Print progress reports inside k-means algorithm.")

print(__doc__)
op.print_help()

(opts, args) = op.parse_args()
if len(args) > 0:
    op.error("this script takes no arguments.")
    sys.exit(1)

###############################################################################
# Uncomment the following to do the analysis on all the categories
categories = None

# NOTE(review): `labels` is the article COUNT here, not per-document class
# labels; the metric calls that would consume it are commented out below.
labels = article_num
# Fixed number of clusters to extract.
true_k = 10

print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
    if opts.use_idf:
        # Perform an IDF normalization on the output of HashingVectorizer
        hasher = HashingVectorizer(n_features=opts.n_features,
                                   stop_words='english', non_negative=True,
                                   norm=None, binary=False)
        vectorizer = make_pipeline(hasher, TfidfTransformer())
    else:
        vectorizer = HashingVectorizer(n_features=opts.n_features,
                                       stop_words='english',
                                       non_negative=False, norm='l2',
                                       binary=False)
else:
    # In-memory vocabulary with optional IDF reweighting.
    vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
                                 min_df=2, stop_words='english',
                                 use_idf=opts.use_idf)
X = vectorizer.fit_transform(words_table)

print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()

if opts.n_components:
    print("Performing dimensionality reduction using LSA")
    t0 = time()
    # Vectorizer results are normalized, which makes KMeans behave as
    # spherical k-means for better results. Since LSA/SVD results are
    # not normalized, we have to redo the normalization.
    svd = TruncatedSVD(opts.n_components)
    lsa = make_pipeline(svd, Normalizer(copy=False))

    X = lsa.fit_transform(X)

    print("done in %fs" % (time() - t0))

    explained_variance = svd.explained_variance_ratio_.sum()
    print("Explained variance of the SVD step: {}%".format(
        int(explained_variance * 100)))

    print()

###############################################################################
# Do the actual clustering

if opts.minibatch:
    km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
                         init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
    km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
                verbose=opts.verbose)

print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()

#print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
#print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
#print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
#print("Adjusted Rand-Index: %.3f"
#      % metrics.adjusted_rand_score(labels, km.labels_))
#print("Silhouette Coefficient: %0.3f"
#      % metrics.silhouette_score(X, labels, sample_size=1000))
#print()

# Report the highest-weighted terms of each cluster centroid (only
# meaningful when the vocabulary and raw centroids are available).
if not (opts.n_components or opts.use_hashing):
    print("Top terms per cluster:")
    order_centroids = km.cluster_centers_.argsort()[:, ::-1]
    terms = vectorizer.get_feature_names()
    for i in range(true_k):
        print("Cluster %d:" % i, end='')
        for ind in order_centroids[i, :10]:
            print(' %s' % terms[ind], end='')
        print()
| irisyuichan/news_topic_mining | news_topic_clustering.py | Python | mit | 8,205 |
# -*- coding: utf-8 -*-
# Copyright 2018 OpenSynergy Indonesia
# Copyright 2022 PT. Simetri Sinergi Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# pylint: disable=locally-disabled, manifest-required-author
{
    # Odoo/OpenERP 8 addon manifest for the Timesheet Tier Validation module.
    "name": "Timesheet Tier Validation",
    "version": "8.0.1.0.0",
    "website": "https://simetri-sinergi.id",
    "author": "OpenSynergy Indonesia, PT. Simetri Sinergi Indonesia",
    "license": "AGPL-3",
    # Marked not installable in this release.
    "installable": False,
    "depends": [
        "hr_timesheet_sheet",
        "base_tier_validation",
    ],
    "data": ["views/hr_timesheet_sheet_views.xml"],
}
| open-synergy/opnsynid-hr | hr_timesheet_tier_validation/__openerp__.py | Python | agpl-3.0 | 612 |
from __future__ import unicode_literals

from django.db import models
# Required by every model's publish() method (timezone.now()); it was
# referenced but never imported, causing a NameError at runtime.
from django.utils import timezone
class Author(models.Model):
    """A content author: display name plus optional affiliation."""
    name = models.CharField(max_length=50)
    affiliation = models.CharField(max_length=100,null=True)
    last_modified_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the record with the current time and save it."""
        # NOTE(review): `timezone` is not among this module's visible imports
        # (expects django.utils.timezone) -- confirm it is imported.
        self.last_modified_date = timezone.now()
        self.save()
    def __str__(self):
        return self.name
class Reference(models.Model):
    """A bibliographic reference, optionally rendered as a hyperlink."""
    authors = models.CharField(max_length=50)
    year = models.IntegerField()
    journal = models.CharField(max_length=50, null=True)
    volume = models.CharField(max_length=10, null=True)
    page = models.CharField(max_length=10, null=True)
    url = models.URLField(max_length=200, null=True)
    search_key = models.CharField(max_length=200)
    last_modified_date = models.DateTimeField(
        blank=True, null=True)
    def set_params(self, params):
        """Bulk-assign model fields from a {field_name: value} mapping."""
        for key, value in params.items():
            setattr(self, key, value)
    def publish(self):
        """Stamp the record with the current time and save it."""
        # NOTE(review): requires `timezone` (django.utils.timezone) to be
        # imported at module level -- confirm.
        self.last_modified_date = timezone.now()
        self.save()
    def _citation(self):
        """Build 'Authors (year), journal, volume, page', skipping unset parts."""
        text = str(self.authors) + ' (' + str(self.year) + ')'
        for field_name in ['journal', 'volume', 'page']:
            val = getattr(self, field_name)
            if val is not None:
                text += ', ' + str(val)
        return text
    def __str__(self):
        # The citation used to be built twice, once per branch; now it is
        # built once and optionally wrapped in an anchor tag.
        if self.url is not None:
            return '<a href="' + str(self.url) + '">' + self._citation() + '</a>'
        return self._citation()
class TextBlock(models.Model):
    """A free-form block of text content."""
    text = models.TextField()
    last_modified_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the record with the current time and save it."""
        self.last_modified_date = timezone.now()
        self.save()
    def __str__(self):
        return self.text
class TutorialPage(models.Model):
    """A tutorial page: titled text with an author and cited references."""
    author = models.ForeignKey(Author,on_delete=models.SET_NULL,null=True)
    title = models.CharField(max_length=200)
    short_title = models.CharField(max_length=20,null=True)
    # presumably the page's position within a course ordering -- TODO confirm
    course_index = models.IntegerField(null=True)
    text = models.TextField()
    references = models.ManyToManyField(Reference)
    last_modified_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the record with the current time and save it."""
        self.last_modified_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
class ConceptPage(models.Model):
    """A concept page: titled text with an author (no reference list)."""
    author = models.ForeignKey(Author,on_delete=models.SET_NULL,null=True)
    title = models.CharField(max_length=200)
    short_title = models.CharField(max_length=20,null=True)
    course_index = models.IntegerField(null=True)
    text = models.TextField()
    last_modified_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the record with the current time and save it."""
        self.last_modified_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
class SitePage(models.Model):
    """A named static page of site text."""
    name = models.CharField(max_length=20)
    text = models.TextField()
    last_modified_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the record with the current time and save it."""
        self.last_modified_date = timezone.now()
        self.save()
    def __str__(self):
        return self.name
class InteractiveTool(models.Model):
    """An interactive tool entry: name, title and descriptive text."""
    author = models.ForeignKey(Author,on_delete=models.SET_NULL,null=True)
    name = models.CharField(max_length=20)
    title = models.CharField(max_length=200)
    text = models.TextField()
    tools_index = models.IntegerField(null=True)
    last_modified_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the record with the current time and save it."""
        self.last_modified_date = timezone.now()
        self.save()
    def __str__(self):
        return self.name
class GroundSurvey(models.Model):
    """A ground-based survey: name, PI and telescope diameter."""
    name = models.CharField(max_length=20)
    pi = models.CharField(max_length=40)
    # presumably in metres -- TODO confirm units
    tel_diameter = models.DecimalField(decimal_places=1, max_digits=5)
    last_modified_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the record with the current time and save it."""
        self.last_modified_date = timezone.now()
        self.save()
    def __str__(self):
        return self.name
class OnlineResource(models.Model):
    """An external link, grouped by category."""
    name = models.CharField(max_length=100)
    url = models.URLField(max_length=200)
    group = models.CharField(max_length=100)
    last_modified_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the record with the current time and save it."""
        self.last_modified_date = timezone.now()
        self.save()
    def __str__(self):
        return self.url
class Movie(models.Model):
    """Movie media metadata (file reference, caption, credit, thumbnail).

    NOTE(review): field-for-field identical to Picture below; a shared
    abstract base would remove the duplication (migration impact -- left
    as-is).
    """
    name = models.CharField(max_length=200,null=True)
    filename = models.CharField(max_length=100,null=True)
    caption = models.CharField(max_length=500,null=True)
    credit = models.CharField(max_length=50,null=True)
    url = models.URLField(max_length=200,null=True)
    thumbnail = models.CharField(max_length=100,null=True)
    keywords = models.CharField(max_length=200,null=True)
    last_modified_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the record with the current time and save it."""
        self.last_modified_date = timezone.now()
        self.save()
    def __str__(self):
        return self.name
class Picture(models.Model):
    """Picture media metadata (file reference, caption, credit, thumbnail)."""
    name = models.CharField(max_length=200,null=True)
    filename = models.CharField(max_length=100,null=True)
    caption = models.CharField(max_length=500,null=True)
    credit = models.CharField(max_length=50,null=True)
    url = models.URLField(max_length=200,null=True)
    thumbnail = models.CharField(max_length=100,null=True)
    keywords = models.CharField(max_length=200,null=True)
    last_modified_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the record with the current time and save it."""
        self.last_modified_date = timezone.now()
        self.save()
    def __str__(self):
        return self.name
class File(models.Model):
    """A named downloadable file reference."""
    name = models.CharField(max_length=200,null=True)
    filename = models.CharField(max_length=100,null=True)
    last_modified_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the record with the current time and save it."""
        self.last_modified_date = timezone.now()
        self.save()
    def __str__(self):
        return self.name
class Meeting(models.Model):
    """A meeting/conference announcement with dates and deadlines."""
    name = models.CharField(max_length=200)
    location = models.CharField(max_length=200)
    date_start = models.DateField()
    date_end = models.DateField()
    registration_deadline = models.DateField(null=True,blank=True)
    abstract_deadline = models.DateField(null=True,blank=True)
    url = models.URLField(null=True,blank=True)
    topic = models.CharField(max_length=500,null=True,blank=True)
    last_modified_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the record with the current time and save it."""
        self.last_modified_date = timezone.now()
        self.save()
    def __str__(self):
        return self.name
class Job(models.Model):
    """A job advertisement with application deadline."""
    title = models.CharField(max_length=100)
    location = models.CharField(max_length=100)
    deadline = models.DateField(null=True)
    description = models.CharField(max_length=1000,null=True)
    url = models.URLField(null=True)
    last_modified_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the record with the current time and save it."""
        self.last_modified_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
class Grant(models.Model):
    """A funding-opportunity announcement with application deadline."""
    title = models.CharField(max_length=100)
    agency = models.CharField(max_length=100)
    deadline = models.DateField(null=True)
    description = models.CharField(max_length=1000,null=True)
    url = models.URLField(null=True)
    last_modified_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the record with the current time and save it."""
        self.last_modified_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
| rachel3834/microlensing-online | tutorial/models.py | Python | gpl-3.0 | 8,075 |
# -*- coding: utf-8 -
#
# This file is part of offset. See the NOTICE for more information.#
import sys
# Public names re-exported by this module (extended below as SIG*
# constants are copied in).
__all__ = []
from .file import File, pipe
# Copy every SIG* constant from the stdlib `signal` module onto this
# module object.  The name[3] != "_" check excludes handler sentinels
# such as SIG_DFL / SIG_IGN.
os_mod = sys.modules[__name__]
_signal = __import__('signal')
for name in dir(_signal):
    if name[:3] == "SIG" and name[3] != "_":
        setattr(os_mod, name, getattr(_signal, name))
        __all__.append(name)
del _signal  # do not leak the helper reference from the package namespace
| benoitc/offset | offset/os/__init__.py | Python | mit | 383 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Embedding layer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.Embedding')
class Embedding(Layer):
  """Turns positive integers (indexes) into dense vectors of fixed size.

  eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]

  This layer can only be used as the first layer in a model.

  Example:

  ```python
    model = Sequential()
    model.add(Embedding(1000, 64, input_length=10))
    # the model will take as input an integer matrix of size (batch,
    input_length).
    # the largest integer (i.e. word index) in the input should be no larger
    than 999 (vocabulary size).
    # now model.output_shape == (None, 10, 64), where None is the batch
    dimension.

    input_array = np.random.randint(1000, size=(32, 10))

    model.compile('rmsprop', 'mse')
    output_array = model.predict(input_array)
    assert output_array.shape == (32, 10, 64)
  ```

  Arguments:
    input_dim: int > 0. Size of the vocabulary,
      i.e. maximum integer index + 1.
    output_dim: int >= 0. Dimension of the dense embedding.
    embeddings_initializer: Initializer for the `embeddings` matrix.
    embeddings_regularizer: Regularizer function applied to
      the `embeddings` matrix.
    embeddings_constraint: Constraint function applied to
      the `embeddings` matrix.
    mask_zero: Whether or not the input value 0 is a special "padding"
      value that should be masked out.
      This is useful when using recurrent layers
      which may take variable length input.
      If this is `True` then all subsequent layers
      in the model need to support masking or an exception will be raised.
      If mask_zero is set to True, as a consequence, index 0 cannot be
      used in the vocabulary (input_dim should equal size of
      vocabulary + 1).
    input_length: Length of input sequences, when it is constant.
      This argument is required if you are going to connect
      `Flatten` then `Dense` layers upstream
      (without it, the shape of the dense outputs cannot be computed).

  Input shape:
    2D tensor with shape: `(batch_size, input_length)`.

  Output shape:
    3D tensor with shape: `(batch_size, input_length, output_dim)`.
  """

  def __init__(self,
               input_dim,
               output_dim,
               embeddings_initializer='uniform',
               embeddings_regularizer=None,
               activity_regularizer=None,
               embeddings_constraint=None,
               mask_zero=False,
               input_length=None,
               **kwargs):
    # Derive input_shape from input_length when the caller did not
    # provide one explicitly (this layer is always a first layer).
    if 'input_shape' not in kwargs:
      if input_length:
        kwargs['input_shape'] = (input_length,)
      else:
        kwargs['input_shape'] = (None,)
    dtype = kwargs.pop('dtype', K.floatx())
    super(Embedding, self).__init__(dtype=dtype, **kwargs)

    self.input_dim = input_dim
    self.output_dim = output_dim
    self.embeddings_initializer = initializers.get(embeddings_initializer)
    self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.embeddings_constraint = constraints.get(embeddings_constraint)
    self.mask_zero = mask_zero
    self.supports_masking = mask_zero
    self.input_length = input_length

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    # Note: most sparse optimizers do not have GPU kernels defined. When
    # building graphs, the placement algorithm is able to place variables on CPU
    # since it knows all kernels using the variable only exist on CPU.
    # When eager execution is enabled, the placement decision has to be made
    # right now. Checking for the presence of GPUs to avoid complicating the
    # TPU codepaths which can handle sparse optimizers.
    if context.executing_eagerly() and context.context().num_gpus():
      with ops.device('cpu:0'):
        self.embeddings = self.add_weight(
            shape=(self.input_dim, self.output_dim),
            initializer=self.embeddings_initializer,
            name='embeddings',
            regularizer=self.embeddings_regularizer,
            constraint=self.embeddings_constraint)
    else:
      self.embeddings = self.add_weight(
          shape=(self.input_dim, self.output_dim),
          initializer=self.embeddings_initializer,
          name='embeddings',
          regularizer=self.embeddings_regularizer,
          constraint=self.embeddings_constraint)
    self.built = True

  def compute_mask(self, inputs, mask=None):
    # Mask out positions whose input index is 0 (the padding value).
    if not self.mask_zero:
      return None
    return math_ops.not_equal(inputs, 0)

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    if self.input_length is None:
      return input_shape + (self.output_dim,)
    else:
      # input_length can be tuple if input is 3D or higher
      if isinstance(self.input_length, (list, tuple)):
        in_lens = list(self.input_length)
      else:
        in_lens = [self.input_length]
      if len(in_lens) != len(input_shape) - 1:
        raise ValueError('"input_length" is %s, '
                         'but received input has shape %s' % (str(
                             self.input_length), str(input_shape)))
      else:
        # Validate each declared length against the actual input shape,
        # filling in lengths left as None from the received shape.
        for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):
          if s1 is not None and s2 is not None and s1 != s2:
            raise ValueError('"input_length" is %s, '
                             'but received input has shape %s' % (str(
                                 self.input_length), str(input_shape)))
          elif s1 is None:
            in_lens[i] = s2
      return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)

  def call(self, inputs):
    # embedding_lookup requires integer ids; cast any other dtype first.
    dtype = K.dtype(inputs)
    if dtype != 'int32' and dtype != 'int64':
      inputs = math_ops.cast(inputs, 'int32')
    out = embedding_ops.embedding_lookup(self.embeddings, inputs)
    return out

  def get_config(self):
    # Serialize constructor arguments so the layer can be re-created.
    config = {
        'input_dim':
            self.input_dim,
        'output_dim':
            self.output_dim,
        'embeddings_initializer':
            initializers.serialize(self.embeddings_initializer),
        'embeddings_regularizer':
            regularizers.serialize(self.embeddings_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'embeddings_constraint':
            constraints.serialize(self.embeddings_constraint),
        'mask_zero':
            self.mask_zero,
        'input_length':
            self.input_length
    }
    base_config = super(Embedding, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
| hehongliang/tensorflow | tensorflow/python/keras/layers/embeddings.py | Python | apache-2.0 | 7,974 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-04 03:01
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Adds the `ts_actualizacion_eventos` timestamp column to `materia`.
    # NOTE: the default below is a fixed datetime snapshot captured when the
    # migration was auto-generated, not "now" at migration time.

    dependencies = [
        ('tntapp', '0004_auto_20160617_0048'),
    ]

    operations = [
        migrations.AddField(
            model_name='materia',
            name='ts_actualizacion_eventos',
            field=models.DateTimeField(default=datetime.datetime(2016, 7, 4, 3, 1, 56, 940110, tzinfo=utc)),
        ),
    ]
| Pazitos10/TNT | webapp/app/tntapp/migrations/0005_materia_ts_actualizacion_eventos.py | Python | mit | 578 |
# -*- Mode: Python; test-case-name: flumotion.test.test_greeter -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
from flumotion.admin.gtk.greeter import Greeter
from flumotion.common import testsuite
from flumotion.test.gtkunit import UITestCase
from flumotion.ui.simplewizard import SimpleWizard, WizardStep
attr = testsuite.attr
class FakeAdminWindow:
    """Stub admin window handed to the Greeter; it has no real GTK window."""

    def getWindow(self):
        """Return None in place of an actual toplevel window."""
        return None
class WizardTest(UITestCase):
    """Drive the Greeter wizard end to end through its GTK UI.

    Scripts a complete user session: pick "connect to existing", fill in
    host/port/user/password, and verify the resulting state dict.
    """

    # Marks this test as long-running for the test runner.
    slow = True

    def _prev(self):
        # Go back one wizard page.
        self.click('button_prev')

    def _next(self):
        # Advance one wizard page.
        self.click('button_next')

    def _checkPrevNext(self, can_prev, can_next):
        # Assert the sensitivity (enabled state) of the navigation buttons.
        self.assertSensitive('button_prev', can_prev)
        self.assertSensitive('button_next', can_next)

    def testGreeter(self):
        greeter = Greeter(FakeAdminWindow())
        self.failUnless(isinstance(greeter, SimpleWizard))
        self.failUnless(isinstance(greeter.name, str))
        self.failUnless(isinstance(greeter.page, WizardStep))
        self.failUnless(isinstance(greeter.pages, dict))
        self.failUnless(isinstance(greeter.page_stack, list))
        self.failUnless(isinstance(greeter.state, dict))
        self.setWindow(greeter.window)
        # On the first page only "next" is available.
        self._checkPrevNext(False, True)
        self.click('connect_to_existing')
        self._next()
        self._prev()
        # FIXME: Save the last option selected instead of
        # always selecting the first
        self.click('connect_to_existing')
        self._next()
        self._checkPrevNext(True, True)
        self.setText('host_entry', 'foolio')
        self._checkPrevNext(True, True)
        # NOTE(review): toggling ssl_check appears to switch to the insecure
        # port 8642 (see 'use_insecure' assertion below) — confirm in Greeter.
        self.click('ssl_check')
        self.checkText('port_entry', '8642')
        self._next()
        self._prev()
        self._next()
        # "next" stays disabled until both user and password are filled in.
        self._checkPrevNext(True, False)
        self.setText('user_entry', 'bar')
        self._checkPrevNext(True, False)
        self.setText('passwd_entry', 'baz')
        self._checkPrevNext(True, True)
        self._next()
        state = greeter.run()
        self.refreshUI()
        # The final state must reflect exactly what was typed/clicked above.
        self.assertEquals(state.get('passwd'), 'baz')
        self.assertEquals(state.get('host'), 'foolio')
        self.assertEquals(state.get('port'), 8642)
        self.assertEquals(state.get('use_insecure'), True)
        self.assertEquals(state.get('user'), 'bar')
        self.failUnless('connectionInfo' in state)
| timvideos/flumotion | flumotion/test/test_greeter.py | Python | lgpl-2.1 | 2,934 |
import logging
from zentral.core.exceptions import ImproperlyConfigured
default_app_config = "zentral.core.compliance_checks.apps.ZentralComplianceChecksAppConfig"

logger = logging.getLogger("zentral.core.compliance_checks")

# compliance checks classes
# Registry mapping a model name to its compliance check class; populated at
# import time via register_compliance_check_class() below.
compliance_check_classes = {}
def register_compliance_check_class(compliance_check_class):
    """Register a compliance check class in the registry, keyed by its model.

    Raises ImproperlyConfigured for classes without get_model() or for
    duplicate registrations.
    """
    try:
        model = compliance_check_class.get_model()
    except AttributeError:
        raise ImproperlyConfigured('Not a valid compliance check class')
    if model in compliance_check_classes:
        raise ImproperlyConfigured(f'Compliance check class "{model}" already registered')
    compliance_check_classes[model] = compliance_check_class
    logger.debug('Compliance check class "%s" registered', model)
def compliance_check_class_from_model(model):
    """Look up the compliance check class for `model`.

    Unknown models are logged and fall back to BaseComplianceCheck.
    """
    if model in compliance_check_classes:
        return compliance_check_classes[model]
    logger.error('Unknown compliance check model "%s"', model)
    # BaseComplianceCheck registered in .compliance_checks
    return compliance_check_classes["BaseComplianceCheck"]
| zentralopensource/zentral | zentral/core/compliance_checks/__init__.py | Python | apache-2.0 | 1,092 |
import re
from datetime import timedelta
from urlparse import urlparse
from functools import wraps
from django.core.cache import cache
from .settings import (
FACEBOOK_APPLICATION_CANVAS_URL, FACEBOOK_APPLICATION_DOMAIN,
FACEBOOK_APPLICATION_NAMESPACE, FACEBOOK_SITE_URL
)
def cached_property(**kwargs):
    """Cache the return value of a property in the Django cache.

    The keyword arguments are passed straight to ``datetime.timedelta``
    (e.g. ``hours=1``) and define how long the value is cached. The cache
    key combines the model class name, the property name and the pk.
    """
    def decorator(function):
        @wraps(function)
        def wrapper(self):
            key = 'fbgamers.%(model)s.%(property)s_%(pk)s' % {
                'model': self.__class__.__name__,
                'pk': self.pk,
                'property': function.__name__
            }
            cached_value = cache.get(key)
            if cached_value is not None:
                return cached_value
            value = function(self)
            # total_seconds() covers the days component in one call instead
            # of the previous hand-rolled `days * 86400 + seconds`.
            cache.set(key, value, int(timedelta(**kwargs).total_seconds()))
            return value
        return wrapper
    return decorator
def get_post_authorization_redirect_url(request, canvas=True):
    """Determine the URL users should be redirected to upon authorization the application."""
    path = request.get_full_path()
    if not canvas:
        # Plain site URL: strip the site prefix and re-root the path.
        path = path.replace(urlparse(FACEBOOK_SITE_URL).path, '')
        return FACEBOOK_SITE_URL + path
    # Canvas URL: rebuild under the Facebook app domain/namespace.
    if FACEBOOK_APPLICATION_CANVAS_URL:
        path = path.replace(urlparse(FACEBOOK_APPLICATION_CANVAS_URL).path, '')
    return 'http://%(domain)s/%(namespace)s%(path)s' % {
        'domain': FACEBOOK_APPLICATION_DOMAIN,
        'namespace': FACEBOOK_APPLICATION_NAMESPACE,
        'path': path
    }
def get_full_path(request, remove_querystrings=()):
    """Gets the current path, removing specified querystring parameters.

    Fixes the previous regex-based removal: its greedy, unescaped pattern
    (``'&?' + qs + '=?(.+)?&?'``) also wiped out every parameter *after*
    the one being removed and could leave a dangling '?'.
    """
    path = request.get_full_path()
    if not remove_querystrings:
        return path
    base, sep, query = path.partition('?')
    if not sep:
        # No querystring at all, nothing to remove.
        return path
    kept = [pair for pair in query.split('&')
            if pair.split('=', 1)[0] not in remove_querystrings]
    if kept:
        return base + '?' + '&'.join(kept)
    return base
| luismasuelli/hoods-raising | fbgamers/utils.py | Python | gpl-3.0 | 1,973 |
from UI import window
import sys
import os
if __name__ == "__main__":
    # Only build the window and enter the (blocking) event loop when run as
    # a script; previously this happened on import as well.
    UI = window()
    UI.loop()
| vongola12324/PhetStorm | main.py | Python | gpl-3.0 | 68 |
import os
import re
import asyncio
import logging
from collections import OrderedDict
from pypeman.message import Message
from pypeman.errors import PypemanConfigError
logger = logging.getLogger("pypeman.store")

# Timestamp prefix used in on-disk message file names (see FileMessageStore).
DATE_FORMAT = '%Y%m%d_%H%M'
class MessageStoreFactory():
    """ Message store factory class can generate Message store instance for specific store_id. """

    def get_store(self, store_id):
        """
        Abstract: subclasses return a concrete store; this default yields None.

        :param store_id: identifier of corresponding message store.
        :return: A MessageStore corresponding to correct store_id.
        """
class MessageStore():
    """ A MessageStore keep an history of processed messages. Mainly used in channels.

    All methods are coroutines; subclasses override them. The defaults are
    no-ops that implicitly return None.
    """

    async def start(self):
        """
        Called at startup to initialize store.
        """

    async def store(self, msg):
        """
        Store a message in the store.

        :param msg: The message to store.
        :return: Id for this specific message.
        """

    async def change_message_state(self, id, new_state):
        """
        Change the `id` message state.

        :param id: Message specific store id.
        :param new_state: Target state.
        """

    async def get(self, id):
        """
        Return one message corresponding to given `id` with his status.

        :param id: Message id. Message store dependant.
        :return: A dict `{'id':<message_id>, 'state': <message_state>, 'message': <message_object>}`.
        """

    async def search(self, start=0, count=10, order_by='timestamp'):
        """
        Return a list of message with store specific `id` and processed status.

        :param start: First element.
        :param count: Count of elements since first element.
        :param order_by: Message order. Allowed values : ['timestamp', 'status'].
        :return: A list of dict `{'id':<message_id>, 'state': <message_state>, 'message': <message_object>}`.
        """

    async def total(self):
        """
        :return: total count of messages
        """
class NullMessageStoreFactory(MessageStoreFactory):
    """ Return an NullMessageStore that do nothing at all. """

    def get_store(self, store_id):
        """Return a no-op store; `store_id` is ignored."""
        return NullMessageStore()
class NullMessageStore(MessageStore):
    """ For testing purpose """
    # Every operation is a no-op: nothing is kept, nothing is found.

    async def store(self, msg):
        """Discard `msg` and report no id."""
        return None

    async def get(self, id):
        """Nothing is ever stored, so nothing can be returned."""
        return None

    async def search(self, **kwargs):
        """Searching the null store yields nothing."""
        return None

    async def total(self):
        """The null store is always empty."""
        return 0
class FakeMessageStoreFactory(MessageStoreFactory):
    """ Return an Fake message store """

    def get_store(self, store_id):
        """Return a fake store; `store_id` is ignored."""
        return FakeMessageStore()
class FakeMessageStore(MessageStore):
    """ For testing purpose """

    async def store(self, msg):
        """Pretend to store `msg`, returning a constant id."""
        logger.debug("Should store message %s", msg)
        return 'fake_id'

    async def get(self, id):
        """Fabricate a processed entry for any requested id."""
        return {'id':id, 'state': 'processed', 'message': None}

    async def search(self, **kwargs):
        """Always report an empty result set."""
        return []

    async def total(self):
        """The fake store never counts any message."""
        return 0
class MemoryMessageStoreFactory(MessageStoreFactory):
    """ Return a Memory message store. All message are lost at pypeman stop. """

    def __init__(self):
        # One shared dict so every store created by this factory lives in the
        # same in-memory registry, keyed by store_id.
        self.base_dict = {}

    def get_store(self, store_id):
        """Return a MemoryMessageStore bound to `store_id`."""
        return MemoryMessageStore(self.base_dict, store_id)
class MemoryMessageStore(MessageStore):
    """ Store messages in memory """

    def __init__(self, base_dict, store_id):
        super().__init__()
        # Insertion-ordered mapping: uuid -> entry dict.
        self.messages = base_dict.setdefault(store_id, OrderedDict())

    async def store(self, msg):
        """Serialize and store `msg` under its uuid; return that id."""
        msg_id = msg.uuid
        self.messages[msg_id] = {'id': msg_id, 'state': Message.PENDING, 'timestamp': msg.timestamp, 'message': msg.to_dict()}
        return msg_id

    async def change_message_state(self, id, new_state):
        """Set the state of message `id` to `new_state`."""
        self.messages[id]['state'] = new_state

    async def get(self, id):
        """Return {'id', 'state', 'timestamp', 'message'} with a deserialized Message.

        Raises KeyError for an unknown id.
        """
        resp = dict(self.messages[id])
        resp['message'] = Message.from_dict(resp['message'])
        return resp

    async def search(self, start=0, count=10, order_by='timestamp'):
        """Return up to `count` entries starting at `start`, sorted by `order_by`.

        A leading '-' reverses the order. Accepts 'status' (the name used by
        the MessageStore.search contract) as an alias for the stored 'state'
        key — previously that documented value raised KeyError.
        """
        if order_by.startswith('-'):
            reverse = True
            sort_key = order_by[1:]
        else:
            reverse = False
            sort_key = order_by
        if sort_key == 'status':
            sort_key = 'state'
        ordered = sorted(self.messages.values(),
                         key=lambda entry: entry[sort_key], reverse=reverse)
        # Slice first so only the returned page is deserialized.
        result = []
        for value in ordered[start: start + count]:
            resp = dict(value)
            resp['message'] = Message.from_dict(resp['message'])
            result.append(resp)
        return result

    async def total(self):
        return len(self.messages)
class FileMessageStoreFactory(MessageStoreFactory):
    """
    Generate a FileMessageStore message store instance.
    Store a file in `<base_path>/<store_id>/<month>/<day>/` hierachy.
    """
    # TODO add an option to reguraly archive old file or delete them

    def __init__(self, path):
        super().__init__()
        if path is None:
            raise PypemanConfigError('file message store requires a path')
        self.base_path = path

    def get_store(self, store_id):
        """Return a FileMessageStore rooted at `<base_path>/<store_id>`."""
        return FileMessageStore(self.base_path, store_id)
class FileMessageStore(MessageStore):
    """ Store a file in `<base_path>/<store_id>/<month>/<day>/` hierachy."""
    # TODO file access should be done in another thread. Waiting for file backend.

    def __init__(self, path, store_id):
        super().__init__()
        self.base_path = os.path.join(path, store_id)
        # Match msg file name
        self.msg_re = re.compile(r'^([0-9]{8})_([0-9]{2})([0-9]{2})_[0-9abcdef]*$')
        try:
            # Try to make dirs if necessary
            os.makedirs(os.path.join(self.base_path))
        except FileExistsError:
            pass
        # Cached message count, refreshed by start().
        self._total = 0

    async def start(self):
        # Counting files on disk is expensive; do it once at startup.
        self._total = await self.count_msgs()

    async def store(self, msg):
        """ Store a file in `<base_path>/<store_id>/<month>/<day>/` hierachy."""
        # TODO implement a safer store to avoid broken messages
        # The filename is the file id
        filename = "{}_{}".format(msg.timestamp.strftime(DATE_FORMAT), msg.uuid)
        dirs = os.path.join(str(msg.timestamp.year), "%02d" % msg.timestamp.month, "%02d" % msg.timestamp.day)
        try:
            # Try to make dirs if necessary
            os.makedirs(os.path.join(self.base_path, dirs))
        except FileExistsError:
            pass
        file_path = os.path.join(dirs, filename)
        # Write message to file
        with open(os.path.join(self.base_path, file_path), "w") as f:
            f.write(msg.to_json())
        await self.change_message_state(file_path, Message.PENDING)
        self._total += 1
        return file_path

    async def change_message_state(self, id, new_state):
        # The state lives in a sidecar `<id>.meta` file next to the message.
        with open(os.path.join(self.base_path, id + '.meta'), "w") as f:
            f.write(new_state)

    async def get_message_state(self, id):
        # Read the sidecar `.meta` file written by change_message_state().
        with open(os.path.join(self.base_path, id + '.meta'), "r") as f:
            state = f.read()
        return state

    async def get(self, id):
        # Raises IndexError when the message file does not exist.
        if not os.path.exists(os.path.join(self.base_path, id)):
            raise IndexError
        with open(os.path.join(self.base_path, id), "rb") as f:
            msg = Message.from_json(f.read().decode('utf-8'))
        return {'id': id, 'state': await self.get_message_state(id), 'message': msg}

    async def sorted_list_directories(self, path, reverse=True):
        """
        :param path: Base path
        :param reverse: reverse order
        :return: List of directories in specified path ordered
        """
        return sorted([d for d in os.listdir(path) if os.path.isdir(os.path.join(path, d))], reverse=reverse)

    async def count_msgs(self):
        """
        Count message by listing all directories. To be used at startup.
        """
        count = 0
        # Walk the <year>/<month>/<day> hierarchy, counting matching names.
        for year in await self.sorted_list_directories(os.path.join(self.base_path)):
            for month in await self.sorted_list_directories(os.path.join(self.base_path, year)):
                for day in await self.sorted_list_directories(os.path.join(self.base_path, year, month)):
                    for msg_name in sorted(os.listdir(os.path.join(self.base_path, year, month, day))):
                        found = self.msg_re.match(msg_name)
                        if found:
                            count +=1
        return count

    async def search(self, start=0, count=10, order_by='timestamp'):
        # TODO better performance for slicing by counting file in dirs ?
        if order_by.startswith('-'):
            reverse = True
            sort_key = order_by[1:]
        else:
            reverse = False
            sort_key = order_by
        # TODO handle sort_key
        # NOTE(review): sort_key is currently ignored; ordering always follows
        # the date-based directory/file layout (i.e. timestamp order).
        result = []
        end = start + count
        position = 0
        for year in await self.sorted_list_directories(os.path.join(self.base_path), reverse=reverse):
            for month in await self.sorted_list_directories(os.path.join(self.base_path, year), reverse=reverse):
                for day in await self.sorted_list_directories(os.path.join(self.base_path, year, month), reverse=reverse):
                    for msg_name in sorted(os.listdir(os.path.join(self.base_path, year, month, day)), reverse=reverse):
                        found = self.msg_re.match(msg_name)
                        if found:
                            # Only messages in the [start, end) window are loaded.
                            if start <= position < end:
                                mid = os.path.join(year, month, day, msg_name)
                                result.append(await self.get(mid))
                            position += 1
        return result

    async def total(self):
        return self._total
| jrmi/pypeman | pypeman/msgstore.py | Python | apache-2.0 | 9,722 |
import pygame
import math
import random
def interpolate(v1, v2, range):
    """Linearly interpolate from vector `v1` to `v2` by factor `range` (0..1)."""
    dx = v2.x - v1.x
    dy = v2.y - v1.y
    return pygame.math.Vector2(v1.x + dx * range, v1.y + dy * range)
class Particle(pygame.sprite.Sprite):
    """A single additive-blended particle that drifts, fades out and dies."""

    def __init__(self, game, image, pos, vel, life, lifetime,
                 fade_start, dorotate):
        """Set up the particle.

        :param life: initial age in seconds.
        :param lifetime: age at which the particle dies.
        :param fade_start: age at which fading to black begins.
        """
        pygame.sprite.Sprite.__init__(self)
        self.game = game
        self.pos = pos
        self.vel = vel
        self.rot_cache = {}
        self.base_image = image
        self.dorotate = dorotate
        if dorotate:
            # NOTE(review): self.rot is never initialized, so dorotate=True
            # raises AttributeError here; the emitter always passes False.
            self.image = pygame.transform.rotate(self.base_image, -self.rot)
        else:
            self.image = self.base_image.copy()
        self.rect = self.image.get_rect()
        self.lifetime = lifetime
        self.life = life
        self.fade_start = fade_start
        self.duration = lifetime - fade_start
        # Position/fade once immediately so the first frame is consistent.
        self.update()

    def update(self):
        """Advance the age, apply fading and move along the velocity vector."""
        # (Image rotation support is disabled; see `dorotate` in __init__.)
        self.life += self.game.dt
        self.fade()
        self.pos += self.vel
        self.rect.centerx = self.pos.x
        self.rect.centery = self.pos.y

    def blit(self):
        """Draw additively onto the game screen; return the dirty rect."""
        return self.game.screen.blit(self.image, self.rect, special_flags=pygame.BLEND_ADD)

    def fade(self):
        """Darken the sprite once `life` passes `fade_start`.

        With BLEND_MIN, filling with a grey level caps every channel, so a
        shrinking mask value fades the additive particle towards black.
        """
        if self.life <= self.fade_start:
            return
        try:
            ratio = (self.life - self.fade_start) / self.duration
        except ZeroDivisionError:
            # fade_start == lifetime: jump straight to fully faded.
            ratio = 1.0
        ratio = min(ratio, 1.0)
        mask = int(255 * (1 - ratio))
        self.image.fill([mask, mask, mask], special_flags=pygame.BLEND_MIN)

    def is_dead(self):
        """Return True once the particle has outlived `lifetime`."""
        return self.life > self.lifetime
class ParticleEmitter:
    """Spawns, updates and draws a stream of Particles from a point on a parent sprite."""

    def __init__(self, game, parent, offset, vel, image, count, lifetime,
                 fade_start, size, angle_range, dorotate=False):
        """Set up the emitter.

        :param count: particles emitted per second (0 disables emission).
        :param angle_range: +/- random spread, in degrees, around `vel`.
        """
        self.game = game
        self.parent = parent
        self.offset = offset
        self.particle_vel = vel
        self.pos = self.parent.pos + self.game.OFFSET + self.offset.rotate(self.parent.rot)
        self.base_image = image
        self.size = size
        self.angle_range = angle_range
        self.image = pygame.transform.scale(self.base_image, (self.size, self.size))
        self.count = count
        self.lifetime = lifetime
        self.fade_start = fade_start
        self.particles = []
        self.timer = 0
        # Recent emitter positions, used to interpolate spawn points so a
        # fast-moving emitter leaves a continuous trail.
        self.prevcurve = [self.pos for x in range(3)]
        self.active = True

    def print_state(self):
        """Debug helper: configured rate vs live particle count."""
        print("c:{}, p:{}".format(self.count, len(self.particles)))

    def update(self):
        """Move with the parent, age/cull particles and emit new ones."""
        self.pos = self.parent.pos + self.game.OFFSET + self.offset.rotate(-self.parent.rot)
        self.rand_angle = random.randint(-self.angle_range, self.angle_range)
        # Update every particle, then drop the dead ones. The previous code
        # called particles.remove() while iterating the same list, which
        # skips the element after each removal; rebuild the list instead.
        for part in self.particles:
            part.update()
        self.particles = [part for part in self.particles if not part.is_dead()]
        # Emit: accumulate time, then spawn count*elapsed particles spread
        # along the path travelled since the last emission.
        if self.count != 0 and self.active:
            self.timer += self.game.dt
            newparticles = self.count * self.timer
            if newparticles > 1:
                for i in range(int(newparticles)):
                    t = i / newparticles
                    time_elapsed = (1.0 - t) * self.game.dt
                    vel = self.particle_vel.rotate(-self.parent.rot + self.rand_angle)
                    pos = interpolate(self.prevcurve[0], self.pos, t)
                    pos += (self.parent.vel + vel) * time_elapsed
                    init_life = time_elapsed
                    self.timer = 0
                    self.particles.append(Particle(self.game, self.image, pos,
                                                   vel, init_life, self.lifetime,
                                                   self.fade_start, False))
        self.prevcurve[2] = self.prevcurve[1]
        self.prevcurve[1] = self.prevcurve[0]
        self.prevcurve[0] = self.pos

    def draw(self):
        """Blit every particle; return the list of dirty rects."""
        return [part.blit() for part in self.particles]

    def kill_all(self):
        """Stop emitting and discard all live particles."""
        self.count = 0
        self.active = False
        self.particles = []
| kidscancode/gamedev | war/particles.py | Python | mit | 4,829 |
# Copyright 2013 - Noorul Islam K M
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from solumclient import client
from solumclient.common import auth
from solumclient.openstack.common.apiclient import exceptions
from solumclient.tests import base
class ClientTest(base.TestCase):

    def test_client_unsupported_version(self):
        """An unknown API version must raise UnsupportedVersion."""
        self.assertRaises(exceptions.UnsupportedVersion,
                          client.Client, '111.11')

    def test_client(self):
        """Client('1') builds cleanly against a mocked Keystone auth plugin."""
        with mock.patch.object(auth, 'KeystoneAuthPlugin'):
            client.Client('1')
| ed-/python-solumclient | solumclient/tests/test_client.py | Python | apache-2.0 | 1,083 |
# -*- coding: utf-8 -*-
import logging
from osv import fields, osv
_logger = logging.getLogger(__name__)
class hourly_fee_p_discount(osv.osv):
    '''Per-seat hourly-fee discount settings (按位钟点费优惠设置).'''
    # Declarative OpenERP model: all fields/behaviour come from the
    # inherited generic hourly-fee discount model.
    _name = "ktv.hourly_fee_p_discount"
    _inherit = "ktv.hourly_fee_discount"
    _description = "按位钟点费优惠设置"
| chengdh/openerp-ktv | openerp/addons/ktv_sale/hourly_fee_p_discount.py | Python | agpl-3.0 | 312 |
from django.conf.urls.defaults import *
urlpatterns = patterns('testrunner',
    # Example:
    # (r'^server/', include('server.foo.urls')),

    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    # (r'^admin/(.*)', admin.site.root),

    # Test-runner entry points: run the suite / generate performance data.
    (r'^run$', 'runner.main'),
    (r'^perfgen$', 'runner.perfgen')
)
| eob/synckit-research | server/testrunner/urls.py | Python | bsd-3-clause | 504 |
#----------------------------------------------------------------------
# Copyright (c) 2010-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
'''
Credential creation and verification utilities.
'''
from __future__ import absolute_import
import os
import logging
import xmlrpclib
import sys
import datetime
import dateutil
from ...sfa.trust import credential as cred
from ...sfa.trust import gid
from ...sfa.trust import rights
from ...sfa.util.xrn import hrn_authfor_hrn
from ...sfa.trust.credential_factory import CredentialFactory
from ...sfa.trust.abac_credential import ABACCredential
from ...sfa.trust.certificate import Certificate
from .speaksfor_util import determine_speaks_for
def naiveUTC(dt):
    """Converts dt to a naive datetime in UTC.

    if 'dt' has a timezone then
    convert to UTC
    strip off timezone (make it "naive" in Python parlance)
    """
    if dt.tzinfo is None:
        # Already naive: hand it back untouched.
        return dt
    return dt.astimezone(dateutil.tz.tzutc()).replace(tzinfo=None)
class CredentialVerifier(object):
    """Utilities to verify signed credentials from a given set of
    root certificates. Will compare target and source URNs, and privileges.
    See verify and verify_from_strings methods in particular."""

    # Name of the concatenated-roots file produced by getCAsFileFromDir().
    CATEDCERTSFNAME = 'CATedCACerts.pem'

    # root_cert_fileordir is a trusted root cert file or directory of
    # trusted roots for verifying credentials
    def __init__(self, root_cert_fileordir):
        self.logger = logging.getLogger('cred-verifier')
        if root_cert_fileordir is None:
            raise Exception("Missing Root certs argument")
        elif os.path.isdir(root_cert_fileordir):
            files = os.listdir(root_cert_fileordir)
            self.root_cert_files = []
            for file in files:
                # FIXME: exclude files that aren't cert files?
                if file == CredentialVerifier.CATEDCERTSFNAME:
                    continue
                self.root_cert_files.append(os.path.expanduser(os.path.join(root_cert_fileordir, file)))
            self.logger.info('Will accept credentials signed by any of %d root certs found in %s: %r' % (len(self.root_cert_files), root_cert_fileordir, self.root_cert_files))
        elif os.path.isfile(root_cert_fileordir):
            self.logger.info('Will accept credentials signed by the single root cert %s' % root_cert_fileordir)
            self.root_cert_files = [root_cert_fileordir]
        else:
            raise Exception("Couldn't find Root certs in %s" % root_cert_fileordir)

    @classmethod
    def getCAsFileFromDir(cls, caCerts):
        '''Take a directory of CA certificates and concatenate them into a single
        file suitable for use by the Python SSL library to validate client
        credentials. Existing file is replaced.'''
        if caCerts is None:
            raise Exception ('Missing caCerts argument')
        if os.path.isfile(os.path.expanduser(caCerts)):
            # Already a single file: use it as-is.
            return caCerts
        if not os.path.isdir(os.path.expanduser(caCerts)):
            raise Exception ('caCerts arg Not a file or a dir: %s' % caCerts)
        logger = logging.getLogger('cred-verifier')
        # Now we have a dir of caCerts files
        # For each file in the dir (isfile), concatenate them into a new file
        comboFullPath = os.path.join(caCerts, CredentialVerifier.CATEDCERTSFNAME)
        caFiles = os.listdir(caCerts)
        #logger.debug('Got %d potential caCert files in the dir', len(caFiles))
        outfile = open(comboFullPath, "w")
        okFileCount = 0
        for filename in caFiles:
            filepath = os.path.join(caCerts, filename)
            # Confirm it's a CA file?
            #        if not file.endswith('.pem'):
            #            continue
            if not os.path.isfile(os.path.expanduser(filepath)):
                logger.debug('Skipping non file %s', filepath)
                continue
            if filename == CredentialVerifier.CATEDCERTSFNAME:
                # logger.debug('Skipping previous cated certs file')
                continue
            okFileCount += 1
            logger.info("Adding trusted cert file %s", filename)
            certfile = open(filepath)
            for line in certfile:
                outfile.write(line)
            certfile.close()
        outfile.close()
        if okFileCount == 0:
            sys.exit('Found NO trusted certs in %s!' %  caCerts)
        else:
            logger.info('Combined dir of %d trusted certs %s into file %s for Python SSL support', okFileCount, caCerts, comboFullPath)
        return comboFullPath

    # Get the GID of the caller, substituting the real user if this is a 'speaks-for' invocation
    def get_caller_gid(self, gid_string, cred_strings, options=None):
        root_certs = \
            [Certificate(filename=root_cert_file) \
                 for root_cert_file in self.root_cert_files]
        caller_gid = gid.GID(string=gid_string)

        # Potentially, change gid_string to be the cert of the actual user
        # if this is a 'speaks-for' invocation
        speaksfor_gid = \
            determine_speaks_for(self.logger, \
                                     cred_strings, # May include ABAC speaks_for credential
                                 caller_gid, # Caller cert (may be the tool 'speaking for' user)
                                 options, # May include 'geni_speaking_for' option with user URN
                                 root_certs
                             )
        if caller_gid.get_subject() != speaksfor_gid.get_subject():
            speaksfor_urn = speaksfor_gid.get_urn()
            self.logger.info("Speaks-for Invocation: %s speaking for %s" % (caller_gid.get_urn(), speaksfor_urn))
            caller_gid = speaksfor_gid
        return caller_gid

    def verify_from_strings(self, gid_string, cred_strings, target_urn,
                            privileges, options=None):
        '''Create Credential and GID objects from the given strings,
        and then verify the GID has the right privileges according
        to the given credentials on the given target.'''
        def make_cred(cred_string):
            # Helper: parse one credential string; unparsable ones become None
            # and are reported as a failure later in verify().
            credO = None
            try:
                credO = CredentialFactory.createCred(credString=cred_string)
            except Exception, e:
                self.logger.warn("Skipping unparsable credential. Error: %s. Credential begins: %s...", e, cred_string[:60])
            return credO

        # Get the GID of the caller, substituting the real user if this is a 'speaks-for' invocation
        caller_gid = self.get_caller_gid(gid_string, cred_strings, options)
                    
        # Remove the abac credentials
        cred_strings = [cred_string for cred_string in cred_strings \
                            if CredentialFactory.getType(cred_string) == cred.Credential.SFA_CREDENTIAL_TYPE]

        return self.verify(caller_gid,
                           map(make_cred, cred_strings),
                           target_urn,
                           privileges)

    def verify_source(self, source_gid, credential):
        '''Ensure the credential is giving privileges to the caller/client.
        Return True iff the given source (client) GID's URN
        is == the given credential's Caller (Owner) URN'''
        source_urn = source_gid.get_urn()
        cred_source_urn = credential.get_gid_caller().get_urn()
        #self.logger.debug('Verifying source %r against credential source %r (cred target %s)',
        #              source_urn, cred_source_urn, credential.get_gid_object().get_urn())
        result = (cred_source_urn == source_urn)
        if result:
         #   self.logger.debug('Source URNs match')
            pass
        else:
            self.logger.debug('Source URNs do not match. Source URN %r != credential source URN %r', source_urn, cred_source_urn)
        return result

    def verify_target(self, target_urn, credential):
        '''Ensure the credential is giving privileges on the right subject/target.
        Return True if no target is specified, or the target URN
        matches the credential's Object's (target's) URN, else return False.
        No target is required, for example, to ListResources.'''
        if not target_urn:
        #    self.logger.debug('No target specified, considering it a match.')
            return True
        else:
            cred_target_urn = credential.get_gid_object().get_urn()
            # self.logger.debug('Verifying target %r against credential target %r',
            #               target_urn, cred_target_urn)
            result = target_urn == cred_target_urn
            if result:
            #    self.logger.debug('Target URNs match.')
                pass
            else:
                self.logger.debug('Target URNs do NOT match. Target URN %r != Credential URN %r', target_urn, cred_target_urn)
            return result

    def verify_privileges(self, privileges, credential):
        ''' Return True iff the given credential gives the privilege
        to perform ALL of the privileges (actions) in the given list.
        In particular, the given list of 'privileges' is really a list
        of names of operations. The privileges in credentials are
        each turned in to Rights objects (see sfa/trust/rights.py).
        And the SFA rights table is used to map from names of privileges
        as specified in credentials, to names of operations.'''
        result = True
        privs = credential.get_privileges()
        for priv in privileges:
            if not privs.can_perform(priv):
                self.logger.debug('Privilege %s not found on credential %s of %s', priv, credential.get_gid_object().get_urn(), credential.get_gid_caller().get_urn())
                result = False
        return result

    def verify(self, gid, credentials, target_urn, privileges):
        '''Verify that the given Source GID supplied at least one credential
        in the given list of credentials that has all the privileges required
        in the privileges list on the given target.
        IE if any of the supplied credentials has a caller that matches gid
        and a target that matches target_urn, and has all the privileges in
        the given list, then return the list of credentials that were ok.
        If no target_urn is supplied, then no credential is required, but any supplied
        credential must be valid.
        Throw an Exception if we fail to verify any credential.'''

        # Note that here we treat a list of credentials as being options
        # Alternatively could accumulate privileges for example.
        # The semantics of the list of credentials is under specified.

        self.logger.debug('Verifying privileges')
        result = list()
        failure = ""
        tried_creds = ""
        if len(credentials) == 0:
            if (target_urn is None):
                self.logger.debug("No credentials, but also no target, so OK")
                return result
            else:
                # EG a slice_urn was supplied but no credentials
                failure = "No credentials found"
        for cred in credentials:
            # Each iteration checks one credential; the first failure reason
            # is kept in `failure` for the final error message.
            if cred is None:
                failure = "Credential was unparseable"
                continue
            if cred.get_cred_type() == cred.SFA_CREDENTIAL_TYPE:
                cS = cred.get_gid_caller().get_urn()
            elif cred.get_cred_type() == ABACCredential.ABAC_CREDENTIAL_TYPE:
                cS = cred.get_summary_tostring()
            else:
                cS = "Unknown credential type %s" % cred.get_cred_type()
            if tried_creds != "":
                tried_creds = "%s, %s" % (tried_creds, cS)
            else:
                tried_creds = cS

            if cred.get_cred_type() != cred.SFA_CREDENTIAL_TYPE:
                failure = "Not an SFA credential: " + cS
                continue

            if not self.verify_source(gid, cred):
                failure = "Cred %s fails: Credential doesn't grant rights to you (%s), but to %s (over object %s)" % (cred.get_gid_caller().get_urn(), gid.get_urn(), cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn())
                continue
            if not self.verify_target(target_urn, cred):
                failure = "Cred granting rights to %s on %s fails: It grants permissions over a different target, not %s (URNs dont match)" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), target_urn)
                continue
            if not self.verify_privileges(privileges, cred):
                failure = "Cred for %s over %s doesn't provide sufficient privileges" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn())
                continue
            try:
                # Cryptographic check against the trusted roots happens last.
                if not cred.verify(self.root_cert_files):
                    failure = "Couldn't validate credential for caller %s with target %s with any of %d known root certs" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), len(self.root_cert_files))
                    continue
            except Exception, exc:
                failure = "Couldn't validate credential for caller %s with target %s with any of %d known root certs: %s: %s" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), len(self.root_cert_files), exc.__class__.__name__, exc)
                self.logger.info(failure)
                continue
            # If got here it verified
            result.append(cred)

        if result and result != list():
            # At least one credential verified ok and was added to the list
            # return that list
            return result
        else:
            # We did not find any credential with sufficient privileges
            # Raise an exception.
            fault_code = 'Insufficient privileges'
            fault_string = 'No credential was found with appropriate privileges. Tried %s. Last failure: %s' % (tried_creds, failure)
            self.logger.error(fault_string)
            # GCF ticket #120 - do not raise an xmlrpclib Fault here -
            # just an Exception. But the caller may want to turn this
            # into one
            #raise xmlrpclib.Fault(fault_code, fault_string)
            raise Exception(fault_string)
def create_credential(caller_gid, object_gid, expiration, typename, issuer_keyfile, issuer_certfile, trusted_roots, delegatable=False):
    '''Create and return a signed Credential object.

    The credential is issued by the key/cert at issuer_keyfile /
    issuer_certfile, empowers caller_gid over object_gid, and expires at
    `expiration` (a datetime, which must be in the future). `typename`
    is one of user, sa, ma, authority, slice or component; privileges
    are determined by the type per sfa/trust/rights.py and are
    delegatable if `delegatable` is True.

    Raises ValueError for invalid arguments, and Exception if the newly
    signed credential does not verify against `trusted_roots`.'''
    # FIXME: Validate args: my gids, >0 life,
    # type of cred one I can issue
    # and readable key and cert files
    if caller_gid is None:
        raise ValueError("Missing Caller GID")
    if object_gid is None:
        raise ValueError("Missing Object GID")
    if expiration is None:
        raise ValueError("Missing expiration")
    # Strip any timezone so the subtraction against utcnow() below is
    # well defined.
    naive_expiration = naiveUTC(expiration)
    duration = naive_expiration - datetime.datetime.utcnow()
    life_secs = duration.seconds + duration.days * 24 * 3600
    if life_secs < 1:
        raise ValueError("Credential expiration is in the past")
    if trusted_roots is None:
        raise ValueError("Missing list of trusted roots")
    if typename is None or typename.strip() == '':
        raise ValueError("Missing credential type")
    typename = typename.strip().lower()
    if typename not in ("user", "sa", "ma", "authority", "slice", "component"):
        raise ValueError("Unknown credential type %s" % typename)
    if not os.path.isfile(issuer_keyfile):
        raise ValueError("Cant read issuer key file %s" % issuer_keyfile)
    if not os.path.isfile(issuer_certfile):
        raise ValueError("Cant read issuer cert file %s" % issuer_certfile)
    issuer_gid = gid.GID(filename=issuer_certfile)
    # The issuer may sign for itself, or for any object it is an
    # authority over (per the HRN hierarchy).
    if not (object_gid.get_urn() == issuer_gid.get_urn() or
            (issuer_gid.get_type().find('authority') == 0 and
             hrn_authfor_hrn(issuer_gid.get_hrn(), object_gid.get_hrn()))):
        raise ValueError("Issuer not authorized to issue credential: Issuer=%s  Target=%s" % (issuer_gid.get_urn(), object_gid.get_urn()))
    ucred = cred.Credential()
    # FIXME: Validate the caller_gid and object_gid
    # are my user and slice
    # Do get_issuer and compare to the issuer cert?
    # Or do gid.is_signed_by_cert(issuer_certfile)?
    ucred.set_gid_caller(caller_gid)
    ucred.set_gid_object(object_gid)
    ucred.set_expiration(expiration)
    # Use sfa/trust/rights.py to figure out what privileges
    # the credential should have.
    # user means refresh, resolve, info
    # per the privilege_table that lets users do
    # remove, update, resolve, list, getcredential,
    # listslices, listnodes, getpolicy
    # Note that it does not allow manipulating slivers
    # And every right is delegatable if any are delegatable (default False)
    privileges = rights.determine_rights(typename, None)
    privileges.delegate_all_privileges(delegatable)
    ucred.set_privileges(privileges)
    ucred.encode()
    ucred.set_issuer_keys(issuer_keyfile, issuer_certfile)
    # Sign, then immediately verify against the trusted roots so we never
    # hand back a credential that would fail validation.
    ucred.sign()
    try:
        ucred.verify(trusted_roots)
    except Exception, exc:
        raise Exception("Create Credential failed to verify new credential from trusted roots: %s" % exc)
    return ucred
| plantigrade/geni-tools | src/gcf/geni/util/cred_util.py | Python | mit | 18,496 |
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings as django_settings
from synnefo_branding import settings
from django.template.loader import render_to_string as django_render_to_string
def get_branding_dict(prepend=None):
    """Return all UPPER_CASE branding settings as a dict.

    Keys are the (upper-cased) setting names, optionally prefixed with
    `prepend` + '_'. CONTACT_EMAIL is not strictly a branding setting,
    but is included under 'support' for convenience.
    """
    branding = {'support': django_settings.CONTACT_EMAIL}
    for name in dir(settings):
        # Only constant-style (all-caps) attributes are settings.
        if name != name.upper():
            continue
        key = name.lower()
        if prepend:
            key = '%s_%s' % (prepend, key)
        branding[key.upper()] = getattr(settings, name)
    return branding
def brand_message(msg, **extra_args):
    """Interpolate branding settings (plus any overrides) into *msg*
    using %-style named substitution."""
    substitutions = dict(get_branding_dict(), **extra_args)
    return msg % substitutions
def render_to_string(template_name, dictionary=None, context_instance=None):
    """Render *template_name* with the branding settings (prefixed with
    BRANDING_) merged under the caller-supplied context."""
    context = dictionary or {}
    if isinstance(context, dict):
        merged = get_branding_dict("BRANDING")
        merged.update(context)
    else:
        # Non-dict contexts (e.g. a Context object) are passed through.
        merged = context
    return django_render_to_string(template_name, merged, context_instance)
| grnet/synnefo | snf-branding/synnefo_branding/utils.py | Python | gpl-3.0 | 1,774 |
"""
Windows executable build with py2exe
"""
from distutils.core import setup
import py2exe
import os
data_files = []
base_path = os.path.abspath(os.path.dirname(__file__))
def prep_data_files(dir):
    """Walk base_path + dir and queue every file found into the
    module-level data_files list for py2exe."""
    for root, dirs, files in os.walk(base_path + dir):
        # NOTE(review): parts[1] assumes '/'-separated walk results —
        # confirm this holds for the build environment.
        parts = root.split('/')
        entries = [root + '/' + fname for fname in files]
        data_files.append((parts[1], entries))
prep_data_files('/static')
prep_data_files('/views')
prep_data_files('/docs')
data_files.append(('./', [base_path + '/LICENSE',
base_path + '/README.md'])
)
setup(
console=['app.py'],
data_files=data_files,
#zipfile=None, #--> If zipfile is set to None, the files will be bundled within the executable instead of 'library.zip'.
options={
'py2exe': {
'packages': ['sub', 'pygments.styles'],
'optimize': 2, #0 - 1 - 2
'compressed': True,
#'xref': True,
#'bundle_files': 3 #1: evryting 2:except python.exe 3:nothing (default)
}
}
)
# bof fix: timezone'un eklenmesi
"""
import pygments
import zipfile
zipfile_path = os.path.join("dist/" 'library.zip')
z = zipfile.ZipFile(zipfile_path, 'a')
zoneinfo_dir = os.path.join(os.path.dirname(pygments.__file__), 'styleS')
disk_basedir = os.path.dirname(os.path.dirname(pygments.__file__))
for absdir, directories, filenames in os.walk(zoneinfo_dir):
assert absdir.startswith(disk_basedir), (absdir, disk_basedir)
zip_dir = absdir[len(disk_basedir):]
for f in filenames:
z.write(os.path.join(absdir, f), os.path.join(zip_dir, f))
z.close()
"""
##eof fix | ctengiz/firewad | build_py2exe.py | Python | mit | 1,683 |
import os
from os.path import join
import sys
import re
def getOrDef(dic, key):
    """Return dic[key], or 0 when key is absent.

    Equivalent to the manual membership test, but dict.get does the
    lookup once instead of twice.
    """
    return dic.get(key, 0)
def readList(filename):
    """Read *filename* and return its lines with newlines stripped.

    Uses a context manager so the file handle is closed even if reading
    raises (the original leaked the handle on error).
    """
    with open(filename) as f:
        # replace (not rstrip) preserves the original semantics exactly:
        # every newline character is removed from each line.
        return [line.replace("\n", "") for line in f]
out = open("out.csv", "w")
languages = readList("langs.txt")
for language in languages:
onlyfiles = [ f for f in os.listdir(language) if os.isfile(join(language,f)) ]
print onlyfiles
# # caption
# out.write(','.join(dic) + "\n")
#
# for arg in sys.argv[2:]:
# try:
# f = open(arg, 'r')
# conts = f.read()
# print "processing " + arg
# out.write(','.join(map(lambda key: str(len(re.findall(key, conts))), dic)) + "\n")
# except IOError as e:
# print(e)
out.close()
| sayon/ignoreme | lexers/utils/frequencies_kw.py | Python | mit | 899 |
"""Implement the hil-admin command."""
from hil import config, model, deferred, server, migrations, rest
from hil.commands import db
from hil.commands.migrate_ipmi_info import MigrateIpmiInfo
from hil.commands.util import ensure_not_root
from hil.flaskapp import app
from time import sleep
from flask_script import Manager, Command, Option
import sys
import logging
from click import IntRange
manager = Manager(app)
class ServeNetworks(Command):
    """Start the HIL networking server"""

    # pylint: disable=arguments-differ
    def run(self):
        """Apply queued networking actions forever.

        Reads an optional [network-daemon] sleep_time (seconds, float)
        from the config; it must satisfy 0 < sleep_time < 3600 and
        defaults to 2 seconds. Exits with an error message when the
        configured value is unusable.
        """
        logger = logging.getLogger(__name__)
        server.init()
        server.register_drivers()
        server.validate_state()
        migrations.check_db_schema()

        # Check if config contains usable sleep_time
        if (config.cfg.has_section('network-daemon') and
                config.cfg.has_option('network-daemon', 'sleep_time')):
            try:
                sleep_time = config.cfg.getfloat(
                    'network-daemon', 'sleep_time')
            except ValueError:
                sys.exit("Error: sleep_time set to non-float value")
            if sleep_time <= 0 or sleep_time >= 3600:
                sys.exit("Error: sleep_time not within bounds "
                         "0 < sleep_time < 3600")
            if sleep_time > 60:
                # logger.warn is a deprecated alias; use warning().
                logger.warning('sleep_time greater than 1 minute.')
        else:
            sleep_time = 2

        while True:
            # Empty the journal until it's empty; then delay so we don't tight
            # loop.
            while deferred.apply_networking():
                pass
            sleep(sleep_time)
class RunDevelopmentServer(Command):
    """Run a development api server. Don't use this in production.
    Specify the port with -p or --port otherwise defaults to 5000"""

    option_list = (
        Option('--port', '-p', dest='port',
               type=IntRange(0, 2**16-1), default=5000),
    )

    # pylint: disable=arguments-differ
    def run(self, port):
        # Debug mode is off unless [devel] debug is present and truthy.
        debug = (config.cfg.has_option('devel', 'debug') and
                 config.cfg.getboolean('devel', 'debug'))
        # We need to import api here so that the functions within it get
        # registered (via `rest_call`), though we don't use it directly:
        # pylint: disable=unused-variable
        from hil import api
        server.init()
        migrations.check_db_schema()
        server.stop_orphan_consoles()
        rest.serve(port, debug=debug)
class CreateAdminUser(Command):
    """Create an admin user. Only valid for the database auth backend.
    This must be run on the HIL API server, with access to hil.cfg and the
    database. It will create a user named <username> with password
    <password>, who will have administrator privileges.
    This command should only be used for bootstrapping the system; once you
    have an initial admin, you can (and should) create additional users via
    the API.
    """

    # these are actually positional arguments
    option_list = (Option('username'), Option('password'))

    # pylint: disable=arguments-differ
    def run(self, username, password):
        if not config.cfg.has_option('extensions', 'hil.ext.auth.database'):
            sys.exit("'create-admin-user' is only valid with the database auth"
                     " backend.")
        # Imported lazily: the extension module only applies when the
        # database auth backend is enabled.
        from hil.ext.auth.database import User
        admin = User(label=username, password=password, is_admin=True)
        model.db.session.add(admin)
        model.db.session.commit()
# Register the sub-commands exposed by the hil-admin CLI.
manager.add_command('db', db.command)
manager.add_command('migrate-ipmi-info', MigrateIpmiInfo())
manager.add_command('serve-networks', ServeNetworks())
manager.add_command('run-dev-server', RunDevelopmentServer())
manager.add_command('create-admin-user', CreateAdminUser())
def main():
    """Entrypoint for the hil-admin command."""
    # Refuse to run as root, then load config and set up the database
    # connection before dispatching to the selected sub-command.
    ensure_not_root()
    config.setup()
    model.init_db()
    manager.run()
| SahilTikale/haas | hil/commands/admin.py | Python | apache-2.0 | 3,992 |
from pylab import *
def log_likelihood(params, xx):
    """Gaussian log-likelihood of the population parameters.

    params -- sequence whose first two entries are (mu, sigma)
    xx     -- list of per-object sample arrays; each object's density is
              averaged over its samples before taking the log.
    """
    mu, sigma = params[0], params[1]
    norm = 1./(sigma*sqrt(2.*pi))
    logL = 0.
    for sample in xx:
        density = norm*exp(-0.5*((sample - mu)/sigma)**2)
        logL += log(mean(density))
    return logL
# Load all of the posterior samples
# (files 0.txt .. 99.txt each hold one object's samples; we work with
# log10 of the value in units of 10)
xx = []
for i in xrange(0, 100):
    posterior_sample = loadtxt(str(i) + '.txt', usecols=[0])
    xx.append(log10(posterior_sample/10.))
# Evaluate the log likelihood on a (mu, sigma) grid
num = 512
mu = linspace(-2., 2., num)
sigma = linspace(0.05, 1., num)
[mu, sigma] = meshgrid(mu, sigma)
# Flip the rows so sigma increases upwards in the imshow() plot below
sigma = sigma[::-1, :]
logL = zeros(mu.shape)
for i in xrange(0, logL.shape[0]):
    for j in xrange(0, logL.shape[1]):
        logL[i, j] = log_likelihood([mu[i, j], sigma[i, j]], xx)
    print(i)
rc("font", size=16, family="serif", serif="Computer Sans")
rc("text", usetex=True)
# Normalise exp(logL) over the grid (subtract the max for stability)
post = exp(logL - logL.max())
post = post/post.sum()
imshow(-post, aspect=4./0.99, extent=[-2, 2, 0.05, 1], cmap='gray')
hold(True)  # NOTE(review): hold() is removed in modern matplotlib
plot(0.867, 0.157, 'w*', markersize=10)
ylim(0.05)
xlabel('$\\mu$')
ylabel('$\\sigma$')
savefig('posterior.pdf', bbox_inches='tight')
show()
figure(figsize=(10, 8))
# Compute marginal posterior for mu
# and predictive distribution for a new tau
p = post.sum(axis=0)
p /= trapz(p, x=mu[0, :])
plot(mu[0, :], p, 'b', linewidth=2, label='Posterior distribution for $\\mu$')
# First two moments (mean, standard deviation) of the marginal posterior
m1 = trapz(p*mu[0, :], x=mu[0, :])
m2 = trapz(p*mu[0, :]**2, x=mu[0, :])
print(m1, sqrt(m2 - m1**2))
# Predictive density: mixture of Gaussians weighted by the grid posterior
predictive = zeros(mu[0, :].shape)
for i in xrange(0, post.shape[0]):
    for j in xrange(0, post.shape[1]):
        predictive += post[i, j]/(sigma[i, j]*sqrt(2.*pi))*exp(-0.5*((mu[0, :] - mu[i, j])/sigma[i, j])**2)
plot(mu[0, :], predictive, 'r--', linewidth=2, label='Predictive distribution for new $\\bar{\\tau}$')
xlim([-2, 2])
legend(loc = 'upper left')
ylabel('Probability Density', fontsize=18)
xlabel('$\\mu$, log$_{10}(\\bar{\\tau}/(\\textnormal{1 day}))$', fontsize=20)
savefig('posterior2.pdf', bbox_inches='tight')
# Moments of the predictive distribution
m1 = trapz(predictive*mu[0, :], x=mu[0, :])
m2 = trapz(predictive*mu[0, :]**2, x=mu[0, :])
print(m1, sqrt(m2 - m1**2))
show()
| eggplantbren/RMHB | Code/Results/combine.py | Python | gpl-3.0 | 2,012 |
import collections
from sympy import (
Abs, Add, E, Float, I, Integer, Max, Min, N, Poly, Pow, PurePoly, Rational,
S, Symbol, cos, exp, oo, pi, signsimp, simplify, sin, sqrt, symbols,
sympify, trigsimp, sstr)
from sympy.matrices.matrices import (ShapeError, MatrixError,
NonSquareMatrixError, DeferredVector)
from sympy.matrices import (
GramSchmidt, ImmutableMatrix, ImmutableSparseMatrix, Matrix,
SparseMatrix, casoratian, diag, eye, hessian,
matrix_multiply_elementwise, ones, randMatrix, rot_axis1, rot_axis2,
rot_axis3, wronskian, zeros)
from sympy.core.compatibility import long, iterable, u, range
from sympy.utilities.iterables import flatten, capture
from sympy.utilities.pytest import raises, XFAIL, slow, skip
from sympy.solvers import solve
from sympy.abc import a, b, c, d, x, y, z
# don't re-order this list
classes = (Matrix, SparseMatrix, ImmutableMatrix, ImmutableSparseMatrix)
def test_args():
    """Every matrix class exposes int shape/rows/cols and the expected
    internal storage (list for dense classes, dict for sparse ones)."""
    for index, klass in enumerate(classes):
        mat = klass.zeros(3, 2)
        # all should give back the same type of arguments, e.g. ints for shape
        assert mat.shape == (3, 2) and all(type(dim) is int for dim in mat.shape)
        assert mat.rows == 3 and type(mat.rows) is int
        assert mat.cols == 2 and type(mat.cols) is int
        if index % 2:
            assert type(mat._smat) is dict
        else:
            assert type(mat._mat) is list
def test_division():
    """Scalar division works via __div__, __truediv__ and the / operator."""
    row = Matrix(1, 2, [x, y])
    expected = Matrix(1, 2, [x/z, y/z])
    assert row.__div__(z) == expected
    assert row.__truediv__(z) == expected
    assert row/z == expected
def test_sum():
    """Addition doubles entries; mismatched shapes raise ShapeError."""
    m = Matrix([[1, 2, 3], [x, y, x], [2*y, -50, z*x]])
    doubled = Matrix([[2, 4, 6], [2*x, 2*y, 2*x], [4*y, -100, 2*z*x]])
    assert m + m == doubled
    raises(ShapeError, lambda: m + Matrix(1, 2, [1, 2]))
def test_addition():
    """The + operator and the add() method agree entrywise."""
    lhs = Matrix([[1, 2], [3, 1]])
    rhs = Matrix([[1, 2], [3, 0]])
    expected = Matrix([[2, 4], [6, 1]])
    assert lhs + rhs == expected
    assert lhs.add(rhs) == expected
def test_fancy_index_matrix():
for M in (Matrix, SparseMatrix):
a = M(3, 3, range(9))
assert a == a[:, :]
assert a[1, :] == Matrix(1, 3, [3, 4, 5])
assert a[:, 1] == Matrix([1, 4, 7])
assert a[[0, 1], :] == Matrix([[0, 1, 2], [3, 4, 5]])
assert a[[0, 1], 2] == a[[0, 1], [2]]
assert a[2, [0, 1]] == a[[2], [0, 1]]
assert a[:, [0, 1]] == Matrix([[0, 1], [3, 4], [6, 7]])
assert a[0, 0] == 0
assert a[0:2, :] == Matrix([[0, 1, 2], [3, 4, 5]])
assert a[:, 0:2] == Matrix([[0, 1], [3, 4], [6, 7]])
assert a[::2, 1] == a[[0, 2], 1]
assert a[1, ::2] == a[1, [0, 2]]
a = M(3, 3, range(9))
assert a[[0, 2, 1, 2, 1], :] == Matrix([
[0, 1, 2],
[6, 7, 8],
[3, 4, 5],
[6, 7, 8],
[3, 4, 5]])
assert a[:, [0,2,1,2,1]] == Matrix([
[0, 2, 1, 2, 1],
[3, 5, 4, 5, 4],
[6, 8, 7, 8, 7]])
a = SparseMatrix.zeros(3)
a[1, 2] = 2
a[0, 1] = 3
a[2, 0] = 4
assert a.extract([1, 1], [2]) == Matrix([
[2],
[2]])
assert a.extract([1, 0], [2, 2, 2]) == Matrix([
[2, 2, 2],
[0, 0, 0]])
assert a.extract([1, 0, 1, 2], [2, 0, 1, 0]) == Matrix([
[2, 0, 0, 0],
[0, 0, 3, 0],
[2, 0, 0, 0],
[0, 4, 0, 4]])
def test_multiplication():
a = Matrix((
(1, 2),
(3, 1),
(0, 6),
))
b = Matrix((
(1, 2),
(3, 0),
))
c = a*b
assert c[0, 0] == 7
assert c[0, 1] == 2
assert c[1, 0] == 6
assert c[1, 1] == 6
assert c[2, 0] == 18
assert c[2, 1] == 0
h = matrix_multiply_elementwise(a, c)
assert h == a.multiply_elementwise(c)
assert h[0, 0] == 7
assert h[0, 1] == 4
assert h[1, 0] == 18
assert h[1, 1] == 6
assert h[2, 0] == 0
assert h[2, 1] == 0
raises(ShapeError, lambda: matrix_multiply_elementwise(a, b))
c = b * Symbol("x")
assert isinstance(c, Matrix)
assert c[0, 0] == x
assert c[0, 1] == 2*x
assert c[1, 0] == 3*x
assert c[1, 1] == 0
c2 = x * b
assert c == c2
c = 5 * b
assert isinstance(c, Matrix)
assert c[0, 0] == 5
assert c[0, 1] == 2*5
assert c[1, 0] == 3*5
assert c[1, 1] == 0
def test_power():
raises(NonSquareMatrixError, lambda: Matrix((1, 2))**2)
R = Rational
A = Matrix([[2, 3], [4, 5]])
assert (A**-3)[:] == [R(-269)/8, R(153)/8, R(51)/2, R(-29)/2]
assert (A**5)[:] == [6140, 8097, 10796, 14237]
A = Matrix([[2, 1, 3], [4, 2, 4], [6, 12, 1]])
assert (A**3)[:] == [290, 262, 251, 448, 440, 368, 702, 954, 433]
assert A**0 == eye(3)
assert A**1 == A
assert (Matrix([[2]]) ** 100)[0, 0] == 2**100
assert eye(2)**10000000 == eye(2)
assert Matrix([[1, 2], [3, 4]])**Integer(2) == Matrix([[7, 10], [15, 22]])
A = Matrix([[33, 24], [48, 57]])
assert (A**(S(1)/2))[:] == [5, 2, 4, 7]
A = Matrix([[0, 4], [-1, 5]])
assert (A**(S(1)/2))**2 == A
def test_creation():
raises(ValueError, lambda: Matrix(5, 5, range(20)))
raises(IndexError, lambda: Matrix((1, 2))[2])
with raises(IndexError):
Matrix((1, 2))[1:2] = 5
with raises(IndexError):
Matrix((1, 2))[3] = 5
assert Matrix() == Matrix([]) == Matrix([[]]) == Matrix(0, 0, [])
a = Matrix([[x, 0], [0, 0]])
m = a
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
b = Matrix(2, 2, [x, 0, 0, 0])
m = b
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
assert a == b
assert Matrix(b) == b
c = Matrix((
Matrix((
(1, 2, 3),
(4, 5, 6)
)),
(7, 8, 9)
))
assert c.cols == 3
assert c.rows == 3
assert c[:] == [1, 2, 3, 4, 5, 6, 7, 8, 9]
assert Matrix(eye(2)) == eye(2)
assert ImmutableMatrix(ImmutableMatrix(eye(2))) == ImmutableMatrix(eye(2))
assert ImmutableMatrix(c) == c.as_immutable()
assert Matrix(ImmutableMatrix(c)) == ImmutableMatrix(c).as_mutable()
assert c is not Matrix(c)
def test_tolist():
    """tolist() round-trips the nested-list constructor input."""
    rows = [[S.One, S.Half, x*y, S.Zero], [x, y, z, x**2], [y, -S.One, z*x, 3]]
    assert Matrix(rows).tolist() == rows
def test_as_mutable():
    """as_mutable/as_immutable work even for degenerate (0-row/0-col) shapes."""
    empty_rows = zeros(0, 3)
    assert empty_rows.as_mutable() == empty_rows
    assert empty_rows.as_immutable() == ImmutableMatrix(empty_rows)
    assert zeros(3, 0).as_immutable() == ImmutableMatrix(zeros(3, 0))
def test_determinant():
for M in [Matrix(), Matrix([[1]])]:
assert (
M.det() ==
M.det_bareis() ==
M.berkowitz_det() ==
M.det_LU_decomposition() ==
1)
M = Matrix(( (-3, 2),
( 8, -5) ))
assert M.det(method="bareis") == -1
assert M.det(method="berkowitz") == -1
M = Matrix(( (x, 1),
(y, 2*y) ))
assert M.det(method="bareis") == 2*x*y - y
assert M.det(method="berkowitz") == 2*x*y - y
M = Matrix(( (1, 1, 1),
(1, 2, 3),
(1, 3, 6) ))
assert M.det(method="bareis") == 1
assert M.det(method="berkowitz") == 1
M = Matrix(( ( 3, -2, 0, 5),
(-2, 1, -2, 2),
( 0, -2, 5, 0),
( 5, 0, 3, 4) ))
assert M.det(method="bareis") == -289
assert M.det(method="berkowitz") == -289
M = Matrix(( ( 1, 2, 3, 4),
( 5, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16) ))
assert M.det(method="bareis") == 0
assert M.det(method="berkowitz") == 0
M = Matrix(( (3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(2, 0, 0, 0, 3) ))
assert M.det(method="bareis") == 275
assert M.det(method="berkowitz") == 275
M = Matrix(( (1, 0, 1, 2, 12),
(2, 0, 1, 1, 4),
(2, 1, 1, -1, 3),
(3, 2, -1, 1, 8),
(1, 1, 1, 0, 6) ))
assert M.det(method="bareis") == -55
assert M.det(method="berkowitz") == -55
M = Matrix(( (-5, 2, 3, 4, 5),
( 1, -4, 3, 4, 5),
( 1, 2, -3, 4, 5),
( 1, 2, 3, -2, 5),
( 1, 2, 3, 4, -1) ))
assert M.det(method="bareis") == 11664
assert M.det(method="berkowitz") == 11664
M = Matrix(( ( 2, 7, -1, 3, 2),
( 0, 0, 1, 0, 1),
(-2, 0, 7, 0, 2),
(-3, -2, 4, 5, 3),
( 1, 0, 0, 0, 1) ))
assert M.det(method="bareis") == 123
assert M.det(method="berkowitz") == 123
M = Matrix(( (x, y, z),
(1, 0, 0),
(y, z, x) ))
assert M.det(method="bareis") == z**2 - x*y
assert M.det(method="berkowitz") == z**2 - x*y
def test_det_LU_decomposition():
for M in [Matrix(), Matrix([[1]])]:
assert M.det(method="det_LU") == 1
M = Matrix(( (-3, 2),
( 8, -5) ))
assert M.det(method="det_LU") == -1
M = Matrix(( (x, 1),
(y, 2*y) ))
assert M.det(method="det_LU") == 2*x*y - y
M = Matrix(( (1, 1, 1),
(1, 2, 3),
(1, 3, 6) ))
assert M.det(method="det_LU") == 1
M = Matrix(( ( 3, -2, 0, 5),
(-2, 1, -2, 2),
( 0, -2, 5, 0),
( 5, 0, 3, 4) ))
assert M.det(method="det_LU") == -289
M = Matrix(( (3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(2, 0, 0, 0, 3) ))
assert M.det(method="det_LU") == 275
M = Matrix(( (1, 0, 1, 2, 12),
(2, 0, 1, 1, 4),
(2, 1, 1, -1, 3),
(3, 2, -1, 1, 8),
(1, 1, 1, 0, 6) ))
assert M.det(method="det_LU") == -55
M = Matrix(( (-5, 2, 3, 4, 5),
( 1, -4, 3, 4, 5),
( 1, 2, -3, 4, 5),
( 1, 2, 3, -2, 5),
( 1, 2, 3, 4, -1) ))
assert M.det(method="det_LU") == 11664
M = Matrix(( ( 2, 7, -1, 3, 2),
( 0, 0, 1, 0, 1),
(-2, 0, 7, 0, 2),
(-3, -2, 4, 5, 3),
( 1, 0, 0, 0, 1) ))
assert M.det(method="det_LU") == 123
M = Matrix(( (x, y, z),
(1, 0, 0),
(y, z, x) ))
assert M.det(method="det_LU") == z**2 - x*y
def test_berkowitz_minors():
    """berkowitz_minors returns the principal minors of the matrix."""
    assert Matrix(2, 2, [1, 2, 2, 1]).berkowitz_minors() == (1, -3)
def test_slicing():
    """__getitem__ slicing on rows/columns, including negative indices."""
    ident = eye(4)
    assert ident[:3, :3] == eye(3)
    assert ident[2:4, 0:2] == zeros(2)
    summed = Matrix(3, 3, lambda r, c: r + c)
    assert summed[0, :] == Matrix(1, 3, (0, 1, 2))
    assert summed[1:3, 1] == Matrix(2, 1, (2, 3))
    counted = Matrix([[0, 1, 2, 3], [4, 5, 6, 7],
                      [8, 9, 10, 11], [12, 13, 14, 15]])
    assert counted[:, -1] == Matrix(4, 1, [3, 7, 11, 15])
    assert counted[-2:, :] == Matrix([[8, 9, 10, 11], [12, 13, 14, 15]])
def test_submatrix_assignment():
m = zeros(4)
m[2:4, 2:4] = eye(2)
assert m == Matrix(((0, 0, 0, 0),
(0, 0, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1)))
m[:2, :2] = eye(2)
assert m == eye(4)
m[:, 0] = Matrix(4, 1, (1, 2, 3, 4))
assert m == Matrix(((1, 0, 0, 0),
(2, 1, 0, 0),
(3, 0, 1, 0),
(4, 0, 0, 1)))
m[:, :] = zeros(4)
assert m == zeros(4)
m[:, :] = [(1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12), (13, 14, 15, 16)]
assert m == Matrix(((1, 2, 3, 4),
(5, 6, 7, 8),
(9, 10, 11, 12),
(13, 14, 15, 16)))
m[:2, 0] = [0, 0]
assert m == Matrix(((0, 2, 3, 4),
(0, 6, 7, 8),
(9, 10, 11, 12),
(13, 14, 15, 16)))
def test_extract():
    """extract() picks the requested rows/columns; bad indices raise."""
    m = Matrix(4, 3, lambda r, c: 3*r + c)
    assert m.extract([0, 1, 3], [0, 1]) == Matrix([[0, 1], [3, 4], [9, 10]])
    assert m.extract([0, 3], [0, 0, 2]) == Matrix([[0, 0, 2], [9, 9, 11]])
    assert m.extract(range(4), range(3)) == m
    raises(IndexError, lambda: m.extract([4], [0]))
    raises(IndexError, lambda: m.extract([0], [3]))
def test_reshape():
    """reshape() preserves row-major element order."""
    ident = eye(3)
    assert ident.reshape(1, 9) == Matrix(1, 9, (1, 0, 0, 0, 1, 0, 0, 0, 1))
    grid = Matrix(3, 4, lambda r, c: r + c)
    assert grid.reshape(4, 3) == Matrix([[0, 1, 2], [3, 1, 2],
                                         [3, 4, 2], [3, 4, 5]])
    assert grid.reshape(2, 6) == Matrix([[0, 1, 2, 3, 1, 2],
                                         [3, 4, 2, 3, 4, 5]])
def test_applyfunc():
    """applyfunc() maps a callable over every entry."""
    ident = eye(3)
    assert ident.applyfunc(lambda v: 2*v) == 2*eye(3)
    assert ident.applyfunc(lambda v: 0) == zeros(3)
def test_expand():
m0 = Matrix([[x*(x + y), 2], [((x + y)*y)*x, x*(y + x*(x + y))]])
# Test if expand() returns a matrix
m1 = m0.expand()
assert m1 == Matrix(
[[x*y + x**2, 2], [x*y**2 + y*x**2, x*y + y*x**2 + x**3]])
a = Symbol('a', real=True)
assert Matrix([exp(I*a)]).expand(complex=True) == \
Matrix([cos(a) + I*sin(a)])
assert Matrix([[0, 1, 2], [0, 0, -1], [0, 0, 0]]).exp() == Matrix([
[1, 1, Rational(3, 2)],
[0, 1, -1],
[0, 0, 1]]
)
def test_random():
M = randMatrix(3, 3)
M = randMatrix(3, 3, seed=3)
M = randMatrix(3, 4, 0, 150)
M = randMatrix(3, symmetric=True)
S = M.copy()
S.simplify()
assert S == M # doesn't fail when elements are Numbers, not int
def test_LUdecomp():
testmat = Matrix([[0, 2, 5, 3],
[3, 3, 7, 4],
[8, 4, 0, 2],
[-2, 6, 3, 4]])
L, U, p = testmat.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - testmat == zeros(4)
testmat = Matrix([[6, -2, 7, 4],
[0, 3, 6, 7],
[1, -2, 7, 4],
[-9, 2, 6, 3]])
L, U, p = testmat.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - testmat == zeros(4)
M = Matrix(((1, x, 1), (2, y, 0), (y, 0, z)))
L, U, p = M.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - M == zeros(3)
mL = Matrix((
(1, 0, 0),
(2, 3, 0),
))
assert mL.is_lower is True
assert mL.is_upper is False
mU = Matrix((
(1, 2, 3),
(0, 4, 5),
))
assert mU.is_lower is False
assert mU.is_upper is True
# test FF LUdecomp
M = Matrix([[1, 3, 3],
[3, 2, 6],
[3, 2, 2]])
P, L, Dee, U = M.LUdecompositionFF()
assert P*M == L*Dee.inv()*U
M = Matrix([[1, 2, 3, 4],
[3, -1, 2, 3],
[3, 1, 3, -2],
[6, -1, 0, 2]])
P, L, Dee, U = M.LUdecompositionFF()
assert P*M == L*Dee.inv()*U
M = Matrix([[0, 0, 1],
[2, 3, 0],
[3, 1, 4]])
P, L, Dee, U = M.LUdecompositionFF()
assert P*M == L*Dee.inv()*U
def test_LUsolve():
A = Matrix([[2, 3, 5],
[3, 6, 2],
[8, 3, 6]])
x = Matrix(3, 1, [3, 7, 5])
b = A*x
soln = A.LUsolve(b)
assert soln == x
A = Matrix([[0, -1, 2],
[5, 10, 7],
[8, 3, 4]])
x = Matrix(3, 1, [-1, 2, 5])
b = A*x
soln = A.LUsolve(b)
assert soln == x
def test_QRsolve():
A = Matrix([[2, 3, 5],
[3, 6, 2],
[8, 3, 6]])
x = Matrix(3, 1, [3, 7, 5])
b = A*x
soln = A.QRsolve(b)
assert soln == x
x = Matrix([[1, 2], [3, 4], [5, 6]])
b = A*x
soln = A.QRsolve(b)
assert soln == x
A = Matrix([[0, -1, 2],
[5, 10, 7],
[8, 3, 4]])
x = Matrix(3, 1, [-1, 2, 5])
b = A*x
soln = A.QRsolve(b)
assert soln == x
x = Matrix([[7, 8], [9, 10], [11, 12]])
b = A*x
soln = A.QRsolve(b)
assert soln == x
def test_inverse():
A = eye(4)
assert A.inv() == eye(4)
assert A.inv(method="LU") == eye(4)
assert A.inv(method="ADJ") == eye(4)
A = Matrix([[2, 3, 5],
[3, 6, 2],
[8, 3, 6]])
Ainv = A.inv()
assert A*Ainv == eye(3)
assert A.inv(method="LU") == Ainv
assert A.inv(method="ADJ") == Ainv
# test that immutability is not a problem
cls = ImmutableMatrix
m = cls([[48, 49, 31],
[ 9, 71, 94],
[59, 28, 65]])
assert all(type(m.inv(s)) is cls for s in 'GE ADJ LU'.split())
cls = ImmutableSparseMatrix
m = cls([[48, 49, 31],
[ 9, 71, 94],
[59, 28, 65]])
assert all(type(m.inv(s)) is cls for s in 'CH LDL'.split())
def test_matrix_inverse_mod():
A = Matrix(2, 1, [1, 0])
raises(NonSquareMatrixError, lambda: A.inv_mod(2))
A = Matrix(2, 2, [1, 0, 0, 0])
raises(ValueError, lambda: A.inv_mod(2))
A = Matrix(2, 2, [1, 2, 3, 4])
Ai = Matrix(2, 2, [1, 1, 0, 1])
assert A.inv_mod(3) == Ai
A = Matrix(2, 2, [1, 0, 0, 1])
assert A.inv_mod(2) == A
def test_util():
R = Rational
v1 = Matrix(1, 3, [1, 2, 3])
v2 = Matrix(1, 3, [3, 4, 5])
assert v1.norm() == sqrt(14)
assert v1.project(v2) == Matrix(1, 3, [R(39)/25, R(52)/25, R(13)/5])
assert Matrix.zeros(1, 2) == Matrix(1, 2, [0, 0])
assert ones(1, 2) == Matrix(1, 2, [1, 1])
assert v1.copy() == v1
# cofactor
assert eye(3) == eye(3).cofactorMatrix()
test = Matrix([[1, 3, 2], [2, 6, 3], [2, 3, 6]])
assert test.cofactorMatrix() == \
Matrix([[27, -6, -6], [-12, 2, 3], [-3, 1, 0]])
test = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert test.cofactorMatrix() == \
Matrix([[-3, 6, -3], [6, -12, 6], [-3, 6, -3]])
def test_jacobian_hessian():
L = Matrix(1, 2, [x**2*y, 2*y**2 + x*y])
syms = [x, y]
assert L.jacobian(syms) == Matrix([[2*x*y, x**2], [y, 4*y + x]])
L = Matrix(1, 2, [x, x**2*y**3])
assert L.jacobian(syms) == Matrix([[1, 0], [2*x*y**3, x**2*3*y**2]])
f = x**2*y
syms = [x, y]
assert hessian(f, syms) == Matrix([[2*y, 2*x], [2*x, 0]])
f = x**2*y**3
assert hessian(f, syms) == \
Matrix([[2*y**3, 6*x*y**2], [6*x*y**2, 6*x**2*y]])
f = z + x*y**2
g = x**2 + 2*y**3
ans = Matrix([[0, 2*y],
[2*y, 2*x]])
assert ans == hessian(f, Matrix([x, y]))
assert ans == hessian(f, Matrix([x, y]).T)
assert hessian(f, (y, x), [g]) == Matrix([
[ 0, 6*y**2, 2*x],
[6*y**2, 2*x, 2*y],
[ 2*x, 2*y, 0]])
def test_QR():
A = Matrix([[1, 2], [2, 3]])
Q, S = A.QRdecomposition()
R = Rational
assert Q == Matrix([
[ 5**R(-1, 2), (R(2)/5)*(R(1)/5)**R(-1, 2)],
[2*5**R(-1, 2), (-R(1)/5)*(R(1)/5)**R(-1, 2)]])
assert S == Matrix([[5**R(1, 2), 8*5**R(-1, 2)], [0, (R(1)/5)**R(1, 2)]])
assert Q*S == A
assert Q.T * Q == eye(2)
A = Matrix([[1, 1, 1], [1, 1, 3], [2, 3, 4]])
Q, R = A.QRdecomposition()
assert Q.T * Q == eye(Q.cols)
assert R.is_upper
assert A == Q*R
def test_QR_non_square():
A = Matrix([[9, 0, 26], [12, 0, -7], [0, 4, 4], [0, -3, -3]])
Q, R = A.QRdecomposition()
assert Q.T * Q == eye(Q.cols)
assert R.is_upper
assert A == Q*R
A = Matrix([[1, -1, 4], [1, 4, -2], [1, 4, 2], [1, -1, 0]])
Q, R = A.QRdecomposition()
assert Q.T * Q == eye(Q.cols)
assert R.is_upper
assert A == Q*R
def test_nullspace():
# first test reduced row-ech form
R = Rational
M = Matrix([[5, 7, 2, 1],
[1, 6, 2, -1]])
out, tmp = M.rref()
assert out == Matrix([[1, 0, -R(2)/23, R(13)/23],
[0, 1, R(8)/23, R(-6)/23]])
M = Matrix([[-5, -1, 4, -3, -1],
[ 1, -1, -1, 1, 0],
[-1, 0, 0, 0, 0],
[ 4, 1, -4, 3, 1],
[-2, 0, 2, -2, -1]])
assert M*M.nullspace()[0] == Matrix(5, 1, [0]*5)
M = Matrix([[ 1, 3, 0, 2, 6, 3, 1],
[-2, -6, 0, -2, -8, 3, 1],
[ 3, 9, 0, 0, 6, 6, 2],
[-1, -3, 0, 1, 0, 9, 3]])
out, tmp = M.rref()
assert out == Matrix([[1, 3, 0, 0, 2, 0, 0],
[0, 0, 0, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 1, R(1)/3],
[0, 0, 0, 0, 0, 0, 0]])
# now check the vectors
basis = M.nullspace()
assert basis[0] == Matrix([-3, 1, 0, 0, 0, 0, 0])
assert basis[1] == Matrix([0, 0, 1, 0, 0, 0, 0])
assert basis[2] == Matrix([-2, 0, 0, -2, 1, 0, 0])
assert basis[3] == Matrix([0, 0, 0, 0, 0, R(-1)/3, 1])
# issue 4797; just see that we can do it when rows > cols
M = Matrix([[1, 2], [2, 4], [3, 6]])
assert M.nullspace()
def test_columnspace():
M = Matrix([[ 1, 2, 0, 2, 5],
[-2, -5, 1, -1, -8],
[ 0, -3, 3, 4, 1],
[ 3, 6, 0, -7, 2]])
# now check the vectors
basis = M.columnspace()
assert basis[0] == Matrix([1, -2, 0, 3])
assert basis[1] == Matrix([2, -5, -3, 6])
assert basis[2] == Matrix([2, -1, 4, -7])
#check by columnspace definition
a, b, c, d, e = symbols('a b c d e')
X = Matrix([a, b, c, d, e])
for i in range(len(basis)):
eq=M*X-basis[i]
assert len(solve(eq, X)) != 0
#check if rank-nullity theorem holds
assert M.rank() == len(basis)
assert len(M.nullspace()) + len(M.columnspace()) == M.cols
def test_wronskian():
assert wronskian([cos(x), sin(x)], x) == cos(x)**2 + sin(x)**2
assert wronskian([exp(x), exp(2*x)], x) == exp(3*x)
assert wronskian([exp(x), x], x) == exp(x) - x*exp(x)
assert wronskian([1, x, x**2], x) == 2
w1 = -6*exp(x)*sin(x)*x + 6*cos(x)*exp(x)*x**2 - 6*exp(x)*cos(x)*x - \
exp(x)*cos(x)*x**3 + exp(x)*sin(x)*x**3
assert wronskian([exp(x), cos(x), x**3], x).expand() == w1
assert wronskian([exp(x), cos(x), x**3], x, method='berkowitz').expand() \
== w1
w2 = -x**3*cos(x)**2 - x**3*sin(x)**2 - 6*x*cos(x)**2 - 6*x*sin(x)**2
assert wronskian([sin(x), cos(x), x**3], x).expand() == w2
assert wronskian([sin(x), cos(x), x**3], x, method='berkowitz').expand() \
== w2
assert wronskian([], x) == 1
def test_eigen():
    """Eigenvalues and (left/right) eigenvectors: identity, integer, symbolic
    and complex-entry matrices; also the effect of the ``simplify`` flag and a
    numeric cross-check via 2-digit evaluation."""
    R = Rational
    assert eye(3).charpoly(x) == Poly((x - 1)**3, x)
    assert eye(3).charpoly(y) == Poly((y - 1)**3, y)
    M = Matrix([[1, 0, 0],
                [0, 1, 0],
                [0, 0, 1]])
    assert M.eigenvals(multiple=False) == {S.One: 3}
    assert M.eigenvects() == (
        [(1, 3, [Matrix([1, 0, 0]),
                 Matrix([0, 1, 0]),
                 Matrix([0, 0, 1])])])
    assert M.left_eigenvects() == (
        [(1, 3, [Matrix([[1, 0, 0]]),
                 Matrix([[0, 1, 0]]),
                 Matrix([[0, 0, 1]])])])
    M = Matrix([[0, 1, 1],
                [1, 0, 0],
                [1, 1, 1]])
    assert M.eigenvals() == {2*S.One: 1, -S.One: 1, S.Zero: 1}
    assert M.eigenvects() == (
        [
            (-1, 1, [Matrix([-1, 1, 0])]),
            ( 0, 1, [Matrix([0, -1, 1])]),
            ( 2, 1, [Matrix([R(2, 3), R(1, 3), 1])])
        ])
    assert M.left_eigenvects() == (
        [
            (-1, 1, [Matrix([[-2, 1, 1]])]),
            (0, 1, [Matrix([[-1, -1, 1]])]),
            (2, 1, [Matrix([[1, 1, 1]])])
        ])
    a = Symbol('a')
    M = Matrix([[a, 0],
                [0, 1]])
    assert M.eigenvals() == {a: 1, S.One: 1}
    M = Matrix([[1, -1],
                [1, 3]])
    assert M.eigenvects() == ([(2, 2, [Matrix(2, 1, [-1, 1])])])
    assert M.left_eigenvects() == ([(2, 2, [Matrix([[1, 1]])])])
    M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    a = R(15, 2)
    b = 3*33**R(1, 2)
    c = R(13, 2)
    d = (R(33, 8) + 3*b/8)
    e = (R(33, 8) - 3*b/8)
    # Helper: evaluate to n significant digits and stringify, so exact and
    # computed eigen-data can be compared numerically.
    def NS(e, n):
        return str(N(e, n))
    r = [
        (a - b/2, 1, [Matrix([(12 + 24/(c - b/2))/((c - b/2)*e) + 3/(c - b/2),
                              (6 + 12/(c - b/2))/e, 1])]),
        ( 0, 1, [Matrix([1, -2, 1])]),
        (a + b/2, 1, [Matrix([(12 + 24/(c + b/2))/((c + b/2)*d) + 3/(c + b/2),
                              (6 + 12/(c + b/2))/d, 1])]),
    ]
    r1 = [(NS(r[i][0], 2), NS(r[i][1], 2),
           [NS(j, 2) for j in r[i][2][0]]) for i in range(len(r))]
    r = M.eigenvects()
    r2 = [(NS(r[i][0], 2), NS(r[i][1], 2),
           [NS(j, 2) for j in r[i][2][0]]) for i in range(len(r))]
    assert sorted(r1) == sorted(r2)
    eps = Symbol('eps', real=True)
    M = Matrix([[abs(eps), I*eps ],
                [-I*eps, abs(eps) ]])
    assert M.eigenvects() == (
        [
            ( 0, 1, [Matrix([[-I*eps/abs(eps)], [1]])]),
            ( 2*abs(eps), 1, [ Matrix([[I*eps/abs(eps)], [1]]) ] ),
        ])
    assert M.left_eigenvects() == (
        [
            (0, 1, [Matrix([[I*eps/Abs(eps), 1]])]),
            (2*Abs(eps), 1, [Matrix([[-I*eps/Abs(eps), 1]])])
        ])
    M = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
    # With simplify=False the eigenvector entries keep denominators (> 1);
    # simplify=True clears them.
    M._eigenvects = M.eigenvects(simplify=False)
    assert max(i.q for i in M._eigenvects[0][2][0]) > 1
    M._eigenvects = M.eigenvects(simplify=True)
    assert max(i.q for i in M._eigenvects[0][2][0]) == 1
    M = Matrix([[S(1)/4, 1], [1, 1]])
    assert M.eigenvects(simplify=True) == [
        (S(5)/8 + sqrt(73)/8, 1, [Matrix([[8/(3 + sqrt(73))], [1]])]),
        (-sqrt(73)/8 + S(5)/8, 1, [Matrix([[8/(-sqrt(73) + 3)], [1]])])]
    assert M.eigenvects(simplify=False) == [
        (Rational(5, 8) + sqrt(73)/8, 1,
         [Matrix([[-1/(-sqrt(73)/8 + Rational(-3, 8))], [1]])]),
        (-sqrt(73)/8 + Rational(5, 8), 1,
         [Matrix([[-1/(Rational(-3, 8) + sqrt(73)/8)], [1]])]),
    ]
    m = Matrix([[1, .6, .6], [.6, .9, .9], [.9, .6, .6]])
    evals = {-sqrt(385)/20 + S(5)/4: 1, sqrt(385)/20 + S(5)/4: 1, S.Zero: 1}
    assert m.eigenvals() == evals
    nevals = list(sorted(m.eigenvals(rational=False).keys()))
    sevals = list(sorted(evals.keys()))
    assert all(abs(nevals[i] - sevals[i]) < 1e-9 for i in range(len(nevals)))
def test_subs():
    """Substitution into matrices: single pair, list of lists, list of
    tuples, dict, and simultaneous substitution; also subs on class `eye`."""
    assert Matrix([[1, x], [x, 4]]).subs(x, 5) == Matrix([[1, 5], [5, 4]])
    assert Matrix([[x, 2], [x + y, 4]]).subs([[x, -1], [y, -2]]) == \
        Matrix([[-1, 2], [-3, 4]])
    assert Matrix([[x, 2], [x + y, 4]]).subs([(x, -1), (y, -2)]) == \
        Matrix([[-1, 2], [-3, 4]])
    assert Matrix([[x, 2], [x + y, 4]]).subs({x: -1, y: -2}) == \
        Matrix([[-1, 2], [-3, 4]])
    # simultaneous=True swaps x and y without cascading substitutions.
    assert Matrix([x*y]).subs({x: y - 1, y: x - 1}, simultaneous=True) == \
        Matrix([(x - 1)*(y - 1)])
    for cls in classes:
        assert Matrix([[2, 0], [0, 2]]) == cls.eye(2).subs(1, 2)
def test_simplify():
    """In-place Matrix.simplify(): rref with/without simplification differ,
    entrywise simplification of a symbolic matrix, and the ``ratio`` kwarg.

    Note: Matrix.simplify() mutates the matrix and returns None.
    """
    f, n = symbols('f, n')
    m = Matrix([[1, x], [x + 1/x, x - 1]])
    m = m.row_join(eye(m.cols))
    # Identity simplifier vs. real simplification must give different rrefs.
    raw = m.rref(simplify=lambda x: x)[0]
    assert raw != m.rref(simplify=True)[0]
    M = Matrix([[ 1/x + 1/y, (x + x*y) / x ],
                [ (f(x) + y*f(x))/f(x), 2 * (1/n - cos(n * pi)/n) / pi ]])
    M.simplify()
    assert M == Matrix([[ (x + y)/(x * y), 1 + y ],
                        [ 1 + y, 2*((1 - 1*cos(pi*n))/(pi*n)) ]])
    eq = (1 + x)**2
    M = Matrix([[eq]])
    M.simplify()
    # Default ratio leaves the factored form untouched...
    assert M == Matrix([[eq]])
    # ...while ratio=oo simplifies in place (was a dead `... == M` comparison
    # whose result was discarded; only the in-place call is intended).
    M.simplify(ratio=oo)
    assert M == Matrix([[eq.simplify(ratio=oo)]])
def test_transpose():
    """Transpose of a 2x10 matrix: .T, double transpose, and transpose()."""
    M = Matrix([[1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
                [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]])
    assert M.T == Matrix( [ [1, 1],
                            [2, 2],
                            [3, 3],
                            [4, 4],
                            [5, 5],
                            [6, 6],
                            [7, 7],
                            [8, 8],
                            [9, 9],
                            [0, 0] ])
    # Transposition is an involution and .T is an alias of transpose().
    assert M.T.T == M
    assert M.T == M.transpose()
def test_conjugate():
    """Transpose (.T), conjugate (.C) and Hermitian conjugate (.H) of a
    complex-entry matrix; .H must equal the conjugate transpose."""
    M = Matrix([[0, I, 5],
                [1, 2, 0]])
    assert M.T == Matrix([[0, 1],
                          [I, 2],
                          [5, 0]])
    assert M.C == Matrix([[0, -I, 5],
                          [1, 2, 0]])
    assert M.C == M.conjugate()
    assert M.H == M.T.C
    assert M.H == Matrix([[ 0, 1],
                          [-I, 2],
                          [ 5, 0]])
def test_conj_dirac():
    """Dirac conjugate .D: raises AttributeError for a non-4x4 matrix and
    matches the expected value on a 4x4 upper-triangular example."""
    raises(AttributeError, lambda: eye(3).D)
    M = Matrix([[1, I, I, I],
                [0, 1, I, I],
                [0, 0, 1, I],
                [0, 0, 0, 1]])
    assert M.D == Matrix([[ 1, 0, 0, 0],
                          [-I, 1, 0, 0],
                          [-I, -I, -1, 0],
                          [-I, -I, I, -1]])
def test_trace():
    """trace() returns the sum of the diagonal entries."""
    diag_matrix = Matrix([[1, 0, 0],
                          [0, 5, 0],
                          [0, 0, 8]])
    assert diag_matrix.trace() == 14
def test_shape():
    """shape is the (rows, cols) tuple."""
    mat = Matrix([[x, 0, 0],
                  [0, y, 0]])
    assert mat.shape == (2, 3)
def test_col_row_op():
    """In-place row_op/col_op, and that row()/col()/slices return copies so
    mutating the result does not change the original matrix."""
    M = Matrix([[x, 0, 0],
                [0, y, 0]])
    M.row_op(1, lambda r, j: r + j + 1)
    assert M == Matrix([[x, 0, 0],
                        [1, y + 2, 3]])
    M.col_op(0, lambda c, j: c + y**j)
    assert M == Matrix([[x + 1, 0, 0],
                        [1 + y, y + 2, 3]])
    # neither row nor slice give copies that allow the original matrix to
    # be changed
    assert M.row(0) == Matrix([[x + 1, 0, 0]])
    r1 = M.row(0)
    r1[0] = 42
    assert M[0, 0] == x + 1
    r1 = M[0, :-1]  # also testing negative slice
    r1[0] = 42
    assert M[0, 0] == x + 1
    c1 = M.col(0)
    assert c1 == Matrix([x + 1, 1 + y])
    c1[0] = 0
    assert M[0, 0] == x + 1
    c1 = M[:, 0]
    c1[0] = 42
    assert M[0, 0] == x + 1
def test_zip_row_op():
    """In-place zip_row_op combines two rows elementwise (row1 += 2*row0).

    Only mutable matrix classes are exercised.
    """
    for cls in classes[:2]:  # XXX: immutable matrices don't support row ops
        M = cls.eye(3)
        M.zip_row_op(1, 0, lambda v, u: v + 2*u)
        assert M == cls([[1, 0, 0],
                         [2, 1, 0],
                         [0, 0, 1]])
        M = cls.eye(3)*2
        M[0, 1] = -1
        # (removed a dead trailing `; M` expression that had no effect)
        M.zip_row_op(1, 0, lambda v, u: v + 2*u)
        assert M == cls([[2, -1, 0],
                         [4, 0, 0],
                         [0, 0, 2]])
def test_issue_3950():
    """Matrix equality/containment vs. non-matrix objects (issue 3950)."""
    m = Matrix([1, 2, 3])
    a = Matrix([1, 2, 3])
    b = Matrix([2, 2, 3])
    assert not (m in [])
    assert not (m in [1])
    assert m != 1
    assert m == a
    assert m != b
def test_issue_3981():
    """Indexing with objects implementing __index__ works for both reading
    and writing, in 1-D and 2-D forms (issue 3981)."""
    class Index1(object):
        # Acts as integer index 1 via the __index__ protocol.
        def __index__(self):
            return 1

    class Index2(object):
        # Acts as integer index 2 via the __index__ protocol.
        def __index__(self):
            return 2
    index1 = Index1()
    index2 = Index2()
    m = Matrix([1, 2, 3])
    assert m[index2] == 3
    m[index2] = 5
    assert m[2] == 5
    m = Matrix([[1, 2, 3], [4, 5, 6]])
    assert m[index1, index2] == 6
    assert m[1, index2] == 6
    assert m[index1, 2] == 6
    m[index1, index2] = 4
    assert m[1, 2] == 4
    m[1, index2] = 6
    assert m[1, 2] == 6
    m[index1, 2] = 8
    assert m[1, 2] == 8
def test_evalf():
    """Matrix evalf()/n() apply elementwise, including a precision arg."""
    a = Matrix([sqrt(5), 6])
    assert all(a.evalf()[i] == a[i].evalf() for i in range(2))
    assert all(a.evalf(2)[i] == a[i].evalf(2) for i in range(2))
    assert all(a.n(2)[i] == a[i].n(2) for i in range(2))
def test_is_symbolic():
    """is_symbolic() is True iff any entry contains a free symbol."""
    a = Matrix([[x, x], [x, x]])
    assert a.is_symbolic() is True
    a = Matrix([[1, 2, 3, 4], [5, 6, 7, 8]])
    assert a.is_symbolic() is False
    a = Matrix([[1, 2, 3, 4], [5, 6, x, 8]])
    assert a.is_symbolic() is True
    a = Matrix([[1, x, 3]])
    assert a.is_symbolic() is True
    a = Matrix([[1, 2, 3]])
    assert a.is_symbolic() is False
    a = Matrix([[1], [x], [3]])
    assert a.is_symbolic() is True
    a = Matrix([[1], [2], [3]])
    assert a.is_symbolic() is False
def test_is_upper():
    """A row vector is upper triangular; a column vector is not."""
    row_vec = Matrix([[1, 2, 3]])
    assert row_vec.is_upper is True
    col_vec = Matrix([[1], [2], [3]])
    assert col_vec.is_upper is False
def test_is_lower():
    """A column vector is lower triangular; a row vector is not."""
    row_vec = Matrix([[1, 2, 3]])
    assert row_vec.is_lower is False
    col_vec = Matrix([[1], [2], [3]])
    assert col_vec.is_lower is True
def test_is_nilpotent():
    """is_nilpotent(): strictly-upper-triangular matrix is, identity is not."""
    a = Matrix(4, 4, [0, 2, 1, 6, 0, 0, 1, 2, 0, 0, 0, 3, 0, 0, 0, 0])
    assert a.is_nilpotent()
    a = Matrix([[1, 0], [0, 1]])
    assert not a.is_nilpotent()
def test_zeros_ones_fill():
    """zeros()/ones() constructors, in-place fill(), and that a single size
    argument produces a square matrix."""
    n, m = 3, 5
    a = zeros(n, m)
    a.fill( 5 )
    b = 5 * ones(n, m)
    assert a == b
    assert a.rows == b.rows == 3
    assert a.cols == b.cols == 5
    assert a.shape == b.shape == (3, 5)
    assert zeros(2) == zeros(2, 2)
    assert ones(2) == ones(2, 2)
    assert zeros(2, 3) == Matrix(2, 3, [0]*6)
    assert ones(2, 3) == Matrix(2, 3, [1]*6)
def test_empty_zeros():
    """zeros() with a zero dimension keeps the other dimension's size."""
    a = zeros(0)
    assert a == Matrix()
    a = zeros(0, 2)
    assert a.rows == 0
    assert a.cols == 2
    a = zeros(2, 0)
    assert a.rows == 2
    assert a.cols == 0
def test_issue_3749():
    """Elementwise calculus on matrices: diff, limit (at oo and 0), and
    integrate (issue 3749)."""
    a = Matrix([[x**2, x*y], [x*sin(y), x*cos(y)]])
    assert a.diff(x) == Matrix([[2*x, y], [sin(y), cos(y)]])
    assert Matrix([
        [x, -x, x**2],
        [exp(x), 1/x - exp(-x), x + 1/x]]).limit(x, oo) == \
        Matrix([[oo, -oo, oo], [oo, 0, oo]])
    assert Matrix([
        [(exp(x) - 1)/x, 2*x + y*x, x**x ],
        [1/x, abs(x), abs(sin(x + 1))]]).limit(x, 0) == \
        Matrix([[1, 0, 1], [oo, 0, sin(1)]])
    assert a.integrate(x) == Matrix([
        [Rational(1, 3)*x**3, y*x**2/2],
        [x**2*sin(y)/2, x**2*cos(y)/2]])
def test_inv_iszerofunc():
    """inv() with a custom iszerofunc agrees with the ADJ method on a
    permutation of the identity (which has zero pivots in natural order)."""
    A = eye(4)
    A.col_swap(0, 1)
    for method in "GE", "LU":
        assert A.inv(method=method, iszerofunc=lambda x: x == 0) == \
            A.inv(method="ADJ")
def test_jacobian_metrics():
    """Jacobian of polar coordinates and the induced metric J^T J, which must
    be diag(1, rho**2); row/column orientation of args must not matter."""
    rho, phi = symbols("rho,phi")
    X = Matrix([rho*cos(phi), rho*sin(phi)])
    Y = Matrix([rho, phi])
    J = X.jacobian(Y)
    assert J == X.jacobian(Y.T)
    assert J == (X.T).jacobian(Y)
    assert J == (X.T).jacobian(Y.T)
    g = J.T*eye(J.shape[0])*J
    g = g.applyfunc(trigsimp)
    assert g == Matrix([[1, 0], [0, rho**2]])
def test_jacobian2():
    """Jacobian of a 3-component map of polar coordinates (3x2 result)."""
    rho, phi = symbols("rho,phi")
    X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
    Y = Matrix([rho, phi])
    J = Matrix([
        [cos(phi), -rho*sin(phi)],
        [sin(phi), rho*cos(phi)],
        [ 2*rho, 0],
    ])
    assert X.jacobian(Y) == J
def test_issue_4564():
    """Jacobian shape for sliced vectors: rows track the function slice and
    cols track the variable slice (issue 4564)."""
    X = Matrix([exp(x + y + z), exp(x + y + z), exp(x + y + z)])
    Y = Matrix([x, y, z])
    for i in range(1, 3):
        for j in range(1, 3):
            X_slice = X[:i, :]
            Y_slice = Y[:j, :]
            J = X_slice.jacobian(Y_slice)
            assert J.rows == i
            assert J.cols == j
            # d/dv exp(x+y+z) == exp(x+y+z) for each variable, so each
            # column of the Jacobian equals the function slice itself.
            for k in range(j):
                assert J[:, k] == X_slice
def test_nonvectorJacobian():
    """jacobian() rejects non-vector arguments with TypeError."""
    X = Matrix([[exp(x + y + z), exp(x + y + z)],
                [exp(x + y + z), exp(x + y + z)]])
    raises(TypeError, lambda: X.jacobian(Matrix([x, y, z])))
    X = X[0, :]
    Y = Matrix([[x, y], [x, z]])
    raises(TypeError, lambda: X.jacobian(Y))
    raises(TypeError, lambda: X.jacobian(Matrix([ [x, y], [x, z] ])))
def test_vec():
    """vec() stacks the columns into a single column vector."""
    mat = Matrix([[1, 3], [2, 4]])
    stacked = mat.vec()
    assert stacked.cols == 1
    # Column-major order: entries come out as 1, 2, 3, 4.
    for idx in range(4):
        assert stacked[idx] == idx + 1
def test_vech():
    """vech() half-vectorization of symmetric matrices, with and without the
    diagonal, and with symmetry checking disabled."""
    m = Matrix([[1, 2], [2, 3]])
    m_vech = m.vech()
    assert m_vech.cols == 1
    for i in range(3):
        assert m_vech[i] == i + 1
    m_vech = m.vech(diagonal=False)
    assert m_vech[0] == 2
    m = Matrix([[1, x*(x + y)], [y*x + x**2, 1]])
    m_vech = m.vech(diagonal=False)
    assert m_vech[0] == x*(x + y)
    # Not symmetric, but accepted when check_symmetry=False; the lower
    # triangle entry is returned.
    m = Matrix([[1, x*(x + y)], [y*x, 1]])
    m_vech = m.vech(diagonal=False, check_symmetry=False)
    assert m_vech[0] == y*x
def test_vech_errors():
    """vech() errors: ShapeError for non-square, ValueError for
    non-symmetric input."""
    m = Matrix([[1, 3]])
    raises(ShapeError, lambda: m.vech())
    m = Matrix([[1, 3], [2, 4]])
    raises(ValueError, lambda: m.vech())
    raises(ShapeError, lambda: Matrix([ [1, 3] ]).vech())
    raises(ValueError, lambda: Matrix([ [1, 3], [2, 4] ]).vech())
def test_diag():
    """diag() builds block-diagonal matrices from matrices, scalars, lists
    and lists-of-lists, in the given order."""
    a = Matrix([[1, 2], [2, 3]])
    b = Matrix([[3, x], [y, 3]])
    c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
    assert diag(a, b, b) == Matrix([
        [1, 2, 0, 0, 0, 0],
        [2, 3, 0, 0, 0, 0],
        [0, 0, 3, x, 0, 0],
        [0, 0, y, 3, 0, 0],
        [0, 0, 0, 0, 3, x],
        [0, 0, 0, 0, y, 3],
    ])
    assert diag(a, b, c) == Matrix([
        [1, 2, 0, 0, 0, 0, 0],
        [2, 3, 0, 0, 0, 0, 0],
        [0, 0, 3, x, 0, 0, 0],
        [0, 0, y, 3, 0, 0, 0],
        [0, 0, 0, 0, 3, x, 3],
        [0, 0, 0, 0, y, 3, z],
        [0, 0, 0, 0, x, y, z],
    ])
    assert diag(a, c, b) == Matrix([
        [1, 2, 0, 0, 0, 0, 0],
        [2, 3, 0, 0, 0, 0, 0],
        [0, 0, 3, x, 3, 0, 0],
        [0, 0, y, 3, z, 0, 0],
        [0, 0, x, y, z, 0, 0],
        [0, 0, 0, 0, 0, 3, x],
        [0, 0, 0, 0, 0, y, 3],
    ])
    a = Matrix([x, y, z])
    b = Matrix([[1, 2], [3, 4]])
    c = Matrix([[5, 6]])
    assert diag(a, 7, b, c) == Matrix([
        [x, 0, 0, 0, 0, 0],
        [y, 0, 0, 0, 0, 0],
        [z, 0, 0, 0, 0, 0],
        [0, 7, 0, 0, 0, 0],
        [0, 0, 1, 2, 0, 0],
        [0, 0, 3, 4, 0, 0],
        [0, 0, 0, 0, 5, 6],
    ])
    # A plain list is a column block; a list-of-lists is a row block.
    assert diag(1, [2, 3], [[4, 5]]) == Matrix([
        [1, 0, 0, 0],
        [0, 2, 0, 0],
        [0, 3, 0, 0],
        [0, 0, 4, 5]])
def test_get_diag_blocks1():
    """get_diag_blocks() on a matrix with a single block returns [itself]."""
    a = Matrix([[1, 2], [2, 3]])
    b = Matrix([[3, x], [y, 3]])
    c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
    assert a.get_diag_blocks() == [a]
    assert b.get_diag_blocks() == [b]
    assert c.get_diag_blocks() == [c]
def test_get_diag_blocks2():
    """get_diag_blocks() recovers the blocks of a diag() construction, in
    order."""
    a = Matrix([[1, 2], [2, 3]])
    b = Matrix([[3, x], [y, 3]])
    c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
    assert diag(a, b, b).get_diag_blocks() == [a, b, b]
    assert diag(a, b, c).get_diag_blocks() == [a, b, c]
    assert diag(a, c, b).get_diag_blocks() == [a, c, b]
    assert diag(c, c, b).get_diag_blocks() == [c, c, b]
def test_inv_block():
    """inv(try_block_diag=True) inverts block-diagonal matrices blockwise,
    also when combined with an explicit inversion method."""
    a = Matrix([[1, 2], [2, 3]])
    b = Matrix([[3, x], [y, 3]])
    c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
    A = diag(a, b, b)
    assert A.inv(try_block_diag=True) == diag(a.inv(), b.inv(), b.inv())
    A = diag(a, b, c)
    assert A.inv(try_block_diag=True) == diag(a.inv(), b.inv(), c.inv())
    A = diag(a, c, b)
    assert A.inv(try_block_diag=True) == diag(a.inv(), c.inv(), b.inv())
    A = diag(a, a, b, a, c, a)
    assert A.inv(try_block_diag=True) == diag(
        a.inv(), a.inv(), b.inv(), a.inv(), c.inv(), a.inv())
    assert A.inv(try_block_diag=True, method="ADJ") == diag(
        a.inv(method="ADJ"), a.inv(method="ADJ"), b.inv(method="ADJ"),
        a.inv(method="ADJ"), c.inv(method="ADJ"), a.inv(method="ADJ"))
def test_creation_args():
    """
    Check that matrix dimensions can be specified using any reasonable type
    (see issue 4614).
    """
    raises(ValueError, lambda: zeros(3, -1))
    raises(TypeError, lambda: zeros(1, 2, 3, 4))
    # `long` here comes from the py2/py3 compatibility layer, not a builtin.
    assert zeros(long(3)) == zeros(3)
    assert zeros(Integer(3)) == zeros(3)
    assert zeros(3.) == zeros(3)
    assert eye(long(3)) == eye(3)
    assert eye(Integer(3)) == eye(3)
    assert eye(3.) == eye(3)
    assert ones(long(3), Integer(4)) == ones(3, 4)
    raises(TypeError, lambda: Matrix(5))
    raises(TypeError, lambda: Matrix(1, 2))
def test_diagonal_symmetrical():
    """is_diagonal()/is_symmetric() on various shapes, including non-square
    matrices and a case where symmetry needs simplification to detect."""
    m = Matrix(2, 2, [0, 1, 1, 0])
    assert not m.is_diagonal()
    assert m.is_symmetric()
    assert m.is_symmetric(simplify=False)
    m = Matrix(2, 2, [1, 0, 0, 1])
    assert m.is_diagonal()
    m = diag(1, 2, 3)
    assert m.is_diagonal()
    assert m.is_symmetric()
    m = Matrix(3, 3, [1, 0, 0, 0, 2, 0, 0, 0, 3])
    assert m == diag(1, 2, 3)
    m = Matrix(2, 3, zeros(2, 3))
    assert not m.is_symmetric()
    assert m.is_diagonal()
    m = Matrix(((5, 0), (0, 6), (0, 0)))
    assert m.is_diagonal()
    m = Matrix(((5, 0, 0), (0, 6, 0)))
    assert m.is_diagonal()
    # (x + 1)**2 == x**2 + 2*x + 1 only after expansion, so symmetry is seen
    # with simplification but not without it.
    m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2, 2, 0, y, 0, 3])
    assert m.is_symmetric()
    assert not m.is_symmetric(simplify=False)
    assert m.expand().is_symmetric(simplify=False)
def test_diagonalization():
    """diagonalize()/is_diagonalizable(): non-square, diagonalizable (real,
    complex-only, symbolic) and non-diagonalizable matrices."""
    m = Matrix(3, 2, [-3, 1, -3, 20, 3, 10])
    assert not m.is_diagonalizable()
    assert not m.is_symmetric()
    raises(NonSquareMatrixError, lambda: m.diagonalize())
    # diagonalizable
    m = diag(1, 2, 3)
    (P, D) = m.diagonalize()
    assert P == eye(3)
    assert D == m
    m = Matrix(2, 2, [0, 1, 1, 0])
    assert m.is_symmetric()
    assert m.is_diagonalizable()
    (P, D) = m.diagonalize()
    assert P.inv() * m * P == D
    m = Matrix(2, 2, [1, 0, 0, 3])
    assert m.is_symmetric()
    assert m.is_diagonalizable()
    (P, D) = m.diagonalize()
    assert P.inv() * m * P == D
    assert P == eye(2)
    assert D == m
    m = Matrix(2, 2, [1, 1, 0, 0])
    assert m.is_diagonalizable()
    (P, D) = m.diagonalize()
    assert P.inv() * m * P == D
    m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
    assert m.is_diagonalizable()
    (P, D) = m.diagonalize()
    assert P.inv() * m * P == D
    # Eigenvector entries should come out denominator-free.
    for i in P:
        assert i.as_numer_denom()[1] == 1
    m = Matrix(2, 2, [1, 0, 0, 0])
    assert m.is_diagonal()
    assert m.is_diagonalizable()
    (P, D) = m.diagonalize()
    assert P.inv() * m * P == D
    assert P == Matrix([[0, 1], [1, 0]])
    # diagonalizable, complex only
    m = Matrix(2, 2, [0, 1, -1, 0])
    assert not m.is_diagonalizable(True)
    raises(MatrixError, lambda: m.diagonalize(True))
    assert m.is_diagonalizable()
    (P, D) = m.diagonalize()
    assert P.inv() * m * P == D
    # not diagonalizable
    m = Matrix(2, 2, [0, 1, 0, 0])
    assert not m.is_diagonalizable()
    raises(MatrixError, lambda: m.diagonalize())
    m = Matrix(3, 3, [-3, 1, -3, 20, 3, 10, 2, -2, 4])
    assert not m.is_diagonalizable()
    raises(MatrixError, lambda: m.diagonalize())
    # symbolic
    a, b, c, d = symbols('a b c d')
    m = Matrix(2, 2, [a, c, c, b])
    assert m.is_symmetric()
    assert m.is_diagonalizable()
@XFAIL
def test_eigen_vects():
    """Expected failure: diagonalization over the reals of a matrix with a
    complex eigenvalue (see issue 5292)."""
    m = Matrix(2, 2, [1, 0, 0, I])
    raises(NotImplementedError, lambda: m.is_diagonalizable(True))
    # !!! bug because of eigenvects() or roots(x**2 + (-1 - I)*x + I, x)
    # see issue 5292
    assert not m.is_diagonalizable(True)
    raises(MatrixError, lambda: m.diagonalize(True))
    (P, D) = m.diagonalize(True)
def test_jordan_form():
    """jordan_form()/jordan_cells(): diagonalizable matrices, matrices with
    non-trivial Jordan blocks, and matrices already in Jordan form (whose
    block structure cannot be inferred from multiplicities alone)."""
    m = Matrix(3, 2, [-3, 1, -3, 20, 3, 10])
    raises(NonSquareMatrixError, lambda: m.jordan_form())
    # diagonalizable
    m = Matrix(3, 3, [7, -12, 6, 10, -19, 10, 12, -24, 13])
    Jmust = Matrix(3, 3, [-1, 0, 0, 0, 1, 0, 0, 0, 1])
    P, J = m.jordan_form()
    assert Jmust == J
    assert Jmust == m.diagonalize()[1]
    # m = Matrix(3, 3, [0, 6, 3, 1, 3, 1, -2, 2, 1])
    # m.jordan_form()  # very long
    # m.jordan_form()  #
    # diagonalizable, complex only
    # Jordan cells
    # complexity: one of eigenvalues is zero
    m = Matrix(3, 3, [0, 1, 0, -4, 4, 0, -2, 1, 2])
    # The blocks are ordered according to the value of their eigenvalues,
    # in order to make the matrix compatible with .diagonalize()
    Jmust = Matrix(3, 3, [2, 1, 0, 0, 2, 0, 0, 0, 2])
    P, J = m.jordan_form()
    assert Jmust == J
    P, Jcells = m.jordan_cells()
    # same here see 1456ff
    assert Jcells[1] == Matrix(1, 1, [2])
    assert Jcells[0] == Matrix(2, 2, [2, 1, 0, 2])
    # complexity: all of eigenvalues are equal
    m = Matrix(3, 3, [2, 6, -15, 1, 1, -5, 1, 2, -6])
    # Jmust = Matrix(3, 3, [-1, 0, 0, 0, -1, 1, 0, 0, -1])
    # same here see 1456ff
    Jmust = Matrix(3, 3, [-1, 1, 0, 0, -1, 0, 0, 0, -1])
    P, J = m.jordan_form()
    assert Jmust == J
    # complexity: two of eigenvalues are zero
    m = Matrix(3, 3, [4, -5, 2, 5, -7, 3, 6, -9, 4])
    Jmust = Matrix(3, 3, [0, 1, 0, 0, 0, 0, 0, 0, 1])
    P, J = m.jordan_form()
    assert Jmust == J
    m = Matrix(4, 4, [6, 5, -2, -3, -3, -1, 3, 3, 2, 1, -2, -3, -1, 1, 5, 5])
    Jmust = Matrix(4, 4, [2, 1, 0, 0,
                          0, 2, 0, 0,
                          0, 0, 2, 1,
                          0, 0, 0, 2]
                   )
    P, J = m.jordan_form()
    assert Jmust == J
    m = Matrix(4, 4, [6, 2, -8, -6, -3, 2, 9, 6, 2, -2, -8, -6, -1, 0, 3, 4])
    # Jmust = Matrix(4, 4, [2, 0, 0, 0, 0, 2, 1, 0, 0, 0, 2, 0, 0, 0, 0, -2])
    # same here see 1456ff
    Jmust = Matrix(4, 4, [-2, 0, 0, 0,
                          0, 2, 1, 0,
                          0, 0, 2, 0,
                          0, 0, 0, 2])
    P, J = m.jordan_form()
    assert Jmust == J
    m = Matrix(4, 4, [5, 4, 2, 1, 0, 1, -1, -1, -1, -1, 3, 0, 1, 1, -1, 2])
    assert not m.is_diagonalizable()
    Jmust = Matrix(4, 4, [1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 4, 1, 0, 0, 0, 4])
    P, J = m.jordan_form()
    assert Jmust == J
    # the following tests are new and include (some) test the cases where the old
    # algorithm failed due to the fact that the block structure can
    # *NOT* be determined  from algebraic and geometric multiplicity alone
    # This can  be seen most easily when one lets compute the J.c.f. of a matrix that
    # is in J.c.f already.
    m = Matrix(4, 4, [2, 1, 0, 0,
                      0, 2, 1, 0,
                      0, 0, 2, 0,
                      0, 0, 0, 2
                      ])
    P, J = m.jordan_form()
    assert m == J
    m = Matrix(4, 4, [2, 1, 0, 0,
                      0, 2, 0, 0,
                      0, 0, 2, 1,
                      0, 0, 0, 2
                      ])
    P, J = m.jordan_form()
    assert m == J
def test_jordan_form_complex_issue_9274():
    """Jordan form with complex eigenvalues 2 +/- 4*I (issue 9274).

    Either ordering of the two conjugate blocks is acceptable, and
    P*J*P^-1 must reconstruct A.
    """
    A = Matrix([[ 2,  4,  1, 0],
                [-4,  2,  0, 1],
                [ 0,  0,  2, 4],
                [ 0,  0, -4, 2]])
    # (dropped stray C-style trailing semicolons from the original)
    p = 2 - 4*I
    q = 2 + 4*I
    Jmust1 = Matrix([[p, 1, 0, 0],
                     [0, p, 0, 0],
                     [0, 0, q, 1],
                     [0, 0, 0, q]])
    Jmust2 = Matrix([[q, 1, 0, 0],
                     [0, q, 0, 0],
                     [0, 0, p, 1],
                     [0, 0, 0, p]])
    P, J = A.jordan_form()
    assert J == Jmust1 or J == Jmust2
    assert simplify(P*J*P.inv()) == A
def test_Matrix_berkowitz_charpoly():
    """Characteristic polynomial via the Berkowitz algorithm on a symbolic
    matrix; the result must be a PurePoly."""
    UA, K_i, K_w = symbols('UA K_i K_w')
    A = Matrix([[-K_i - UA + K_i**2/(K_i + K_w), K_i*K_w/(K_i + K_w)],
                [ K_i*K_w/(K_i + K_w), -K_w + K_w**2/(K_i + K_w)]])
    charpoly = A.berkowitz_charpoly(x)
    assert charpoly == \
        Poly(x**2 + (K_i*UA + K_w*UA + 2*K_i*K_w)/(K_i + K_w)*x +
             K_i*K_w*UA/(K_i + K_w), x, domain='ZZ(K_i,K_w,UA)')
    assert type(charpoly) is PurePoly
    A = Matrix([[1, 3], [2, 0]])
    assert A.charpoly() == A.charpoly(x) == PurePoly(x**2 - x - 6)
def test_exp():
    """Matrix exponential: both Matrix.exp() and exp(Matrix) forms."""
    m = Matrix([[3, 4], [0, -2]])
    m_exp = Matrix([[exp(3), -4*exp(-2)/5 + 4*exp(3)/5], [0, exp(-2)]])
    assert m.exp() == m_exp
    assert exp(m) == m_exp
    m = Matrix([[1, 0], [0, 1]])
    assert m.exp() == Matrix([[E, 0], [0, E]])
    assert exp(m) == Matrix([[E, 0], [0, E]])
def test_has():
    """Matrix.has() detects symbols (and the Symbol class) in entries."""
    A = Matrix(((x, y), (2, 3)))
    assert A.has(x)
    assert not A.has(z)
    assert A.has(Symbol)
    A = A.subs(x, 2)
    assert not A.has(x)
def test_errors():
    """Exercise the exception types raised by a wide range of Matrix
    operations on invalid input (shape, squareness, method names, ...)."""
    raises(ValueError, lambda: Matrix([[1, 2], [1]]))
    raises(IndexError, lambda: Matrix([[1, 2]])[1.2, 5])
    raises(IndexError, lambda: Matrix([[1, 2]])[1, 5.2])
    raises(ValueError, lambda: randMatrix(3, c=4, symmetric=True))
    raises(ValueError, lambda: Matrix([1, 2]).reshape(4, 6))
    raises(ShapeError,
           lambda: Matrix([[1, 2], [3, 4]]).copyin_matrix([1, 0], Matrix([1, 2])))
    raises(TypeError, lambda: Matrix([[1, 2], [3, 4]]).copyin_list([0,
           1], set([])))
    raises(NonSquareMatrixError, lambda: Matrix([[1, 2, 3], [2, 3, 0]]).inv())
    raises(ShapeError,
           lambda: Matrix(1, 2, [1, 2]).row_join(Matrix([[1, 2], [3, 4]])))
    raises(
        ShapeError, lambda: Matrix([1, 2]).col_join(Matrix([[1, 2], [3, 4]])))
    raises(ShapeError, lambda: Matrix([1]).row_insert(1, Matrix([[1,
           2], [3, 4]])))
    raises(ShapeError, lambda: Matrix([1]).col_insert(1, Matrix([[1,
           2], [3, 4]])))
    raises(NonSquareMatrixError, lambda: Matrix([1, 2]).trace())
    raises(TypeError, lambda: Matrix([1]).applyfunc(1))
    raises(ShapeError, lambda: Matrix([1]).LUsolve(Matrix([[1, 2], [3, 4]])))
    raises(MatrixError, lambda: Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]
           ]).QRdecomposition())
    raises(MatrixError, lambda: Matrix(1, 2, [1, 2]).QRdecomposition())
    raises(
        NonSquareMatrixError, lambda: Matrix([1, 2]).LUdecomposition_Simple())
    raises(ValueError, lambda: Matrix([[1, 2], [3, 4]]).minorEntry(4, 5))
    raises(ValueError, lambda: Matrix([[1, 2], [3, 4]]).minorMatrix(4, 5))
    raises(TypeError, lambda: Matrix([1, 2, 3]).cross(1))
    raises(TypeError, lambda: Matrix([1, 2, 3]).dot(1))
    raises(ShapeError, lambda: Matrix([1, 2, 3]).dot(Matrix([1, 2])))
    raises(ShapeError, lambda: Matrix([1, 2]).dot([]))
    raises(TypeError, lambda: Matrix([1, 2]).dot('a'))
    raises(NonSquareMatrixError, lambda: Matrix([1, 2, 3]).exp())
    raises(ShapeError, lambda: Matrix([[1, 2], [3, 4]]).normalized())
    raises(ValueError, lambda: Matrix([1, 2]).inv(method='not a method'))
    raises(NonSquareMatrixError, lambda: Matrix([1, 2]).inverse_GE())
    raises(ValueError, lambda: Matrix([[1, 2], [1, 2]]).inverse_GE())
    raises(NonSquareMatrixError, lambda: Matrix([1, 2]).inverse_ADJ())
    raises(ValueError, lambda: Matrix([[1, 2], [1, 2]]).inverse_ADJ())
    raises(NonSquareMatrixError, lambda: Matrix([1, 2]).inverse_LU())
    raises(NonSquareMatrixError, lambda: Matrix([1, 2]).is_nilpotent())
    raises(NonSquareMatrixError, lambda: Matrix([1, 2]).det())
    raises(ValueError,
           lambda: Matrix([[1, 2], [3, 4]]).det(method='Not a real method'))
    raises(NonSquareMatrixError, lambda: Matrix([1, 2]).det_bareis())
    raises(NonSquareMatrixError, lambda: Matrix([1, 2]).berkowitz())
    raises(NonSquareMatrixError, lambda: Matrix([1, 2]).berkowitz_det())
    raises(ValueError,
           lambda: hessian(Matrix([[1, 2], [3, 4]]), Matrix([[1, 2], [2, 1]])))
    raises(ValueError, lambda: hessian(Matrix([[1, 2], [3, 4]]), []))
    raises(ValueError, lambda: hessian(Symbol('x')**2, 'a'))
    raises(ValueError,
           lambda: Matrix([[5, 10, 7], [0, -1, 2], [8, 3, 4]]
           ).LUdecomposition_Simple(iszerofunc=lambda x: abs(x) <= 4))
    raises(NotImplementedError, lambda: Matrix([[1, 0], [1, 1]])**(S(1)/2))
    raises(NotImplementedError,
           lambda: Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])**(0.5))
    raises(IndexError, lambda: eye(3)[5, 2])
    raises(IndexError, lambda: eye(3)[2, 5])
    M = Matrix(((1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12), (13, 14, 15, 16)))
    raises(ValueError, lambda: M.det('method=LU_decomposition()'))
def test_len():
    """len(Matrix) counts entries; empty matrices are falsy."""
    assert len(Matrix()) == 0
    assert len(Matrix([[1, 2]])) == len(Matrix([[1], [2]])) == 2
    assert len(Matrix(0, 2, lambda i, j: 0)) == \
        len(Matrix(2, 0, lambda i, j: 0)) == 0
    assert len(Matrix([[0, 1, 2], [3, 4, 5]])) == 6
    assert Matrix([1]) == Matrix([[1]])
    assert not Matrix()
    assert Matrix() == Matrix([])
def test_integrate():
    """Elementwise integration with respect to each of two symbols."""
    A = Matrix(((1, 4, x), (y, 2, 4), (10, 5, x**2)))
    assert A.integrate(x) == \
        Matrix(((x, 4*x, x**2/2), (x*y, 2*x, 4*x), (10*x, 5*x, x**3/3)))
    assert A.integrate(y) == \
        Matrix(((y, 4*y, x*y), (y**2/2, 2*y, 4*y), (10*y, 5*y, y*x**2)))
def test_limit():
    """limit() applies elementwise; sin(x)/x -> 1 as x -> 0."""
    mat = Matrix(((1, 4, sin(x)/x), (y, 2, 4), (10, 5, x**2 + 1)))
    expected = Matrix(((1, 4, 1), (y, 2, 4), (10, 5, 1)))
    assert mat.limit(x, 0) == expected
def test_diff():
    """diff() applies elementwise with respect to the given symbol."""
    mat = Matrix(((1, 4, x), (y, 2, 4), (10, 5, x**2 + 1)))
    d_dx = Matrix(((0, 0, 1), (0, 0, 0), (0, 0, 2*x)))
    d_dy = Matrix(((0, 0, 0), (1, 0, 0), (0, 0, 0)))
    assert mat.diff(x) == d_dx
    assert mat.diff(y) == d_dy
def test_getattr():
    """Attribute access: unknown names raise AttributeError, and getattr on
    a real method behaves like a direct call."""
    A = Matrix(((1, 4, x), (y, 2, 4), (10, 5, x**2 + 1)))
    raises(AttributeError, lambda: A.nonexistantattribute)
    assert getattr(A, 'diff')(x) == Matrix(((0, 0, 1), (0, 0, 0), (0, 0, 2*x)))
def test_hessenberg():
    """is_upper_hessenberg/is_lower_hessenberg, including loss of the
    property after mutating an entry."""
    A = Matrix([[3, 4, 1], [2, 4, 5], [0, 1, 2]])
    assert A.is_upper_hessenberg
    A = A.T
    assert A.is_lower_hessenberg
    A[0, -1] = 1
    assert A.is_lower_hessenberg is False
    A = Matrix([[3, 4, 1], [2, 4, 5], [3, 1, 2]])
    assert not A.is_upper_hessenberg
def test_cholesky():
    """cholesky(): rejects non-square and non-symmetric input; L*L.T == A
    with L lower triangular on a positive-definite example."""
    raises(NonSquareMatrixError, lambda: Matrix((1, 2)).cholesky())
    raises(ValueError, lambda: Matrix(((1, 2), (3, 4))).cholesky())
    A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
    assert A.cholesky() * A.cholesky().T == A
    assert A.cholesky().is_lower
    assert A.cholesky() == Matrix([[5, 0, 0], [3, 3, 0], [-1, 1, 3]])
def test_LDLdecomposition():
    """LDLdecomposition(): input validation and L*D*L.T == A with L unit
    lower triangular and D diagonal."""
    raises(NonSquareMatrixError, lambda: Matrix((1, 2)).LDLdecomposition())
    raises(ValueError, lambda: Matrix(((1, 2), (3, 4))).LDLdecomposition())
    A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
    L, D = A.LDLdecomposition()
    assert L * D * L.T == A
    assert L.is_lower
    assert L == Matrix([[1, 0, 0], [ S(3)/5, 1, 0], [S(-1)/5, S(1)/3, 1]])
    assert D.is_diagonal()
    assert D == Matrix([[25, 0, 0], [0, 9, 0], [0, 0, 9]])
def test_cholesky_solve():
    """cholesky_solve() recovers x from b = A*x for two 3x3 systems."""
    A = Matrix([[2, 3, 5],
                [3, 6, 2],
                [8, 3, 6]])
    x = Matrix(3, 1, [3, 7, 5])
    b = A*x
    soln = A.cholesky_solve(b)
    assert soln == x
    A = Matrix([[0, -1, 2],
                [5, 10, 7],
                [8, 3, 4]])
    x = Matrix(3, 1, [-1, 2, 5])
    b = A*x
    soln = A.cholesky_solve(b)
    assert soln == x
def test_LDLsolve():
    """LDLsolve() recovers x from b = A*x for two 3x3 systems."""
    A = Matrix([[2, 3, 5],
                [3, 6, 2],
                [8, 3, 6]])
    x = Matrix(3, 1, [3, 7, 5])
    b = A*x
    soln = A.LDLsolve(b)
    assert soln == x
    A = Matrix([[0, -1, 2],
                [5, 10, 7],
                [8, 3, 4]])
    x = Matrix(3, 1, [-1, 2, 5])
    b = A*x
    soln = A.LDLsolve(b)
    assert soln == x
def test_lower_triangular_solve():
    """lower_triangular_solve(): shape/triangularity validation, and solving
    with the identity returns the right-hand side unchanged."""
    raises(NonSquareMatrixError,
           lambda: Matrix([1, 0]).lower_triangular_solve(Matrix([0, 1])))
    raises(ShapeError,
           lambda: Matrix([[1, 0], [0, 1]]).lower_triangular_solve(Matrix([1])))
    raises(ValueError,
           lambda: Matrix([[2, 1], [1, 2]]).lower_triangular_solve(
               Matrix([[1, 0], [0, 1]])))
    A = Matrix([[1, 0], [0, 1]])
    B = Matrix([[x, y], [y, x]])
    C = Matrix([[4, 8], [2, 9]])
    assert A.lower_triangular_solve(B) == B
    assert A.lower_triangular_solve(C) == C
def test_upper_triangular_solve():
    """upper_triangular_solve(): shape/triangularity validation, and solving
    with the identity returns the right-hand side unchanged."""
    raises(NonSquareMatrixError,
           lambda: Matrix([1, 0]).upper_triangular_solve(Matrix([0, 1])))
    raises(TypeError,
           lambda: Matrix([[1, 0], [0, 1]]).upper_triangular_solve(Matrix([1])))
    raises(TypeError,
           lambda: Matrix([[2, 1], [1, 2]]).upper_triangular_solve(
               Matrix([[1, 0], [0, 1]])))
    A = Matrix([[1, 0], [0, 1]])
    B = Matrix([[x, y], [y, x]])
    C = Matrix([[2, 4], [3, 8]])
    assert A.upper_triangular_solve(B) == B
    assert A.upper_triangular_solve(C) == C
def test_diagonal_solve():
    """diagonal_solve(): validation, and solving with 2*I halves the rhs."""
    raises(TypeError, lambda: Matrix([1, 1]).diagonal_solve(Matrix([1])))
    A = Matrix([[1, 0], [0, 1]])*2
    B = Matrix([[x, y], [y, x]])
    assert A.diagonal_solve(B) == B/2
def test_matrix_norm():
    """Matrix and vector norms: explicit values for several orders, plus the
    defining norm properties (zero, triangle inequality, scalar linearity)
    over a collection of matrices and vectors."""
    # Vector Tests
    # Test columns and symbols
    x = Symbol('x', real=True)
    v = Matrix([cos(x), sin(x)])
    assert trigsimp(v.norm(2)) == 1
    assert v.norm(10) == Pow(cos(x)**10 + sin(x)**10, S(1)/10)
    # Test Rows
    A = Matrix([[5, Rational(3, 2)]])
    assert A.norm() == Pow(25 + Rational(9, 4), S(1)/2)
    assert A.norm(oo) == max(A._mat)
    assert A.norm(-oo) == min(A._mat)
    # Matrix Tests
    # Intuitive test
    A = Matrix([[1, 1], [1, 1]])
    assert A.norm(2) == 2
    assert A.norm(-2) == 0
    assert A.norm('frobenius') == 2
    assert eye(10).norm(2) == eye(10).norm(-2) == 1
    # Test with Symbols and more complex entries
    A = Matrix([[3, y, y], [x, S(1)/2, -pi]])
    assert (A.norm('fro')
            == sqrt(S(37)/4 + 2*abs(y)**2 + pi**2 + x**2))
    # Check non-square
    A = Matrix([[1, 2, -3], [4, 5, Rational(13, 2)]])
    assert A.norm(2) == sqrt(S(389)/8 + sqrt(78665)/8)
    assert A.norm(-2) == S(0)
    assert A.norm('frobenius') == sqrt(389)/2
    # Test properties of matrix norms
    # http://en.wikipedia.org/wiki/Matrix_norm#Definition
    # Two matrices
    A = Matrix([[1, 2], [3, 4]])
    B = Matrix([[5, 5], [-2, 2]])
    C = Matrix([[0, -I], [I, 0]])
    D = Matrix([[1, 0], [0, -1]])
    L = [A, B, C, D]
    alpha = Symbol('alpha', real=True)
    for order in ['fro', 2, -2]:
        # Zero Check
        assert zeros(3).norm(order) == S(0)
        # Check Triangle Inequality for all Pairs of Matrices
        for X in L:
            for Y in L:
                assert simplify(X.norm(order) + Y.norm(order) >=
                                (X + Y).norm(order))
        # Scalar multiplication linearity
        for M in [A, B, C, D]:
            if order in [2, -2]:
                # Abs is causing tests to fail when Abs(alpha) is inside a Max
                # or Min. The tests produce mathematically true statements that
                # are too complex to be simplified well.
                continue
            try:
                assert ((alpha*M).norm(order) ==
                        abs(alpha) * M.norm(order))
            except NotImplementedError:
                pass  # Some Norms fail on symbolic matrices due to Max issue
    # Test Properties of Vector Norms
    # http://en.wikipedia.org/wiki/Vector_norm
    # Two column vectors
    a = Matrix([1, 1 - 1*I, -3])
    b = Matrix([S(1)/2, 1*I, 1])
    c = Matrix([-1, -1, -1])
    d = Matrix([3, 2, I])
    e = Matrix([Integer(1e2), Rational(1, 1e2), 1])
    L = [a, b, c, d, e]
    alpha = Symbol('alpha', real=True)
    for order in [1, 2, -1, -2, S.Infinity, S.NegativeInfinity, pi]:
        # Zero Check
        if order > 0:
            assert Matrix([0, 0, 0]).norm(order) == S(0)
        # Triangle inequality on all pairs
        if order >= 1:  # Triangle InEq holds only for these norms
            for v in L:
                for w in L:
                    assert simplify(v.norm(order) + w.norm(order) >=
                                    (v + w).norm(order))
        # Linear to scalar multiplication
        if order in [1, 2, -1, -2, S.Infinity, S.NegativeInfinity]:
            for vec in L:
                # BUG FIX: the original used `v` here (leftover binding from
                # the triangle-inequality loop above, always the last vector),
                # so linearity was only ever checked on one vector. Use the
                # loop variable `vec` so every vector in L is exercised.
                try:
                    assert simplify((alpha*vec).norm(order) -
                                    (abs(alpha) * vec.norm(order))) == 0
                except NotImplementedError:
                    pass  # Some Norms fail on symbolics due to Max issue
def test_singular_values():
    """singular_values() for a complex matrix, a symbolic diagonal matrix,
    and a rotation-like matrix (both values 1 after trigsimp)."""
    x = Symbol('x', real=True)
    A = Matrix([[0, 1*I], [2, 0]])
    assert A.singular_values() == [2, 1]
    A = eye(3)
    A[1, 1] = x
    A[2, 2] = 5
    vals = A.singular_values()
    assert 1 in vals and 5 in vals and abs(x) in vals
    A = Matrix([[sin(x), cos(x)], [-cos(x), sin(x)]])
    vals = [sv.trigsimp() for sv in A.singular_values()]
    assert vals == [S(1), S(1)]
def test_condition_number():
    """condition_number(): numeric, symbolic, and for rotation matrices it
    evaluates to 1 at any angle."""
    x = Symbol('x', real=True)
    A = eye(3)
    A[0, 0] = 10
    A[2, 2] = S(1)/10
    assert A.condition_number() == 100
    A[1, 1] = x
    assert A.condition_number() == Max(10, Abs(x)) / Min(S(1)/10, Abs(x))
    M = Matrix([[cos(x), sin(x)], [-sin(x), cos(x)]])
    Mc = M.condition_number()
    assert all(Float(1.).epsilon_eq(Mc.subs(x, val).evalf()) for val in
        [Rational(1, 5), Rational(1, 2), Rational(1, 10), pi/2, pi, 7*pi/4 ])
def test_equality():
    """== and != between matrices, slices, non-matrices, and across dense
    and sparse representations."""
    A = Matrix(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
    B = Matrix(((9, 8, 7), (6, 5, 4), (3, 2, 1)))
    assert A == A[:, :]
    assert not A != A[:, :]
    assert not A == B
    assert A != B
    assert A != 10
    assert not A == 10
    # A SparseMatrix can be equal to a Matrix
    C = SparseMatrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))
    D = Matrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))
    assert C == D
    assert not C != D
def test_col_join():
    """col_join() appends rows below the matrix."""
    extra_row = Matrix([[7, 7, 7]])
    expected = Matrix([[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1],
                       [7, 7, 7]])
    assert eye(3).col_join(extra_row) == expected
def test_row_insert():
    """row_insert() at every position, including negative and out-of-range
    indices, matches list.insert() semantics on the first column."""
    r4 = Matrix([[4, 4, 4]])
    for i in range(-4, 5):
        l = [1, 0, 0]
        l.insert(i, 4)
        assert flatten(eye(3).row_insert(i, r4).col(0).tolist()) == l
def test_col_insert():
    """col_insert() at every position, including negative and out-of-range
    indices, matches list.insert() semantics on the first row."""
    c4 = Matrix([4, 4, 4])
    for i in range(-4, 5):
        l = [0, 0, 0]
        l.insert(i, 4)
        assert flatten(zeros(3).col_insert(i, c4).row(0).tolist()) == l
def test_normalized():
    """normalized() scales a vector to unit length (3-4-5 triangle)."""
    unit = Matrix([Rational(3, 5), Rational(4, 5)])
    assert Matrix([3, 4]).normalized() == unit
def test_print_nonzero():
    """print_nonzero() sketches the sparsity pattern, with a default 'X'
    marker and with a custom symbol."""
    assert capture(lambda: eye(3).print_nonzero()) == \
        '[X  ]\n[ X ]\n[  X]\n'
    assert capture(lambda: eye(3).print_nonzero('.')) == \
        '[.  ]\n[ . ]\n[  .]\n'
def test_zeros_eye():
    """eye()/zeros() classmethods and the cls= keyword produce equal values
    of the requested class for every matrix class."""
    assert Matrix.eye(3) == eye(3)
    assert Matrix.zeros(3) == zeros(3)
    assert ones(3, 4) == Matrix(3, 4, [1]*12)
    i = Matrix([[1, 0], [0, 1]])
    z = Matrix([[0, 0], [0, 0]])
    for cls in classes:
        m = cls.eye(2)
        assert i == m  # but m == i will fail if m is immutable
        assert i == eye(2, cls=cls)
        assert type(m) == cls
        m = cls.zeros(2)
        assert z == m
        assert z == zeros(2, cls=cls)
        assert type(m) == cls
def test_is_zero():
    """is_zero fuzzy property: True for all-zero, False when any entry is
    known nonzero, None when undecidable (a free symbol might be zero)."""
    # NOTE(review): `== None` comparisons below rely on is_zero returning the
    # Python singletons; `is None` would be the more idiomatic spelling.
    assert Matrix().is_zero
    assert Matrix([[0, 0], [0, 0]]).is_zero
    assert zeros(3, 4).is_zero
    assert not eye(3).is_zero
    assert Matrix([[x, 0], [0, 0]]).is_zero == None
    assert SparseMatrix([[x, 0], [0, 0]]).is_zero == None
    assert ImmutableMatrix([[x, 0], [0, 0]]).is_zero == None
    assert ImmutableSparseMatrix([[x, 0], [0, 0]]).is_zero == None
    assert Matrix([[x, 1], [0, 0]]).is_zero == False
    a = Symbol('a', nonzero=True)
    assert Matrix([[a, 0], [0, 0]]).is_zero == False
def test_rotation_matrices():
    # This tests the rotation matrices by rotating about an axis and back.
    theta = pi/3
    r3_plus = rot_axis3(theta)
    r3_minus = rot_axis3(-theta)
    r2_plus = rot_axis2(theta)
    r2_minus = rot_axis2(-theta)
    r1_plus = rot_axis1(theta)
    r1_minus = rot_axis1(-theta)
    # Rotating forward then backward must be the identity.
    assert r3_minus*r3_plus*eye(3) == eye(3)
    assert r2_minus*r2_plus*eye(3) == eye(3)
    assert r1_minus*r1_plus*eye(3) == eye(3)

    # Check the correctness of the trace of the rotation matrix
    assert r1_plus.trace() == 1 + 2*cos(theta)
    assert r2_plus.trace() == 1 + 2*cos(theta)
    assert r3_plus.trace() == 1 + 2*cos(theta)

    # Check that a rotation with zero angle doesn't change anything.
    assert rot_axis1(0) == eye(3)
    assert rot_axis2(0) == eye(3)
    assert rot_axis3(0) == eye(3)
def test_DeferredVector():
    """DeferredVector indexes symbolically and sympifies to itself."""
    assert str(DeferredVector("vector")[4]) == "vector[4]"
    assert sympify(DeferredVector("d")) == DeferredVector("d")
def test_DeferredVector_not_iterable():
    """A DeferredVector must not be treated as an iterable."""
    assert not iterable(DeferredVector('X'))
def test_DeferredVector_Matrix():
    """Constructing a Matrix from a DeferredVector is rejected."""
    raises(TypeError, lambda: Matrix(DeferredVector("V")))
def test_GramSchmidt():
    """Gram-Schmidt orthogonalization for row/column vectors, with and
    without normalization."""
    R = Rational
    m1 = Matrix(1, 2, [1, 2])
    m2 = Matrix(1, 2, [2, 3])
    assert GramSchmidt([m1, m2]) == \
        [Matrix(1, 2, [1, 2]), Matrix(1, 2, [R(2)/5, R(-1)/5])]
    assert GramSchmidt([m1.T, m2.T]) == \
        [Matrix(2, 1, [1, 2]), Matrix(2, 1, [R(2)/5, R(-1)/5])]
    # from wikipedia
    assert GramSchmidt([Matrix([3, 1]), Matrix([2, 2])], True) == [
        Matrix([3*sqrt(10)/10, sqrt(10)/10]),
        Matrix([-sqrt(10)/10, 3*sqrt(10)/10])]
def test_casoratian():
    """casoratian of a linearly dependent sequence is zero (with or
    without the zero-detection shortcut)."""
    assert casoratian([1, 2, 3, 4], 1) == 0
    assert casoratian([1, 2, 3, 4], 1, zero=False) == 0
def test_zero_dimension_multiply():
    """Multiplication involving zero-dimension matrices keeps the
    mathematically correct result shape."""
    assert (Matrix()*zeros(0, 3)).shape == (0, 3)
    assert zeros(3, 0)*zeros(0, 3) == zeros(3, 3)
    assert zeros(0, 3)*zeros(3, 0) == Matrix()
def test_slice_issue_2884():
    """Regression for issue 2884: negative indices in row/column slices."""
    m = Matrix(2, 2, range(4))
    assert m[1, :] == Matrix([[2, 3]])
    assert m[-1, :] == Matrix([[2, 3]])
    assert m[:, 1] == Matrix([[1, 3]]).T
    assert m[:, -1] == Matrix([[1, 3]]).T
    # Out-of-range indices must raise, not wrap.
    raises(IndexError, lambda: m[2, :])
    raises(IndexError, lambda: m[2, 2])
def test_slice_issue_3401():
    """Regression for issue 3401: slicing zero-dimension matrices."""
    assert zeros(0, 3)[:, -1].shape == (0, 1)
    assert zeros(3, 0)[0, :] == Matrix(1, 0, [])
def test_copyin():
    """Element and slice assignment: flat index, (row, col) index, row
    slices, and copying a column in from list/Matrix/SparseMatrix."""
    s = zeros(3, 3)
    # Flat index 3 is row 1, column 0 (row-major order).
    s[3] = 1
    assert s[:, 0] == Matrix([0, 1, 0])
    assert s[3] == 1
    assert s[3: 4] == [1]
    s[1, 1] = 42
    assert s[1, 1] == 42
    assert s[1, 1:] == Matrix([[42, 0]])
    s[1, 1:] = Matrix([[5, 6]])
    assert s[1, :] == Matrix([[1, 5, 6]])
    s[1, 1:] = [[42, 43]]
    assert s[1, :] == Matrix([[1, 42, 43]])
    s[0, 0] = 17
    assert s[:, :1] == Matrix([17, 1, 0])
    # A whole column can be copied in from several container types.
    s[0, 0] = [1, 1, 1]
    assert s[:, 0] == Matrix([1, 1, 1])
    s[0, 0] = Matrix([1, 1, 1])
    assert s[:, 0] == Matrix([1, 1, 1])
    s[0, 0] = SparseMatrix([1, 1, 1])
    assert s[:, 0] == Matrix([1, 1, 1])
def test_invertible_check():
    """Singularity detection in inv(): pivot count alone is not a
    sufficient invertibility test without simplification."""
    # sometimes a singular matrix will have a pivot vector shorter than
    # the number of rows in a matrix...
    assert Matrix([[1, 2], [1, 2]]).rref() == (Matrix([[1, 2], [0, 0]]), [0])
    raises(ValueError, lambda: Matrix([[1, 2], [1, 2]]).inv())
    # ... but sometimes it won't, so that is an insufficient test of
    # whether something is invertible.
    m = Matrix([
        [-1, -1,  0],
        [ x,  1,  1],
        [ 1,  x, -1],
    ])
    assert len(m.rref()[1]) == m.rows
    # in addition, unless simplify=True in the call to rref, the identity
    # matrix will be returned even though m is not invertible
    assert m.rref()[0] == eye(3)
    assert m.rref(simplify=signsimp)[0] != eye(3)
    raises(ValueError, lambda: m.inv(method="ADJ"))
    raises(ValueError, lambda: m.inv(method="GE"))
    raises(ValueError, lambda: m.inv(method="LU"))
@XFAIL
def test_issue_3959():
    """Known failure (issue 3959): substituting a Matrix for a symbol in a
    product should distribute over the remaining factor."""
    x, y = symbols('x, y')
    e = x*y
    assert e.subs(x, Matrix([3, 5, 3])) == Matrix([3, 5, 3])*y
def test_issue_5964():
    """Regression for issue 5964: str() of a Matrix uses the Matrix([...]) form."""
    assert str(Matrix([[1, 2], [3, 4]])) == 'Matrix([[1, 2], [3, 4]])'
def test_issue_7604():
    """Regression for issue 7604: sstr pretty-prints a Matrix row-per-line."""
    x, y = symbols(u("x y"))
    assert sstr(Matrix([[x, 2*y], [y**2, x + 3]])) == \
        'Matrix([\n[   x,   2*y],\n[y**2, x + 3]])'
def test_is_Identity():
    """is_Identity across dense, immutable, and sparse representations."""
    assert eye(3).is_Identity
    assert eye(3).as_immutable().is_Identity
    assert not zeros(3).is_Identity
    assert not ones(3).is_Identity
    # issue 6242
    assert not Matrix([[1, 0, 0]]).is_Identity
    # issue 8854: sparse storage must be inspected, not just stored entries
    assert SparseMatrix(3,3, {(0,0):1, (1,1):1, (2,2):1}).is_Identity
    assert not SparseMatrix(2,3, range(6)).is_Identity
    assert not SparseMatrix(3,3, {(0,0):1, (1,1):1}).is_Identity
    assert not SparseMatrix(3,3, {(0,0):1, (1,1):1, (2,2):1, (0,1):2, (0,2):3}).is_Identity
def test_dot():
    """dot() accepts both a Matrix and a plain list as its argument."""
    assert ones(1, 3).dot(ones(3, 1)) == 3
    assert ones(1, 3).dot([1, 1, 1]) == 3
def test_dual():
    """dual() of the electromagnetic field-strength tensor swaps E and B
    components; applying it twice negates the tensor."""
    B_x, B_y, B_z, E_x, E_y, E_z = symbols(
        'B_x B_y B_z E_x E_y E_z', real=True)
    F = Matrix((
        (   0, E_x, E_y, E_z),
        (-E_x, 0, B_z, -B_y),
        (-E_y, -B_z, 0, B_x),
        (-E_z, B_y, -B_x, 0)
    ))
    Fd = Matrix((
        (  0, -B_x, -B_y, -B_z),
        (B_x, 0, E_z, -E_y),
        (B_y, -E_z, 0, E_x),
        (B_z, E_y, -E_x, 0)
    ))
    assert F.dual().equals(Fd)
    assert eye(3).dual().equals(zeros(3))
    assert F.dual().dual().equals(-F)
def test_anti_symmetric():
    """is_anti_symmetric, with and without simplification of entries."""
    assert Matrix([1, 2]).is_anti_symmetric() is False
    m = Matrix(3, 3, [0, x**2 + 2*x + 1, y, -(x + 1)**2, 0, x*y, -y, -x*y, 0])
    assert m.is_anti_symmetric() is True
    # Without simplification the expanded/unexpanded forms do not match.
    assert m.is_anti_symmetric(simplify=False) is False
    assert m.is_anti_symmetric(simplify=lambda x: x) is False

    # tweak to fail
    m[2, 1] = -m[2, 1]
    assert m.is_anti_symmetric() is False
    # untweak
    m[2, 1] = -m[2, 1]

    m = m.expand()
    assert m.is_anti_symmetric(simplify=False) is True
    m[0, 0] = 1
    assert m.is_anti_symmetric() is False
def test_normalize_sort_diogonalization():
    # NOTE(review): function name has a typo ("diogonalization"); kept
    # because the discovered test name is effectively public.
    """diagonalize(normalize=True) yields an orthogonal eigenvector matrix."""
    A = Matrix(((1, 2), (2, 1)))
    P, Q = A.diagonalize(normalize=True)
    assert P*P.T == P.T*P == eye(P.cols)
    P, Q = A.diagonalize(normalize=True, sort=True)
    assert P*P.T == P.T*P == eye(P.cols)
    assert P*Q*P.inv() == A
def test_issue_5321():
    """Regression for issue 5321: mixing rows with empty matrices raises."""
    raises(ValueError, lambda: Matrix([[1, 2, 3], Matrix(0, 1, [])]))
def test_issue_5320():
    """Regression for issue 5320: hstack/vstack classmethods, including on
    SparseMatrix."""
    assert Matrix.hstack(eye(2), 2*eye(2)) == Matrix([
        [1, 0, 2, 0],
        [0, 1, 0, 2]
    ])
    assert Matrix.vstack(eye(2), 2*eye(2)) == Matrix([
        [1, 0],
        [0, 1],
        [2, 0],
        [0, 2]
    ])
    cls = SparseMatrix
    assert cls.hstack(cls(eye(2)), cls(2*eye(2))) == Matrix([
        [1, 0, 2, 0],
        [0, 1, 0, 2]
    ])
def test_cross():
    """cross() works for any row/column orientation combination of the two
    operands and preserves the class and orientation of the receiver."""
    a = [1, 2, 3]
    b = [3, 4, 5]
    col = Matrix([-2, 4, -2])
    row = col.T

    def test(M, ans):
        assert ans == M
        assert type(M) == cls
    for cls in classes:
        A = cls(a)
        B = cls(b)
        test(A.cross(B), col)
        test(A.cross(B.T), col)
        test(A.T.cross(B.T), row)
        test(A.T.cross(B), row)
    raises(ShapeError, lambda:
        Matrix(1, 2, [1, 1]).cross(Matrix(1, 2, [1, 1])))
def test_hash():
    """Immutable matrix classes hash consistently; mutable ones are
    unhashable."""
    # The last two entries of `classes` are the immutable variants.
    for cls in classes[-2:]:
        s = set([cls.eye(1), cls.eye(1)])
        assert len(s) == 1 and s.pop() == cls.eye(1)
    # issue 3979
    # NOTE(review): collections.Hashable was moved to collections.abc and
    # removed from the collections namespace in Python 3.10.
    for cls in classes[:2]:
        assert not isinstance(cls.eye(1), collections.Hashable)
@XFAIL
def test_issue_3979():
    """Known failure (issue 3979): hashing a mutable matrix should raise."""
    # when this passes, delete this and change the [1:2]
    # to [:2] in the test_hash above for issue 3979
    cls = classes[0]
    raises(AttributeError, lambda: hash(cls.eye(1)))
def test_adjoint():
    """adjoint() is the conjugate transpose for every matrix class."""
    dat = [[0, I], [1, 0]]
    ans = Matrix([[0, 1], [-I, 0]])
    for cls in classes:
        assert ans == cls(dat).adjoint()
def test_simplify_immutable():
    """simplify() works entrywise on an ImmutableMatrix."""
    from sympy import simplify, sin, cos
    assert simplify(ImmutableMatrix([[sin(x)**2 + cos(x)**2]])) == \
                    ImmutableMatrix([[1]])
def test_rank():
    """rank() of full-rank, rank-deficient, and zero matrices."""
    from sympy.abc import x
    m = Matrix([[1, 2], [x, 1 - 1/x]])
    assert m.rank() == 2
    n = Matrix(3, 3, range(1, 10))
    assert n.rank() == 2
    p = zeros(3)
    assert p.rank() == 0
def test_replace():
    """replace() substitutes one function head for another entrywise."""
    from sympy import symbols, Function, Matrix
    F, G = symbols('F, G', cls=Function)
    K = Matrix(2, 2, lambda i, j: G(i+j))
    M = Matrix(2, 2, lambda i, j: F(i+j))
    N = M.replace(F, G)
    assert N == K
def test_replace_map():
    """replace(..., True) returns entries paired with their replacement maps."""
    from sympy import symbols, Function, Matrix
    F, G = symbols('F, G', cls=Function)
    K = Matrix(2, 2, [(G(0), {F(0): G(0)}), (G(1), {F(1): G(1)}), (G(1), {F(1)\
    : G(1)}), (G(2), {F(2): G(2)})])
    M = Matrix(2, 2, lambda i, j: F(i+j))
    N = M.replace(F, G, True)
    assert N == K
def test_atoms():
    """atoms() collects all leaf subexpressions, optionally filtered by type."""
    m = Matrix([[1, 2], [x, 1 - 1/x]])
    # Set literals instead of set([...]): same contents, idiomatic form.
    assert m.atoms() == {S(1), S(2), S(-1), x}
    assert m.atoms(Symbol) == {x}
@slow
def test_pinv():
    """Moore-Penrose pseudoinverse: equals the inverse when one exists and
    satisfies the four Penrose conditions otherwise."""
    # Pseudoinverse of an invertible matrix is the inverse.
    A1 = Matrix([[a, b], [c, d]])
    assert simplify(A1.pinv()) == simplify(A1.inv())
    # Test the four properties of the pseudoinverse for various matrices.
    As = [Matrix([[13, 104], [2212, 3], [-3, 5]]),
          Matrix([[1, 7, 9], [11, 17, 19]]),
          Matrix([a, b])]
    for A in As:
        A_pinv = A.pinv()
        AAp = A * A_pinv
        ApA = A_pinv * A
        assert simplify(AAp * A) == A
        assert simplify(ApA * A_pinv) == A_pinv
        assert AAp.H == AAp
        assert ApA.H == ApA
def test_pinv_solve():
    """pinv_solve on fully determined, underdetermined, and overdetermined
    systems; the latter two yield parametrized / least-squares results."""
    # Fully determined system (unique result, identical to other solvers).
    A = Matrix([[1, 5], [7, 9]])
    B = Matrix([12, 13])
    assert A.pinv_solve(B) == A.cholesky_solve(B)
    assert A.pinv_solve(B) == A.LDLsolve(B)
    assert A.pinv_solve(B) == Matrix([sympify('-43/26'), sympify('71/26')])
    assert A * A.pinv() * B == B
    # Fully determined, with two-dimensional B matrix.
    B = Matrix([[12, 13, 14], [15, 16, 17]])
    assert A.pinv_solve(B) == A.cholesky_solve(B)
    assert A.pinv_solve(B) == A.LDLsolve(B)
    assert A.pinv_solve(B) == Matrix([[-33, -37, -41], [69, 75, 81]]) / 26
    assert A * A.pinv() * B == B
    # Underdetermined system (infinite results).
    A = Matrix([[1, 0, 1], [0, 1, 1]])
    B = Matrix([5, 7])
    solution = A.pinv_solve(B)
    w = {}
    for s in solution.atoms(Symbol):
        # Extract dummy symbols used in the solution.
        w[s.name] = s
    assert solution == Matrix([[w['w0_0']/3 + w['w1_0']/3 - w['w2_0']/3 + 1],
                               [w['w0_0']/3 + w['w1_0']/3 - w['w2_0']/3 + 3],
                               [-w['w0_0']/3 - w['w1_0']/3 + w['w2_0']/3 + 4]])
    assert A * A.pinv() * B == B
    # Overdetermined system (least squares results).
    A = Matrix([[1, 0], [0, 0], [0, 1]])
    B = Matrix([3, 2, 1])
    assert A.pinv_solve(B) == Matrix([3, 1])
    # Proof the solution is not exact.
    assert A * A.pinv() * B != B
@XFAIL
def test_pinv_rank_deficient():
    """Known failure: pseudoinverse properties and pinv_solve for
    rank-deficient matrices."""
    # Test the four properties of the pseudoinverse for various matrices.
    As = [Matrix([[1, 1, 1], [2, 2, 2]]),
          Matrix([[1, 0], [0, 0]])]
    for A in As:
        A_pinv = A.pinv()
        AAp = A * A_pinv
        ApA = A_pinv * A
        assert simplify(AAp * A) == A
        assert simplify(ApA * A_pinv) == A_pinv
        assert AAp.H == AAp
        assert ApA.H == ApA
    # Test solving with rank-deficient matrices.
    A = Matrix([[1, 0], [0, 0]])
    # Exact, non-unique solution.
    B = Matrix([3, 0])
    solution = A.pinv_solve(B)
    w1 = solution.atoms(Symbol).pop()
    assert w1.name == 'w1_0'
    assert solution == Matrix([3, w1])
    assert A * A.pinv() * B == B
    # Least squares, non-unique solution.
    B = Matrix([3, 1])
    solution = A.pinv_solve(B)
    w1 = solution.atoms(Symbol).pop()
    assert w1.name == 'w1_0'
    assert solution == Matrix([3, w1])
    assert A * A.pinv() * B != B
def test_gauss_jordan_solve():
    """gauss_jordan_solve across every shape/rank combination: unique,
    parametrized (tau* free symbols), and inconsistent (ValueError)."""
    # Square, full rank, unique solution
    A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
    b = Matrix([3, 6, 9])
    sol, params = A.gauss_jordan_solve(b)
    assert sol == Matrix([[-1], [2], [0]])
    assert params == Matrix(0, 1, [])

    # Square, reduced rank, parametrized solution
    A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    b = Matrix([3, 6, 9])
    sol, params, freevar = A.gauss_jordan_solve(b, freevar=True)
    w = {}
    for s in sol.atoms(Symbol):
        # Extract dummy symbols used in the solution.
        w[s.name] = s
    assert sol == Matrix([[w['tau0'] - 1], [-2*w['tau0'] + 2], [w['tau0']]])
    assert params == Matrix([[w['tau0']]])
    assert freevar == [2]

    # Square, reduced rank, parametrized solution
    A = Matrix([[1, 2, 3], [2, 4, 6], [3, 6, 9]])
    b = Matrix([0, 0, 0])
    sol, params = A.gauss_jordan_solve(b)
    w = {}
    for s in sol.atoms(Symbol):
        w[s.name] = s
    assert sol == Matrix([[-2*w['tau0'] - 3*w['tau1']],
                          [w['tau0']], [w['tau1']]])
    assert params == Matrix([[w['tau0']], [w['tau1']]])

    # Square, reduced rank, parametrized solution
    A = Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
    b = Matrix([0, 0, 0])
    sol, params = A.gauss_jordan_solve(b)
    w = {}
    for s in sol.atoms(Symbol):
        w[s.name] = s
    assert sol == Matrix([[w['tau0']], [w['tau1']], [w['tau2']]])
    assert params == Matrix([[w['tau0']], [w['tau1']], [w['tau2']]])

    # Square, reduced rank, no solution
    A = Matrix([[1, 2, 3], [2, 4, 6], [3, 6, 9]])
    b = Matrix([0, 0, 1])
    raises(ValueError, lambda: A.gauss_jordan_solve(b))

    # Rectangular, tall, full rank, unique solution
    A = Matrix([[1, 5, 3], [2, 1, 6], [1, 7, 9], [1, 4, 3]])
    b = Matrix([0, 0, 1, 0])
    sol, params = A.gauss_jordan_solve(b)
    assert sol == Matrix([[-S(1)/2], [0], [S(1)/6]])
    assert params == Matrix(0, 1, [])

    # Rectangular, tall, full rank, no solution
    A = Matrix([[1, 5, 3], [2, 1, 6], [1, 7, 9], [1, 4, 3]])
    b = Matrix([0, 0, 0, 1])
    raises(ValueError, lambda: A.gauss_jordan_solve(b))

    # Rectangular, tall, reduced rank, parametrized solution
    A = Matrix([[1, 5, 3], [2, 10, 6], [3, 15, 9], [1, 4, 3]])
    b = Matrix([0, 0, 0, 1])
    sol, params = A.gauss_jordan_solve(b)
    w = {}
    for s in sol.atoms(Symbol):
        w[s.name] = s
    assert sol == Matrix([[-3*w['tau0'] + 5], [-1], [w['tau0']]])
    assert params == Matrix([[w['tau0']]])

    # Rectangular, tall, reduced rank, no solution
    A = Matrix([[1, 5, 3], [2, 10, 6], [3, 15, 9], [1, 4, 3]])
    b = Matrix([0, 0, 1, 1])
    raises(ValueError, lambda: A.gauss_jordan_solve(b))

    # Rectangular, wide, full rank, parametrized solution
    A = Matrix([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 1, 12]])
    b = Matrix([1, 1, 1])
    sol, params = A.gauss_jordan_solve(b)
    w = {}
    for s in sol.atoms(Symbol):
        w[s.name] = s
    assert sol == Matrix([[2*w['tau0'] - 1], [-3*w['tau0'] + 1], [0],
                         [w['tau0']]])
    assert params == Matrix([[w['tau0']]])

    # Rectangular, wide, reduced rank, parametrized solution
    A = Matrix([[1, 2, 3, 4], [5, 6, 7, 8], [2, 4, 6, 8]])
    b = Matrix([0, 1, 0])
    sol, params = A.gauss_jordan_solve(b)
    w = {}
    for s in sol.atoms(Symbol):
        w[s.name] = s
    assert sol == Matrix([[w['tau0'] + 2*w['tau1'] + 1/S(2)],
                          [-2*w['tau0'] - 3*w['tau1'] - 1/S(4)],
                          [w['tau0']], [w['tau1']]])
    assert params == Matrix([[w['tau0']], [w['tau1']]])

    # Rectangular, wide, reduced rank, no solution
    A = Matrix([[1, 2, 3, 4], [5, 6, 7, 8], [2, 4, 6, 8]])
    b = Matrix([1, 1, 1])
    raises(ValueError, lambda: A.gauss_jordan_solve(b))
def test_issue_7201():
    """Regression for issue 7201: addition of zero-dimension matrices."""
    assert ones(0, 1) + ones(0, 1) == Matrix(0, 1, [])
    assert ones(1, 0) + ones(1, 0) == Matrix(1, 0, [])
def test_free_symbols():
    """free_symbols collects the symbols of the entries for every class."""
    for M in ImmutableMatrix, ImmutableSparseMatrix, Matrix, SparseMatrix:
        # Set literal instead of set([...]): same contents, idiomatic form.
        assert M([[x], [0]]).free_symbols == {x}
def test_from_ndarray():
    """See issue 7465: Matrix construction from 1-D and 2-D numpy arrays;
    3-D arrays are rejected."""
    try:
        from numpy import array
    except ImportError:
        skip('NumPy must be available to test creating matrices from ndarrays')

    assert Matrix(array([1, 2, 3])) == Matrix([1, 2, 3])
    assert Matrix(array([[1, 2, 3]])) == Matrix([[1, 2, 3]])
    assert Matrix(array([[1, 2, 3], [4, 5, 6]])) == \
        Matrix([[1, 2, 3], [4, 5, 6]])
    assert Matrix(array([x, y, z])) == Matrix([x, y, z])
    raises(NotImplementedError, lambda: Matrix(array([[
        [1, 2], [3, 4]], [[5, 6], [7, 8]]])))
def test_hermitian():
    """is_hermitian is True/False/None depending on whether the matrix
    provably equals its conjugate transpose."""
    a = Matrix([[1, I], [-I, 1]])
    assert a.is_hermitian
    a[0, 0] = 2*I
    assert a.is_hermitian is False
    a[0, 0] = x
    # Symbolic diagonal: cannot be decided, so None.
    assert a.is_hermitian is None
    a[0, 1] = a[1, 0]*I
    assert a.is_hermitian is False
def test_doit():
    """doit() evaluates unevaluated entries (here an unevaluated Add)."""
    a = Matrix([[Add(x,x, evaluate=False)]])
    assert a[0] != 2*x
    assert a.doit() == Matrix([[2*x]])
def test_issue_9457_9467():
    """Regression for issues 9457/9467: row_del/col_del accept negative
    indices and raise IndexError when out of range."""
    # for row_del(index)
    M = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
    M.row_del(1)
    assert M == Matrix([[1, 2, 3], [3, 4, 5]])
    N = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
    N.row_del(-2)
    assert N == Matrix([[1, 2, 3], [3, 4, 5]])
    P = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
    raises(IndexError, lambda: P.row_del(10))
    Q = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
    raises(IndexError, lambda: Q.row_del(-10))

    # for col_del(index)
    M = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
    M.col_del(1)
    assert M == Matrix([[1, 3], [2, 4], [3, 5]])
    N = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
    N.col_del(-2)
    assert N == Matrix([[1, 3], [2, 4], [3, 5]])
    P = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
    raises(IndexError, lambda: P.col_del(10))
    Q = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
    raises(IndexError, lambda: Q.col_del(-10))
def test_issue_9422():
    """Regression for issue 9422: scalar multiplication respects
    (non-)commutativity of the scalar symbols."""
    x, y = symbols('x y', commutative=False)
    a, b = symbols('a b')
    M = eye(2)
    M1 = Matrix(2, 2, [x, y, y, z])
    assert y*x*M != x*y*M
    assert b*a*M == a*b*M
    assert x*M1 != M1*x
    assert a*M1 == M1*a
    assert y*x*M == Matrix([[y*x, 0], [0, y*x]])
| grevutiu-gabriel/sympy | sympy/matrices/tests/test_matrices.py | Python | bsd-3-clause | 80,543 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------
from jasily.subclasses import BaseClass
def test_subclass():
    """A BaseClass subclass with no children reports only itself."""
    class A(BaseClass):
        pass
    assert A.subclasses() == (A, )
def test_subclass_subsubclass():
    """subclasses() includes the receiver and its descendants, but a
    child does not report its parent."""
    class A(BaseClass):
        pass
    class B(A):
        pass
    assert A.subclasses() == (A, B)
    assert B.subclasses() == (B, )
| Jasily/jasily-python | tests/test_subclasses.py | Python | mit | 421 |
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Worker tasks for instance hosting & management
"""
# Imports #####################################################################
from huey.djhuey import crontab, periodic_task, task
from django.conf import settings
from django.template.defaultfilters import truncatewords
from instance.github import get_username_list_from_team, get_pr_list_from_username
from instance.models.instance import OpenEdXInstance
# Logging #####################################################################
import logging
logger = logging.getLogger(__name__)
# Tasks #######################################################################
@task()
def provision_instance(instance_pk):
    """
    Run provisioning on an existing instance

    :param instance_pk: primary key of the `OpenEdXInstance` to provision
    """
    # Fixed log-message typo: "Retreiving" -> "Retrieving".
    logger.info('Retrieving instance: pk=%s', instance_pk)
    instance = OpenEdXInstance.objects.get(pk=instance_pk)
    logger.info('Running provisioning on %s', instance)
    instance.provision()
@periodic_task(crontab(minute='*/1'))
def watch_pr():
    """
    Automatically create/update sandboxes for PRs opened by members of the watched
    organization on the watched repository

    Runs every minute. For each open PR of a watched team member, it
    upserts an `OpenEdXInstance` keyed on (sub_domain, fork, branch) and
    triggers provisioning only for newly-created instances.
    """
    team_username_list = get_username_list_from_team(settings.WATCH_ORGANIZATION)
    for username in team_username_list:
        for pr in get_pr_list_from_username(username, settings.WATCH_FORK):
            # One sandbox sub-domain per PR number, e.g. "pr123.sandbox".
            pr_sub_domain = 'pr{number}.sandbox'.format(number=pr.number)
            instance, created = OpenEdXInstance.objects.get_or_create(
                sub_domain=pr_sub_domain,
                fork_name=pr.fork_name,
                branch_name=pr.branch_name,
            )
            # Refresh mutable metadata on every run, even for existing rows.
            truncated_title = truncatewords(pr.title, 4)
            instance.name = 'PR#{pr.number}: {truncated_title} ({pr.username}) - {i.reference_name}'\
                            .format(pr=pr, i=instance, truncated_title=truncated_title)
            instance.github_pr_number = pr.number
            instance.ansible_extra_settings = pr.extra_settings
            instance.save()

            if created:
                logger.info('New PR found, creating sandbox: %s', pr)
                provision_instance(instance.pk)
| brousch/opencraft | instance/tasks.py | Python | agpl-3.0 | 2,970 |
from collections import defaultdict
from tech.models import *
import logging
# load all the current ParamProperty and ParamValues in to memory
# the property_vals dict holds all the possible values of a given property.
# This allows us to do a fast look up to see if a given proprety name already has the same value ingested
# Cached ParamProperties, values, products and categories
property_vals = defaultdict(list)
__param_properties = {}
__ROOT = {"name": "root", "children": []}
category_hierarchy = __ROOT
cat_rev_lookup = {"root": __ROOT}
__cat_id_mapping = {}
__cat_prop_mapping = defaultdict(list)
__products = {}
__LOGGER = logging.getLogger("h2h")
loaded = False
def load():
    """
    attempts to create a cache of the ParamProperty data when the system loads
    Currently is invoked from multiple points of API with the global loaded ensuring that it
    is done once in the app lifetime

    Populates, in order: the category tree (`cat_rev_lookup` /
    `category_hierarchy`), the ParamProperty caches (`__param_properties`,
    `__cat_prop_mapping`, `property_vals`), and the product cache
    (`__products`).

    NOTE(review): not thread-safe — two concurrent first calls could both
    see `loaded == False` and load twice.
    :return:
    """
    # TODO use django's app load hooks to load this cache
    global loaded, __products
    if not loaded:
        print("pre-loading data")
        for cat in Category.objects.all():
            cat_name = cat.category_name
            current = cat_rev_lookup.get(cat_name, None)
            parent = None
            if cat.parent:
                cat_parent = cat.parent.category_name
                parent = cat_rev_lookup.get(cat_parent, None)
            if not current:
                # we havent visited this guy ever
                current = {"name": cat_name, "children": [], "_id": cat.id}
                cat_rev_lookup[cat_name] = current
                __cat_id_mapping[cat.id] = cat_name
            if parent:
                # stick current as is into the parent. This is the cache load phase
                parent['children'].append(current)
            else:
                # this is a root node
                cat_rev_lookup["root"]["children"].append(current)

        for param_property in ParamProperty.objects.all():
            param_name = param_property.param_name
            __param_properties[param_name] = param_property
            cat_id = param_property.category_id
            __cat_prop_mapping[cat_id].append(param_property)
            for vals in param_property.paramvalue_set.all():
                property_vals[param_name].append(vals.param_value)

        __products = {item.name: item for item in Item.objects.all()}
        loaded = True
def add_value_to_property(value, param_property):
    """
    Add the new value to the ParamProperty cache.

    Persists a new ParamValue row and appends to the in-memory cache only
    when the value has not been seen for this property before.

    :param value: the property value to record
    :param param_property: the ParamProperty the value belongs to
    :return: None
    """
    # `not in` instead of `not value in`: same semantics, idiomatic form.
    if value not in property_vals[param_property.param_name]:
        ParamValue(param_property=param_property, param_value=value).save()
        property_vals[param_property.param_name].append(value)
def property_has_value(property_name, value):
    """Return True if *value* is already cached for *property_name*.

    NOTE(review): indexing the defaultdict creates an empty entry for an
    unknown property name as a side effect.
    """
    return value in property_vals[property_name]
def __get_best_type_for_property(value):
    """
    Given a value, attempt to identify the type of data. This is due to the fact that all
    data is stored as "string" in the DB

    :param value: raw property value; normally a string, but ingest feeds
        values straight from parsed input, so booleans/numbers may appear
    :return: one of "BOOL", "INT" or "STRING"
    :raises RuntimeError: if the value cannot be classified
    """
    # Check real booleans first, then classify on the string form so that
    # non-string inputs (e.g. ints parsed from JSON) no longer crash on
    # .lower()/.isdigit() with an AttributeError.
    if isinstance(value, bool):
        return "BOOL"
    uval = str(value)
    if uval.lower() in ["y", "n", "yes", "no", "true", "false"]:
        return "BOOL"
    elif uval.isdigit():
        return "INT"
    elif isinstance(value, str):
        return "STRING"
    else:
        raise RuntimeError("cannot determine property type for %r" % (value,))
def create_property_for_value(property_name, value, category):
    """
    Create a new __property as we have never seen this before

    The stored type is inferred from the sample *value*.
    :param property_name: name of the new property
    :param value: sample value used to infer the property's type
    :param category: Category the property belongs to
    :return: the newly created ParamProperty
    """
    __property_type = __get_best_type_for_property(value)
    return create_property(property_name, __property_type, category)
def create_property(property_name, type, category):
    """Persist a new ParamProperty and register it in the caches.

    NOTE(review): parameter `type` shadows the builtin; kept because the
    keyword name is part of the public signature.
    """
    __property = ParamProperty(param_type=type, param_name=property_name, category=category)
    __property.save()
    __param_properties[property_name] = __property
    __cat_prop_mapping[category.id].append(__property)
    return __property
def find_item_param(item, param_name):
    """
    Find an item_param, given the param_name in this item.

    :param item: Item whose parameters are scanned
    :param param_name: parameter name to look for
    :return: the matching ItemParam, or None if the item has no such parameter
    """
    for ip in item.itemparam_set.all():
        if ip.param_name == param_name:
            return ip
def create_category(category_name, parent_name=None):
    """Create (or re-parent) a category in the DB and the in-memory tree.

    :param category_name: name of the category to create or update
    :param parent_name: optional name of an existing parent category
    :return: the cached category dict ({"name", "children", "_id"})
    """
    cat = cat_rev_lookup.get(category_name, None)
    parent = None
    if parent_name:
        parent = cat_rev_lookup.get(parent_name, None)
    if cat:
        # we already have one, and this may be a addition/modification of the parent
        # this means we a) have to look up who owned this and move the child b) add this as a child to new parent
        # for now, we will just stick it to the new parent and this becomes a FIXME
        # (the category can end up under two parents in the cached tree)
        if parent:
            parent['children'].append(cat)
    else:
        # we have never seen this fellow. lets create a new one
        _cat_model = Category.objects.update_or_create(category_name=category_name)[0]
        cat = cat_rev_lookup[category_name] = {"name": category_name, "children": [], "_id": _cat_model.id}
        __cat_id_mapping[_cat_model.id] = category_name
        if parent:
            parent["children"].append(cat)
        else:
            # no parent, this is a root
            cat_rev_lookup.get("root").get("children").append(cat)
    return cat
def get_properties_for_category(cat_id):
    """Return the properties of a category and all of its ancestors.

    :param cat_id: primary key of the category
    :return: list of {"property_name", "property_type"} dicts
    """
    __cat = Category.objects.get(id=cat_id)
    hierarchy = __get_cat_hierarchy(__cat, cat_id)
    answer = []
    for __cat_id in hierarchy:
        for param_prop in __cat_prop_mapping[int(__cat_id)]:
            answer.append({ "property_name" : param_prop.param_name , "property_type" : param_prop.param_type })
    return answer
def __get_cat_hierarchy(__cat, cat_id):
    """Walk the parent chain of *__cat* and return the id list, starting
    with *cat_id* itself and ending at the root (one query per level)."""
    current = __cat.parent_id
    hierarchy = [cat_id]
    while (current):
        parent = Category.objects.get(id=current)
        hierarchy.append(parent.id)
        current = parent.parent_id
    return hierarchy
def get_param_properties(cat_id=None):
    """Return cached ParamProperty objects keyed by name.

    With *cat_id*, only the properties of that category and its ancestors
    are returned; without it, the whole cache.
    """
    if cat_id:
        hierarchy = __get_cat_hierarchy(Category.objects.get(id=cat_id), cat_id)
        answer = {}
        for __cat_id in hierarchy:
            if __cat_id in __cat_prop_mapping:
                for param_prop in __cat_prop_mapping.get(__cat_id):
                    answer[param_prop.param_name] = param_prop
        return answer
    return __param_properties
def ingest_bulk(data):
    """Ingest a list of product dicts ({"category", "name", "parameters"}),
    creating categories, items, properties and values as needed.

    NOTE(review): the try wraps the whole loop, so one bad record aborts
    the remaining records, and the exception is only logged, not re-raised.
    """
    try:
        for d in data:
            category_name = d['category']
            category = Category.objects.get_or_create(category_name=category_name)[0]
            name = d['name']
            item = Item.objects.get_or_create(name=name, category=category)[0]
            for prop in d['parameters']:
                param_name = prop['param_name']
                param_value = prop['param_value']
                # Empty/None values are skipped entirely.
                if param_value:
                    if not param_name in get_param_properties(category.id):
                        param_property = create_property_for_value(param_name, param_value, category)
                    else :
                        param_property = get_param_properties()[param_name]
                    add_value_to_property(param_value, param_property)
                    # Upsert the per-item parameter row.
                    ip = find_item_param(item, param_name)
                    if not ip:
                        ip = ItemParam(param_name=param_name, param_property=param_property, item=item)
                    ip.param_value = param_value
                    if param_property.param_type == "INT" :
                        ip.param_value_as_int = int(param_value)
                    ip.save()
            __products[name] = item
    except Exception as e:
        __LOGGER.error(e)
def products():
    """Return the cached product dict, keyed by item name."""
    return __products
| conlini/h2h | tech/repo.py | Python | mit | 7,754 |
"""
WSGI config for arda_db project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""

import os
# setdefault lets a deployment override the settings module via the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "arda_db.settings")

from django.core.wsgi import get_wsgi_application
# The WSGI server imports this module and calls `application` per request.
application = get_wsgi_application()
| rwspicer/ARDA | arda_db/arda_db/wsgi.py | Python | mit | 389 |
# Read the bill components from stdin: base meal cost, then tip and tax
# percentages (whole-number percents).
mealCost = float(input())
tipPercent = int(input())
taxPercent = int(input())

# Tip and tax are both computed on the base meal cost only.
tip0 = mealCost * (tipPercent / 100.0)
tax0 = mealCost * (taxPercent / 100.0)
totalCost = mealCost + tip0 + tax0

# round() gives the total to the nearest dollar, as the problem requires.
print("The total meal cost is {} dollars.".format(round(totalCost)))
| reza-arjmandi/rpi-course | python practice/day2/solution.py | Python | mit | 348 |
# encoding=utf-8
__author__ = 'wangchao'
# jieba segmentation demo (Python 2 source: note the bare print below).
# python2: pip install jieba
# python3: pip3 install jieba3k
import jieba

seg_list = jieba.cut("我来到北京清华大学", cut_all=True)
print("Full Mode:", "/ ".join(seg_list))  # full mode: emit every possible word

seg_list = jieba.cut("我来到北京清华大学", cut_all=False)
print("Default Mode:", "/ ".join(seg_list))  # accurate mode: best single segmentation

seg_list = jieba.cut("他来到了网易杭研大厦")  # accurate mode is the default
print(", ".join(seg_list))
print '=============='
seg_list = jieba.cut_for_search("6号线 房主诚心招三居合租 宜家风格 拎包入住 配备齐全")  # search-engine mode
aa= ", ".join(seg_list)
print(aa)
print(aa) | wang153723482/HelloWorld_my | HelloWorld_python/jieba/demo.py | Python | apache-2.0 | 659 |
# coding=utf-8
from django import template
register = template.Library()
@register.filter
def add_class(field, css):
    """Template filter: render bound form *field* with *css* as its class attribute."""
    return field.as_widget(attrs={"class":css})
| gfavre/beyondthehost | beyondthehost/beyondthehost/templatetags/registration_bootstrap.py | Python | mit | 165 |
# -*- coding: utf-8 -*-
"""
Dialog-related global constant definitions.
"""
# Dialog type identifiers.
DIALOG_TYPE_NORMAL = 0 # plain conversation dialog
DIALOG_TYPE_QUEST = 1 # quest (mission) dialog
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for bug comments."""
__author__ = 'alexis.torres@gmail.com (Alexis O. Torres)'
from google.appengine.ext import db
from models import bugs
class Comment(db.Model):
  """Models a bug comment stored in AppEngine's Datastore."""
  # Free-form comment text.
  message = db.TextProperty(required=True)
  # Owning bug; reachable from Bug via the `bug_comments` collection.
  bug = db.ReferenceProperty(reference_class=bugs.Bug,
                             collection_name='bug_comments')

  # Tracks when an entry is added and modified.
  added = db.DateTimeProperty(required=False, auto_now_add=True)
def AddComment(bug_key, message):
  """Adds a new comment.

  Args:
    bug_key: Datastore key of the bug being commented on.
    message: Comment text.

  Returns:
    The persisted Comment entity.
  """
  bug = bugs.GetBugByKey(bug_key)
  comment = Comment(message=message, bug=bug)
  comment.put()
  return comment
| masamichi/bite-project | server/models/comments.py | Python | apache-2.0 | 1,287 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-27 22:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: widens/changes `event.event_name`
    # to CharField(max_length=100). Operations left byte-identical —
    # applied migrations must not be edited.

    dependencies = [
        ('suite', '0020_merge_20170227_2231'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='event_name',
            field=models.CharField(max_length=100),
        ),
    ]
| fsxfreak/club-suite | clubsuite/suite/migrations/0021_auto_20170227_2231.py | Python | mit | 454 |
#!/usr/bin/env python
#-*-coding: utf-8 -*-
class Passenager(object):
    """Wraps one passenger entry of a response payload, exposing every
    key of the dict as an instance attribute."""

    def __init__(self, rsp_data_item):
        # Copy each response field onto the instance one by one.
        for key, value in rsp_data_item.items():
            setattr(self, key, value)
class Ticket(object):
    """Wraps one ticket entry of a response payload: keeps the booking
    token ('secretStr') and flattens 'queryLeftNewDTO' into attributes."""

    def __init__(self, rsp_data_item):
        # The booking token may be absent; default to None like dict.get.
        self.secretStr = rsp_data_item.get('secretStr')
        # Expose every field of the nested DTO as an attribute.
        for key, value in rsp_data_item['queryLeftNewDTO'].items():
            setattr(self, key, value)
| lilinux/piao | piao/objects.py | Python | apache-2.0 | 349 |
#!/usr/bin/python
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x, n = None):
self.val = x
self.next = n
def __str__(self):
cur = self
s = "%s" % cur.val
while cur.next:
cur = cur.next
s += "->%s" % cur.val
return s
class Solution(object):
    def swapPairs(self, head):
        """
        Swap every two adjacent nodes of a linked list and return the new head.

        :type head: ListNode
        :type k: int
        :rtype: ListNode

        Implemented as reverse-nodes-in-k-groups with k fixed to 2: each
        complete group of k nodes is reversed in place; a trailing partial
        group is left untouched.
        """
        k = 2
        # define a fake head
        fh = ListNode(None)
        fh.next = head
        # define 4 cursor and 1 counter for k
        gc = head       # gc:group-cursor — scans ahead to count off groups of k
        ocr = head      # ocr:operating-cursor-for-reversing — start of the group being reversed
        ocl = fh        # orl:operating-cursor-for-last-group — tail of the previous (already reversed) group
        ocls = None     # orls:operating-cursor-for-last-group-start-mark — first node of the current group
        count = 0
        while gc:
            if 0 == count % k:
                # Remember where this group starts; after reversal it becomes
                # the group's tail and the next group's `ocl`.
                ocls = gc
            gc = gc.next
            count += 1
            if 0 == count % k:
                # A full group of k nodes ends here: reverse it in place.
                # First node of the group now points past the group (to gc).
                tmp = ocr.next
                ocr.next = gc
                last = ocr
                ocr = tmp
                for i in range(k - 2):
                    tmp = ocr.next
                    ocr.next = last
                    last = ocr
                    ocr = tmp
                # Splice the reversed group after the previous group's tail.
                ocl.next = ocr
                ocr.next = last
                # Advance: next group starts at gc; this group's tail is ocls.
                ocr = gc
                ocl = ocls
        return fh.next
if "__main__" == __name__:
    # Ad-hoc smoke checks (Python 2 print statements): shows the input list
    # once, then swapPairs results for lists of various lengths.
    s = Solution()
    print ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
    print s.swapPairs(ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5))))))
    print s.swapPairs(ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5))))))
    print s.swapPairs(ListNode(1, ListNode(2)))
    print s.swapPairs(ListNode(1, ListNode(2, ListNode(3, ListNode(4)))))
    print s.swapPairs(ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5))))))
    print s.swapPairs(ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5))))))
    print s.swapPairs(ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5))))))
| pandaoknight/leetcode | neo_medium/linked_list/swap-nodes-in-pairs/main.py | Python | gpl-2.0 | 2,183 |
#!/usr/bin/env python
import unittest
import textwrap
import snortpager
import StringIO
class AlertParseTest(unittest.TestCase):
    """Unit tests for snortpager's Snort alert parsers (full and fast formats)."""

    # Sample alert in Snort's "full" output format: three ICMP records
    # separated by blank lines (the trailing backslashes keep it one literal).
    alert_full_format = '[**] [1:10000001:1] ICMP test [**]\
[Priority: 0]\
01/21-05:14:56.944587 192.168.2.18 -> 184.150.183.114\
ICMP TTL:64 TOS:0x0 ID:157 IpLen:20 DgmLen:84\
Type:8  Code:0  ID:60162   Seq:0  ECHO\
\
[**] [1:10000001:1] ICMP test [**]\
[Priority: 0]\
01/21-05:14:56.961267 184.150.183.114 -> 192.168.2.18\
ICMP TTL:57 TOS:0x0 ID:16527 IpLen:20 DgmLen:84\
Type:0  Code:0  ID:60162  Seq:0  ECHO REPLY\
\
[**] [1:10000001:1] ICMP test [**]\
[Priority: 0]\
01/21-05:14:57.971724 192.168.2.18 -> 184.150.183.114\
ICMP TTL:64 TOS:0x0 ID:160 IpLen:20 DgmLen:84\
Type:8  Code:0  ID:60162   Seq:1  ECHO\
\
'
    # Sample single-line alerts in Snort's "fast" output format
    # (IPv4 ICMP, IPv6 ICMP, IPv4 ICMP).
    alert_fast_format_1 = '01/21-05:09:48.809540  [**] [1:10000001:1] ICMP test [**] [Priority: 0] {ICMP} 192.168.2.18 -> 184.150.183.178'
    alert_fast_format_2 = '01/21-05:10:15.652628  [**] [1:10000001:1] ICMP test [**] [Priority: 0] {IPV6-ICMP} fe80::cc1:e6f9:74e0:d69f -> ff02::16'
    alert_fast_format_3 = '01/21-05:09:48.809540  [**] [1:10000001:1] ICMP test [**] [Priority: 0] {ICMP} 192.168.2.18 -> 184.150.183.178'

    def setUp(self):
        # Fresh accumulator per test.
        self.alert_list = []

    def tearDown(self):
        self.alert_list = None

    def test_parse_full(self):
        """parse_alert_full extracts all three records from the full-format sample."""
        alert_file = StringIO.StringIO(textwrap.dedent(self.alert_full_format))
        line = alert_file.readline()
        for y in range(0,3):
            self.alert_list.append(snortpager.parse_alert_full(alert_file, line))
        self.assertEqual(len(self.alert_list), 3, 'alert_full parsing failed')

    def test_parse_fast(self):
        """parse_alert_fast returns a parse result for each fast-format sample."""
        test1 = snortpager.parse_alert_fast(self.alert_fast_format_1)
        self.assertIsNotNone(test1)
        test2 = snortpager.parse_alert_fast(self.alert_fast_format_2)
        self.assertIsNotNone(test2)
        test3 = snortpager.parse_alert_fast(self.alert_fast_format_3)
        self.assertIsNotNone(test3)
if __name__ == "__main__":
unittest.main()
| kahubbard/snortpager | tests/alerttest.py | Python | gpl-3.0 | 2,484 |
import os
import pymongo
from bson.json_util import dumps
from flask import Flask, request, jsonify
from flask_pymongo import PyMongo
from datetime import datetime
# Flask app backed by MongoDB; connection parameters come from the
# environment with localhost/movieweb defaults.
app = Flask(__name__)
app.config['MONGO_URI'] = 'mongodb://{host}:{port}/{database}'.format(
    host=os.environ.get('MONGODB_HOST', 'localhost'),
    port=os.environ.get('MONGODB_PORT', 27017),
    database=os.environ.get('MONGODB_DB', 'movieweb')
)
mongo = PyMongo(app)
@app.route('/ratings/<int:movieid>', methods=['POST'])
def update_movie_rating(movieid):
    """Create or update the calling user's rating for *movieid*.

    The user id is read from the ``user`` request header and the rating
    value from the ``rating`` form field. Returns 204 on success, 401
    when the header is missing.
    """
    user = request.headers.get('user', None)
    if not user:
        return dumps('unauthorized'), 401
    # One rating per (user, movie): upsert keyed on both ids.
    query = {
        'userid': user,
        'movieid': movieid
    }
    # NOTE(review): a missing or non-numeric 'rating' form field raises
    # here and surfaces as a 500 -- consider validating and returning 400.
    update = {'$set': {
        'rating': int(request.form['rating']),
        'ts': datetime.now()
    }}
    mongo.db.ratings.update(query, update, upsert=True)
    return dumps(''), 204
@app.route('/ratings/<int:movieid>', methods=['GET'])
def get_movie_ratings(movieid):
    """Return the 10 most recent ratings for *movieid* as a JSON list.

    Pagination is supported via the ``skip`` query parameter.
    """
    # Bug fix: request.args values are strings, but pymongo's skip()
    # requires an int ("?skip=10" used to fail). type=int coerces the
    # value and falls back to 0 when it is missing or not an integer.
    skip = request.args.get('skip', 0, type=int)
    ratings = mongo.db.ratings.find({
        'movieid': movieid
    }).sort('ts', pymongo.DESCENDING).skip(skip).limit(10)
    return dumps(ratings), 200
@app.route('/ratings', methods=['GET'])
def get_user_ratings():
    """Return the calling user's 20 most recent ratings as a JSON list.

    The user id is read from the ``user`` request header; pagination is
    supported via the ``skip`` query parameter. Returns 401 when the
    header is missing.
    """
    user = request.headers.get('user', None)
    if not user:
        return dumps('unauthorized'), 401
    # Bug fix: request.args values are strings, but pymongo's skip()
    # requires an int ("?skip=20" used to fail). type=int coerces the
    # value and falls back to 0 when it is missing or not an integer.
    skip = request.args.get('skip', 0, type=int)
    ratings = mongo.db.ratings.find({
        'userid': user
    }).sort('ts', pymongo.DESCENDING).skip(skip).limit(20)
    return dumps(ratings), 200
if __name__ == "__main__":
    # Bind to all interfaces; the port comes from the environment
    # (default 5000). Debug mode is intended for local development only.
    app.run(host='0.0.0.0', port=int(os.environ.get('PORT', 5000)), debug=True)
# Copyright (C) 2011-2012 Canonical Services Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time
from txstatsd.server.configurableprocessor import ConfigurableMessageProcessor
class LoggingMessageProcessor(ConfigurableMessageProcessor):
    """A C{MessageProcessor} that logs every metric it receives and every
    metric it flushes through the supplied logger.

    The logger must expose a callable C{info} attribute; anything else
    raises C{TypeError} at construction time.
    """

    def __init__(self, logger, time_function=time.time,
                 message_prefix="", plugins=None, **kwz):
        super(LoggingMessageProcessor, self).__init__(
            time_function=time_function, message_prefix=message_prefix,
            plugins=plugins, **kwz)
        # Fail fast when the supplied object cannot be used for logging.
        if not callable(getattr(logger, "info", None)):
            raise TypeError()
        self.logger = logger

    def process_message(self, message, metric_type, key, fields):
        # Log the raw inbound message before delegating to the parent.
        self.logger.info("In: %s" % message)
        return super(LoggingMessageProcessor, self).process_message(
            message, metric_type, key, fields)

    def flush(self, interval=10000, percent=90):
        """Log and re-yield every metric sample produced by the parent flush."""
        for sample in super(LoggingMessageProcessor, self).flush(
                interval=interval, percent=percent):
            self.logger.info("Out: %s %s %s" % sample)
            yield sample
| wikimedia/operations-debs-txstatsd | txstatsd/server/loggingprocessor.py | Python | mit | 2,426 |
from pytest import raises
from wikitextparser import Argument, Template, parse
def test_basics():
    # A keyword argument keeps the whitespace around its name and value.
    a = Argument('| a = b ')
    assert ' a ' == a.name
    assert ' b ' == a.value
    assert not a.positional
    assert repr(a) == "Argument('| a = b ')"
def test_anonymous_parameter():
    # Positional arguments are implicitly named by ordinal position.
    a = Argument('| a ')
    assert '1' == a.name
    assert ' a ' == a.value
def test_set_name():
    # Assigning .name rewrites the argument's string in place.
    a = Argument('| a = b ')
    a.name = ' c '
    assert '| c = b ' == a.string
def test_set_name_at_subspan_boundary():
    # Renaming must not corrupt template subspans adjacent to the '='.
    a = Argument('|{{ a }}={{ b }}')
    a.name = ' c '
    assert '| c ={{ b }}' == a.string
    assert '{{ b }}' == a.value
def test_set_name_for_positional_args():
    # Setting a positional arg's name (even to itself) makes it keyword.
    a = Argument('| b ')
    a.name = a.name
    assert '|1= b ' == a.string
def test_value_setter():
    # Assigning .value rewrites only the value portion of the string.
    a = Argument('| a = b ')
    a.value = ' c '
    assert '| a = c ' == a.string
def test_removing_last_arg_should_not_effect_the_others():
    # Deleting one argument's span must leave sibling spans intact.
    a, b, c = Template('{{t|1=v|v|1=v}}').arguments
    del c[:]
    assert '|1=v' == a.string
    assert '|v' == b.string
def test_nowikied_arg():
    # An '=' inside <nowiki> does not create a keyword argument.
    a = Argument('|<nowiki>1=3</nowiki>')
    assert a.positional is True
    assert '1' == a.name
    assert '<nowiki>1=3</nowiki>' == a.value
def test_value_after_convertion_of_positional_to_keywordk():
    # Converting positional -> keyword must preserve the value verbatim.
    a = Argument("""|{{{a|{{{b}}}}}}""")
    a.name = ' 1 '
    assert '{{{a|{{{b}}}}}}' == a.value
def test_name_of_positionals():
    # Positional names count up from '1' in order of appearance.
    assert \
        ['1', '2', '3'] == \
        [a.name for a in parse('{{t|a|b|c}}').templates[0].arguments]
def test_dont_confuse_subspan_equal_with_keyword_arg_equal():
    # An '=' inside a nested template belongs to that template, not to
    # the outer argument.
    p = parse('{{text| {{text|1=first}} | b }}')
    a0, a1 = p.templates[0].arguments
    assert ' {{text|1=first}} ' == a0.value
    assert '1' == a0.name
    assert ' b ' == a1.value
    assert '2' == a1.name
def test_setting_positionality():
    # positional=False keeps the explicit name; True strips 'name=' when
    # safe and is idempotent; demoting back raises ValueError.
    a = Argument("|1=v")
    a.positional = False
    assert '|1=v' == a.string
    a.positional = True
    assert '|v' == a.string
    a.positional = True
    assert '|v' == a.string
    raises(ValueError, setattr, a, 'positional', False)
def test_parser_functions_at_the_end():
    # Parser functions inside a value are still discovered.
    pfs = Argument('| 1 ={{#ifeq:||yes}}').parser_functions
    assert 1 == len(pfs)
def test_section_not_keyword_arg():
    # Section headings inside a value must not be mistaken for the '='
    # of a keyword argument.
    a = Argument('|1=foo\n== section ==\nbar')
    assert (a.name, a.value) == ('1', 'foo\n== section ==\nbar')
    a = Argument('|\n==t==\nx')
    assert (a.name, a.value) == ('1', '\n==t==\nx')
    # Following cases is not treated as a section headings
    a = Argument('|==1==\n')
    assert (a.name, a.value) == ('', '=1==\n')
    # Todo: Prevents forming a template!
    # a = Argument('|\n==1==')
    # assert
    # (a.name == a.value), ('1', '\n==1==')
def test_argument_name_not_external_link():
    # MediaWiki parses template parameters before external links,
    # so it goes with the named parameter in both cases.
    a = Argument('|[http://example.com?foo=bar]')
    assert (a.name, a.value) == ('[http://example.com?foo', 'bar]')
    a = Argument('|http://example.com?foo=bar')
    assert (a.name, a.value) == ('http://example.com?foo', 'bar')
def test_lists():
    # get_lists() finds wiki lists in keyword and positional values.
    assert Argument('|list=*a\n*b').get_lists()[0].items == ['a', 'b']
    assert Argument('|lst= *a\n*b').get_lists()[0].items == ['a', 'b']
    assert Argument('|*a\n*b').get_lists()[0].items == ['a', 'b']
    # the space at the beginning of a positional argument should not be
    # ignored. (?)
    assert Argument('| *a\n*b').get_lists()[0].items == ['b']
def test_equal_sign_in_val():
    # Only the first '=' splits name from value; later ones are data.
    a, c = Template('{{t|a==b|c}}').arguments
    assert a.value == '=b'
    assert c.name == '1'
| 5j9/wikitextparser | tests/test_argument.py | Python | gpl-3.0 | 3,632 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## @copyright
# Software License Agreement (BSD License)
#
# Copyright (c) 2017, Jorge De La Cruz, Carmen Castano.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = 'Jorge De La Cruz, Carmen Castano'
__copyright__ = 'Copyright (c) 2017 Jorge De La Cruz, Carmen Castano'
__license__ = 'BSD'
__maintainer__ = 'Jorge De La Cruz'
__email__ = 'delacruz@igm.rwth-aachen.de'
import sys
## Path to FreeCAD library
# change this by your FreeCAD library path
sys.path.append('/usr/lib/freecad/lib')
import FreeCAD as App
import Import
from datetime import datetime
class GetParameters:
    """Load a STEP CAD model in FreeCAD and dump each part's Name/Label
    to a tab-separated text file (Python 2 script)."""
    def __init__(self):
        # Hard-coded input model; edit to point at your own STEP file.
        self.filePath = '/home/jdelacruz/Downloads/KonzeptB_lang090715.stp'
    def loadCAD(self):
        """Import the STEP file and grab the active document's objects."""
        print('Starting to load the CAD file, please be patient!...')
        Import.open(self.filePath)
        self.handler = App.ActiveDocument
        self.parts = self.handler.Objects
        print('CAD model loaded!')
    def writeTxt(self):
        """Append one 'Name<TAB>Label' row per part to data.txt."""
        f = open('data.txt','a')
        print >>f, 'Name \t Label'
        self.i = 0
        self.size = len(self.parts)
        # NOTE(review): range() returns a list under Python 2, so the
        # index assignments below work; this would break on Python 3.
        self.names = range(self.size)
        self.labels = range(self.size)
        for self.part in self.parts:
            self.names[self.i] = self.part.Name
            self.labels[self.i] = self.part.Label
            print >>f, self.part.Name+"\t"+self.part.Label
            self.i += 1
        f.close()
        print('The txt file has been created successfully!')
if __name__ == '__main__':
    # Load the model and export the part table in one shot.
    data = GetParameters()
    data.loadCAD()
    data.writeTxt()
from nbodykit.core import Algorithm, DataSource
from nbodykit import fof, utils
import numpy
def RaDecDataSource(d):
    """Build a `RaDecRedshift` DataSource from the config dict *d*,
    forcing unit-sphere Cartesian coordinates (collision radii are
    angles, so positions must lie on the unit sphere)."""
    from nbodykit import plugin_manager
    source = plugin_manager.get_plugin('RaDecRedshift')
    d['unit_sphere'] = True
    return source.from_config(d)
class FiberCollisionsAlgorithm(Algorithm):
    """
    Run an angular FOF algorithm to determine fiber collision
    groups from an input catalog, and then assign fibers such that
    the maximum amount of object receive a fiber. This amounts
    to determining the following population of objects:

        * population 1:
            the maximal "clean" sample of objects in which each object is not
            angularly collided with any other object in this subsample
        * population 2:
            the potentially-collided objects; these objects are those
            that are fiber collided + those that have been "resolved"
            due to multiple coverage in tile overlap regions

    See Guo et al. 2010 (http://arxiv.org/abs/1111.6598)for further details
    """
    plugin_name = "FiberCollisions"

    def __init__(self, datasource, collision_radius=62/60./60., seed=None):
        # set the input parameters
        self.datasource = datasource
        self.collision_radius = collision_radius
        self.seed = seed

        # store collision radius in radians
        self._collision_radius_rad = self.collision_radius * numpy.pi/180.
        if self.comm.rank == 0:
            self.logger.info("collision radius in degrees = %.4f" %collision_radius)

        # create the local random seed from the global seed and comm size
        self.local_seed = utils.local_random_seed(self.seed, self.comm)
        self.logger.info("local_seed = %d" %self.local_seed)

    @classmethod
    def fill_schema(cls):
        # Describe the algorithm's configurable parameters for nbodykit's
        # plugin/config machinery.
        s = cls.schema
        s.description = "the application of fiber collisions to a galaxy survey"

        s.add_argument("datasource", type=RaDecDataSource,
            help='`RaDecRedshift DataSource; run `nbkit.py --list-datasources RaDecRedshift` for details')
        s.add_argument("collision_radius", type=float,
            help="the size of the angular collision radius (in degrees)")
        s.add_argument("seed", type=int,
            help="seed the random number generator explicitly, for reproducibility")

    def run(self):
        """
        Compute the FOF collision groups and assign fibers, such that
        the maximum number of objects receive fibers

        Returns
        -------
        result: array_like
            a structured array with 3 fields:
                Label :
                    the group labels for each object in the input
                    DataSource; label == 0 objects are not in a group
                Collided :
                    a flag array specifying which objects are
                    collided, i.e., do not receive a fiber
                NeighborID :
                    for those objects that are collided, this
                    gives the (global) index of the nearest neighbor
                    on the sky (0-indexed), else it is set to -1
        """
        from nbodykit import fof
        from astropy.utils.misc import NumpyRNGContext

        # open a persistent cache
        with self.datasource.keep_cache():

            # run the angular FoF algorithm to get group labels
            # labels gives the global group ID corresponding to each object in Position
            # on this rank
            labels = fof.fof(self.datasource, self._collision_radius_rad, 1, comm=self.comm)

            # assign the fibers (in parallel)
            with NumpyRNGContext(self.local_seed):
                collided, neighbors = self._assign_fibers(labels)

        # all reduce to get summary statistics
        N_pop1 = self.comm.allreduce((collided^1).sum())
        N_pop2 = self.comm.allreduce((collided).sum())
        f = N_pop2 * 1. / (N_pop1 + N_pop2)

        # print out some info
        if self.comm.rank == 0:
            self.logger.info("population 1 (clean) size = %d" %N_pop1)
            self.logger.info("population 2 (collided) size = %d" %N_pop2)
            self.logger.info("collision fraction = %.4f" %f)

        # return a structured array
        d = list(zip(['Label', 'Collided', 'NeighborID'], [labels, collided, neighbors]))
        dtype = numpy.dtype([(col, x.dtype) for col, x in d])
        result = numpy.empty(len(labels), dtype=dtype)
        for col, x in d: result[col] = x
        return result

    def _assign_fibers(self, Label):
        """
        Initernal function to divide the data by collision group
        across ranks and assign fibers, such that the minimum
        number of objects are collided out of the survey
        """
        import mpsort
        from mpi4py import MPI

        comm = self.comm
        # Only objects that belong to a collision group (label != 0) are
        # shuffled across ranks and processed below.
        mask = Label != 0

        dtype = numpy.dtype([
                ('Position', ('f4', 3)),
                ('Label', ('i4')),
                ('Rank', ('i4')),
                ('Index', ('i4')),
                ('Collided', ('i4')),
                ('NeighborID', ('i4'))
                ])
        PIG = numpy.empty(mask.sum(), dtype=dtype)
        PIG['Label'] = Label[mask]
        size = len(Label)
        size = comm.allgather(size)
        # NOTE(review): this Ntot is never read before being reassigned
        # further down -- candidate for removal.
        Ntot = sum(size)
        offset = sum(size[:comm.rank])
        # Global (0-indexed) position of each grouped object in the
        # original catalog ordering.
        PIG['Index'] = offset + numpy.where(mask == True)[0]
        del Label
        with self.datasource.open() as stream:
            [[Position]] = stream.read(['Position'], full=True)
        PIG['Position'] = Position[mask]
        del Position
        # NOTE(review): this Ntot is also unused.
        Ntot = comm.allreduce(len(mask))
        Nhalo = comm.allreduce(
            PIG['Label'].max() if len(PIG['Label']) > 0 else 0, op=MPI.MAX) + 1

        # now count number of particles per halo
        # Route each group to a single rank so a group is never split.
        PIG['Rank'] = PIG['Label'] % comm.size
        cnt = numpy.bincount(PIG['Rank'], minlength=comm.size)
        Nlocal = comm.allreduce(cnt)[comm.rank]

        # sort by rank and then label
        PIG2 = numpy.empty(Nlocal, PIG.dtype)
        mpsort.sort(PIG, orderby='Rank', out=PIG2, comm=self.comm)
        assert (PIG2['Rank'] == comm.rank).all()
        PIG2.sort(order=['Label'])

        if self.comm.rank == 0:
            self.logger.info('total number of collision groups = %d', Nhalo-1)
            self.logger.info("Started fiber assignment")

        # loop over unique group ids
        for group_id in numpy.unique(PIG2['Label']):
            start = PIG2['Label'].searchsorted(group_id, side='left')
            end = PIG2['Label'].searchsorted(group_id, side='right')
            N = end-start
            assert(PIG2['Label'][start:end] == group_id).all()

            # pairs (random selection)
            if N == 2:
                # randomly choose, with fixed local seed
                which = numpy.random.choice([0,1])
                indices = [start+which, start+(which^1)]
                PIG2['Collided'][indices] = [1, 0]
                PIG2['NeighborID'][indices] = [PIG2['Index'][start+(which^1)], -1]
            # multiplets (minimize collidedness)
            elif N > 2:
                collided, nearest = self._assign_multiplets(PIG2['Position'][start:end])
                PIG2['Collided'][start:end] = collided[:]
                PIG2['NeighborID'][start:end] = -1
                PIG2['NeighborID'][start:end][collided==1] = PIG2['Index'][start+nearest][:]

        if self.comm.rank == 0: self.logger.info("Finished fiber assignment")

        # return to the order specified by the global unique index
        mpsort.sort(PIG2, orderby='Index', out=PIG, comm=self.comm)

        # return arrays including the objects not in any groups
        collided = numpy.zeros(size[comm.rank], dtype='i4')
        collided[mask] = PIG['Collided'][:]
        neighbors = numpy.zeros(size[comm.rank], dtype='i4') - 1
        neighbors[mask] = PIG['NeighborID'][:]

        del PIG
        return collided, neighbors

    def _assign_multiplets(self, Position):
        """
        Internal function to assign the maximal amount of fibers
        in collision groups of size N > 2
        """
        from scipy.spatial.distance import pdist, squareform

        # Sum of collision counts over the objects that collide with the
        # members selected by `slice`.
        def count(slice, n):
            return n[numpy.nonzero(slice)[0]].sum()

        # first shuffle the member ids, so we select random element when tied
        N = len(Position)
        group_ids = list(range(N))
        collided_ids = []
        # Greedily remove the "most collided" member until no pair collides.
        while len(group_ids) > 1:

            # compute dists and find where dists < collision radius
            dists = squareform(pdist(Position[group_ids], metric='euclidean'))
            numpy.fill_diagonal(dists, numpy.inf) # ignore self-pairs
            collisions = dists <= self._collision_radius_rad

            # total # of collisions for each group member
            n_collisions = numpy.sum(collisions, axis=0)

            # total # of collisions for those objects that collide with each group member
            n_other = numpy.apply_along_axis(count, 0, collisions, n_collisions)

            # remove object that has most # of collisions
            # and those colliding objects have least # of collisions
            idx = numpy.where(n_collisions == n_collisions.max())[0]

            # choose randomly, with a fixed local seed
            ii = numpy.random.choice(numpy.where(n_other[idx] == n_other[idx].min())[0])
            collided_index = idx[ii]

            # make the collided galaxy and remove from group
            collided_id = group_ids.pop(collided_index)

            # only make this a collided object if its n_collisions > 0
            # if n_collisions = 0, then the object can get a fiber for free
            if n_collisions[collided_index] > 0:
                collided_ids.append(collided_id)

        # compute the nearest neighbors
        neighbor_ids = []
        group_indices = list(range(N))
        dists = squareform(pdist(Position, metric='euclidean'))
        uncollided = [i for i in group_indices if i not in collided_ids]
        for i in sorted(collided_ids):
            neighbor = uncollided[dists[i][uncollided].argmin()]
            neighbor_ids.append(neighbor)

        collided = numpy.zeros(N)
        collided[collided_ids] = 1
        return collided, neighbor_ids

    def save(self, output, result):
        """
        Write the `Label`, `Collided`, and `NeighborID` arrays
        as a Pandas DataFrame to an HDF file, with key `FiberCollisonGroups`
        """
        import pandas as pd
        import os

        # gather the result to root and output
        result = self.comm.gather(result, root=0)
        if self.comm.rank == 0:

            # enforce a default extension
            _, ext = os.path.splitext(output)
            if 'hdf' not in ext: output += '.hdf5'

            self.logger.info("saving (Label, Collided, NeighborID) as Pandas HDF with name %s" %output)
            result = numpy.concatenate(result, axis=0)
            df = pd.DataFrame.from_records(result)
            df.to_hdf(output, 'FiberCollisionGroups')
| mschmittfull/nbodykit | nbodykit/core/algorithms/FiberCollisions.py | Python | gpl-3.0 | 11,480 |
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from main import three_plus_four
def test_three_plus_four():
    # three_plus_four() is expected to return the constant sum 3 + 4.
    assert three_plus_four() == 7
| GoogleCloudPlatform/guest-test-infra | container_images/pytest/example/src/application/main_test.py | Python | apache-2.0 | 688 |
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Series, read_csv, factorize, date_range
from pandas.core.algorithms import take_1d
try:
    # Rolling-window helpers were top-level pandas functions in older
    # versions; absent in newer releases.
    from pandas import (rolling_median, rolling_mean, rolling_min, rolling_max,
                        rolling_var, rolling_skew, rolling_kurt, rolling_std)
    have_rolling_methods = True
except ImportError:
    have_rolling_methods = False
try:
    # The C algos module moved under pandas._libs in newer versions.
    from pandas._libs import algos
except ImportError:
    from pandas import algos
try:
    # test_parallel runs a callable concurrently on N threads; only
    # available in pandas builds compiled with --with-ci or similar.
    from pandas.util.testing import test_parallel
    have_real_test_parallel = True
except ImportError:
    have_real_test_parallel = False
    def test_parallel(num_threads=1):
        # No-op fallback decorator so this module still imports; the
        # benchmarks raise NotImplementedError in setup() instead.
        def wrapper(fname):
            return fname
        return wrapper
from .pandas_vb_common import BaseIO
class ParallelGroupbyMethods(object):
    """Benchmark groupby reductions run on N threads vs. serially."""
    params = ([2, 4, 8], ['count', 'last', 'max', 'mean', 'min', 'prod',
                          'sum', 'var'])
    param_names = ['threads', 'method']
    def setup(self, threads, method):
        if not have_real_test_parallel:
            raise NotImplementedError
        N = 10**6
        ngroups = 10**3
        df = DataFrame({'key': np.random.randint(0, ngroups, size=N),
                        'data': np.random.randn(N)})
        @test_parallel(num_threads=threads)
        def parallel():
            getattr(df.groupby('key')['data'], method)()
        self.parallel = parallel
        def loop():
            getattr(df.groupby('key')['data'], method)()
        self.loop = loop
    def time_parallel(self, threads, method):
        self.parallel()
    def time_loop(self, threads, method):
        # Serial baseline: run the same reduction once per "thread".
        for i in range(threads):
            self.loop()
class ParallelGroups(object):
    """Benchmark GroupBy.groups construction on N threads."""
    params = [2, 4, 8]
    param_names = ['threads']
    def setup(self, threads):
        if not have_real_test_parallel:
            raise NotImplementedError
        size = 2**22
        ngroups = 10**3
        data = Series(np.random.randint(0, ngroups, size=size))
        @test_parallel(num_threads=threads)
        def get_groups():
            data.groupby(data).groups
        self.get_groups = get_groups
    def time_get_groups(self, threads):
        self.get_groups()
class ParallelTake1D(object):
    """Benchmark take_1d on two threads for int64/float64 data."""
    params = ['int64', 'float64']
    param_names = ['dtype']
    def setup(self, dtype):
        if not have_real_test_parallel:
            raise NotImplementedError
        N = 10**6
        df = DataFrame({'col': np.arange(N, dtype=dtype)})
        indexer = np.arange(100, len(df) - 100)
        @test_parallel(num_threads=2)
        def parallel_take1d():
            take_1d(df['col'].values, indexer)
        self.parallel_take1d = parallel_take1d
    def time_take1d(self, dtype):
        self.parallel_take1d()
class ParallelKth(object):
    """Benchmark algos.kth_smallest on two threads."""
    # asv settings: a single timed invocation per repeat.
    number = 1
    repeat = 5
    def setup(self):
        if not have_real_test_parallel:
            raise NotImplementedError
        N = 10**7
        k = 5 * 10**5
        # Each thread receives its own input array via kwargs_list.
        kwargs_list = [{'arr': np.random.randn(N)},
                       {'arr': np.random.randn(N)}]
        @test_parallel(num_threads=2, kwargs_list=kwargs_list)
        def parallel_kth_smallest(arr):
            algos.kth_smallest(arr, k)
        self.parallel_kth_smallest = parallel_kth_smallest
    def time_kth_smallest(self):
        self.parallel_kth_smallest()
class ParallelDatetimeFields(object):
    """Benchmark DatetimeIndex/PeriodIndex field access and conversions
    on two threads."""
    def setup(self):
        if not have_real_test_parallel:
            raise NotImplementedError
        N = 10**6
        self.dti = date_range('1900-01-01', periods=N, freq='T')
        self.period = self.dti.to_period('D')
    def time_datetime_field_year(self):
        @test_parallel(num_threads=2)
        def run(dti):
            dti.year
        run(self.dti)
    def time_datetime_field_day(self):
        @test_parallel(num_threads=2)
        def run(dti):
            dti.day
        run(self.dti)
    def time_datetime_field_daysinmonth(self):
        @test_parallel(num_threads=2)
        def run(dti):
            dti.days_in_month
        run(self.dti)
    def time_datetime_field_normalize(self):
        @test_parallel(num_threads=2)
        def run(dti):
            dti.normalize()
        run(self.dti)
    def time_datetime_to_period(self):
        @test_parallel(num_threads=2)
        def run(dti):
            dti.to_period('S')
        run(self.dti)
    def time_period_to_datetime(self):
        @test_parallel(num_threads=2)
        def run(period):
            period.to_timestamp()
        run(self.period)
class ParallelRolling(object):
    """Benchmark rolling-window reductions on two threads, supporting
    both the modern .rolling() API and the legacy rolling_* functions."""
    params = ['median', 'mean', 'min', 'max', 'var', 'skew', 'kurt', 'std']
    param_names = ['method']
    def setup(self, method):
        if not have_real_test_parallel:
            raise NotImplementedError
        win = 100
        arr = np.random.rand(100000)
        if hasattr(DataFrame, 'rolling'):
            # Modern API (pandas >= 0.18).
            df = DataFrame(arr).rolling(win)
            @test_parallel(num_threads=2)
            def parallel_rolling():
                getattr(df, method)()
            self.parallel_rolling = parallel_rolling
        elif have_rolling_methods:
            # Legacy top-level rolling_* functions.
            rolling = {'median': rolling_median,
                       'mean': rolling_mean,
                       'min': rolling_min,
                       'max': rolling_max,
                       'var': rolling_var,
                       'skew': rolling_skew,
                       'kurt': rolling_kurt,
                       'std': rolling_std}
            @test_parallel(num_threads=2)
            def parallel_rolling():
                rolling[method](arr, win)
            self.parallel_rolling = parallel_rolling
        else:
            raise NotImplementedError
    def time_rolling(self, method):
        self.parallel_rolling()
class ParallelReadCSV(BaseIO):
    """Benchmark read_csv on two threads for float, object and
    datetime-indexed frames."""
    # asv settings: a single timed invocation per repeat.
    number = 1
    repeat = 5
    params = ['float', 'object', 'datetime']
    param_names = ['dtype']
    def setup(self, dtype):
        if not have_real_test_parallel:
            raise NotImplementedError
        rows = 10000
        cols = 50
        # Bug fix: the original used 'object%03d'.format(i), which has no
        # '{}' placeholder, so all five object columns were literally
        # named 'object%03d' (duplicate labels). Use a real format
        # placeholder to get object000..object004.
        data = {'float': DataFrame(np.random.randn(rows, cols)),
                'datetime': DataFrame(np.random.randn(rows, cols),
                                      index=date_range('1/1/2000',
                                                       periods=rows)),
                'object': DataFrame('foo',
                                    index=range(rows),
                                    columns=['object{:03d}'.format(i)
                                             for i in range(5)])}
        self.fname = '__test_{}__.csv'.format(dtype)
        df = data[dtype]
        df.to_csv(self.fname)
        @test_parallel(num_threads=2)
        def parallel_read_csv():
            read_csv(self.fname)
        self.parallel_read_csv = parallel_read_csv
    def time_read_csv(self, dtype):
        self.parallel_read_csv()
class ParallelFactorize(object):
    """Benchmark factorize() on N threads vs. serially."""
    # asv settings: a single timed invocation per repeat.
    number = 1
    repeat = 5
    params = [2, 4, 8]
    param_names = ['threads']
    def setup(self, threads):
        if not have_real_test_parallel:
            raise NotImplementedError
        strings = tm.makeStringIndex(100000)
        @test_parallel(num_threads=threads)
        def parallel():
            factorize(strings)
        self.parallel = parallel
        def loop():
            factorize(strings)
        self.loop = loop
    def time_parallel(self, threads):
        self.parallel()
    def time_loop(self, threads):
        # Serial baseline: one factorize call per "thread".
        for i in range(threads):
            self.loop()
from .pandas_vb_common import setup # noqa: F401
| GuessWhoSamFoo/pandas | asv_bench/benchmarks/gil.py | Python | bsd-3-clause | 7,604 |
"""initial migration
Revision ID: 1e278961df6c
Revises:
Create Date: 2017-07-18 07:49:43.559270
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1e278961df6c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: create the roles and users tables, plus a
    unique index on users.username."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('roles',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=64), nullable=True),
    sa.Column('role_id', sa.Integer(), nullable=True),
    sa.Column('password_hash', sa.String(length=128), nullable=True),
    sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the username index and both tables
    (users before roles, to satisfy the foreign key)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_users_username'), table_name='users')
    op.drop_table('users')
    op.drop_table('roles')
    # ### end Alembic commands ###
| weqopy/blog_instance | migrations/versions/1e278961df6c_initial_migration.py | Python | mit | 1,299 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# @Author: oesteban
# @Date: 2016-06-03 09:35:13
# @Last Modified by: oesteban
# @Last Modified time: 2016-08-17 17:41:23
import os
import numpy as np
import os.path as op
from nipype.interfaces.base import (traits, isdefined, TraitedSpec, BaseInterface,
BaseInterfaceInputSpec, File, InputMultiPath,
OutputMultiPath)
from nipype.interfaces import fsl
class IntraModalMergeInputSpec(BaseInterfaceInputSpec):
    # One or more existing same-modality image files to merge.
    in_files = InputMultiPath(File(exists=True), mandatory=True,
                              desc='input files')
class IntraModalMergeOutputSpec(TraitedSpec):
    # Merged 4D series, its temporal average, and MCFLIRT's realignment
    # transforms and motion-parameter files.
    out_file = File(exists=True, desc='merged image')
    out_avg = File(exists=True, desc='average image')
    out_mats = OutputMultiPath(exists=True, desc='output matrices')
    out_movpar = OutputMultiPath(exists=True, desc='output movement parameters')
class IntraModalMerge(BaseInterface):
    """Merge several same-modality images into a 4D series, motion
    correct with FSL MCFLIRT, and expose the merged series, its temporal
    average, and the realignment transforms/parameters."""
    input_spec = IntraModalMergeInputSpec
    output_spec = IntraModalMergeOutputSpec

    def __init__(self, **inputs):
        # Results are accumulated here and returned by _list_outputs.
        self._results = {}
        super(IntraModalMerge, self).__init__(**inputs)

    def _run_interface(self, runtime):
        # Single input: nothing to merge or realign, pass it through.
        # NOTE(review): out_mats/out_movpar stay unset in this branch
        # (see TODO), so downstream nodes that require them will fail.
        if len(self.inputs.in_files) == 1:
            self._results['out_file'] = self.inputs.in_files[0]
            self._results['out_avg'] = self.inputs.in_files[0]
            # TODO: generate identity out_mats and zero-filled out_movpar
            return runtime

        magmrg = fsl.Merge(dimension='t', in_files=self.inputs.in_files)
        # Realign all volumes to the first one with normalized correlation.
        mcflirt = fsl.MCFLIRT(cost='normcorr', save_mats=True, save_plots=True,
                              ref_vol=0, in_file=magmrg.run().outputs.merged_file)
        mcres = mcflirt.run()
        self._results['out_mats'] = mcres.outputs.mat_file
        self._results['out_movpar'] = mcres.outputs.par_file
        self._results['out_file'] = mcres.outputs.out_file

        # Temporal mean of the realigned series.
        mean = fsl.MeanImage(dimension='T', in_file=mcres.outputs.out_file)
        self._results['out_avg'] = mean.run().outputs.out_file
        return runtime

    def _list_outputs(self):
        return self._results
class FormatHMCParamInputSpec(BaseInterfaceInputSpec):
    # Per-volume rigid-body head-motion parameters: one (x, y, z) tuple
    # of translations and one of rotations per timepoint.
    translations = traits.List(traits.Tuple(traits.Float, traits.Float, traits.Float),
                               mandatory=True, desc='three translations in mm')
    rot_angles = traits.List(traits.Tuple(traits.Float, traits.Float, traits.Float),
                             mandatory=True, desc='three rotations in rad')
    fmt = traits.Enum('confounds', 'movpar_file', usedefault=True,
                      desc='type of resulting file')
class FormatHMCParamOutputSpec(TraitedSpec):
    # Path of the file written by _tsv_format.
    out_file = File(exists=True, desc='written file path')
class FormatHMCParam(BaseInterface):
    """Write head-motion parameters to disk via ``_tsv_format``, either
    as an FSL-style movpar file or a tab-separated confounds file."""
    input_spec = FormatHMCParamInputSpec
    output_spec = FormatHMCParamOutputSpec

    def __init__(self, **inputs):
        # Results are accumulated here and returned by _list_outputs.
        self._results = {}
        super(FormatHMCParam, self).__init__(**inputs)

    def _run_interface(self, runtime):
        self._results['out_file'] = _tsv_format(
            self.inputs.translations, self.inputs.rot_angles,
            fmt=self.inputs.fmt)
        return runtime

    def _list_outputs(self):
        return self._results
def _tsv_format(translations, rot_angles, fmt='confounds'):
parameters = np.hstack((translations, rot_angles)).astype(np.float32)
if fmt == 'movpar_file':
out_file = op.abspath('movpar.txt')
np.savetxt(out_file, parameters)
elif fmt == 'confounds':
out_file = op.abspath('movpar.tsv')
np.savetxt(out_file, parameters,
header='X\tY\tZ\tRotX\tRotY\tRotZ',
delimiter='\t')
else:
raise NotImplementedError
return out_file
def nii_concat(in_files):
    """Concatenate *in_files* into ``merged.nii.gz`` in the current
    directory (affines are not checked) and return its absolute path."""
    import os
    from nibabel.funcs import concat_images
    out_path = os.path.abspath("merged.nii.gz")
    concat_images(in_files, check_affines=False).to_filename("merged.nii.gz")
    return out_path
def reorient(in_file):
    """Reorient *in_file* to the closest canonical (RAS) orientation,
    write it under the same basename in the current directory, and
    return the absolute path of the new file."""
    import os
    import nibabel as nb
    out_name = os.path.basename(in_file)
    nb.as_closest_canonical(nb.load(in_file)).to_filename(out_name)
    return os.path.abspath(out_name)
def prepare_roi_from_probtissue(in_file, epi_mask, epi_mask_erosion_mm=0,
                                erosion_mm=0):
    """Binarize a tissue-probability map and restrict it to an EPI mask.

    Parameters
    ----------
    in_file : str
        Path to a tissue probability map (NIfTI).
    epi_mask : str
        Path to a binary EPI brain mask (NIfTI).
    epi_mask_erosion_mm : float
        If nonzero, erode the EPI mask by this many millimeters before
        applying it.
    erosion_mm : float
        If nonzero, erode the final ROI by this many millimeters.

    Returns
    -------
    tuple of str
        Absolute paths of the written ROI image (``roi.nii.gz``) and of
        the EPI mask actually applied (the eroded copy, or *epi_mask*
        unchanged).
    """
    import os
    import nibabel as nb
    import scipy.ndimage as nd

    probability_map_nii = nb.load(in_file)
    probability_map_data = probability_map_nii.get_data()

    # thresholding: binarize at 95% probability
    probability_map_data[probability_map_data < 0.95] = 0
    probability_map_data[probability_map_data != 0] = 1

    epi_mask_nii = nb.load(epi_mask)
    epi_mask_data = epi_mask_nii.get_data()
    if epi_mask_erosion_mm:
        # Convert mm to erosion iterations using the coarsest voxel size.
        epi_mask_data = nd.binary_erosion(epi_mask_data,
                                          iterations=int(epi_mask_erosion_mm/max(probability_map_nii.header.get_zooms()))).astype(int)
        # Bug fix: the output filename was misspelled "erodd_mask.nii.gz";
        # callers only ever see the returned path, so renaming is safe.
        eroded_mask_file = os.path.abspath("eroded_mask.nii.gz")
        nb.Nifti1Image(epi_mask_data, epi_mask_nii.affine, epi_mask_nii.header).to_filename(eroded_mask_file)
    else:
        eroded_mask_file = epi_mask
    # Discard probability-map voxels outside the (eroded) EPI mask.
    probability_map_data[epi_mask_data != 1] = 0

    # shrinking
    if erosion_mm:
        iter_n = int(erosion_mm/max(probability_map_nii.header.get_zooms()))
        probability_map_data = nd.binary_erosion(probability_map_data,
                                                 iterations=iter_n).astype(int)

    new_nii = nb.Nifti1Image(probability_map_data, probability_map_nii.affine,
                             probability_map_nii.header)
    new_nii.to_filename("roi.nii.gz")
    return os.path.abspath("roi.nii.gz"), eroded_mask_file
| shoshber/fmriprep | fmriprep/interfaces/utils.py | Python | bsd-3-clause | 5,876 |
# -*- coding: utf-8 -*-
"""
search.py
~~~~~~~~~~~~
This module implements search HPE OneView REST API
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
__title__ = 'search'
__version__ = '0.0.1'
__copyright__ = '(C) Copyright (2012-2015) Hewlett Packard Enterprise ' \
' Development LP'
__license__ = 'MIT'
__status__ = 'Development'
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from hpOneView.common import get_members, uri
class search(object):
    """Thin wrapper over the HPE OneView index/search REST resources.

    Fixes/improvements:
    - the identical query-flattening code previously copy-pasted into all
      four methods is factored into :meth:`_build_query`;
    - ``type(query) is dict`` replaced by ``isinstance`` so dict subclasses
      (e.g. ``collections.OrderedDict``) are flattened instead of being
      concatenated raw into the URL.
    """

    def __init__(self, con):
        self._con = con

    @staticmethod
    def _build_query(query):
        """Return *query* as a URL query string.

        A dict is flattened to ``key=value&`` pairs (the trailing ``&`` is
        preserved for backward compatibility); any other value is assumed
        to already be a query string and is returned unchanged.
        """
        if isinstance(query, dict):
            return ''.join(key + '=' + query[key] + '&' for key in query)
        return query

    ###########################################################################
    # Get Resources
    ###########################################################################
    def get_resources(self, query=''):
        """Return index resources matching *query* (dict or query string)."""
        body = self._con.get(uri['resource'] + '?' + self._build_query(query))
        return get_members(body)

    def get_associations(self, query=''):
        """Return index associations matching *query*."""
        body = self._con.get(uri['association'] + '?' + self._build_query(query))
        return get_members(body)

    def get_trees(self, query=''):
        """Return index trees matching *query*."""
        body = self._con.get(uri['tree'] + '?' + self._build_query(query))
        return get_members(body)

    def get_search_suggestions(self, query):
        """Return search suggestions for *query* (raw response body)."""
        suggestion = self._build_query(query)
        return self._con.get(uri['search-suggestion'] + '?userQuery=' + suggestion)
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| danielreed/python-hpOneView | hpOneView/search.py | Python | mit | 3,333 |
import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
@pytest.fixture()
def task_vars():
    """Baseline Ansible facts: RPM-based origin host that is node + master."""
    openshift_facts = dict(
        common=dict(
            service_type='origin',
            is_containerized=False,
            is_atomic=False,
        ),
        docker=dict(),
    )
    return dict(
        openshift=openshift_facts,
        openshift_deployment_type='origin',
        openshift_image_tag='',
        group_names=['oo_nodes_to_config', 'oo_masters_to_config'],
    )
# is_active() should gate on deployment type and on the host being a node
# or a master; etcd/lb/nfs-only hosts are excluded.
@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
    ("invalid", True, [], False),
    ("", True, [], False),
    ("origin", False, [], False),
    ("openshift-enterprise", False, [], False),
    ("origin", False, ["oo_nodes_to_config", "oo_masters_to_config"], True),
    ("openshift-enterprise", False, ["oo_etcd_to_config"], False),
    ("origin", True, ["nfs"], False),
    ("openshift-enterprise", True, ["lb"], False),
])
def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
    """The check activates only for known deployment types on node/master hosts."""
    task_vars['openshift_deployment_type'] = deployment_type
    task_vars['openshift']['common']['is_containerized'] = is_containerized
    task_vars['group_names'] = group_names
    assert DockerImageAvailability(None, task_vars).is_active() == expect_active
@pytest.mark.parametrize("is_containerized,is_atomic", [
(True, True),
(False, False),
(True, False),
(False, True),
])
def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
def execute_module(module_name, module_args, *_):
if module_name == "yum":
return {}
assert module_name == "docker_image_facts"
assert 'name' in module_args
assert module_args['name']
return {
'images': [module_args['name']],
}
task_vars['openshift']['common']['is_containerized'] = is_containerized
task_vars['openshift']['common']['is_atomic'] = is_atomic
result = DockerImageAvailability(execute_module, task_vars).run()
assert not result.get('failed', False)
@pytest.mark.parametrize("available_locally", [
False,
True,
])
def test_all_images_available_remotely(task_vars, available_locally):
def execute_module(module_name, *_):
if module_name == 'docker_image_facts':
return {'images': [], 'failed': available_locally}
return {}
task_vars['openshift_docker_additional_registries'] = ["docker.io", "registry.access.redhat.com"]
task_vars['openshift_image_tag'] = 'v3.4'
check = DockerImageAvailability(execute_module, task_vars)
check._module_retry_interval = 0
result = check.run()
assert not result.get('failed', False)
def test_all_images_unavailable(task_vars):
    """If images are found neither locally nor via skopeo, the check fails."""
    def execute_module(module_name=None, *args):
        if module_name == "wait_for":
            # Registry connectivity probe succeeds.
            return {}
        if module_name == "command":
            # skopeo inspect fails for every image.
            return {'failed': True}
        # docker_image_facts: no local images.
        return {}

    task_vars['openshift_docker_additional_registries'] = ["docker.io"]
    task_vars['openshift_deployment_type'] = "openshift-enterprise"
    task_vars['openshift_image_tag'] = 'latest'

    check = DockerImageAvailability(execute_module, task_vars)
    check._module_retry_interval = 0
    actual = check.run()

    assert actual['failed']
    assert "required Docker images are not available" in actual['msg']
def test_no_known_registries():
    """With the known-registry list patched to be empty, the check must fail."""
    def execute_module(module_name=None, *_):
        if module_name == "command":
            return {
                'failed': True,
            }
        return {
            'changed': False,
        }
    def mock_known_docker_registries():
        return []
    dia = DockerImageAvailability(execute_module, task_vars=dict(
        openshift=dict(
            common=dict(
                service_type='origin',
                is_containerized=False,
                is_atomic=False,
            )
        ),
        openshift_docker_additional_registries=["docker.io"],
        openshift_deployment_type="openshift-enterprise",
        openshift_image_tag='latest',
        group_names=['oo_nodes_to_config', 'oo_masters_to_config'],
    ))
    # Replace the bound method so the check sees zero known registries.
    dia.known_docker_registries = mock_known_docker_registries
    actual = dia.run()
    assert actual['failed']
    assert "Unable to retrieve any docker registries." in actual['msg']
@pytest.mark.parametrize("message,extra_words", [
(
"docker image update failure",
["docker image update failure"],
),
(
"No package matching 'skopeo' found available, installed or updated",
["dependencies can be installed via `yum`"]
),
])
def test_skopeo_update_failure(task_vars, message, extra_words):
def execute_module(module_name=None, *_):
if module_name == "yum":
return {
"failed": True,
"msg": message,
}
return {}
task_vars['openshift_docker_additional_registries'] = ["unknown.io"]
task_vars['openshift_deployment_type'] = "openshift-enterprise"
check = DockerImageAvailability(execute_module, task_vars)
check._module_retry_interval = 0
actual = check.run()
assert actual["failed"]
for word in extra_words:
assert word in actual["msg"]
@pytest.mark.parametrize(
    "image, registries, connection_test_failed, skopeo_failed, "
    "expect_success, expect_registries_reached", [
        (
            "spam/eggs:v1", ["test.reg"],
            True, True,
            False,
            {"test.reg": False},
        ),
        (
            "spam/eggs:v1", ["test.reg"],
            False, True,
            False,
            {"test.reg": True},
        ),
        (
            "eggs.reg/spam/eggs:v1", ["test.reg"],
            False, False,
            True,
            {"eggs.reg": True},
        ),
    ])
def test_registry_availability(task_vars, image, registries, connection_test_failed,
                               skopeo_failed, expect_success, expect_registries_reached):
    """is_available_skopeo_image() tracks per-registry reachability and success.

    Fix: the module-level ``task_vars`` pytest fixture was previously being
    *called directly* (``task_vars()``); pytest >= 4 raises on direct fixture
    calls, so the fixture is now requested as a test argument instead.
    """
    def execute_module(module_name=None, *_):
        # wait_for models TCP reachability; command models skopeo inspect.
        if module_name == "wait_for":
            return dict(msg="msg", failed=connection_test_failed)
        elif module_name == "command":
            return dict(msg="msg", failed=skopeo_failed)

    check = DockerImageAvailability(execute_module, task_vars)
    check._module_retry_interval = 0

    available = check.is_available_skopeo_image(image, registries)
    assert available == expect_success
    assert expect_registries_reached == check.reachable_registries
@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
( # standard set of stuff required on nodes
"origin", False, ['oo_nodes_to_config'], None,
set([
'openshift/origin-pod:vtest',
'openshift/origin-deployer:vtest',
'openshift/origin-docker-registry:vtest',
'openshift/origin-haproxy-router:vtest',
'cockpit/kubernetes', # origin version of registry-console
])
),
( # set a different URL for images
"origin", False, ['oo_nodes_to_config'], 'foo.io/openshift/origin-${component}:${version}',
set([
'foo.io/openshift/origin-pod:vtest',
'foo.io/openshift/origin-deployer:vtest',
'foo.io/openshift/origin-docker-registry:vtest',
'foo.io/openshift/origin-haproxy-router:vtest',
'cockpit/kubernetes', # AFAICS this is not built from the URL
])
),
(
"origin", True, ['oo_nodes_to_config', 'oo_masters_to_config', 'oo_etcd_to_config'], None,
set([
# images running on top of openshift
'openshift/origin-pod:vtest',
'openshift/origin-deployer:vtest',
'openshift/origin-docker-registry:vtest',
'openshift/origin-haproxy-router:vtest',
'cockpit/kubernetes',
# containerized component images
'openshift/origin:vtest',
'openshift/node:vtest',
'openshift/openvswitch:vtest',
'registry.access.redhat.com/rhel7/etcd',
])
),
( # enterprise images
"openshift-enterprise", True, ['oo_nodes_to_config'], 'foo.io/openshift3/ose-${component}:f13ac45',
set([
'foo.io/openshift3/ose-pod:f13ac45',
'foo.io/openshift3/ose-deployer:f13ac45',
'foo.io/openshift3/ose-docker-registry:f13ac45',
'foo.io/openshift3/ose-haproxy-router:f13ac45',
# registry-console is not constructed/versioned the same as the others.
'registry.access.redhat.com/openshift3/registry-console',
# containerized images aren't built from oreg_url
'openshift3/node:vtest',
'openshift3/openvswitch:vtest',
])
),
(
"openshift-enterprise", True, ['oo_etcd_to_config', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45',
set([
'registry.access.redhat.com/rhel7/etcd',
# lb does not yet come in a containerized version
])
),
])
def test_required_images(deployment_type, is_containerized, groups, oreg_url, expected):
task_vars = dict(
openshift=dict(
common=dict(
is_containerized=is_containerized,
is_atomic=False,
),
),
openshift_deployment_type=deployment_type,
group_names=groups,
oreg_url=oreg_url,
openshift_image_tag='vtest',
)
assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
def test_containerized_etcd():
    """A containerized etcd-only host needs just the etcd system container image."""
    etcd_vars = dict(
        openshift=dict(
            common=dict(
                is_containerized=True,
            ),
        ),
        openshift_deployment_type="origin",
        group_names=['oo_etcd_to_config'],
    )
    check = DockerImageAvailability(task_vars=etcd_vars)
    assert check.required_images() == {'registry.access.redhat.com/rhel7/etcd'}
| EricMountain-1A/openshift-ansible | roles/openshift_health_checker/test/docker_image_availability_test.py | Python | apache-2.0 | 10,066 |
import os
from ecl.util.test import PathContext,TestAreaContext
from tests import EclTest
class PathContextTest(EclTest):
    """Tests for ecl.util.test.PathContext (create-on-enter, cleanup-on-exit)."""
    def test_error(self):
        """PathContext must refuse existing paths, unwritable parents, and files."""
        with TestAreaContext("pathcontext"):
            # Test failure on creating PathContext with an existing path
            os.makedirs("path/1")
            with self.assertRaises(OSError):
                with PathContext("path/1"):
                    pass
            # Test failure on non-writable path
            os.chmod("path/1", 0o0444)
            with self.assertRaises(OSError):
                with PathContext("path/1/subfolder"):
                    pass
            # Restore write permission so the file below can be created.
            os.chmod("path/1", 0o0744)
            # Test failure on creating PathContext with an existing file
            with open("path/1/file", "w") as f:
                f.write("xx")
            with self.assertRaises(OSError):
                with PathContext("path/1/file"):
                    pass
    def test_chdir(self):
        """Entering a PathContext changes CWD into the (created) directory."""
        with PathContext("/tmp/pc"):
            self.assertEqual(
                os.path.realpath(os.getcwd()),
                os.path.realpath("/tmp/pc")
            )
    def test_cleanup(self):
        """On exit, only the empty directories the context created are removed.

        "path/1" pre-existed and "path/1/next" now contains "file", so both
        survive; the empty "2/level" chain is cleaned up.
        """
        with TestAreaContext("pathcontext"):
            os.makedirs("path/1")
            with PathContext("path/1/next/2/level"):
                with open("../../file" , "w") as f:
                    f.write("Crap")
            self.assertTrue(os.path.isdir("path/1"))
            self.assertTrue(os.path.isdir("path/1/next"))
            self.assertFalse(os.path.isdir("path/1/next/2"))
| OPM/ResInsight | ThirdParty/Ert/python/tests/util_tests/test_path_context.py | Python | gpl-3.0 | 1,682 |
def directory_path(instance, filename):
    """Build the upload path ``user/<year>/<month>/<filename>``.

    Django ``upload_to`` callable: *instance* is the model instance being
    saved (unused here), *filename* the original upload name.
    """
    from datetime import datetime
    today = datetime.today()
    return 'user/%d/%d/%s' % (today.year, today.month, filename)
| MichelLacerda/django-for-clients | dfc/cauth/utils.py | Python | gpl-3.0 | 214 |
# -*- coding: utf-8 -*-
"""
"""
from __future__ import absolute_import
from contextlib import contextmanager
import imp
import posixpath
from zipfile import ZipFile
from click.testing import CliRunner
import pkginfo
import pytest
from six import PY3
def test_pyfile_compiled(packages, tmpdir):
    """The installed egg contains working byte-compiled modules."""
    packages.require_eggs('dist1')
    # Python >= 3.2 doesn't seem to run .pyc files from PEP 3147
    # (__pycache__) repository directories, so install the egg unzipped there.
    venv = packages.get_venv('dist1', unzip=PY3)
    assert venv.run("__import__('dist1').test_is_compiled()") == 0
@pytest.fixture
def dist1_metadata(packages):
    """pkginfo metadata parsed from the freshly built dist1 egg."""
    egg = packages.get_egg('dist1')
    return pkginfo.BDist(str(egg))
def test_summary(dist1_metadata):
    """The egg's PKG-INFO Summary matches dist1's setup metadata."""
    assert dist1_metadata.summary == "A dummy distribution"
def test_description(dist1_metadata):
    """The long description survives conversion, including non-ASCII text."""
    assert dist1_metadata.description.rstrip() \
        == u"Long description.\n\nGruß."
def test_script_wrapper(packages):
    """The generated console-script wrapper runs and propagates dist1's exit code."""
    packages.require_eggs('dist1')
    exit_code = packages.get_venv('dist1').call(['dist1_wrapper'])
    assert exit_code == 42
def test_old_style_script(packages):
    """A legacy (non-entry-point) script is installed and runnable."""
    packages.require_eggs('dist1')
    exit_code = packages.get_venv('dist1').call(['dist1_script'])
    assert exit_code == 42
def test_namespace_package(packages):
    """A module in a namespace package (dist2.plugins) imports across eggs."""
    packages.require_eggs('dist1', 'dist2')
    venv = packages.get_venv('dist2')
    # Exit with the imported value so the assertion below can observe it.
    prog = (
        'import sys\n'
        'from dist2.plugins.builtin import the_answer\n'
        'sys.exit(the_answer)\n'
    )
    assert venv.run(prog) == 42
def test_namespace_stubs_in_egg(packages):
    """The egg contains namespace-package stubs but no wheel-installer .pth files."""
    dist2_egg = packages.get_egg('dist2')
    dist2_stubs = with_byte_compiled(['dist2/__init__.py',
                                      'dist2/plugins/__init__.py'])
    with fileobj(ZipFile(str(dist2_egg))) as zf:
        files_in_egg = dist2_stubs.intersection(zf.namelist())
    # Make sure we generated the stubs (or not, depending on python
    # version)
    stubs_in_egg = files_in_egg.intersection(dist2_stubs)
    assert stubs_in_egg == dist2_stubs
    # Make sure we didn't copy the .pth file that the wheel installer
    # creates for the namespaces
    assert not any(fn.lower().endswith('.pth')
                   for fn in files_in_egg)
def test_extension(packages):
    """The compiled C extension imports and works inside the egg-based venv."""
    packages.require_eggs('extension_dist')
    venv = packages.get_venv('extension_dist')
    exit_code = venv.run("__import__('extension_dist').test_extension()")
    assert exit_code == 0
def test_eager_resources(packages):
    """eager_resources declared in setup.py are extracted and accessible."""
    packages.require_eggs('extension_dist')
    venv = packages.get_venv('extension_dist')
    exit_code = venv.run("__import__('extension_dist').test_eager_resources()")
    assert exit_code == 0
def test_extras(packages):
    """Installing dist1[extras] pulls in the extra dependency."""
    packages.require_eggs('dist1', 'extension_dist')
    venv = packages.get_venv('dist1[extras]')
    exit_code = venv.run("__import__('dist1').test_extras()")
    assert exit_code == 0
def test_no_extras(packages):
    """Installing plain dist1 leaves the extras dependency out."""
    packages.require_eggs('dist1', 'extension_dist')
    venv = packages.get_venv('dist1')
    exit_code = venv.run("__import__('dist1').test_no_extras()")
    assert exit_code == 0
def test_main(packages, tmpdir):
    """End-to-end CLI run: converting a wheel writes exactly one dist1 egg file."""
    from humpty import main
    wheel = packages.get_wheel('dist1')
    runner = CliRunner()
    result = runner.invoke(main, ['-d', str(tmpdir), str(wheel)])
    assert result.exit_code == 0
    eggs = list(tmpdir.listdir(fil="*.egg"))
    assert len(eggs) == 1
    egg = eggs[0]
    assert egg.isfile()
    assert egg.fnmatch("dist1-*")
def fileobj(fp):
    """Context manager that yields *fp* and closes it on exit (success or error).

    Implemented with :func:`contextlib.closing`, the stdlib equivalent of the
    hand-rolled ``@contextmanager`` try/finally wrapper it replaces.
    """
    from contextlib import closing
    return closing(fp)
def with_byte_compiled(paths):
    """ Augment PATHS with paths of byte-compiled files.

    For each ``*.py`` entry, adds the expected ``.pyc`` location: a PEP 3147
    ``__pycache__/<name>.<cache_tag>.pyc`` path when the interpreter defines
    a cache tag, otherwise (e.g. Python 2) a sibling ``<name>.pyc``.

    Fix: uses ``sys.implementation.cache_tag`` instead of the deprecated
    ``imp.get_tag()`` (the ``imp`` module was removed in Python 3.12); the
    ``getattr`` fallback keeps Python 2 behavior unchanged.
    """
    implementation = getattr(sys, 'implementation', None)
    cache_tag = getattr(implementation, 'cache_tag', None)
    compiled = set()
    for path in paths:
        head, tail = posixpath.split(path)
        root, ext = posixpath.splitext(tail)
        if ext == '.py':
            if cache_tag:
                root = '%s.%s' % (root, cache_tag)
                head = posixpath.join(head, '__pycache__')
            compiled.add(posixpath.join(head, root + '.pyc'))
    return compiled.union(paths)
| dairiki/humpty | tests/test_functional.py | Python | bsd-3-clause | 4,051 |
#!/usr/bin/env python
# -*- coding: utf-8
# Dmitry Abramov
# Python v. 2.7.9
from __future__ import print_function
import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import numpy as np
from preprocessing.tokenize_and_stem import tokenize_and_stem
# from Scraper import Scraper, search_articles
# from scrape.Scraper import Scraper, search_articles
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
#from config import api_urls, api_keys
NUM_OF_CLUSTERS = 7
class kMeansClustering:
    """
    News clustering with the KMeans method (TF-IDF features via sklearn).

    Fix: ``print_common_phrases`` previously printed only the
    "cluster #N tags:" prefix with ``end=' '`` (the payload print was
    commented out), producing a single run-together line with no tags;
    it now prints the phrases as well. Dead debug code and unused locals
    were also removed.
    """

    def __init__(self, snippets):
        """
        Args:
            snippets - list of strings (one news snippet each)
        """
        self.snippets = snippets
        self.clusters = []

    def find_clusters(self, n_clusters=NUM_OF_CLUSTERS):
        """
        Finding clusters.
        Requires sklearn library.

        Returns the cluster->snippet-indices dict from get_clusters(),
        or {} when there are fewer snippets than requested clusters.
        """
        if len(self.snippets) < n_clusters:
            print("Sorry, but number of snippets should be >= number of clusters")
            return {}
        # define vectorizer parameters
        tfidf_vectorizer = TfidfVectorizer(max_df=0.999, max_features=200000,
                                           min_df=0.001, stop_words='english',
                                           use_idf=True, tokenizer=tokenize_and_stem,
                                           ngram_range=(1, 1))
        # fit the vectorizer to the snippets
        tfidf_matrix = tfidf_vectorizer.fit_transform(self.snippets)
        km = KMeans(n_clusters=n_clusters)
        km.fit(tfidf_matrix)
        self.clusters = km.labels_.tolist()
        # Term indices per cluster, sorted by decreasing centroid weight.
        self.order_centroids = km.cluster_centers_.argsort()[:, ::-1]
        self.terms = tfidf_vectorizer.get_feature_names()
        return self.get_clusters()

    def get_common_phrases(self, num=2):
        """Return up to *num* representative (un-stemmed) terms per cluster."""
        def restemming(word, num_snippets):
            # Map a stemmed term back to a full token from the cluster's snippets.
            for num_snippet in num_snippets:
                tokenized_snippet = tokenize_and_stem(self.snippets[num_snippet], stem=0)
                for token in tokenized_snippet:
                    if token.find(word) != -1:
                        return token
            return ''
        clusters = self.get_clusters()  # hoisted: result only depends on self.clusters
        phrases = {}
        for i in range(len(clusters)):
            for ind in self.order_centroids[i, :num]:
                if i + 1 not in phrases:
                    phrases[i + 1] = []
                restem = restemming(self.terms[ind], clusters[i + 1])
                if restem != '' and len(phrases[i + 1]) < num:
                    phrases[i + 1].append(restem)
        return phrases

    def print_common_phrases(self, num=2):
        """Print the top *num* tag phrases for each cluster."""
        result = self.get_common_phrases(num=num)
        for cluster, phrases in result.items():
            print("cluster #%i tags: " % cluster, end=' ')
            print(phrases)

    def get_clusters(self):
        """
        Return:
            dict of elements-clusters.
            Keys: 1-based cluster ids
            Values: indices of snippets in the respective cluster
        """
        result = {}
        for i, cluster in enumerate(self.clusters):
            result.setdefault(cluster + 1, []).append(i)
        return result

    def print_clusters(self):
        """Print the snippet indices belonging to each cluster."""
        for cluster, snippets in self.get_clusters().items():
            print("cluster #%i contains documents: " % cluster, end=' ')
            print(snippets)
def main():
    """Demo entry point: fetch snippets for a query, cluster and print them."""
    # NOTE(review): the imports providing search_articles, api_urls and
    # api_keys are commented out at the top of this module, so running
    # main() as written raises NameError -- restore those imports (and the
    # config module) before using this entry point.
    query = "obama"
    snippets = search_articles(api_urls, api_keys, query)['snippets']
    if len(snippets) == 0:
        return
    km = kMeansClustering(snippets)
    km.find_clusters()
    km.print_clusters()
    km.print_common_phrases()
if __name__ == "__main__":
    main()
| ProjectRecommend/Recommend | recommend/classifier/cluster/algorithms/KMeans.py | Python | gpl-2.0 | 4,159 |
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import inspect
import subprocess
from contextlib import contextmanager
import torch
def int_tuple(s):
    """Parse a comma-separated string like '1,2,3' into a tuple of ints."""
    return tuple(map(int, s.split(',')))
def float_tuple(s):
    """Parse a comma-separated string like '1.5,2' into a tuple of floats."""
    return tuple(map(float, s.split(',')))
def str_tuple(s):
    """Split a comma-separated string into a tuple of its substrings."""
    parts = s.split(',')
    return tuple(parts)
def bool_flag(s):
    """Parse a '1'/'0' command-line flag into True/False.

    Raises ValueError for any other input.
    """
    flags = {'1': True, '0': False}
    if s in flags:
        return flags[s]
    msg = 'Invalid value "%s" for bool flag (should be 0 or 1)'
    raise ValueError(msg % s)
def lineno():
    """Return the source line number of the caller (handy for debug prints)."""
    caller = inspect.currentframe().f_back
    return caller.f_lineno
def get_gpu_memory():
    """Return used memory (in MiB, per nvidia-smi) of GPU 0 as an int.

    NOTE(review): shells out to ``nvidia-smi | grep`` and scrapes the text
    output, so this assumes the nvidia-smi report format (second line being
    "Used GPU Memory : <N> MiB") -- confirm against the driver version in
    use. shell=True is acceptable here because the command string is fixed.
    """
    # Flush pending GPU work so the reading reflects current allocations.
    torch.cuda.synchronize()
    opts = [
        'nvidia-smi', '-q', '--gpu=' + str(0), '|', 'grep', '"Used GPU Memory"'
    ]
    cmd = str.join(' ', opts)
    ps = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
    output = ps.communicate()[0].decode('utf-8')
    # Take the second matched line and split "Used GPU Memory : <N> MiB".
    output = output.split("\n")[1].split(":")
    consumed_mem = int(output[1].strip().split(" ")[0])
    return consumed_mem
@contextmanager
def timeit(msg, should_time=True):
    """Context manager printing the wall-clock duration of its body in ms.

    When *should_time* is False it is a no-op wrapper. CUDA is synchronized
    before and after the body so GPU work is included in the measurement.
    """
    if should_time:
        torch.cuda.synchronize()
        start = time.time()
    yield
    if should_time:
        torch.cuda.synchronize()
        elapsed_ms = (time.time() - start) * 1000.0
        print('%s: %.2f ms' % (msg, elapsed_ms))
class LossManager(object):
    """Accumulates a weighted total loss tensor plus per-loss scalar logs."""

    def __init__(self):
        self.total_loss = None   # running weighted sum (tensor), None until first add
        self.all_losses = {}     # name -> weighted scalar value (python float)

    def add_loss(self, loss, name, weight=1.0):
        """Add ``loss * weight`` to the running total and record its value."""
        weighted = loss * weight
        if self.total_loss is None:
            self.total_loss = weighted
        else:
            self.total_loss = self.total_loss + weighted
        self.all_losses[name] = weighted.data.cpu().item()

    def items(self):
        """Iterate (name, scalar value) pairs of all recorded losses."""
        return self.all_losses.items()
| google/sg2im | sg2im/utils.py | Python | apache-2.0 | 2,220 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Primary capsuls."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from monty.collections import AttrDict
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from stacked_capsule_autoencoders.capsules import math_ops
from stacked_capsule_autoencoders.capsules import neural
from stacked_capsule_autoencoders.capsules import prob
from stacked_capsule_autoencoders.capsules.tensor_ops import make_brodcastable
from tensorflow.contrib import resampler as contrib_resampler
tfd = tfp.distributions
class CapsuleImageEncoder(snt.AbstractModule):
"""Primary capsule for images."""
OutputTuple = collections.namedtuple( # pylint:disable=invalid-name
'PrimaryCapsuleTuple',
'pose feature presence presence_logit '
'img_embedding')
def __init__(self,
encoder,
n_caps,
n_caps_dims,
n_features=0,
noise_scale=4.,
similarity_transform=False,
encoder_type='linear',
**encoder_kwargs):
super(CapsuleImageEncoder, self).__init__()
self._encoder = encoder
self._n_caps = n_caps
self._n_caps_dims = n_caps_dims
self._n_features = n_features
self._noise_scale = noise_scale
self._similarity_transform = similarity_transform
self._encoder_type = encoder_type
self._encoder_kwargs = dict(
n_layers=2, n_heads=4, n_dims=32, layer_norm=False)
self._encoder_kwargs.update(encoder_kwargs)
def _build(self, x):
batch_size = x.shape[0]
img_embedding = self._encoder(x)
splits = [self._n_caps_dims, self._n_features, 1] # 1 for presence
n_dims = sum(splits)
if self._encoder_type == 'linear':
n_outputs = self._n_caps * n_dims
h = snt.BatchFlatten()(img_embedding)
h = snt.Linear(n_outputs)(h)
else:
h = snt.AddBias(bias_dims=[1, 2, 3])(img_embedding)
if self._encoder_type == 'conv':
h = snt.Conv2D(n_dims * self._n_caps, 1, 1)(h)
h = tf.reduce_mean(h, (1, 2))
h = tf.reshape(h, [batch_size, self._n_caps, n_dims])
elif self._encoder_type == 'conv_att':
h = snt.Conv2D(n_dims * self._n_caps + self._n_caps, 1, 1)(h)
h = snt.MergeDims(1, 2)(h)
h, a = tf.split(h, [n_dims * self._n_caps, self._n_caps], -1)
h = tf.reshape(h, [batch_size, -1, n_dims, self._n_caps])
a = tf.nn.softmax(a, 1)
a = tf.reshape(a, [batch_size, -1, 1, self._n_caps])
h = tf.reduce_sum(h * a, 1)
else:
raise ValueError('Invalid encoder type="{}".'.format(
self._encoder_type))
h = tf.reshape(h, [batch_size, self._n_caps, n_dims])
pose, feature, pres_logit = tf.split(h, splits, -1)
if self._n_features == 0:
feature = None
pres_logit = tf.squeeze(pres_logit, -1)
if self._noise_scale > 0.:
pres_logit += ((tf.random.uniform(pres_logit.shape) - .5)
* self._noise_scale)
pres = tf.nn.sigmoid(pres_logit)
pose = math_ops.geometric_transform(pose, self._similarity_transform)
return self.OutputTuple(pose, feature, pres, pres_logit, img_embedding)
def choose_nonlinearity(name):
nonlin = getattr(math_ops, name, getattr(tf.nn, name, None))
if not nonlin:
raise ValueError('Invalid nonlinearity: "{}".'.format(name))
return nonlin
class TemplateBasedImageDecoder(snt.AbstractModule):
"""Template-based primary capsule decoder for images."""
_templates = None
def __init__(self,
output_size,
template_size,
n_channels=1,
learn_output_scale=False,
colorize_templates=False,
output_pdf_type='mixture',
template_nonlin='relu1',
color_nonlin='relu1',
use_alpha_channel=False):
super(TemplateBasedImageDecoder, self).__init__()
self._output_size = output_size
self._template_size = template_size
self._n_channels = n_channels
self._learn_output_scale = learn_output_scale
self._colorize_templates = colorize_templates
self._output_pdf_type = output_pdf_type
self._template_nonlin = choose_nonlinearity(template_nonlin)
self._color_nonlin = choose_nonlinearity(color_nonlin)
self._use_alpha_channel = use_alpha_channel
@property
def templates(self):
self._ensure_is_connected()
return tf.squeeze(self._templates, 0)
@snt.reuse_variables
def make_templates(self, n_templates=None, template_feature=None):
if self._templates is not None:
if n_templates is not None and self._templates.shape[1] != n_templates:
raise ValueError
else:
with self._enter_variable_scope():
# create templates
n_dims = self._n_channels
template_shape = ([1, n_templates] + list(self._template_size) +
[n_dims])
n_elems = np.prod(template_shape[2:])
# make each templates orthogonal to each other at init
n = max(n_templates, n_elems)
q = np.random.uniform(size=[n, n])
q = np.linalg.qr(q)[0]
q = q[:n_templates, :n_elems].reshape(template_shape).astype(np.float32)
q = (q - q.min()) / (q.max() - q.min())
template_logits = tf.get_variable('templates', initializer=q)
# prevent negative ink
self._template_logits = template_logits
self._templates = self._template_nonlin(template_logits)
if self._use_alpha_channel:
self._templates_alpha = tf.get_variable(
'templates_alpha',
shape=self._templates[Ellipsis, :1].shape,
initializer=tf.zeros_initializer())
self._n_templates = n_templates
templates = self._templates
if template_feature is not None:
if self._colorize_templates:
mlp = snt.BatchApply(snt.nets.MLP([32, self._n_channels]))
template_color = mlp(template_feature)[:, :, tf.newaxis, tf.newaxis]
if self._color_nonlin == math_ops.relu1:
template_color += .99
template_color = self._color_nonlin(template_color)
templates = tf.identity(templates) * template_color
return templates
def _build(self,
pose,
presence=None,
template_feature=None,
bg_image=None,
img_embedding=None):
"""Builds the module.
Args:
pose: [B, n_templates, 6] tensor.
presence: [B, n_templates] tensor.
template_feature: [B, n_templates, n_features] tensor; these features are
used to change templates based on the input, if present.
bg_image: [B, *output_size] tensor representing the background.
img_embedding: [B, d] tensor containing image embeddings.
Returns:
[B, n_templates, *output_size, n_channels] tensor.
"""
batch_size, n_templates = pose.shape[:2].as_list()
templates = self.make_templates(n_templates, template_feature)
if templates.shape[0] == 1:
templates = snt.TileByDim([0], [batch_size])(templates)
# it's easier for me to think in inverse coordinates
warper = snt.AffineGridWarper(self._output_size, self._template_size)
warper = warper.inverse()
grid_coords = snt.BatchApply(warper)(pose)
resampler = snt.BatchApply(contrib_resampler.resampler)
transformed_templates = resampler(templates, grid_coords)
if bg_image is not None:
bg_image = tf.expand_dims(bg_image, axis=1)
else:
bg_image = tf.nn.sigmoid(tf.get_variable('bg_value', shape=[1]))
bg_image = tf.zeros_like(transformed_templates[:, :1]) + bg_image
transformed_templates = tf.concat([transformed_templates, bg_image], axis=1)
if presence is not None:
presence = tf.concat([presence, tf.ones([batch_size, 1])], axis=1)
if True: # pylint: disable=using-constant-test
if self._use_alpha_channel:
template_mixing_logits = snt.TileByDim([0], [batch_size])(
self._templates_alpha)
template_mixing_logits = resampler(template_mixing_logits, grid_coords)
bg_mixing_logit = tf.nn.softplus(
tf.get_variable('bg_mixing_logit', initializer=[0.]))
bg_mixing_logit = (
tf.zeros_like(template_mixing_logits[:, :1]) + bg_mixing_logit)
template_mixing_logits = tf.concat(
[template_mixing_logits, bg_mixing_logit], 1)
else:
temperature_logit = tf.get_variable('temperature_logit', shape=[1])
temperature = tf.nn.softplus(temperature_logit + .5) + 1e-4
template_mixing_logits = transformed_templates / temperature
scale = 1.
if self._learn_output_scale:
scale = tf.get_variable('scale', shape=[1])
scale = tf.nn.softplus(scale) + 1e-4
if self._output_pdf_type == 'mixture':
template_mixing_logits += make_brodcastable(
math_ops.safe_log(presence), template_mixing_logits)
rec_pdf = prob.MixtureDistribution(template_mixing_logits,
[transformed_templates, scale],
tfd.Normal)
else:
raise ValueError('Unknown pdf type: "{}".'.format(self._output_pdf_type))
return AttrDict(
raw_templates=tf.squeeze(self._templates, 0),
transformed_templates=transformed_templates[:, :-1],
mixing_logits=template_mixing_logits[:, :-1],
pdf=rec_pdf)
| google-research/google-research | stacked_capsule_autoencoders/capsules/primary.py | Python | apache-2.0 | 10,152 |
# encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python3/dist-packages/PyKDE4/kdeui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
class KPixmapProvider():  # skipped bases: <class 'sip.wrapper'>
    """Generated stub for PyKDE4's KPixmapProvider (real signatures unknown)."""

    def pixmapFor(self, *a, **kw):  # real signature unknown
        return None

    def __init__(self, *a, **kw):  # real signature unknown
        return None

    # Generated stubs emulate weak-reference support with a no-op property.
    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """list of weak references to the object (if defined)"""
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdeui/KPixmapProvider.py | Python | gpl-2.0 | 722 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START mod1b_flask]
from flask import Flask, render_template, request
from google.appengine.api import wrap_wsgi_app
from google.appengine.ext import ndb
# Create the Flask app and wrap its WSGI layer so the legacy App Engine
# bundled services (here: ndb/Datastore) are available to request handlers.
app = Flask(__name__)
app.wsgi_app = wrap_wsgi_app(app.wsgi_app)
class Visit(ndb.Model):
    'Visit entity registers visitor IP address & timestamp'
    # "IP: user-agent" string assembled by store_visit() below.
    visitor = ndb.StringProperty()
    # Set automatically by ndb when the entity is first written.
    timestamp = ndb.DateTimeProperty(auto_now_add=True)
def store_visit(remote_addr, user_agent):
    'create new Visit entity in Datastore'
    visitor_record = '{}: {}'.format(remote_addr, user_agent)
    Visit(visitor=visitor_record).put()
def fetch_visits(limit):
    'get most recent visits'
    newest_first = Visit.query().order(-Visit.timestamp)
    return newest_first.fetch(limit)
@app.route('/')
def root():
    'main application (GET) handler'
    store_visit(request.remote_addr, request.user_agent)
    recent = fetch_visits(10)
    return render_template('index.html', visits=recent)
# [END mod1b_flask]
| googlecodelabs/migrate-python2-appengine | mod1b-flask/main.py | Python | apache-2.0 | 1,472 |
from basic import Basic
from sympify import _sympify
from cache import cacheit
from symbol import Symbol, Wild
from sympy import mpmath
from math import log as _log
def integer_nthroot(y, n):
    """
    Return a tuple containing x = floor(y**(1/n))
    and a boolean indicating whether the result is exact (that is,
    whether x**n == y).

    >>> integer_nthroot(16,2)
    (4, True)
    >>> integer_nthroot(26,2)
    (5, False)
    """
    if y < 0:
        raise ValueError("y must be nonnegative")
    if n < 1:
        raise ValueError("n must be positive")
    # Trivial cases: 0**(1/n)=0, 1**(1/n)=1, and any y**(1/1)=y exactly.
    if y in (0, 1) or n == 1:
        return y, True
    if n == 2:
        x, rem = mpmath.libmpf.sqrtrem(y)
        return int(x), not rem
    if n > y:
        return 1, False
    # Initial floating-point estimate; fall back to a shifted power of two
    # when y is too large to convert to a float.
    try:
        guess = int(y ** (1. / n) + 0.5)
    except OverflowError:
        expt = _log(y, 2) / n
        if expt > 53:
            shift = int(expt - 53)
            guess = int(2.0 ** (expt - shift) + 1) << shift
        else:
            guess = int(2.0 ** expt)
    if guess > 2 ** 50:
        # Refine the estimate with an integer Newton iteration.
        xprev, x = -1, guess
        while True:
            t = x ** (n - 1)
            xprev, x = x, ((n - 1) * x + y // t) // n
            if abs(x - xprev) < 2:
                break
    else:
        x = guess
    # Compensate for any remaining off-by-one in the estimate.
    t = x ** n
    while t < y:
        x += 1
        t = x ** n
    while t > y:
        x -= 1
        t = x ** n
    return x, t == y
class Pow(Basic):
is_Pow = True
__slots__ = ['is_commutative']
@cacheit
def __new__(cls, b, e, **assumptions):
b = _sympify(b)
e = _sympify(e)
if assumptions.get('evaluate') is False:
return Basic.__new__(cls, b, e, **assumptions)
if e is S.Zero:
return S.One
if e is S.One:
return b
obj = b._eval_power(e)
if obj is None:
obj = Basic.__new__(cls, b, e, **assumptions)
obj.is_commutative = (b.is_commutative and e.is_commutative)
return obj
@property
def base(self):
return self._args[0]
@property
def exp(self):
return self._args[1]
def _eval_power(self, other):
if other == S.NegativeOne:
return Pow(self.base, self.exp * other)
if self.exp.is_integer and other.is_integer:
return Pow(self.base, self.exp * other)
if self.base.is_nonnegative and self.exp.is_real and other.is_real:
return Pow(self.base, self.exp * other)
if self.exp.is_even and self.base.is_real:
return Pow(abs(self.base), self.exp * other)
if self.exp.is_real and other.is_real and abs(self.exp) < S.One:
return Pow(self.base, self.exp * other)
return
def _eval_is_comparable(self):
c1 = self.base.is_comparable
if c1 is None: return
c2 = self.exp.is_comparable
if c2 is None: return
return c1 and c2
def _eval_is_even(self):
if self.exp.is_integer and self.exp.is_positive:
if self.base.is_even:
return True
if self.base.is_integer:
return False
def _eval_is_positive(self):
if self.base.is_positive:
if self.exp.is_real:
return True
elif self.base.is_negative:
if self.exp.is_even:
return True
if self.exp.is_odd:
return False
elif self.base.is_nonpositive:
if self.exp.is_odd:
return False
def _eval_is_negative(self):
if self.base.is_negative:
if self.exp.is_odd:
return True
if self.exp.is_even:
return False
elif self.base.is_positive:
if self.exp.is_real:
return False
elif self.base.is_nonnegative:
if self.exp.is_real:
return False
elif self.base.is_nonpositive:
if self.exp.is_even:
return False
elif self.base.is_real:
if self.exp.is_even:
return False
def _eval_is_integer(self):
c1 = self.base.is_integer
c2 = self.exp.is_integer
if c1 is None or c2 is None:
return None
if not c1:
if self.exp.is_nonnegative:
return False
if c1 and c2:
if self.exp.is_nonnegative or self.exp.is_positive:
return True
if self.exp.is_negative:
return False
def _eval_is_real(self):
c1 = self.base.is_real
if c1 is None: return
c2 = self.exp.is_real
if c2 is None: return
if c1 and c2:
if self.base.is_positive:
return True
else: # negative or zero (or positive)
if self.exp.is_integer:
return True
elif self.base.is_negative:
if self.exp.is_Rational:
return False
def _eval_is_odd(self):
if not (self.base.is_integer and self.exp.is_nonnegative): return
return self.base.is_odd
def _eval_is_bounded(self):
if self.exp.is_negative:
if self.base.is_infinitesimal:
return False
if self.base.is_unbounded:
return True
c1 = self.base.is_bounded
if c1 is None: return
c2 = self.exp.is_bounded
if c2 is None: return
if c1 and c2:
if self.exp.is_nonnegative:
return True
def _eval_subs(self, old, new):
if self==old: return new
if isinstance(old, self.__class__) and self.base==old.base:
coeff1,terms1 = self.exp.as_coeff_terms()
coeff2,terms2 = old.exp.as_coeff_terms()
if terms1==terms2: return new ** (coeff1/coeff2) # (x**(2*y)).subs(x**(3*y),z) -> z**(2/3*y)
if old.func is C.exp:
coeff1,terms1 = old.args[0].as_coeff_terms()
coeff2,terms2 = (self.exp * C.log(self.base)).as_coeff_terms()
if terms1==terms2: return new ** (coeff1/coeff2) # (x**(2*y)).subs(exp(3*y*log(x)),z) -> z**(2/3*y)
return self.base._eval_subs(old, new) ** self.exp._eval_subs(old, new)
def as_powers_dict(self):
return { self.base : self.exp }
def as_base_exp(self):
if self.base.is_Rational and self.base.p==1:
return 1/self.base, -self.exp
return self.base, self.exp
def _eval_conjugate(self):
from sympy.functions.elementary.complexes import conjugate as c
return c(self.base)**self.exp
def _eval_expand_basic(self, deep=True, **hints):
sargs, terms = self.args[:], []
for term in sargs:
if hasattr(term, '_eval_expand_basic'):
newterm = term._eval_expand_basic(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_power_exp(self, deep=True, *args, **hints):
"""a**(n+m) -> a**n*a**m"""
if deep:
b = self.base.expand(deep=deep, **hints)
e = self.exp.expand(deep=deep, **hints)
else:
b = self.base
e = self.exp
if e.is_Add:
expr = 1
for x in e.args:
if deep:
x = x.expand(deep=deep, **hints)
expr *= (self.base**x)
return expr
return b**e
def _eval_expand_power_base(self, deep=True, **hints):
"""(a*b)**n -> a**n * b**n"""
b = self.base
if deep:
e = self.exp.expand(deep=deep, **hints)
else:
e = self.exp
if b.is_Mul:
if deep:
return Mul(*(Pow(t.expand(deep=deep, **hints), e)\
for t in b.args))
else:
return Mul(*(Pow(t, e) for t in b.args))
else:
return b**e
def _eval_expand_mul(self, deep=True, **hints):
sargs, terms = self.args[:], []
for term in sargs:
if hasattr(term, '_eval_expand_mul'):
newterm = term._eval_expand_mul(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_multinomial(self, deep=True, **hints):
"""(a+b+..) ** n -> a**n + n*a**(n-1)*b + .., n is nonzero integer"""
if deep:
b = self.base.expand(deep=deep, **hints)
e = self.exp.expand(deep=deep, **hints)
else:
b = self.base
e = self.exp
if b is None:
base = self.base
else:
base = b
if e is None:
exp = self.exp
else:
exp = e
if e is not None or b is not None:
result = base**exp
if result.is_Pow:
base, exp = result.base, result.exp
else:
return result
else:
result = None
if exp.is_Integer and exp.p > 0 and base.is_Add:
n = int(exp)
if base.is_commutative:
order_terms, other_terms = [], []
for order in base.args:
if order.is_Order:
order_terms.append(order)
else:
other_terms.append(order)
if order_terms:
# (f(x) + O(x^n))^m -> f(x)^m + m*f(x)^{m-1} *O(x^n)
f = Add(*other_terms)
g = (f**(n-1)).expand()
return (f*g).expand() + n*g*Add(*order_terms)
if base.is_number:
# Efficiently expand expressions of the form (a + b*I)**n
# where 'a' and 'b' are real numbers and 'n' is integer.
a, b = base.as_real_imag()
if a.is_Rational and b.is_Rational:
if not a.is_Integer:
if not b.is_Integer:
k = (a.q * b.q) ** n
a, b = a.p*b.q, a.q*b.p
else:
k = a.q ** n
a, b = a.p, a.q*b
elif not b.is_Integer:
k = b.q ** n
a, b = a*b.q, b.p
else:
k = 1
a, b, c, d = int(a), int(b), 1, 0
while n:
if n & 1:
c, d = a*c-b*d, b*c+a*d
n -= 1
a, b = a*a-b*b, 2*a*b
n //= 2
I = S.ImaginaryUnit
if k == 1:
return c + I*d
else:
return Integer(c)/k + I*d/k
p = other_terms
# (x+y)**3 -> x**3 + 3*x**2*y + 3*x*y**2 + y**3
# in this particular example:
# p = [x,y]; n = 3
# so now it's easy to get the correct result -- we get the
# coefficients first:
from sympy import multinomial_coefficients
expansion_dict = multinomial_coefficients(len(p), n)
# in our example: {(3, 0): 1, (1, 2): 3, (0, 3): 1, (2, 1): 3}
# and now construct the expression.
# An elegant way would be to use Poly, but unfortunately it is
# slower than the direct method below, so it is commented out:
#b = {}
#for k in expansion_dict:
# b[k] = Integer(expansion_dict[k])
#return Poly(b, *p).as_basic()
from sympy.polys.polynomial import multinomial_as_basic
result = multinomial_as_basic(expansion_dict, *p)
return result
else:
if n == 2:
return Add(*[f*g for f in base.args for g in base.args])
else:
multi = (base**(n-1))._eval_expand_multinomial(deep=False)
if multi.is_Add:
return Add(*[f*g for f in base.args for g in base.args])
else:
return Add(*[f*multi for f in base.args])
elif exp.is_Integer and exp.p < 0 and base.is_Add:
return 1 / Pow(base, -exp.p)._eval_expand_multinomial(deep=False)
elif exp.is_Add and base.is_Number:
# a + b a b
# n --> n n , where n, a, b are Numbers
coeff, tail = S.One, S.Zero
for term in exp.args:
if term.is_Number:
coeff *= base**term
else:
tail += term
return coeff * base**tail
else:
return result
def _eval_expand_log(self, deep=True, **hints):
sargs, terms = self.args[:], []
for term in sargs:
if hasattr(term, '_eval_expand_log'):
newterm = term._eval_expand_log(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_complex(self, deep=True, **hints):
if self.exp.is_Integer:
exp = self.exp
re, im = self.base.as_real_imag()
if exp >= 0:
base = re + S.ImaginaryUnit*im
else:
mag = re**2 + im**2
base = re/mag - S.ImaginaryUnit*(im/mag)
exp = -exp
return (base**exp).expand()
elif self.exp.is_Rational:
# NOTE: This is not totally correct since for x**(p/q) with
# x being imaginary there are actually q roots, but
# only a single one is returned from here.
re, im = self.base.as_real_imag()
r = (re**2 + im**2)**S.Half
t = C.atan2(im, re)
rp, tp = r**self.exp, t*self.exp
return rp*C.cos(tp) + rp*C.sin(tp)*S.ImaginaryUnit
else:
if deep:
hints['complex'] = False
return C.re(self.expand(deep, **hints)) + \
S.ImaginaryUnit*C.im(self. expand(deep, **hints))
else:
return C.re(self) + S.ImaginaryUnit*C.im(self)
return C.re(self) + S.ImaginaryUnit*C.im(self)
def _eval_expand_trig(self, deep=True, **hints):
sargs, terms = self.args[:], []
for term in sargs:
if hasattr(term, '_eval_expand_trig'):
newterm = term._eval_expand_trig(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_func(self, deep=True, **hints):
sargs, terms = self.args[:], []
for term in sargs:
if hasattr(term, '_eval_expand_func'):
newterm = term._eval_expand_func(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_derivative(self, s):
dbase = self.base.diff(s)
dexp = self.exp.diff(s)
return self * (dexp * C.log(self.base) + dbase * self.exp/self.base)
def _eval_evalf(self, prec):
base, exp = self.as_base_exp()
base = base._evalf(prec)
if not exp.is_Integer:
exp = exp._evalf(prec)
if exp < 0 and not base.is_real:
base = base.conjugate() / (base * base.conjugate())._evalf(prec)
exp = -exp
return (base ** exp).expand()
@cacheit
def count_ops(self, symbolic=True):
if symbolic:
return Add(*[t.count_ops(symbolic) for t in self.args]) + Symbol('POW')
return Add(*[t.count_ops(symbolic) for t in self.args]) + 1
def _eval_is_polynomial(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
# it would be nice to have is_nni working
return self.base._eval_is_polynomial(syms) and \
self.exp.is_nonnegative and \
self.exp.is_integer
else:
return True
def as_numer_denom(self):
base, exp = self.as_base_exp()
c,t = exp.as_coeff_terms()
n,d = base.as_numer_denom()
negate = False
if exp.is_integer != True:
if d.is_negative == True:
# Roots need to take care that negative denominators behave
# differently than the rest of the complex plane.
negate = True
elif d.is_negative is None:
# Can make no conclusions.
return self, S(1)
if c.is_negative == True:
exp = -exp
n,d = d,n
num = n ** exp
den = d ** exp
if negate:
num = -num
return num, den
def matches(pattern, expr, repl_dict={}, evaluate=False):
if evaluate:
pat = pattern
for old,new in repl_dict.items():
pat = pat.subs(old, new)
if pat!=pattern:
return pat.matches(expr, repl_dict)
expr = _sympify(expr)
b, e = expr.as_base_exp()
# special case, pattern = 1 and expr.exp can match to 0
if expr is S.One:
d = repl_dict.copy()
d = pattern.exp.matches(S.Zero, d, evaluate=False)
if d is not None:
return d
d = repl_dict.copy()
d = pattern.base.matches(b, d, evaluate=False)
if d is None:
return None
d = pattern.exp.matches(e, d, evaluate=True)
if d is None:
return Basic.matches(pattern, expr, repl_dict, evaluate)
return d
def _eval_nseries(self, x, x0, n):
from sympy import powsimp, collect
def geto(e):
"Returns the O(..) symbol, or None if there is none."
if e.is_Order:
return e
if e.is_Add:
for x in e.args:
if x.is_Order:
return x
def getn(e):
"""
Returns the order of the expression "e".
The order is determined either from the O(...) term. If there
is no O(...) term, it returns None.
Example:
>>> getn(1+x+O(x**2))
2
>>> getn(1+x)
>>>
"""
o = geto(e)
if o is None:
return None
else:
o = o.expr
if o.is_Symbol:
return Integer(1)
if o.is_Pow:
return o.args[1]
n, d = o.as_numer_denom()
if isinstance(d, log):
# i.e. o = x**2/log(x)
if n.is_Symbol:
return Integer(1)
if n.is_Pow:
return n.args[1]
raise NotImplementedError()
base, exp = self.args
if exp.is_Integer:
if exp > 0:
# positive integer powers are easy to expand, e.g.:
# sin(x)**4 = (x-x**3/3+...)**4 = ...
return (base.nseries(x, x0, n) ** exp)._eval_expand_multinomial(deep = False)
elif exp == -1:
# this is also easy to expand using the formula:
# 1/(1 + x) = 1 + x + x**2 + x**3 ...
# so we need to rewrite base to the form "1+x"
from sympy import log
if base.has(log(x)):
# we need to handle the log(x) singularity:
assert x0 == 0
y = Symbol("y", dummy=True)
p = self.subs(log(x), -1/y)
if not p.has(x):
p = p.nseries(y, x0, n)
p = p.subs(y, -1/log(x))
return p
base = base.nseries(x, x0, n)
if base.has(log(x)):
# we need to handle the log(x) singularity:
assert x0 == 0
y = Symbol("y", dummy=True)
self0 = 1/base
p = self0.subs(log(x), -1/y)
if not p.has(x):
p = p.nseries(y, x0, n)
p = p.subs(y, -1/log(x))
return p
prefactor = base.as_leading_term(x)
# express "rest" as: rest = 1 + k*x**l + ... + O(x**n)
rest = ((base-prefactor)/prefactor)._eval_expand_mul()
if rest == 0:
# if prefactor == w**4 + x**2*w**4 + 2*x*w**4, we need to
# factor the w**4 out using collect:
return 1/collect(prefactor, x)
if rest.is_Order:
return (1+rest)/prefactor
n2 = getn(rest)
if n2 is not None:
n = n2
term2 = collect(rest.as_leading_term(x), x)
k, l = Wild("k"), Wild("l")
r = term2.match(k*x**l)
k, l = r[k], r[l]
if l.is_Rational and l>0:
pass
elif l.is_number and l>0:
l = l.evalf()
else:
raise NotImplementedError()
from sympy.functions import ceiling
terms = [1/prefactor]
for m in xrange(1,ceiling(n/l)):
new_term = terms[-1]*(-rest)
if new_term.is_Pow:
new_term = new_term._eval_expand_multinomial(deep = False)
else:
new_term = new_term._eval_expand_mul(deep = False)
terms.append(new_term)
r = Add(*terms)
if n2 is None:
# Append O(...) because it is not included in "r"
from sympy import O
r += O(x**n)
return powsimp(r, deep=True, combine='exp')
else:
# negative powers are rewritten to the cases above, for example:
# sin(x)**(-4) = 1/( sin(x)**4) = ...
# and expand the denominator:
denominator = (base**(-exp)).nseries(x, x0, n)
if 1/denominator == self:
return self
# now we have a type 1/f(x), that we know how to expand
return (1/denominator).nseries(x, x0, n)
if exp.has(x):
import sympy
return sympy.exp(exp*sympy.log(base)).nseries(x, x0, n)
if base == x:
return powsimp(self, deep=True, combine='exp')
order = C.Order(x**n, x)
x = order.symbols[0]
e = self.exp
b = self.base
ln = C.log
exp = C.exp
if e.has(x):
return exp(e * ln(b)).nseries(x, x0, n)
if b==x:
return self
b0 = b.limit(x,0)
if b0 is S.Zero or b0.is_unbounded:
lt = b.as_leading_term(x)
o = order * lt**(1-e)
bs = b.nseries(x, x0, n-e)
if bs.is_Add:
bs = bs.removeO()
if bs.is_Add:
# bs -> lt + rest -> lt * (1 + (bs/lt - 1))
return (lt**e * ((bs/lt).expand()**e).nseries(x,
x0, n-e)).expand() + order
return bs**e+order
o2 = order * (b0**-e)
# b -> b0 + (b-b0) -> b0 * (1 + (b/b0-1))
z = (b/b0-1)
#r = self._compute_oseries3(z, o2, self.taylor_term)
x = o2.symbols[0]
ln = C.log
o = C.Order(z, x)
if o is S.Zero:
r = (1+z)
else:
if o.expr.is_number:
e2 = ln(o2.expr*x)/ln(x)
else:
e2 = ln(o2.expr)/ln(o.expr)
n = e2.limit(x,0) + 1
if n.is_unbounded:
# requested accuracy gives infinite series,
# order is probably nonpolynomial e.g. O(exp(-1/x), x).
r = (1+z)
else:
try:
n = int(n)
except TypeError:
#well, the n is something more complicated (like 1+log(2))
n = int(n.evalf()) + 1
assert n>=0,`n`
l = []
g = None
for i in xrange(n+2):
g = self.taylor_term(i, z, g)
g = g.nseries(x, x0, n)
l.append(g)
r = Add(*l)
return r * b0**e + order
def _eval_as_leading_term(self, x):
if not self.exp.has(x):
return self.base.as_leading_term(x) ** self.exp
return C.exp(self.exp * C.log(self.base)).as_leading_term(x)
@cacheit
def taylor_term(self, n, x, *previous_terms): # of (1+x)**e
if n<0: return S.Zero
x = _sympify(x)
return C.Binomial(self.exp, n) * x**n
def _sage_(self):
return self.args[0]._sage_() ** self.args[1]._sage_()
from basic import Basic, S, C
from add import Add
from numbers import Integer
from mul import Mul
| KevinGoodsell/sympy | sympy/core/power.py | Python | bsd-3-clause | 25,862 |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible.module_utils.basic import AnsibleFallbackNotFound
from ansible.module_utils.iosxr import iosxr_argument_spec
from ansible.module_utils.six import iteritems
from ansible.plugins.action.normal import ActionModule as _ActionModule
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
    """Action plugin that rewrites a connection=local iosxr task to run over
    a persistent network_cli connection built from the task's provider."""

    def run(self, tmp=None, task_vars=None):
        """Validate the connection, open (or reuse) a persistent CLI session,
        then delegate to the normal action module."""
        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                    'got %s' % self._play_context.connection
            )

        provider = self.load_provider()

        # Build a play context describing the network_cli connection, filling
        # gaps in the provider from the task's own play context.
        pc = copy.deepcopy(self._play_context)
        pc.connection = 'network_cli'
        pc.network_os = 'iosxr'
        pc.remote_addr = provider['host'] or self._play_context.remote_addr
        pc.port = provider['port'] or self._play_context.port or 22
        pc.remote_user = provider['username'] or self._play_context.connection_user
        pc.password = provider['password'] or self._play_context.password
        pc.timeout = provider['timeout'] or self._play_context.timeout

        display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

        socket_path = connection.run()
        display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
        if not socket_path:
            return {'failed': True,
                    'msg': 'unable to open shell. Please see: ' +
                           'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

        # make sure we are in the right cli context which should be
        # enable mode and not config module
        rc, out, err = connection.exec_command('prompt()')
        while str(out).strip().endswith(')#'):
            display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
            connection.exec_command('exit')
            rc, out, err = connection.exec_command('prompt()')

        task_vars['ansible_socket'] = socket_path
        return super(ActionModule, self).run(tmp, task_vars)

    def load_provider(self):
        """Assemble the provider dict from task args, falling back to the
        argument-spec fallbacks and finally to None per key."""
        provider = self._task.args.get('provider', {})
        for key, value in iteritems(iosxr_argument_spec):
            if key == 'provider' or key in provider:
                continue
            if key in self._task.args:
                provider[key] = self._task.args[key]
            elif 'fallback' in value:
                provider[key] = self._fallback(value['fallback'])
            else:
                provider[key] = None
        return provider

    def _fallback(self, fallback):
        """Invoke a fallback strategy spec of the form (callable, args, kwargs);
        returns None when the fallback value cannot be found."""
        strategy = fallback[0]
        args = []
        kwargs = {}
        for extra in fallback[1:]:
            if isinstance(extra, dict):
                kwargs = extra
            else:
                args = extra
        try:
            return strategy(*args, **kwargs)
        except AnsibleFallbackNotFound:
            pass
| tux-00/ansible | lib/ansible/plugins/action/iosxr.py | Python | gpl-3.0 | 4,032 |
import numpy as np
import sklearn
import sklearn.datasets
import matplotlib.pyplot as plt
import math
import tensorflow as tf
def get_data():
    """Generate a noisy two-moons dataset and split it 400/100/100 into
    train, cross-validation and test sets."""
    X, y = sklearn.datasets.make_moons(600, noise=0.30)
    y = y.reshape([600, 1])
    train, cv = 400, 500
    return (X[:train], y[:train],
            X[train:cv], y[train:cv],
            X[cv:], y[cv:])
class NN():
    """Fully-connected feed-forward binary classifier on the TF1 graph API.

    ``layer_sizes[0]`` is the input width, ``layer_sizes[-1]`` the output
    width; hidden layers use ``hidden_activation``, the output layer a
    sigmoid. Trained with Nesterov momentum on sigmoid cross-entropy plus
    L2 weight decay.
    """

    def __init__(self, layer_sizes, hidden_activation='relu', l2_reg=0.0005, learning_rate=0.005, momentum=0.9):
        self.nlayers = len(layer_sizes) - 1
        self.X = tf.placeholder(tf.float32, shape=[None, layer_sizes[0]], name="X")
        self.Y = tf.placeholder(tf.float32, shape=[None, layer_sizes[-1]], name="Y")
        # Dispatch table for hidden activations. BUGFIX: the original did
        # ``raise('Unrecognised ...')`` — raising a plain string is a
        # TypeError in Python 3, which hid the real error message.
        activations = {
            'sigmoid': tf.nn.sigmoid,
            'tanh': tf.nn.tanh,
            'relu': tf.nn.relu,
        }
        if hidden_activation not in activations:
            raise ValueError('Unrecognised activation function {}'.format(hidden_activation))
        activation_fn = activations[hidden_activation]
        prev_A = self.X
        self.wl2 = 0
        for l in range(self.nlayers):
            n_in = layer_sizes[l]
            n_out = layer_sizes[l+1]
            # Glorot-style scale so activations keep a sane variance at init.
            scale = math.sqrt(2/(n_in + n_out))
            W = tf.Variable(tf.random_normal([n_in, n_out]) * scale, name='W{}'.format(l+1))
            b = tf.Variable(tf.zeros([n_out]), name='b{}'.format(l+1))
            Z = tf.add(tf.matmul(prev_A, W), b, name='Z{}'.format(l+1))
            if l + 1 < self.nlayers:
                A = activation_fn(Z, name='A{}'.format(l+1))
            else:
                # Final layer: sigmoid probability of the positive class.
                A = tf.nn.sigmoid(Z, name='Y_prob')
            self.wl2 = self.wl2 + tf.nn.l2_loss(W)
            prev_A = A
        self.logits = Z
        self.Y_pred = tf.cast(tf.greater(A, 0.5, name='Y_pred'), 'float')
        self.cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.Y))
        self.cost_plus_wl2 = self.cost + l2_reg * self.wl2
        self.correct = tf.equal(self.Y_pred, self.Y)
        self.accuracy = tf.reduce_mean(tf.cast(self.correct, 'float'))
        self.optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True)
        self.train = self.optimizer.minimize(self.cost_plus_wl2)

    def __train_on_batch(self, X_batch, Y_batch):
        """Run one optimizer step; return (cost, accuracy) for the batch."""
        _, cost, accuracy = self.sess.run([self.train, self.cost, self.accuracy],
                                          feed_dict={self.X: X_batch, self.Y: Y_batch})
        return (cost, accuracy)

    def fit(self, X_train, y_train, X_cv, y_cv, epochs, batch_size):
        """Train with shuffled mini-batches; returns a history dict with
        per-epoch 'acc'/'val_acc'/'cost'/'val_cost' lists.

        Side effect: opens (and keeps) a tf.Session on ``self.sess``.
        """
        train_accuracies = []
        val_accuracies = []
        train_costs = []
        val_costs = []
        self.sess = tf.Session()
        init = tf.global_variables_initializer()
        self.sess.run(init)
        nbatches = int(X_train.shape[0]/batch_size)
        for epoch in range(epochs):
            # Fresh shuffle each epoch.
            batch_idx = np.random.permutation(X_train.shape[0])
            train_cost = 0.0
            train_accuracy = 0.0
            for b in range(nbatches):
                this_batch = batch_idx[b*batch_size:(b+1)*batch_size]
                X_batch = X_train[this_batch, :]
                Y_batch = y_train[this_batch, :]
                batch_cost, batch_accuracy = self.__train_on_batch(X_batch, Y_batch)
                train_cost += batch_cost
                train_accuracy += batch_accuracy
            # Average metrics over the epoch's batches.
            train_cost /= nbatches
            train_accuracy /= nbatches
            val_cost, val_accuracy = self.sess.run([self.cost, self.accuracy],
                                                   feed_dict={self.X: X_cv, self.Y: y_cv})
            train_accuracies.append(train_accuracy)
            val_accuracies.append(val_accuracy)
            train_costs.append(train_cost)
            val_costs.append(val_cost)
        return { 'acc': train_accuracies, 'val_acc': val_accuracies,
                 'cost': train_costs, 'val_cost': val_costs }

    def predict_classes(self, X_batch):
        """Return the 0/1 class predictions (as floats) for ``X_batch``."""
        Y_pred = self.sess.run(self.Y_pred, feed_dict={self.X: X_batch})
        return Y_pred

    def test_accuracy(self, X_batch, Y_batch):
        """Return mean accuracy of predictions on (X_batch, Y_batch)."""
        return self.sess.run(self.accuracy, feed_dict={self.X: X_batch, self.Y: Y_batch})
def build_model(layer_sizes, hidden_activation='relu', l2_reg=0.0005, learning_rate=0.005, momentum=0.9):
    """Construct an NN classifier with the given architecture/hyperparameters."""
    model = NN(layer_sizes, hidden_activation=hidden_activation, l2_reg=l2_reg,
               learning_rate=learning_rate, momentum=momentum)
    return model
def train_model(model, X_train, y_train, X_cv, y_cv, epochs=200, batch_size=10):
    """Fit *model* on the training split, validating against the CV split."""
    history = model.fit(X_train, y_train, X_cv, y_cv, epochs=epochs, batch_size=batch_size)
    return history
def test_model(model, X_test, y_test):
accuracy = model.test_accuracy(X_test, y_test)
print("Final accuracy {:0.1f}".format(accuracy * 100))
def plot_history(history):
    """Plot training vs validation accuracy per epoch from a fit() history."""
    plt.plot(history['acc'])
    plt.plot(history['val_acc'])
    # BUGFIX: the title and y-axis label texts were swapped — the y axis
    # shows accuracy, while "Accuracy vs Epoch" describes the whole chart.
    plt.title('Accuracy vs Epoch')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['train', 'val'], loc='lower right')
    plt.show()
def main():
    """Train, evaluate and visualise the two-moons classifier."""
    np.random.seed(1234)
    X_train, y_train, X_cv, y_cv, X_test, y_test = get_data()
    architecture = [X_train.shape[1], 20, 20, 1]
    model = build_model(architecture,
                        hidden_activation='relu',
                        l2_reg=0.0005,
                        learning_rate=0.005,
                        momentum=0.9)
    history = train_model(model, X_train, y_train, X_cv, y_cv)
    test_model(model, X_test, y_test)
    plot_history(history)
plot_history(history)
# Run the full demo only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| neilslater/nn_practice | 01_basic_mlp/mlp_tf.py | Python | mit | 5,623 |
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``mesh3d.legendgrouptitle.font.color`` property."""

    def __init__(self, plotly_name="color",
                 parent_name="mesh3d.legendgrouptitle.font", **kwargs):
        # Default edit_type to "style" unless the caller overrides it.
        kwargs.setdefault("edit_type", "style")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/mesh3d/legendgrouptitle/font/_color.py | Python | mit | 425 |
"""
Extra utilities for waffle: most classes are defined in edx_toggles.toggles (https://edx-toggles.readthedocs.io/), but
we keep here some extra classes for usage within edx-platform. These classes cover course override use cases.
"""
import logging
import warnings
from contextlib import contextmanager
from edx_django_utils.monitoring import set_custom_attribute
from edx_toggles.toggles import (
LegacyWaffleFlag,
LegacyWaffleFlagNamespace,
LegacyWaffleSwitch,
LegacyWaffleSwitchNamespace,
)
from opaque_keys.edx.keys import CourseKey
log = logging.getLogger(__name__)
class CourseWaffleFlag(LegacyWaffleFlag):
    """
    Represents a single waffle flag that can be forced on/off for a course. This class should be used instead of
    WaffleFlag when in the context of a course. This class will also respect any org-level overrides, though
    course-level overrides will take precedence.

    Uses a cached waffle namespace.

    Usage:

       SOME_COURSE_FLAG = CourseWaffleFlag('my_namespace', 'some_course_feature', __name__)

    And then we can check this flag in code with::

        SOME_COURSE_FLAG.is_enabled(course_key)

    To configure a course-level override, go to Django Admin "waffle_utils" -> "Waffle flag course overrides".

       Waffle flag: Set this to the flag name (e.g. my_namespace.some_course_feature).
       Course id: Set this to the course id (e.g. course-v1:edx+100+Demo)
       Override choice: (Force on/Force off). "Force on" will enable the waffle flag for all users in a course,
           overriding any behavior configured on the waffle flag itself. "Force off" will disable the waffle flag
           for all users in a course, overriding any behavior configured on the waffle flag itself. Requires
           "Enabled" (see below) to apply.
       Enabled: Must be marked as "enabled" in order for the override to be applied. These settings can't be
           deleted, so instead, you need to add another disabled override entry to disable the override.

    To configure an org-level override, go to Django Admin "waffle_utils" -> "Waffle flag org overrides".

       Waffle flag: Set this to the flag name (e.g. my_namespace.some_course_feature).
       Org name: Set this to the organization name (e.g. edx)
       Override choice: (Force on/Force off). "Force on" will enable the waffle flag for all users in an org's courses,
           overriding any behavior configured on the waffle flag itself. "Force off" will disable the waffle flag
           for all users in a org's courses, overriding any behavior configured on the waffle flag itself. Requires
           "Enabled" (see below) to apply.
       Enabled: Must be marked as "enabled" in order for the override to be applied. These settings can't be
           deleted, so instead, you need to add another disabled override entry to disable the override.
    """

    def _get_course_override_value(self, course_key):
        """
        Returns True/False if the flag was forced on or off for the provided course
        (or, failing that, for the course's org). Returns None if no override applies.

        Note: Has side effect of caching the override value.

        Arguments:
            course_key (CourseKey): The course to check for override before checking waffle.
        """
        # Import is placed here to avoid model import at project startup.
        from .models import WaffleFlagCourseOverrideModel, WaffleFlagOrgOverrideModel

        # A course-specific override takes precedence over any org-level one.
        course_cache_key = f"{self.name}.cwaffle.{str(course_key)}"
        forced_for_course = self.cached_flags().get(course_cache_key)
        if forced_for_course is None:
            forced_for_course = WaffleFlagCourseOverrideModel.override_value(
                self.name, course_key
            )
            self.cached_flags()[course_cache_key] = forced_for_course

        course_choices = WaffleFlagCourseOverrideModel.ALL_CHOICES
        if forced_for_course == course_choices.on:
            return True
        if forced_for_course == course_choices.off:
            return False

        # No course-specific override was found; fall back to the org level.
        if course_key:
            org = course_key.org
            org_cache_key = f"{self.name}.owaffle.{org}"
            forced_for_org = self.cached_flags().get(org_cache_key)
            if forced_for_org is None:
                forced_for_org = WaffleFlagOrgOverrideModel.override_value(
                    self.name, org
                )
                self.cached_flags()[org_cache_key] = forced_for_org

            org_choices = WaffleFlagOrgOverrideModel.ALL_CHOICES
            if forced_for_org == org_choices.on:
                return True
            if forced_for_org == org_choices.off:
                return False

        return None

    def is_enabled(self, course_key=None):  # pylint: disable=arguments-differ
        """
        Returns whether or not the flag is enabled within the context of a given course.

        Arguments:
            course_key (Optional[CourseKey]): The course to check for override before
                checking waffle. If omitted, check whether the flag is enabled
                outside the context of any course.
        """
        if course_key:
            assert isinstance(course_key, CourseKey), \
                "Provided course_key '{}' is not instance of CourseKey.".format(course_key)
            forced = self._get_course_override_value(course_key)
            if forced is not None:
                return forced
        return super().is_enabled()
| eduNEXT/edx-platform | openedx/core/djangoapps/waffle_utils/__init__.py | Python | agpl-3.0 | 5,613 |
from setuptools import setup

package_name = 'examples_rclpy_minimal_action_client'

# Each console script maps an executable name onto the ``main`` entry point of
# the module of the same name inside this package.
_script_names = ('client', 'client_cancel', 'client_not_composable', 'client_asyncio')

setup(
    name=package_name,
    version='0.15.0',
    packages=[package_name],
    data_files=[
        # Marker file so the ament index can locate this package.
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
        ('share/' + package_name, ['package.xml']),
    ],
    install_requires=['setuptools'],
    zip_safe=True,
    author='Jacob Perron',
    author_email='jacob@openrobotics.org',
    maintainer='Aditya Pande, Shane Loretz',
    maintainer_email='aditya.pande@openrobotics.org, shane@openrobotics.org',
    keywords=['ROS'],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Topic :: Software Development',
    ],
    description='Examples of action clients using rclpy.',
    license='Apache License, Version 2.0',
    tests_require=['pytest'],
    entry_points={
        'console_scripts': [
            '{0} = {1}.{0}:main'.format(name, package_name)
            for name in _script_names
        ],
    },
)
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from flask import Blueprint
from flask import render_template
from pybossa.cache import apps as cached_apps
from pybossa.cache import categories as cached_cat
from random import choice
# Blueprint grouping the public help pages (API docs, licenses, policies).
blueprint = Blueprint('help', __name__)
@blueprint.route('/api')
def api():
    """Render help/api page, picking a sample app id for the API examples."""
    categories = cached_cat.get_used()
    apps = cached_apps.get(categories[0]['short_name'])
    if not apps:  # pragma: no cover
        app_id = None
    else:
        # Choose a random app so the examples reference real data.
        app_id = choice(apps)['id']
    return render_template('help/api.html', title="Help: API",
                           app_id=app_id)
@blueprint.route('/license')
def license():
    """Render help/license page"""
    page_title = 'Help: Licenses'
    return render_template('help/license.html', title=page_title)
@blueprint.route('/terms-of-use')
def tos():
    """Render help/terms-of-use page"""
    page_title = 'Help: Terms of Use'
    return render_template('help/tos.html', title=page_title)
@blueprint.route('/cookies-policy')
def cookies_policy():
    """Render help/cookies-policy page"""
    page_title = 'Help: Cookies Policy'
    return render_template('help/cookies_policy.html', title=page_title)
| stitchfix/pybossa | pybossa/view/help.py | Python | agpl-3.0 | 1,848 |
## @package shesha.supervisor
## @brief User layer for initialization and execution of a COMPASS simulation
## @author COMPASS Team <https://github.com/ANR-COMPASS>
## @version 5.2.1
## @date 2022/01/24
## @copyright GNU Lesser General Public License
#
# This file is part of COMPASS <https://anr-compass.github.io/compass/>
#
# Copyright (C) 2011-2022 COMPASS Team <https://github.com/ANR-COMPASS>
# All rights reserved.
# Distributed under GNU - LGPL
#
# COMPASS is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser
# General Public License as published by the Free Software Foundation, either version 3 of the License,
# or any later version.
#
# COMPASS: End-to-end AO simulation tool using GPU acceleration
# The COMPASS platform was designed to meet the need of high-performance for the simulation of AO systems.
#
# The final product includes a software package for simulating all the critical subcomponents of AO,
# particularly in the context of the ELT and a real-time core based on several control approaches,
# with performances consistent with its integration into an instrument. Taking advantage of the specific
# hardware architecture of the GPU, the COMPASS tool allows to achieve adequate execution speeds to
# conduct large simulation campaigns called to the ELT.
#
# The COMPASS platform can be used to carry a wide variety of simulations to both testspecific components
# of AO of the E-ELT (such as wavefront analysis device with a pyramid or elongated Laser star), and
# various systems configurations such as multi-conjugate AO.
#
# COMPASS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with COMPASS.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.txt>.
from shesha.init.target_init import target_init
from shesha.supervisor.components.sourceCompass import SourceCompass
import numpy as np
class TargetCompass(SourceCompass):
    """ Target handler for compass simulation.

    Thin Python-side wrapper over the sutra GPU target object: each method
    delegates to the corresponding ``sutraWrap`` call.

    Attributes:
        sources : (List) : List of SutraSource instances used for raytracing
        _target : (sutraWrap.Target) : Sutra target instance
        _context : (carmaContext) : CarmaContext instance
        _config : (config module) : Parameters configuration structure module
    """
    def __init__(self, context, config, tel):
        """ Initialize a TargetCompass component for target related supervision

        Args:
            context : (carmaContext) : CarmaContext instance

            config : (config module) : Parameters configuration structure module

            tel : (TelescopeCompass) : A TelescopeCompass instance
        """
        self._context = context
        self._config = config  # Parameters configuration coming from supervisor init
        print("->target init")
        self._target = target_init(self._context, tel._tel, self._config.p_targets,
                                   self._config.p_atmos, self._config.p_tel,
                                   self._config.p_geom, self._config.p_dms, brahma=False)
        self.sources = self._target.d_targets

    def get_tar_image(self, tar_index : int, *, expo_type: str = "se") -> np.ndarray:
        """ Get the PSF in the direction of the given target

        Args:
            tar_index : (int) : Index of target

        Kwargs:
            expo_type : (str) : "se" for short exposure (default)
                                "le" for long exposure

        Returns:
            psf : (np.ndarray) : PSF (fftshift-ed so the core is centered)

        Raises:
            ValueError : if expo_type is neither "se" nor "le"
        """
        if (expo_type == "se"):
            return np.fft.fftshift(
                    np.array(self._target.d_targets[tar_index].d_image_se))
        elif (expo_type == "le"):
            # Normalise by the number of accumulated frames; guard against
            # division by zero before the first frame is integrated.
            nb = self._target.d_targets[tar_index].strehl_counter
            if nb == 0: nb = 1
            return np.fft.fftshift(np.array(self._target.d_targets[tar_index].d_image_le)) / nb
        else:
            raise ValueError("Unknown exposure type")

    def set_tar_phase(self, tar_index : int, phase : np.ndarray) -> None:
        """ Set the phase screen seen by the tar

        Args:
            tar_index : (int) : target index

            phase : (np.ndarray) : phase screen to set
        """
        self._target.d_targets[tar_index].set_phase(phase)

    def get_tar_phase(self, tar_index: int, *, pupil: bool = False) -> np.ndarray:
        """ Returns the target phase screen of target number tar_index

        Args:
            tar_index : (int) : Target index

        Kwargs:
            pupil : (bool) : If True, applies the pupil on top of the phase screen
                             Default is False

        Returns:
            tar_phase : (np.ndarray) : Target phase screen
        """
        tar_phase = np.array(self._target.d_targets[tar_index].d_phase)
        if pupil:
            # Mask the phase screen with the small pupil from the geometry.
            pup = self._config.p_geom._spupil
            tar_phase *= pup
        return tar_phase

    def reset_strehl(self, tar_index: int) -> None:
        """ Reset the Strehl Ratio of the target tar_index

        Args:
            tar_index : (int) : Target index
        """
        self._target.d_targets[tar_index].reset_strehlmeter()

    def reset_tar_phase(self, tar_index: int) -> None:
        """ Reset the phase screen of the target tar_index

        Args:
            tar_index : (int) : Target index
        """
        self._target.d_targets[tar_index].d_phase.reset()

    def get_strehl(self, tar_index: int, *, do_fit: bool = True) -> np.ndarray:
        """ Return the Strehl Ratio of target number tar_index.

        This function will return a list of 4 values as
        [SR SE, SR LE, phase variance SE [µm²], average phase variance LE [µm²]]

        Args:
            tar_index : (int) : Target index

        Kwargs:
            do_fit : (bool) : If True (default), fit the PSF
                              with a sinc before computing SR

        Returns:
            strehl : (list) : [SR SE, SR LE, phase variance SE, avg phase variance LE]
        """
        src = self._target.d_targets[tar_index]
        src.comp_strehl(do_fit)
        avg_var = 0
        if (src.phase_var_count > 0):
            avg_var = src.phase_var_avg / src.phase_var_count
        return [src.strehl_se, src.strehl_le, src.phase_var, avg_var]

    def get_ncpa_tar(self, tar_index : int) -> np.ndarray:
        """ Return the current NCPA phase screen of the target path

        Args:
            tar_index : (int) : Index of the target

        Returns:
            ncpa : (np.ndarray) : NCPA phase screen
        """
        return np.array(self._target.d_targets[tar_index].d_ncpa_phase)

    def set_ncpa_tar(self, tar_index: int, ncpa: np.ndarray) -> None:
        """ Set the additional fixed NCPA phase in the target path.
        ncpa must be of the same size of the spupil support

        Args:
            tar_index : (int) : WFS index

            ncpa : (ndarray) : NCPA phase screen to set [µm]
        """
        self._target.d_targets[tar_index].set_ncpa(ncpa)

    def comp_tar_image(self, tarNum: int, *, puponly: int = 0, compLE: bool = True) -> None:
        """ Computes the PSF

        Args:
            tarNum: (int): target index

        Kwargs:
            puponly: (int) : if set to 1, computes Airy (default=0)

            compLE: (bool) : if True, the computed image is taken into account in long exposure image (default=True)
        """
        self._target.d_targets[tarNum].comp_image(puponly, compLE)

    def comp_strehl(self, tarNum: int, *, do_fit: bool = True) -> None:
        """ Computes the Strehl ratio

        Args:
            tarNum: (int): target index

        Kwargs:
            do_fit: (bool): Flag for enabling fitting by sinc
        """
        self._target.d_targets[tarNum].comp_strehl(do_fit)
import json
import os
import socket
import threading
import time
import traceback
from .base import (Protocol,
RefTestExecutor,
RefTestImplementation,
TestharnessExecutor,
strip_server)
from ..testrunner import Stop
# Bound lazily by do_delayed_imports() so this module can be imported without
# the wdclient package being available.
webdriver = None

here = os.path.join(os.path.split(__file__)[0])

# Grace period (seconds) added on top of per-test timeouts.
extra_timeout = 5


def do_delayed_imports():
    """Import the ``webdriver`` client module and bind it at module scope."""
    global webdriver
    import webdriver
class ServoWebDriverProtocol(Protocol):
    """WebDriver-based connection to a running Servo instance."""

    def __init__(self, executor, browser, capabilities, **kwargs):
        do_delayed_imports()
        Protocol.__init__(self, executor, browser)
        self.capabilities = capabilities
        self.host = browser.webdriver_host
        self.port = browser.webdriver_port
        self.session = None

    def setup(self, runner):
        """Connect to browser via WebDriver."""
        self.runner = runner

        session_started = False
        try:
            self.session = webdriver.Session(self.host, self.port,
                                             extension=webdriver.servo.ServoCommandExtensions)
            self.session.start()
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # are not swallowed while connecting.
            self.logger.warning(
                "Connecting with WebDriver failed:\n%s" % traceback.format_exc())
        else:
            self.logger.debug("session started")
            session_started = True

        if not session_started:
            self.logger.warning("Failed to connect via WebDriver")
            self.executor.runner.send_message("init_failed")
        else:
            self.executor.runner.send_message("init_succeeded")

    def teardown(self):
        """Best-effort shutdown of the WebDriver session."""
        self.logger.debug("Hanging up on WebDriver session")
        try:
            self.session.end()
        except Exception:
            # Teardown is best-effort; the browser may already be gone.
            pass

    def is_alive(self):
        """Return True if the session still answers a trivial query."""
        try:
            # Get a simple property over the connection
            self.session.window_handle
        # TODO what exception?
        except Exception:
            return False
        return True

    def after_connect(self):
        pass

    def wait(self):
        """Block until the session drops, ignoring per-call script timeouts."""
        while True:
            try:
                self.session.execute_async_script("")
            except webdriver.TimeoutException:
                pass
            except (socket.timeout, IOError):
                break
            except Exception:
                # Bug fix: traceback.format_exc() takes no exception argument;
                # the old call passed the exception as its ``limit`` parameter.
                self.logger.error(traceback.format_exc())
                break

    def on_environment_change(self, old_environment, new_environment):
        # Unset all the old prefs before applying the new ones.
        self.session.extension.reset_prefs(*old_environment.get("prefs", {}).keys())
        self.session.extension.set_prefs(new_environment.get("prefs", {}))
class ServoWebDriverRun(object):
    """Execute a callable against a WebDriver session under a timeout.

    The callable runs on a worker thread; ``run()`` waits up to
    ``timeout + extra_timeout`` seconds for a result and reports an
    EXTERNAL-TIMEOUT status when the worker does not finish in time.
    """

    def __init__(self, func, session, url, timeout, current_timeout=None):
        self.func = func
        self.result = None
        self.session = session
        self.url = url
        self.timeout = timeout
        self.result_flag = threading.Event()

    def run(self):
        """Run ``func(session, url, timeout)`` and return ``(success, data)``."""
        executor = threading.Thread(target=self._run)
        executor.start()

        flag = self.result_flag.wait(self.timeout + extra_timeout)
        if self.result is None:
            # The worker neither produced a result nor recorded a failure.
            assert not flag
            self.result = False, ("EXTERNAL-TIMEOUT", None)

        return self.result

    def _run(self):
        try:
            self.result = True, self.func(self.session, self.url, self.timeout)
        except webdriver.TimeoutException:
            self.result = False, ("EXTERNAL-TIMEOUT", None)
        except (socket.timeout, IOError):
            self.result = False, ("CRASH", None)
        except Exception as e:
            # The previous code assembled a ``message`` string here (via an
            # invalid traceback.format_exc(e) call) but never used it; the
            # exception object itself is what callers receive.
            self.result = False, ("ERROR", e)
        finally:
            self.result_flag.set()
def timeout_func(timeout):
    """Return a zero-argument callable reporting whether the deadline passed.

    With a falsy ``timeout`` the callable always reports False; otherwise the
    deadline is ``now + timeout + extra_timeout``.
    """
    if not timeout:
        return lambda: False
    deadline = time.time() + timeout + extra_timeout
    return lambda: time.time() > deadline
class ServoWebDriverTestharnessExecutor(TestharnessExecutor):
    """Testharness executor driving Servo through its WebDriver server."""

    def __init__(self, browser, server_config, timeout_multiplier=1,
                 close_after_done=True, capabilities=None, debug_info=None):
        # Bug fix: timeout_multiplier and debug_info were previously discarded
        # (the base class was always given the literals 1 and None).
        TestharnessExecutor.__init__(self, browser, server_config,
                                     timeout_multiplier=timeout_multiplier,
                                     debug_info=debug_info)
        self.protocol = ServoWebDriverProtocol(self, browser, capabilities=capabilities)
        with open(os.path.join(here, "testharness_servodriver.js")) as f:
            self.script = f.read()
        # Last script timeout pushed to the session; avoids resetting it on
        # every test.
        self.timeout = None

    def on_protocol_change(self, new_protocol):
        pass

    def is_alive(self):
        return self.protocol.is_alive()

    def do_test(self, test):
        """Run one testharness test; returns (harness_result, subtest_results)."""
        url = self.test_url(test)

        timeout = test.timeout * self.timeout_multiplier + extra_timeout

        if timeout != self.timeout:
            try:
                self.protocol.session.timeouts.script = timeout
                self.timeout = timeout
            except IOError:
                self.logger.error("Lost webdriver connection")
                return Stop

        success, data = ServoWebDriverRun(self.do_testharness,
                                          self.protocol.session,
                                          url,
                                          timeout).run()

        if success:
            return self.convert_result(test, data)

        return (test.result_cls(*data), [])

    def do_testharness(self, session, url, timeout):
        """Load ``url`` and run the injected harness script, returning its JSON result."""
        session.url = url
        result = json.loads(
            session.execute_async_script(
                self.script % {"abs_url": url,
                               "url": strip_server(url),
                               "timeout_multiplier": self.timeout_multiplier,
                               "timeout": timeout * 1000}))
        # Prevent leaking every page in history until Servo develops a more sane
        # page cache
        session.back()
        return result
class TimeoutError(Exception):
    """Timeout signal caught by ServoWebDriverRefTestExecutor.do_test.

    NOTE(review): shadows the Python 3 builtin TimeoutError within this module.
    """
class ServoWebDriverRefTestExecutor(RefTestExecutor):
    """WebDriver-based executor for reftests against Servo."""

    def __init__(self, browser, server_config, timeout_multiplier=1,
                 screenshot_cache=None, capabilities=None, debug_info=None):
        """Selenium WebDriver-based executor for reftests"""
        RefTestExecutor.__init__(self,
                                 browser,
                                 server_config,
                                 screenshot_cache=screenshot_cache,
                                 timeout_multiplier=timeout_multiplier,
                                 debug_info=debug_info)
        self.protocol = ServoWebDriverProtocol(self, browser,
                                               capabilities=capabilities)
        self.implementation = RefTestImplementation(self)
        # Last script timeout pushed to the session; see screenshot().
        self.timeout = None
        with open(os.path.join(here, "reftest-wait_servodriver.js")) as f:
            self.wait_script = f.read()

    def is_alive(self):
        return self.protocol.is_alive()

    def do_test(self, test):
        """Run one reftest, mapping failures onto result statuses."""
        try:
            result = self.implementation.run_test(test)
            return self.convert_result(test, result)
        except IOError:
            return test.result_cls("CRASH", None), []
        except TimeoutError:
            return test.result_cls("TIMEOUT", None), []
        except Exception as e:
            message = getattr(e, "message", "")
            if message:
                message += "\n"
            # Bug fix: traceback.format_exc() takes no exception argument; the
            # old call passed ``e`` as its ``limit`` parameter.
            message += traceback.format_exc()
            return test.result_cls("ERROR", message), []

    def screenshot(self, test, viewport_size, dpi):
        # https://github.com/w3c/wptrunner/issues/166
        assert viewport_size is None
        assert dpi is None

        timeout = (test.timeout * self.timeout_multiplier + extra_timeout
                   if self.debug_info is None else None)

        if self.timeout != timeout:
            try:
                self.protocol.session.timeouts.script = timeout
                self.timeout = timeout
            except IOError:
                self.logger.error("Lost webdriver connection")
                return Stop

        return ServoWebDriverRun(self._screenshot,
                                 self.protocol.session,
                                 self.test_url(test),
                                 timeout).run()

    def _screenshot(self, session, url, timeout):
        # Load the page, wait for reftest-wait to clear, then capture it.
        session.url = url
        session.execute_async_script(self.wait_script)
        return session.screenshot()
| mateon1/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/executors/executorservodriver.py | Python | mpl-2.0 | 8,627 |
from distutils.core import setup

# Minimal packaging script for the matplot-opencv project.
setup(
    name='matplot-opencv',
    version='0.1',
    author='Yunfu Liu',
    packages=['matplot-opencv'],
)
# -*- coding: utf-8 -*-
from __future__ import print_function
import pyxb.bundles.common.xhtml1 as xhtml
import pyxb.utils.domutils
# Emit XHTML elements into the default namespace so the output is unprefixed.
pyxb.utils.domutils.BindingDOMSupport.SetDefaultNamespace(xhtml.Namespace)

head = xhtml.head(title='A Test Document')
body = xhtml.body()
body.append(xhtml.h1('Contents'))
body.append(xhtml.p('''Here is some text.
It doesn't do anything special.'''))
p2 = xhtml.p('Here is more text. It has ',
             xhtml.b('bold'),
             ' and ',
             xhtml.em('emphasized'),
             ' content with ',
             xhtml.b('more bold'),
             ' just to complicate things.')
body.append(p2)

# Verify we have two b's and an em
assert 2 == len(p2.b)
assert 1 == len(p2.em)

# Generate the document and externally verify that the em is between the two bs.
doc = xhtml.html(head, body)
try:
    xmls = doc.toDOM().toprettyxml()
except pyxb.ValidationError as e:
    print(e.details())
    raise
# Bug fix: close the output file deterministically instead of leaving an
# unclosed handle for the garbage collector to flush.
with open('genout.xhtml', 'w') as f:
    f.write(xmls)
#!/usr/bin/env python
#####################################################################
# #
# Fretwork #
# Copyright (C) 2009-2019 FoFiX Team #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
from setuptools import setup, Extension
import os
import shlex
import shutil
import subprocess
import sys
from Cython.Build import cythonize
from fretwork.version import version_number
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
long_description = open('README.md').read()
def find_command(cmd):
    '''Find a program on the PATH, or, on win32, in the dependency pack.

    Prints progress to stdout and exits the process with status 1 when the
    program cannot be found.
    '''
    sys.stdout.write('checking for program %s... ' % cmd)
    if os.name == 'nt':
        # Only accept something from the dependency pack.
        path = os.path.join('.', 'win32', 'deps', 'bin', cmd + '.exe')
    else:
        # Search the PATH.
        path = None
        # Renamed loop variable: ``dir`` shadowed the builtin, and the join
        # was previously computed twice.
        for directory in os.environ['PATH'].split(os.pathsep):
            candidate = os.path.join(directory, cmd)
            if os.access(candidate, os.X_OK):
                path = candidate
                break
    if path is None or not os.path.isfile(path):
        print('not found')
        sys.stderr.write('Could not find required program "%s".\n' % cmd)
        if os.name == 'nt':
            sys.stderr.write('(Check that you have the latest version of the dependency pack installed.)\n')
        sys.exit(1)
    print(path)
    return path
def pc_exists(pkg):
    '''Check whether pkg-config thinks a library exists.'''
    status = subprocess.call([pkg_config, '--print-errors', '--exists', pkg])
    return status == 0
def pc_info(pkg, altnames=None):
    '''Obtain build options for a library from pkg-config and
    return a dict that can be expanded into the argument list for
    L{distutils.core.Extension}.'''
    sys.stdout.write('checking for library %s... ' % pkg)
    if altnames is None:
        altnames = []
    if not pc_exists(pkg):
        # Try each alternative package name until one is known to pkg-config.
        found = None
        for name in altnames:
            if pc_exists(name):
                found = name
                break
        if found is None:
            print('not found')
            sys.stderr.write('Could not find required library "%s".\n' % pkg)
            sys.stderr.write('(Also tried the following alternative names: %s)\n' % ', '.join(altnames))
            if os.name == 'nt':
                sys.stderr.write('(Check that you have the latest version of the dependency pack installed.)\n')
            else:
                sys.stderr.write('(Check that you have the appropriate development package installed.)\n')
            sys.exit(1)
        pkg = found
        sys.stdout.write('(using alternative name %s) ' % pkg)

    def query(flag):
        # Ask pkg-config for one set of flags and tokenize the answer.
        return shlex.split(subprocess.check_output([pkg_config, flag, pkg]).decode())

    cflags = query('--cflags')
    libs = query('--libs')

    # Pick out anything interesting in the cflags and libs, and
    # silently drop the rest.
    def split_define(macro):
        # 'NAME=VALUE' -> ('NAME', 'VALUE'); bare 'NAME' -> ('NAME', None).
        name, sep, value = macro.partition('=')
        return (name, value if sep else None)

    info = {
        'define_macros': [split_define(x[2:]) for x in cflags if x.startswith('-D')],
        'include_dirs': [x[2:] for x in cflags if x.startswith('-I')],
        'libraries': [x[2:] for x in libs
                      if x.startswith('-l') and x[2:] not in lib_blacklist],
        'library_dirs': [x[2:] for x in libs if x.startswith('-L')],
    }
    print('ok')
    return info
def combine_info(*args):
    '''Merge several L{pc_info} result dicts into a single one.'''
    merged = {key: [] for key in
              ('define_macros', 'include_dirs', 'libraries', 'library_dirs')}
    for info in args:
        for key in merged:
            merged[key].extend(info.get(key, []))
    return merged
# Find pkg-config so we can find the libraries we need.
pkg_config = find_command('pkg-config')

# Blacklist MinGW-specific dependency libraries on Windows.
if os.name == 'nt':
    lib_blacklist = ['m', 'mingw32']
else:
    lib_blacklist = []

# Query build flags for every native dependency of the extension module.
vorbisfile_info = pc_info('vorbisfile')
sdl_info = pc_info('sdl')
sdl_mixer_info = pc_info('SDL_mixer')
glib_info = pc_info('glib-2.0')
gthread_info = pc_info('gthread-2.0')
soundtouch_info = pc_info('soundtouch', ['soundtouch-1.4', 'soundtouch-1.0'])
if os.name == 'nt':
    # And glib needs a slight hack to work correctly.
    glib_info['define_macros'].append(('inline', '__inline'))
    # And we use the prebuilt soundtouch-c.
    soundtouch_info['libraries'].append('soundtouch-c')
    extra_soundtouch_src = []
else:
    # And build our own soundtouch-c.
    extra_soundtouch_src = ['fretwork/mixstream/soundtouch-c.cpp']

# Sources of the _MixStream Cython extension module.
mixstreamSource = [
    'fretwork/mixstream/_MixStream.pyx',
    'fretwork/mixstream/MixStream.c',
    'fretwork/mixstream/vorbis.c'
]
mixstreamSource.extend(extra_soundtouch_src)
mixstreamExt = Extension('fretwork.mixstream._MixStream', mixstreamSource,
                         **combine_info(vorbisfile_info, soundtouch_info, glib_info, gthread_info, sdl_info, sdl_mixer_info))

if os.name == 'nt':
    # Work around for distutils needing the files to be inside the packages in order
    # to copy them to the final package
    mixstreamDlls = [
        './win32/deps/bin/iconv.dll',
        './win32/deps/bin/libglib-2.0-0.dll',
        './win32/deps/bin/libgthread-2.0-0.dll',
        './win32/deps/bin/libintl-8.dll',
        './win32/deps/bin/libogg-0.dll',
        './win32/deps/bin/libSoundTouch-0.dll',
        './win32/deps/bin/libtheora-0.dll',
        './win32/deps/bin/libvorbis-0.dll',
        './win32/deps/bin/libvorbisfile-3.dll',
        './win32/deps/bin/SDL.dll',
        './win32/deps/bin/SDL_mixer.dll',
        './win32/deps/bin/zlib1.dll'
    ]
    # Copy the DLLs into the package tree so package_data picks them up.
    for f in mixstreamDlls:
        print('copying ', f, ' -> ', './fretwork/mixstream/%s' % f.rsplit('/', 1)[1])
        shutil.copy(f, './fretwork/mixstream/%s' % f.rsplit('/', 1)[1])
else:
    mixstreamDlls = []

# Only require pytest-runner when a test-related command was requested.
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = [
    "pytest-runner<5.3;python_version<'3.3'",
    "pytest-runner;python_version>'3.3'",
] if needs_pytest else []

setup(
    name='fretwork',
    version=version_number,
    description='Game library used by FoFiX, and FoF:R.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='FoFiX team',
    author_email='contact@fofix.org',
    license='GPLv2+',
    url='https://github.com/fofix/fretwork',
    packages=['fretwork', 'fretwork.mixstream', 'fretwork.midi'],
    package_data={'fretwork.mixstream': ['*.dll']},
    zip_safe=False,
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Multimedia',
        'Topic :: Multimedia :: Sound/Audio',
        'Topic :: Software Development :: Libraries',
    ],
    keywords='music engine fofix frets game',
    setup_requires=['cython'] + pytest_runner,
    install_requires=[
        "Cython>=0.29.2,<3.0",
        "Pygame<2.0",
        "PyOpenGL",
        "numpy<1.17;python_version<'3.4'",
        "numpy<1.20;python_version=='3.6'",
        "numpy;python_version>'3.6'",
    ],
    ext_modules=cythonize(mixstreamExt),
    test_suite="tests",
    tests_require=["pytest"],
)

if os.name == 'nt':
    # Clean up the DLLs copied into the package tree before the build.
    for f in mixstreamDlls:
        print('removing ', './fretwork/mixstream/%s' % f.rsplit('/', 1)[1])
        os.remove('./fretwork/mixstream/%s' % f.rsplit('/', 1)[1])
# -*- coding: utf8 -*-
# Copyright (c) 2017 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from dace.processdefinition.processdef import ProcessDefinition
from dace.processdefinition.activitydef import ActivityDefinition
from dace.processdefinition.gatewaydef import (
ExclusiveGatewayDefinition,
ParallelGatewayDefinition)
from dace.processdefinition.transitiondef import TransitionDefinition
from dace.processdefinition.eventdef import (
StartEventDefinition,
EndEventDefinition)
from dace.objectofcollaboration.services.processdef_container import (
process_definition)
from pontus.core import VisualisableElement
from novaideo.connectors.core import CONNECTOR_PROCESSES
from .behaviors import (
LogIn,
CreateConnector,
Configure,
Remove)
# Import)
from novaideo import _
@process_definition(name='facebookprocess', id='facebookprocess')
class FacebookProcess(ProcessDefinition, VisualisableElement):
    """Process definition wiring the Facebook connector activities together."""

    isUnique = True

    def __init__(self, **kwargs):
        super(FacebookProcess, self).__init__(**kwargs)
        self.title = _('Facebook process')
        self.description = _('Facebook process')

    def _init_definition(self):
        # Every activity sits on its own branch between the parallel gateway
        # (after start) and the exclusive gateway (before end).
        self.defineNodes(
            start=StartEventDefinition(),
            login=ActivityDefinition(
                contexts=[LogIn],
                description=_("Login with Facebook"),
                title=_("Login with Facebook"),
                groups=[]),
            create=ActivityDefinition(
                contexts=[CreateConnector],
                description=_("Add a Facebook connector"),
                title=_("Add a Facebook connector"),
                groups=[]),
            configure=ActivityDefinition(
                contexts=[Configure],
                description=_("Configure the Facebook connector"),
                title=_("Configure"),
                groups=[]),
            remove=ActivityDefinition(
                contexts=[Remove],
                description=_("Remove the Facebook connector"),
                title=_("Remove"),
                groups=[]),
            pg=ParallelGatewayDefinition(),
            eg=ExclusiveGatewayDefinition(),
            end=EndEventDefinition(),
        )
        transitions = [TransitionDefinition('start', 'pg')]
        for activity in ('login', 'create', 'configure', 'remove'):
            transitions.append(TransitionDefinition('pg', activity))
            transitions.append(TransitionDefinition(activity, 'eg'))
        transitions.append(TransitionDefinition('eg', 'end'))
        self.defineTransitions(*transitions)
# Register this process id so the connectors machinery can discover it.
CONNECTOR_PROCESSES.append('facebookprocess')
| ecreall/nova-ideo | novaideo/connectors/facebook/content/definition.py | Python | agpl-3.0 | 3,654 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add a unique constraint on MaxFlatPrice.year."""
        # Adding unique constraint on 'MaxFlatPrice', fields ['year']
        db.create_unique(u'mo_maxflatprice', ['year'])
def backwards(self, orm):
# Removing unique constraint on 'MaxFlatPrice', fields ['year']
db.delete_unique(u'mo_maxflatprice', ['year'])
models = {
'mo.departamentagreement': {
'Meta': {'object_name': 'DepartamentAgreement'},
'agreement_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']", 'null': 'True', 'blank': 'True'}),
'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'subvention': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.Subvention']", 'null': 'True', 'blank': 'True'})
},
'mo.federalbudget': {
'Meta': {'object_name': 'FederalBudget'},
'adm_coef': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'minis_sum': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'sub_orph_home': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'sub_sum': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'subvention_performance': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'mo.maxflatprice': {
'Meta': {'object_name': 'MaxFlatPrice'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_price': ('django.db.models.fields.FloatField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {'unique': 'True'})
},
'mo.mo': {
'Meta': {'object_name': 'MO'},
'common_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_economy': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_fed_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_percentage': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_reg_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_spent': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'creation_form': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'has_trouble': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'home_fed_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'home_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'home_reg_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048'}),
'planing_home_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'mo.orphan': {
'Meta': {'object_name': 'Orphan'},
'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'have_home': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_privilege': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'mo.peopleamount': {
'Meta': {'object_name': 'PeopleAmount'},
'deals': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'future_queue_by_list': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'future_unhome_orphan': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'future_unhome_orphan_14_18': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']", 'null': 'True', 'blank': 'True'}),
'privilege_people': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'queue_by_list': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'recoverers': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'unhome_orphan': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'unhome_orphan_14_18': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mo.regionalbudget': {
'Meta': {'object_name': 'RegionalBudget'},
'adm_coef': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'minis_sum': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'sub_orph_home': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'sub_sum': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'subvention_performance': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'mo.subvention': {
'Meta': {'object_name': 'Subvention'},
'amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'fed_budget': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.FederalBudget']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'minis_sum': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'reg_budget': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.RegionalBudget']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['mo'] | zionist/mon | mon/apps/mo/migrations/0016_auto__add_unique_maxflatprice_year.py | Python | bsd-3-clause | 8,109 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import socket
import requests
import json
import argparse
import os
import re
# Interface name patterns (matched with re.search) eligible for DDNS.
INTERFACE_NAME_PATTERNS = [
    r'wan',
]

# Address patterns (matched with re.search) that must never be published:
# link-local, ULA, loopback and private ranges.
ADDRESS_BLACKLIST_PATTERNS = [
    r'^fe80',
    r'^fd19',
    r'^::',
    r'^127',
    r'^192',
    r'^172',
    r'^10\.',
]

CF_BASE_URL = 'https://api.cloudflare.com/client/v4'


def _parse_local_addresses():
    """Return ([ipv4], [ipv6]) found on whitelisted interfaces.

    Parses the output of ``ifconfig -a``.  NOTE(review): the regex relies on
    the legacy net-tools output format ("<name>  Link ..." blocks separated
    by blank lines) — it will not match the newer iproute2-style output.
    """
    ifconfig = os.popen("ifconfig -a").read()
    # Raw strings: '\s' inside a plain string is an invalid escape sequence
    # under Python 3.
    interfaces = re.findall(r'([a-zA-Z0-9_-]+)\s+Link([\w\W]*?)(?:\n\n|$)',
                            ifconfig)
    ipv4 = []
    ipv6 = []
    for inet_name, details in interfaces:
        # Skip interfaces that match no whitelist pattern.
        if not any(re.search(pat, inet_name)
                   for pat in INTERFACE_NAME_PATTERNS):
            continue
        for addr in re.findall(r'addr:\s*([0-9a-f.:]+)', details):
            # Skip blacklisted (non-public) addresses.
            if any(re.search(pat, addr)
                   for pat in ADDRESS_BLACKLIST_PATTERNS):
                continue
            if ':' in addr:
                print('IPv6(%s) With Interface(%s)' % (addr, inet_name))
                ipv6.append(addr)
            else:
                print('IPv4(%s) With Interface(%s)' % (addr, inet_name))
                ipv4.append(addr)
    return ipv4, ipv6


def _sync_dns_records(domain, dns_base_name, cf_api_key, ipv4, ipv6):
    """Reconcile Cloudflare DNS records with the current local addresses.

    Deletes stale ``<prefix>-*`` records, keeps records whose content is
    still a local address, and creates A/AAAA records for the remaining
    addresses.  ``ipv4``/``ipv6`` are mutated: addresses already present in
    DNS are removed from them before creation.
    """
    headers = {
        'Authorization': 'Bearer %s' % cf_api_key,
        'Content-Type': 'application/json'
    }
    zones = requests.get('%s/zones' % CF_BASE_URL,
                         headers=headers,
                         params={'match': 'all', 'name': domain}).json()
    zone_id = zones['result'][0]['id']
    print('find zone id %s for %s' % (zone_id, domain))

    dns_records = requests.get('%s/zones/%s/dns_records' % (CF_BASE_URL, zone_id),
                               params={
                                   'per_page': '100',
                                   'proxied': False
                               },
                               headers=headers).json()
    for record in dns_records['result']:
        dns_id = record['id']
        name = record['name']
        content = record['content']
        if not name.startswith(dns_base_name):
            continue
        if content not in ipv4 and content not in ipv6:
            # Stale record: its address is no longer assigned locally.
            requests.delete('%s/zones/%s/dns_records/%s' % (CF_BASE_URL, zone_id, dns_id),
                            headers=headers)
            print('delete id(%s) for %s with %s' % (dns_id, name, content))
        else:
            print('exist DNS record with id(%s) name(%s) content(%s)' % (dns_id, name, content))
            # Already published — no need to create it again.
            if content in ipv6:
                ipv6.remove(content)
            if content in ipv4:
                ipv4.remove(content)

    create_dns_record = []
    # Create records for the addresses not yet published.
    for v4 in ipv4:
        ret = requests.post('%s/zones/%s/dns_records' % (CF_BASE_URL, zone_id),
                            headers=headers,
                            data=json.dumps({
                                'type': 'A',
                                'name': '%s-v4.%s' % (dns_base_name, domain),
                                'content': v4,
                                'ttl': '120',
                                'proxied': False
                            })).json()
        create_dns_record.append(ret)
    for v6 in ipv6:
        ret = requests.post('%s/zones/%s/dns_records' % (CF_BASE_URL, zone_id),
                            headers=headers,
                            data=json.dumps({
                                'type': 'AAAA',
                                'name': '%s-v6.%s' % (dns_base_name, domain),
                                'content': v6,
                                'ttl': '120',
                                'proxied': False
                            })).json()
        create_dns_record.append(ret)

    for rec in create_dns_record:
        if rec['success']:
            ret = rec['result']
            dns_id = ret['id']
            dns_name = ret['name']
            dns_content = ret['content']
            print('add new record(%s) id(%s) with content(%s)' % (dns_name, dns_id, dns_content))
        else:
            print('Error:', rec)
    print('all done.')


def main():
    """Dynamic-DNS update via the Cloudflare v4 API.

    The following DNS records will be created:
        <prefix>-v4.<domain> for IPv4
        <prefix>-v6.<domain> for IPv6
    The IP addresses depend on the result of the ifconfig command.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--domain', required=True, help='which means zone in cloudflare. eg: example.com')
    parser.add_argument('--prefix', required=True, help='which means prefix of DDNS')
    parser.add_argument('--api-key', required=True, help='the api key of cloudflare')
    arg = parser.parse_args()

    print('Domain:', arg.domain)
    print('DNS base name:', arg.prefix)

    ipv4, ipv6 = _parse_local_addresses()
    _sync_dns_records(arg.domain, arg.prefix, arg.api_key, ipv4, ipv6)


if __name__ == '__main__':
    main()
| gam2046/UtilsClass | network/ddns.py | Python | gpl-3.0 | 5,180 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.