repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
rancherio/rancher | tests/integration/suite/test_system_project.py | Python | apache-2.0 | 3,918 | 0 | import pytest
from rancher import ApiError
from kubernetes.client import CoreV1Api
from .conftest import wait_for
systemProjectLabel = "authz.management.cattle.io/system-project"
defaultProjectLabel = "authz.management.cattle.io/default-project"
initial_system_namespaces = set(["kube-node-lease",
"kube-system",
"cattle-system",
"kube-public",
"cattle-global-data",
"cattle-global-nt",
"fleet-system"])
loggingNamespace = "cattle-logging"
def test_system_project_created(admin_cc):
projects = admin_cc.management.client.list_project(
clusterId=admin_cc.cluster.id)
initial_projects = {}
initial_projects["Default"] = defaultProjectLabel
initial_projects["System"] = systemProjectLabel
required_projects = []
for project in projects:
name = project['name']
if name in initial_projects:
projectLabel = initial_projects[name]
assert project['labels'].\
data_dict()[projectLabel] == 'true'
required_projects.append(name)
assert len(required_projects) == len(initial_projects)
def test_system_namespaces_assigned(admin_cc):
projects = admin_cc.management.client.list_project(
clusterId=admin_cc.cluster.id)
systemProject = None
for project in projects:
if project['name'] == "System":
systemProject = project
break
assert systemProject is not None
system_namespaces = admin_cc.client.list_namespace(
projectId=systemProject.id)
system_namespaces_names = set(
[ns['name'] for ns in system_namespaces])
# If clusterLogging tests run before this, cattle-logging
# will be present in current system_namespaces, removing | it
if loggingNamespace in system_namespaces_names:
system_namespaces_names.remove(loggingNamespace)
assert system_namespaces_names == initial_system_namespaces
def test_system_project_cant_be_deleted(admin_mc, admin_cc):
"" | "The system project is not allowed to be deleted, test to ensure that is
true
"""
projects = admin_cc.management.client.list_project(
clusterId=admin_cc.cluster.id)
system_project = None
for project in projects:
if project['name'] == "System":
system_project = project
break
assert system_project is not None
# Attempting to delete the template should raise an ApiError
with pytest.raises(ApiError) as e:
admin_mc.client.delete(system_project)
assert e.value.error.status == 405
assert e.value.error.message == 'System Project cannot be deleted'
def test_system_namespaces_default_svc_account(admin_mc):
system_namespaces_setting = admin_mc.client.by_id_setting(
"system-namespaces")
system_namespaces = system_namespaces_setting["value"].split(",")
k8sclient = CoreV1Api(admin_mc.k8s_client)
def_saccnts = k8sclient.list_service_account_for_all_namespaces(
field_selector='metadata.name=default')
for sa in def_saccnts.items:
ns = sa.metadata.namespace
def _check_system_sa_flag():
if ns in system_namespaces and ns != "kube-system":
if sa.automount_service_account_token is False:
return True
else:
return False
else:
return True
def _sa_update_fail():
name = sa.metadata.name
flag = sa.automount_service_account_token
return 'Service account {} in namespace {} does not have correct \
automount_service_account_token flag: {}'.format(name, ns, flag)
wait_for(_check_system_sa_flag, fail_handler=_sa_update_fail)
|
wojtask/CormenPy | src/chapter11/exercise11_4_2.py | Python | gpl-3.0 | 565 | 0 | import math
Deleted = math.inf
def hash_delete(T, k, h):
m = T.length
i = 0
while True:
| j = h(k, i, m)
if T[j] == k:
T[j] = Deleted
return
i = i + 1
if T[j] is None or i == m:
break
def hash_insert_(T, k, h):
m = T.length
i = 0
while True:
j = h(k, i, m)
if T[j] is None or T[j] is Deleted:
T[j] = k
return j |
else:
i = i + 1
if i == m:
break
raise RuntimeError('hash table overflow')
|
mtils/ems | ems/qt4/itemmodel/sqlitermodel.py | Python | mit | 5,898 | 0.003561 |
from PyQt4.QtCore import Qt
from filtermodels import SortFilterProxyModel
from ems import qt4
from ems.qt4.util import variant_to_pyobject
from sqliter.query import test, c, and_, or_, GenClause, PathNotFoundError
class SqlIterFilterModel(SortFilterProxyModel):
def __init__(self, parent=None):
SortFilterProxyModel.__init__(self, parent)
self._query = None
self._filterRowCount = 0
self._filterColumnCount = 0
self._query = None
self._visibleColumnCache = None
self._groupByResults = None
self._name2ColumnMap = {}
self._queryColumns = set()
self._whereColumns = set()
self._groupByColumns = set()
self._hasGroupBy = False
self._hasWhere = False
self._hasColumnFilter = False
self._visibleColumns = []
def name2ColumnMap(self):
if not self._name2ColumnMap:
sourceModel = self.sourceModel()
self._name2ColumnMap = {}
for col in range(sourceModel.columnCount()):
colVariant = sourceModel\
.headerData(col, Qt.Horizontal,
qt4.ColumnNameRole)
colName = variant_to_pyobject(colVariant)
if not colName:
colName = "col-{0}".format(col)
self._name2ColumnMap[colName] = col
return self._name2ColumnMap
def filterAcceptsRow(self, sourceRow, sourceParent):
if not self.usesRowQueryFilter():
super(SqlIterFilterModel, self).filterAcceptsRow(sourceRow,
sourceParent)
self._filterRowCount += 1
columnMap = self.name2ColumnMap()
whereResult = True
groupByResult = True
if self._hasWhere:
# Only use columns which are needed to filter the model
whereRow = {}
for fieldName in self._whereColumns:
col = columnMap[fieldName]
whereRow[fieldName] = variant_to_pyobject(self.sourceModel()\
.index(sourceRow, col)\
.data(self.filterRole()))
whereResult = self._query.match(whereRow)
if self._hasGroupBy:
# Only use columns which are needed to filter the model
groupByRow = {}
for fieldName in self._groupByColumns:
col = columnMap[fieldName]
groupByRow[fieldName] = variant_to_pyobject(self.sourceModel()\
.index(sourceRow, col)\
.data(self.filterRole()))
groupByResult = self._groupByCheck(groupByRow)
return whereResult and groupByResult
def _groupByCheck(self, row):
if self._hasGroupBy:
test = []
for field in self._query.group_by():
try:
test.append(unicode(GenClause.extractValue(row, field)[0]))
except PathNotFoundError:
test.append('')
rowHash = u"|-|".join(test)
if rowHash in self._groupByResults:
return False
self._groupByResults.add(rowHash)
return True
def invalidate(self):
self.resetCaches()
return SortFilterProxyModel.invalidate(self)
def invalidateFilter(self):
self.resetCaches()
return SortFilterProxyModel.invalidateFilter(self)
def resetCaches(self):
self._groupByResults = set()
self._name2ColumnMap.clear()
def filterAcceptsColumn(self, sourceColumn, sourceParent):
if self._hasColumnFilter:
cols = self.visibleColumns()
if cols:
return (sourceColumn in cols)
return SortFilterProxyModel.filterAcceptsColumn(self, sourceColumn, sourceParent)
def visibleColumns(self):
# TODO Fields with dots
if self._hasColumnFilter and not self._visibleColumnCache:
self._visibleColumnCache = []
fields = self._query.fields()
columnMap = self.name2ColumnMap()
for colName in columnMap:
if colName in fields:
self._visibleColumnCache.append(columnMap[colName])
return self._visibleColumnCache
def query(self):
return self._query
def setQuery(self, query):
self._query = query
self.resetCaches()
if self._query:
self._hasGroupBy = self._query.has_group_by()
self._hasWhere = self._query.has_where()
self._queryColumns = self._getFirstSegments(self._query.collect_fieldnames())
self._groupByColumns = self._getFirstSegmen | ts(self._query.group_by_fieldnames())
self._whereColumns = self._getFirstSegments(self._query.where_fieldnames())
self._hasColumnFilter = self._query.has_fields()
self._visibleColumnCache = None
else:
self._hasGroupBy = False
self._hasWhere = False
self._queryColumns = set()
self._groupByColumns = set()
self._whereColumns = set()
self._hasColumnFilter = False
s | elf._visibleColumnCache = None
#self.layoutAboutToBeChanged.emit()
self.modelAboutToBeReset.emit()
self.invalidateFilter()
#self.layoutChanged.emit()
self.modelReset.emit()
def _getFirstSegments(self, fields):
firstSegments = set()
for field in fields:
firstSegments.add(field.split('.')[0])
return firstSegments
def usesRowQueryFilter(self):
return self._hasGroupBy or self._hasWhere |
rockho-team/shoop-cielo | shuup_cielo/admin/views/__init__.py | Python | agpl-3.0 | 3,897 | 0.003865 | # -*- coding: utf-8 -*-
# This file is part of Shuup Cielo.
#
# Copyright (c) 2016, Rockho Team. All rights reserved.
# Author: Christian Hess
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from decimal import Decimal
from django.http.response import HttpResponseBadRequest, HttpResponseServerError
from django.shortcuts import render_to_response
from django.views.generic.base import TemplateView, View
from cielo_webservice.exceptions import CieloRequestError
import shuup_cielo
from shuup_cielo.constants import CieloTransactionStatus
from shuup_cielo.models import CieloTransaction
TRANSACTION_DETAIL_TEMPLAE = 'cielo/admin/order_section_transaction_detail.jinja'
class DashboardView(TemplateView):
template_name = "cielo/admin/dashboard.jinja"
title = "Cielo"
def get_context_data(self, **kwargs):
context_data = super(DashboardView, self).get_context_data(**kwargs)
context_data.update({'VERSION': shuup_cielo.__version__})
return context_data
class RefreshTransactionView(View):
'''
Atualiza uma transação e retorna o detalhe da transação renderizado
'''
def post(self, request, *args, **kwargs):
try:
cielo_transaction = CieloTransaction.objects.get(pk=request.POST.get('id'))
cielo_transaction.refresh()
return render_to_response(TRANSACTION_DETAIL_TEMPLAE, {'transaction': cielo_transaction,
'CieloTransactionStatus': CieloTransactionStatus})
except Exception as exc:
return HttpResponseServerError(str(exc))
class CaptureTransactionView(View):
'''
Captura uma transação (total ou parcialmente) e retorna o detalhe da transação renderizado
'''
def post(self, request, *args, **kwargs):
'''
:param: id: ID da transação
:param: amount: Valor a ser cancelado ou Nulo se for total
:type: id: int
:type: amount: decimal.Decimal|None
'''
try:
cielo_transaction = CieloTransaction.objects.get(pk=request.POST.get('id'))
amount = Decimal(reque | st.POST.get('amount', cielo_transaction.total_value))
try:
cielo_transaction.captu | re(amount)
except CieloRequestError as err:
return HttpResponseBadRequest("{0}".format(err))
return render_to_response(TRANSACTION_DETAIL_TEMPLAE, {'transaction': cielo_transaction,
'CieloTransactionStatus': CieloTransactionStatus})
except Exception as exc:
return HttpResponseServerError("{0}".format(exc))
class CancelTransactionView(View):
'''
Cancela uma transação (total ou parcialmente) e retorna o detalhe da transação renderizado
'''
def post(self, request, *args, **kwargs):
'''
:param: id: ID da transação
:param: amount: Valor a ser cancelado ou Nulo se for total
:type: id: int
:type: amount: decimal.Decimal|None
'''
try:
cielo_transaction = CieloTransaction.objects.get(pk=request.POST.get('id'))
amount = Decimal(request.POST.get('amount', cielo_transaction.total_value))
try:
cielo_transaction.cancel(amount)
except CieloRequestError as err:
return HttpResponseBadRequest("{0}".format(err))
return render_to_response(TRANSACTION_DETAIL_TEMPLAE, {'transaction': cielo_transaction,
'CieloTransactionStatus': CieloTransactionStatus})
except Exception as exc:
return HttpResponseServerError("{0}".format(exc))
|
munchycool/forthelulz | plugin.video.v1d30play/playvideo.py | Python | agpl-3.0 | 2,398 | 0.012093 | import urlparse
import sys,urllib
import xbmc, xbmcgui, xbmcaddon, xbmcplugin
import urlresolver
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
_addon = xbmcaddon.Addon()
_icon = _addon.getAddonInfo('icon')
def build_url(query):
return base_url + '?' + urllib.urlencode(query)
def resolve_url(url):
duration=7500 #in milliseconds
message = "Cannot Play URL"
stream_url = urlresolver.HostedMediaFile(url=url).resolve()
# If urlresolver returns false then the video url was not resolved.
if not stream_url:
dialog = xbmcgui.Dialog()
dialog.notification("URL Resolver Error", message, xbmcgui.NOTIFICATION_INFO, duration)
return False
else:
return stream_url
def play_video(path):
"""
Play a video by the provided path.
:param path: str
"""
# Create a playable item with a path to play.
play_item | = xbmcgui.ListItem(path=path)
vid_url = play_item.getfilename()
stream_url = resolve_url(vid_url)
if stream_url:
play_item.setPath(stream_url)
# Pass the item to the Kodi player.
xbmcplugin.setResolvedUrl(addon_handle, True, listitem=play_item)
# addon kicks in
mode = args.get('mode', None)
if mode is None:
video_play_url = "http://www.vidsplay.com/wp-content/uploads/2017/04/alligator.mp4"
url = build_url({'mode' :'play', 'playlink' : video_play_url})
li | = xbmcgui.ListItem('Play Video 1', iconImage='DefaultVideo.png')
li.setProperty('IsPlayable' , 'true')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
video_play_url = "https://www.youtube.com/watch?v=J9d9UrK0Jsw"
url = build_url({'mode' :'play', 'playlink' : video_play_url})
li = xbmcgui.ListItem('Play Video 2', iconImage='DefaultVideo.png')
li.setProperty('IsPlayable' , 'true')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
video_play_url = "www.reddit.com"
url = build_url({'mode' :'play', 'playlink' : video_play_url})
li = xbmcgui.ListItem('Play Video 3', iconImage='DefaultVideo.png')
li.setProperty('IsPlayable' , 'true')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0] == 'play':
final_link = args['playlink'][0]
play_video(final_link)
|
ellisonbg/pyzmq | zmq/utils/garbage.py | Python | lgpl-3.0 | 5,396 | 0.005374 | """Garbage collection thread for representing zmq refcount of Python objects
used in zero-copy sends.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013 Brian E. Granger & Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import atexit
import struct
from os import getpid
from collections import namedtuple
from threading import Thread, Event, Lock
import warnings
import zmq
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
gcref = namedtuple('gcref', ['obj', 'event'])
class GarbageCollectorThread(Thread):
"""Thread in which garbage collection actually happens."""
def __init__(self, gc):
super(GarbageCollectorThread, self).__init__()
self.gc = gc
self.daemon = True
self.pid = getpid()
self.ready = Event()
def run(self):
s = self.gc.context.socket(zmq.PULL)
s.linger = 0
s.bind(self.gc.url)
self.ready.set()
while True:
# detect fork
if getpid is None or getpid() != self.pid:
return
msg = s.recv()
if msg == b'DIE':
break
fmt = 'L' if len(msg) == 4 else 'Q'
key = struct.unpack(fmt, msg)[0]
tup = self.gc.refs.pop(key, None)
if tup and tup.event:
tup.event.set()
del tup
s.close()
class GarbageCollector(object):
"""PyZMQ Garbage Collector
Used for representing the reference held by libzmq during zero-copy sends.
This object holds a dictionary, keyed by Python id,
of the Python objects whose memory are currently in use by zeromq.
When zeromq is done with the memory, it sends a message on an inproc PUSH socket
containing the packed size_t (32 or 64-bit unsigned int),
which is the key in the dict.
When the PULL socket in the gc thread receives that message,
the reference is popped from the dict,
and any tracker events that should be signaled fire.
"""
refs = None
_context = None
_lock = None
url = "inproc://pyzmq.gc.01"
def __init__(self, context=None):
super(GarbageCollector, self).__init__()
self.refs = {}
self.pid = None
self.thread = None
self._context = context
self._lock = Lock()
self._stay_down = False
atexit.register(self._atexit)
@property
def context(self):
if self._context is None:
self._context = zmq.Context()
| return self._context
@context.setter
def context(self, ctx):
if self.is_alive():
if self.refs:
warnings.warn("Replacing gc context while gc is running", RuntimeWarn | ing)
self.stop()
self._context = ctx
def _atexit(self):
"""atexit callback
sets _stay_down flag so that gc doesn't try to start up again in other atexit handlers
"""
self._stay_down = True
self.stop()
def stop(self):
"""stop the garbage-collection thread"""
if not self.is_alive():
return
push = self.context.socket(zmq.PUSH)
push.connect(self.url)
push.send(b'DIE')
push.close()
self.thread.join()
self.context.term()
self.refs.clear()
def start(self):
"""Start a new garbage collection thread.
Creates a new zmq Context used for garbage collection.
Under most circumstances, this will only be called once per process.
"""
self.pid = getpid()
self.refs = {}
self.thread = GarbageCollectorThread(self)
self.thread.start()
self.thread.ready.wait()
def is_alive(self):
"""Is the garbage collection thread currently running?
Includes checks for process shutdown or fork.
"""
if (getpid is None or
getpid() != self.pid or
self.thread is None or
not self.thread.is_alive()
):
return False
return True
def store(self, obj, event=None):
"""store an object and (optionally) event for zero-copy"""
if not self.is_alive():
if self._stay_down:
return 0
# safely start the gc thread
# use lock and double check,
# so we don't start multiple threads
with self._lock:
if not self.is_alive():
self.start()
tup = gcref(obj, event)
theid = id(tup)
self.refs[theid] = tup
return theid
def __del__(self):
if not self.is_alive():
return
try:
self.stop()
except Exception as e:
raise (e)
gc = GarbageCollector()
|
Micronaet/micronaet-connector | prestashop-connector/agent/agent_mysql.py | Python | agpl-3.0 | 33,351 | 0.006327 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import shutil
from PIL import Image
# -----------------------------------------------------------------------------
# MYSQL CLASS
# -----------------------------------------------------------------------------
class mysql_connector():
''' MySQL connector for MySQL database
'''
# -------------------------------------------------------------------------
# Parameters for defaults:
# -------------------------------------------------------------------------
# TODO manage in other mode!
id_shop = 1
pack_stock_type = 3
id_langs = {
'it_IT': 1,
'en_US': 2,
}
ext_in = 'jpg'
ext_out = 'jpg'
id_image_type = {
'': False, # empty default image no resize for now
'cart_default': (80, 80),
'small_default': (98, 98),
'medium_default': (125, 125),
'home_default': (250, 250),
'large_default': (458, 458),
'thickbox_default': (800, 800),
'category_default': (870, 217),
'scene_default': (870, 270),
'm_scene_default': (161 ,58),
}
# -------------------------------------------------------------------------
# Utility function:
# -------------------------------------------------------------------------
def resize_image(self, from_image, to_image, size):
''' Resize image as indicated in Mysql table
'''
if size: # else no resize
origin = Image.open(from_image)
resized = origin.resize(size, Image.ANTIALIAS)
image_type = 'JPEG' if self.ext_out.upper() == 'JPG' else\
self.ext_out.upper()
resized.save(to_image, image_type)
else:
shutil.copyfile(from_image, to_image)
return True
def _search_table_key(self, table, key, extra_field=False):
''' Search table key for get insert/update mode
'''
if extra_field:
query = 'select count(*), ' + extra_field + ' from %s where %s;'
else:
query = 'select count(*) from %s where %s;'
where = ''
table = '%s_%s' % (self._prefix, table)
for field, value in key:
if where:
where += ' and '
quote = '\'' if type(value) in (str, ) else ''
where += '`%s` = %s%s%s' % (
field, quote, value, quote)
query = query % (table, where)
# Check if present
connection = self.get_connection()
if not connection:
return False
cr = connection.cursor()
cr.execute(query)
res = cr.fetchall()
try:
if res[0]['count(*)'] > 0:
if extra_field:
return where, res[0][extra_field]
else:
return where
except:
pass
if extra_field:
return False, False
else:
return False
def _prepare_mysql_query(
self, update_where, record, table, field_quote=None):
''' Prepare insert query passing record and quoted field list
update_where: if present means that is the key search filter so
need to be updated not created
'''
if field_quote is None:
field_quote = []
if update_where:
# update
table = '%s_%s' % (self._prefix, table)
fields = ''
for field, value in record.iteritems():
if fields:
fields += ', '
quote = '\'' if field in field_quote else ''
fields += '`%s` = %s%s%s' % (field, quote, value, quote)
query = 'UPDATE %s SET %s WHERE %s;' % (
table,
fields,
update_where,
)
if self._log:
print '[INFO] UPDATE: ', query
else:
# insert
table = '%s_%s' % (self._prefix, table)
fields = values = ''
for field, value in record.iteritems():
if fields:
fields += ', '
fields += '`%s`' % field
quote = '\'' if field in field_quote else ''
if values:
values += ', '
values += '%s%s%s' % (quote, value, quote)
query = 'INSERT INTO %s(%s) VALUES (%s);' % (
table, fields, values)
if self._log:
print '[INFO] INSERT: ', query
return query
def _expand_lang_data(self, data):
''' Generate extra data for SEO management
mandatory: name, meta_title, meta_description
'''
def clean_metatags(value):
''' Clean meta tags for problems with some char
'''
if not value:
return ''
replace_list = (
(' ', ' '),
(' ', '-'),
(',', ''),
('.', ''),
('\'', ''),
('"', ''),
)
for from_char, to_char in replace_list:
value = value.replace(from_char, to_char)
return value.lower()
if not data:
return {}
name = data.get('name', '')
meta_title = data.get('meta_title', '')
meta_description = data.get('meta_description', '')
description_short = '%s%s' % (
meta_description[:150] or name,
'...' if len(meta_description) >= 150 else ''
)
data['description'] = '<p>%s</p>' % (meta_description)
link_rewrite = clean_metatags(name)
data['link_rewrite'] = link_rewrite
data['meta_keywords'] = meta_description # TODO limit 160 char?
data['description_short'] = '<p>%s</p>' % description_short
return
# -------------------------------------------------------------------------
# Exported function:
# -------------------------------------------------------------------------
def update_image_file(self, reference, id | _image):
''' Update image reference in rsync folder to prestashop path
'''
#root_path = '/var/www/html/2015.redesiderio.it/site/public/https'
root_path = '/home/redesiderio/public_html'
# TODO |
path_in = os.path.join(root_path, 'img/odoo')
path_out = os.path.join(root_path, 'img/p',)
# Create origin image:
image_in = os.path.join(
path_in,
'%s.%s' % (
reference.replace(' ', '_'),
self.ext_in,
),
)
# ---------------------------------------------------------------------
# Get input image info:
# ---------------------------------------------------------------------
i_in = Ima |
ic-labs/django-icekit | icekit/plugins/location/models.py | Python | mit | 1,493 | 0 | from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from fluent_contents.models import ContentItem
from icekit.models import ICEkitFluentContentsMixin
from icekit.managers import GoogleMapManager
from icekit.publishing.managers import PublishingMan | ager
from icekit.plugins.location import abstract_models
# We must combine managers here otherwise the `PublishingManager` applied via
# `ICEkitFluentContentsMixin` will clobber the existing `GoogleMapManager`
class PublishingManagerAndGoogl | eMapManager(
PublishingManager,
GoogleMapManager,
):
pass
class Location(
ICEkitFluentContentsMixin,
abstract_models.AbstractLocationWithGoogleMap,
):
"""
Location model with fluent contents.
"""
objects = PublishingManagerAndGoogleMapManager()
class Meta:
unique_together = (('slug', 'publishing_linked'), )
ordering = ('title',)
def get_absolute_url(self):
return reverse(
'icekit_plugins_location_detail',
kwargs={'slug': self.slug}
)
@python_2_unicode_compatible
class LocationItem(ContentItem):
location = models.ForeignKey(
Location,
on_delete=models.CASCADE,
)
class Meta:
verbose_name = _('Location')
verbose_name_plural = _('Locations')
def __str__(self):
return unicode(self.location)
|
mercycorps/TolaActivity | indicators/tests/form_tests/result_form_unittests.py | Python | apache-2.0 | 9,175 | 0.000981 | """Unit tests for result_form_functional_tests.py
Systems:
- indicators.views.ResultCreate
- bad indicator id 404
- get with good ids gives form
- initial form data is correct
- correct disaggregation values
- form valid returns appropriate response
- form invalid returns appropriate response
- indicators.views.ResultUpdate
- indicators.forms.ResultForm
"""
import datetime
from indicators.views import ResultCreate, ResultUpdate
from indicators.forms import ResultForm
from indicators.models import Indicator, Result
from factories import (
indicators_models as i_factories,
workflow_models as w_factories
)
from django.urls import reverse
from django.http import Http404
from django import test
class TestResultCreateUpdate404(test.TestCase):
def setUp(self):
self.program = w_factories.ProgramFactory()
self.indicator = i_factories.IndicatorFactory(
program=self.program
)
self.result = i_factories.ResultFactory(
indicator=self.indicator
)
self.user = w_factories.UserFactory(first_name="FN", last_name="LN", username="tester", is_superuser=True)
self.user.set_password('password')
self.user.save()
self.tola_user = w_factories.TolaUserFactory(user=self.user)
self.tola_user.save()
self.client = test.Client(enforce_csrf_checks=False)
self.client.login(username='tester', password='password')
def test_create_view_raises_404_with_bad_indicator_id(self):
kwargs = {
'indicator': self.indicator.id + 1
}
bad_url = reverse('result_add', kwargs=kwargs)
response = self.client.get(bad_url)
self.assertEqual(response.status_code, 404)
def test_update_view_raises_404_with_bad_result_id(self):
kwargs = {
'pk': self.result.id + 1
}
bad_url = reverse('result_update', kwargs=kwargs)
response = self.client.get(bad_url)
self.assertEqual(response.status_code, 404)
class TestUpdateFormInitialValues(test.TestCase):
def setUp(self):
self.program = w_factories.ProgramFactory()
self.indicator = i_factories.IndicatorFactory(
program=self.program,
target_frequency=Indicator.ANNUAL
)
self.result = i_factories.ResultFactory(
indicator=self.indicator,
)
self.result.record_name = 'record name'
self.result.evidence_url = 'evidence url'
self.blank_result = i_factories.ResultFactory(
indicator=self.indicator
)
self.tola_user = w_factories.TolaUserFactory()
self.user = self.tola_user.user
self.request = type('Request', (object,), {'has_write_access': True, 'user': self.user})()
def test_initial_values(self):
form = ResultForm(user=self.user, indicator=self.indicator, program=self.program, instance=self.result, request=self.request)
self.assertEqual(form['achieved'].value(), self.result.achieved)
self.assertEqual(form['target_frequency'].value(), Indicator.ANNUAL)
self.assertEqual(form['indicator'].value(), self.indicator.id)
self.assertEqual(form['date_collected'].value(), self.result.date_collected)
self.assertEqual(form['record_name'].value(), 'record name')
self.assertEqual(form['evidence_url'].value(), 'evidence url')
def test_initial_values_no_evidence(self):
form = ResultForm(user=self.user, indicator=self.indicator, program=self.program, instance=self.blank_result, request=self.request)
self.assertEqual(form['achieved'].value(), self.result.achieved)
self.assertEqual(form['target_frequency'].value(), Indicator.ANNUAL)
self.assertEqual(form['indicator'].value(), self.indicator.id)
self.assertEqual(form['record_name'].value(), None)
self.assertEqual(form['evidence_url'].value(), None)
def test_create_form_initial_values(self):
form = ResultForm(user=self.user, indicator=self.indicator, program=self.program, request=self.request)
self.assertEqual(form['indicator'].value(), self.indicator.id)
self.assertEqual(form['program'].value(), self.program.id)
self.assertEqual(form['achieved'].value(), None)
self.assertEqual(form['record_name'].value(), None)
self.assertEqual(form['evidence_url'].value(), None)
class TestCreateValidation(test.TestCase):
def setUp(self):
self.program = w_factories.ProgramFactory(
reporting_period_start=datetime.date(2016, 1, 1),
reporting_period_end=datetime.date(2016, 12, 31),
)
self.indicator = i_factories.IndicatorFactory(
program=self.program,
target_frequency=Indicator.LOP
)
self.tola_user = w_factories.TolaUserFactory()
self.user = self.tola_user.user
self.request = type('Request', (object,), {'has_write_access': True, 'user': self.user})()
self.form_kwargs = {
'user': self.user,
'indicator': self.indicator,
'program': self.program,
'request': self.request,
}
def test_good_data_validates(self):
minimal_data = {
'date_collected': '2016-01-01',
'achieved': '30',
'indicator': self.indicator.id,
'program': self.program.id,
'rationale': 'this is a rationale'
}
form = ResultForm(minimal_data, **self.form_kwargs)
self.assertTrue(form.is_valid(), "errors {0}".format(form.errors))
new_result = form.save()
self.assertIsNotNone(new_result.id)
db_result = Result.objects.get(pk=new_result.id)
self.assertEqual(db_result.date_collected, datetime.date(2016, 1, 1))
self.assertEqual(db_result.achieved, 30)
def test_good_data_with_evidence_validates(self):
minimal_data = {
'date_collected': '2016-03-31',
'achieved': '30',
'indicator': self.indicator.id,
'program': self.program.id,
'record_name': 'new record',
'evidence_url': 'http://google.com',
'rationale': 'this is a rationale'
}
form = ResultForm(minimal_data, **self.form_kwargs)
self.assertTrue(form.is_valid(), "errors {0}".format(form.erro | rs))
new_result = form.save()
self.assertIsNotNone(new_result.id)
db_result = Result.objects.get(pk=new_result.id)
self.assertEqual(db_result.record_name, 'new record')
def test_good_data_updating_evidence_validates(self):
minimal_data = {
'date_collected': '2016-03-31',
'achieved': '30',
'indicator': sel | f.indicator.id,
'program': self.program.id,
'record_name': 'existing record',
'evidence_url': 'http://google.com',
'rationale': 'this is a rationale'
}
form = ResultForm(minimal_data, **self.form_kwargs)
self.assertTrue(form.is_valid(), "errors {0}".format(form.errors))
new_result = form.save()
self.assertIsNotNone(new_result.id)
db_result = Result.objects.get(pk=new_result.id)
self.assertEqual(db_result.record_name, 'existing record')
self.assertEqual(db_result.evidence_url, 'http://google.com')
    @test.tag('slow')
    def test_adding_record_without_name_passes_validation(self):
        # Evidence name is optional: a URL alone is an acceptable record,
        # so this "bad" data is expected to pass validation.
        bad_data = {
            'date_collected': '2016-03-31',
            'achieved': '30',
            'indicator': self.indicator.id,
            'program': self.program.id,
            'evidence_url': 'http://google.com',
            'rationale': 'this is a rationale'
        }
        form = ResultForm(bad_data, **self.form_kwargs)
        self.assertTrue(form.is_valid())
def test_adding_record_without_url_fails_validation(self):
bad_data = {
'date_collected': '2016-03-31',
'achieved': '30',
'indicator': self.indicator.id,
'program': self.program.id,
'record_name': 'new record',
|
madeso/prettygood | dotnet/SeriesNamer/UpdateTool.Designer.py | Python | mit | 4,446 | 0.023852 | namespace SeriesNamer
{
partial class UpdateTool
{
/// <summary>
/// Required designer variable.
/// </summary>
private System.ComponentModel.IContainer components = null;
/// <summary>
/// Clean up any resources being used.
/// </summary>
/// <param name="disposing">true if managed resources should be disposed; otherwise, false.</param>
protected override void Dispose(bool disposing)
{
if (disposing && (components != null))
{
components.Dispose();
}
base.Dispose(disposing);
}
#region Windows Form Designer generated code
/// <summary>
/// Required method for Designer support - do not modify
/// the contents of this method with the code editor.
/// </summary>
private void InitializeComponent()
{
this.dOutput = new System.Windows.Forms.TextBox();
this.dAbort = new System.Windows.Forms.Button();
this.dProgress = new System.Windows.Forms.ProgressBar();
this.dWork = new System.ComponentModel.BackgroundWorker();
this.SuspendLayout();
//
// dOutput
//
this.dOutput.Anchor = ((System.Windows.Forms.AnchorStyles)((((System.Windows.Forms.AnchorStyles.Top | System.Windows.Forms.AnchorStyles.Bottom)
| System.Windows.Forms.AnchorStyles.Left)
| System.Windows.Forms.AnchorStyles.Right)));
this.dOutput.Location = new System.Drawing.Point(12, 41);
this.dOutput.Multiline = true;
this.dOutput.Name = "dOutput";
this.dOutput.ReadOnly = true;
this.dOutput.ScrollBars = System.Windows.Forms.ScrollBars.Both;
this.dOutput.Size = new System.Drawing.Size(268, 156);
this.dOutput.TabIndex = 0;
this.dOutput.WordWrap = false;
//
// dAbort
//
this.dAbort.Anchor = ((System.Windows.Forms.AnchorStyles)((System.Windows.Forms.AnchorStyles.Bottom | System.Windows.Forms.AnchorStyles.Right)));
this.dAbort.Location = new System.Drawing.Point(209, 203);
this.dAbort.Name = "dAbort";
this.dAbort.Size = new System.Drawing.Size(75, 23);
this.dAbort.TabIndex = 1;
this.dAbort.Text = "Abort";
this.dAbort.UseVisualStyleBackColor = true;
this.dAbort.Click += new System.EventHandler(this.dAbort_Click);
//
// dProgress
//
this.dProgress.Anchor = ((System.Windows.Forms.AnchorStyles)(((System.Windows.Forms.AnchorStyles.Top | System.Windows.Forms.AnchorStyles.Left)
| System.Windows.Forms.AnchorStyles.Right)));
this.dProgress.Location = new System.Drawing.Point(12, 12);
this.dProgress.Name = "dProgress";
this.dProgress.Size = new System.Drawing.Size(268, 23);
this.dProgress.TabIndex = 2;
//
// dWork
//
this.dWork.WorkerReportsProgress = true;
this.dWork.WorkerSupportsCancellation = true;
this.dWork.DoWork += new System.ComponentModel.DoWorkEventHandler(this.dWork_DoWork);
this.dWork.RunWorkerCompleted += new System.ComponentModel.RunWorkerCompletedEventHandler(this.dWork_RunWorkerCompleted);
this.dWork.ProgressChanged += new System.ComponentModel.ProgressChangedEventHandler(this.dWork_ProgressChanged);
//
// UpdateTool
//
this.AutoScaleDimensions = new System.Drawing.SizeF(6F, 13F);
this.AutoScaleMode = System.Windows.For | ms.AutoScaleMode.Font;
this.ClientSize = new System.Drawing.Size(292, 237);
this.Controls.Add(this.dProgress);
this.Controls.Add(this.dAbort);
this.Controls.Add(this.dOutput);
this.Name = "UpdateTool";
this.Text = "UpdateTool";
this.ResumeLayout(false);
this.PerformLayout();
}
#endregion
private System.Windows.Forms.TextBox dOutput;
priv | ate System.Windows.Forms.Button dAbort;
private System.Windows.Forms.ProgressBar dProgress;
private System.ComponentModel.BackgroundWorker dWork;
}
} |
swcarpentry/amy | amy/workshops/templatetags/tags.py | Python | mit | 1,090 | 0 | from django import template
# from django.template.defaultfilters import stringfilter
| from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag
def bootstrap_tag_class(name):
    """Return the Bootstrap badge CSS class for a tag name.

    The class is chosen by case-insensitive prefix; unrecognized names
    fall back to ``badge-secondary``.
    """
    name_low = name.lower()
    # First matching prefix wins; order mirrors the original if/elif chain.
    prefix_to_class = (
        ('swc', 'badge-primary'),
        ('dc', 'badge-success'),
        ('online', 'badge-info'),
        ('lc', 'badge-warning'),
        ('ttt', 'badge-danger'),
        ('itt', 'badge-danger'),
    )
    class_ = 'badge-secondary'
    for prefix, badge in prefix_to_class:
        if name_low.startswith(prefix):
            class_ = badge
            break
    return mark_safe(class_)
@register.simple_tag
def bootstrap_tag(name):
    """Wrap <span> around a tag so that it's displayed as Bootstrap badge:
    http://getbootstrap.com/components/#labels"""
    css_class = bootstrap_tag_class(name)
    html = '<span class="badge {additional_class}">{name}</span>'.format(
        additional_class=css_class, name=name)
    return mark_safe(html)
|
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_application_gateways_operations.py | Python | mit | 70,561 | 0.005144 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling impo | rt AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApplicationGatewaysOperations:
"""ApplicationGatewaysOperations async operations.
| You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client that sends the actual HTTP requests.
        self._client = client
        # Serializer/deserializer map between SDK models and the wire format.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service configuration (subscription id, polling interval, ...).
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        **kwargs: Any
    ) -> None:
        """Send the raw DELETE request; ``begin_delete`` wraps this call in
        a long-running-operation poller."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202/204 are all legitimate: the delete may complete at once,
        # be accepted for async processing, or be a no-op.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified application gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                application_gateway_name=application_gateway_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Choose the polling strategy: default ARM polling, no polling, or a
        # caller-supplied polling object.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}  # type: ignore
async def get(
self,
resource_group_name: str,
application_gateway_name: str,
**kwargs: Any
) -> "_models.ApplicationGateway":
"""Gets the specified application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
: |
darkless456/Python | download.py | Python | mit | 208 | 0.004854 | # down | load.py
import urllib.request

# Download the Python 3.4.1 Windows installer into the current directory.
print("Downloading")
url = 'https://www.python.org/ftp/python/3.4.1/python-3.4.1.msi'
print('File Downloading')
# Save under the installer's canonical filename.
urllib.request.urlretrieve(url, 'python-3.4.1.msi')
|
undertherain/vsmlib | vsmlib/benchmarks/__init__.py | Python | apache-2.0 | 126 | 0 | """Col | lection of benchmarks and downstream tasks on embeddings
.. autosummary::
:toctree: _auto | summary
analogy
"""
|
Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/test/test_parser.py | Python | gpl-3.0 | 27,212 | 0.000441 | import parser
import unittest
import sys
import operator
import struct
from test import support
from test.support.script_helper import assert_python_failure
#
# First, we test that we can generate trees from valid source fragments,
# and that these valid trees are indeed allowed by the tree-loading side
# of the parser module.
#
class RoundtripLegalSyntaxTestCase(unittest.TestCase):
    def roundtrip(self, f, s):
        # Parse *s* with parser entry point *f*, flatten the syntax tree to
        # a tuple, and verify the tuple can be rebuilt into an equivalent ST.
        st1 = f(s)
        t = st1.totuple()
        try:
            st2 = parser.sequence2st(t)
        except parser.ParserError as why:
            self.fail("could not roundtrip %r: %s" % (s, why))

        self.assertEqual(t, st2.totuple(),
                         "could not re-generate syntax tree")
    def check_expr(self, s):
        # Round-trip *s* through the expression parser.
        self.roundtrip(parser.expr, s)
    def test_flags_passed(self):
        # The unicode_literals future flag has to be passed from the parser
        # to AST generation, otherwise '' would compile to bytes here.
        suite = parser.suite("from __future__ import unicode_literals; x = ''")
        code = suite.compile()
        scope = {}
        exec(code, {}, scope)
        self.assertIsInstance(scope["x"], str)
def check_suite(self, s):
self.roundtrip(parser.suit | e, s)
def | test_yield_statement(self):
self.check_suite("def f(): yield 1")
self.check_suite("def f(): yield")
self.check_suite("def f(): x += yield")
self.check_suite("def f(): x = yield 1")
self.check_suite("def f(): x = y = yield 1")
self.check_suite("def f(): x = yield")
self.check_suite("def f(): x = y = yield")
self.check_suite("def f(): 1 + (yield)*2")
self.check_suite("def f(): (yield 1)*2")
self.check_suite("def f(): return; yield 1")
self.check_suite("def f(): yield 1; return")
self.check_suite("def f(): yield from 1")
self.check_suite("def f(): x = yield from 1")
self.check_suite("def f(): f((yield from 1))")
self.check_suite("def f(): yield 1; return 1")
self.check_suite("def f():\n"
" for x in range(30):\n"
" yield x\n")
self.check_suite("def f():\n"
" if (yield):\n"
" yield x\n")
    def test_await_statement(self):
        # 'await' placements inside 'async def' must round-trip.
        self.check_suite("async def f():\n await smth()")
        self.check_suite("async def f():\n foo = await smth()")
        self.check_suite("async def f():\n foo, bar = await smth()")
        self.check_suite("async def f():\n (await smth())")
        self.check_suite("async def f():\n foo((await smth()))")
        self.check_suite("async def f():\n await foo(); return 42")
    def test_async_with_statement(self):
        # 'async with', single and multiple context managers.
        self.check_suite("async def f():\n async with 1: pass")
        self.check_suite("async def f():\n async with a as b, c as d: pass")
    def test_async_for_statement(self):
        # 'async for', with simple and tuple targets.
        self.check_suite("async def f():\n async for i in (): pass")
        self.check_suite("async def f():\n async for i, b in (): pass")
    def test_nonlocal_statement(self):
        # 'nonlocal' with one and with multiple names.
        self.check_suite("def f():\n"
                         "    x = 0\n"
                         "    def g():\n"
                         "        nonlocal x\n")
        self.check_suite("def f():\n"
                         "    x = y = 0\n"
                         "    def g():\n"
                         "        nonlocal x, y\n")
    def test_expressions(self):
        # Expression round-trips: calls with every argument style, displays,
        # comprehensions, arithmetic, lambdas, genexps, and Ellipsis.
        self.check_expr("foo(1)")
        self.check_expr("[1, 2, 3]")
        self.check_expr("[x**3 for x in range(20)]")
        self.check_expr("[x**3 for x in range(20) if x % 3]")
        self.check_expr("[x**3 for x in range(20) if x % 2 if x % 3]")
        self.check_expr("list(x**3 for x in range(20))")
        self.check_expr("list(x**3 for x in range(20) if x % 3)")
        self.check_expr("list(x**3 for x in range(20) if x % 2 if x % 3)")
        self.check_expr("foo(*args)")
        self.check_expr("foo(*args, **kw)")
        self.check_expr("foo(**kw)")
        self.check_expr("foo(key=value)")
        self.check_expr("foo(key=value, *args)")
        self.check_expr("foo(key=value, *args, **kw)")
        self.check_expr("foo(key=value, **kw)")
        self.check_expr("foo(a, b, c, *args)")
        self.check_expr("foo(a, b, c, *args, **kw)")
        self.check_expr("foo(a, b, c, **kw)")
        self.check_expr("foo(a, *args, keyword=23)")
        self.check_expr("foo + bar")
        self.check_expr("foo - bar")
        self.check_expr("foo * bar")
        self.check_expr("foo / bar")
        self.check_expr("foo // bar")
        self.check_expr("lambda: 0")
        self.check_expr("lambda x: 0")
        self.check_expr("lambda *y: 0")
        self.check_expr("lambda *y, **z: 0")
        self.check_expr("lambda **z: 0")
        self.check_expr("lambda x, y: 0")
        self.check_expr("lambda foo=bar: 0")
        self.check_expr("lambda foo=bar, spaz=nifty+spit: 0")
        self.check_expr("lambda foo=bar, **z: 0")
        self.check_expr("lambda foo=bar, blaz=blat+2, **z: 0")
        self.check_expr("lambda foo=bar, blaz=blat+2, *y, **z: 0")
        self.check_expr("lambda x, *y, **z: 0")
        self.check_expr("(x for x in range(10))")
        self.check_expr("foo(x for x in range(10))")
        self.check_expr("...")
        self.check_expr("a[...]")
    def test_simple_expression(self):
        # expr_stmt
        self.check_suite("a")
    def test_simple_assignments(self):
        # Single and chained assignment targets.
        self.check_suite("a = b")
        self.check_suite("a = b = c = d = e")
    def test_simple_augmented_assignments(self):
        # Every augmented-assignment operator must round-trip.
        self.check_suite("a += b")
        self.check_suite("a -= b")
        self.check_suite("a *= b")
        self.check_suite("a /= b")
        self.check_suite("a //= b")
        self.check_suite("a %= b")
        self.check_suite("a &= b")
        self.check_suite("a |= b")
        self.check_suite("a ^= b")
        self.check_suite("a <<= b")
        self.check_suite("a >>= b")
        self.check_suite("a **= b")
def test_function_defs(self):
self.check_suite("def f(): pass")
self.check_suite("def f(*args): pass")
self.check_suite("def f(*args, **kw): pass")
self.check_suite("def f(**kw): pass")
self.check_suite("def f(foo=bar): pass")
self.check_suite("def f(foo=bar, *args): pass")
self.check_suite("def f(foo=bar, *args, **kw): pass")
self.check_suite("def f(foo=bar, **kw): pass")
self.check_suite("def f(a, b): pass")
self.check_suite("def f(a, b, *args): pass")
self.check_suite("def f(a, b, *args, **kw): pass")
self.check_suite("def f(a, b, **kw): pass")
self.check_suite("def f(a, b, foo=bar): pass")
self.check_suite("def f(a, b, foo=bar, *args): pass")
self.check_suite("def f(a, b, foo=bar, *args, **kw): pass")
self.check_suite("def f(a, b, foo=bar, **kw): pass")
self.check_suite("@staticmethod\n"
"def f(): pass")
self.check_suite("@staticmethod\n"
"@funcattrs(x, y)\n"
"def f(): pass")
self.check_suite("@funcattrs()\n"
"def f(): pass")
# keyword-only arguments
self.check_suite("def f(*, a): pass")
self.check_suite("def f(*, a = 5): pass")
self.check_suite("def f(*, a = 5, b): pass")
self.check_suite("def f(*, a, b = 5): pass")
self.check_suite("def f(*, a, b = 5, **kwds): pass")
self.check_suite("def f(*args, a): pass")
self.check_suite("def f(*args, a = 5): pass")
self.check_suite("def f(*args, a = 5, b): pass")
self.check_suite("def f(*args, a, b = 5): pass")
self.check_suite("def f(*args, a, b = 5, **kwds): pass")
# function annotations
self.check_suite("def f(a: int): pass")
self.check_suite("def f(a: int = 5): pass")
self.check_suite("def f(*args: list): pass")
self.check_suite("def f(**kwds: dict): pass")
self.check_suite("def f(*, a: int): pass")
self.check_suite("def f(*, a: int = 5): pass |
DomBennett/pG-lt | pglt/stages/phylogeny_stage.py | Python | gpl-2.0 | 3,537 | 0.001414 | #! /usr/bin/env python
# D.J. Bennett
# 24/03/2014
"""
pglt Stage 4: Phylogeny generation
"""
# PACKAGES
import os
import re
import pickle
import logging
from Bio import Phylo
import pglt.tools.phylogeny_tools as ptools
from pglt.tools.system_tools import MissingDepError
# RUN
def run(wd=None, logger=logging.getLogger('')):
    """Run stage 4: generate a distribution of phylogenies from the
    stage-3 alignments and write them, plus a majority-rule consensus,
    into ``4_phylogeny``.

    wd -- working directory; defaults to the current directory *at call
          time* (the previous ``wd=os.getcwd()`` default was evaluated
          once at import time, pinning the importer's directory)
    logger -- logger for progress messages
    """
    if wd is None:
        wd = os.getcwd()
    # PRINT STAGE
    logger.info("Stage 4: Phylogeny generation")
    # DIRS
    alignment_dir = os.path.join(wd, '3_alignment')
    phylogeny_dir = os.path.join(wd, '4_phylogeny')
    outfile = os.path.join(phylogeny_dir, 'distribution.tre')
    outfile_unconstrained = os.path.join(phylogeny_dir,
                                         'distribution_unconstrained.tre')
    temp_dir = os.path.join(wd, 'tempfiles')
    # CHECK DEPS
    if not ptools.raxml:
        raise MissingDepError('raxml')
    # INPUT: parameters and IDs pickled by earlier stages
    with open(os.path.join(temp_dir, "paradict.p"), "rb") as file:
        paradict = pickle.load(file)
    with open(os.path.join(temp_dir, "genedict.p"), "rb") as file:
        genedict = pickle.load(file)
    with open(os.path.join(temp_dir, "allrankids.p"), "rb") as file:
        allrankids = pickle.load(file)
    # PARAMETERS
    nphylos = int(paradict["nphylos"])
    maxtrys = int(paradict["maxtrys"])
    rttstat = float(paradict["rttstat"])
    constraint = int(paradict["constraint"])
    ptools.logger = logger
    # READ ALIGMENTS (skip hidden files and the log); raw string avoids the
    # invalid "\." escape warning under Python 3
    clusters = sorted(os.listdir(alignment_dir))
    clusters = [e for e in clusters if not re.search(r"^\.|^log\.txt$", e)]
    logger.info("Reading in alignments ....")
    alignment_store = ptools.AlignmentStore(clusters=clusters,
                                            genedict=genedict,
                                            allrankids=allrankids,
                                            indir=alignment_dir, logger=logger)
    # GENERATE TREE DIST
    logger.info("Generating [{0}] phylogenies ....".format(nphylos))
    counter = 0
    generator = ptools.Generator(alignment_store=alignment_store,
                                 rttstat=rttstat, outdir=phylogeny_dir,
                                 maxtrys=maxtrys, logger=logger, wd=temp_dir)
    # constraint == 1 means: never constrain
    if 1 == constraint:
        generator.constraint = False
    for i in range(ptools.countNPhylos(nphylos, outfile)):
        logger.info(".... Iteration [{0}]".format(i + 1))
        success = False
        while not success:
            success = generator.run()
        with open(outfile, "a") as file:
            counter += Phylo.write(generator.phylogenies[-1], file, 'newick')
    # GENERATE CONSENSUS
    logger.info('Generating consensus ....')
    success = ptools.consensus(phylogeny_dir, min_freq=0.5, is_rooted=True,
                               trees_splits_encoded=False)
    if not success:
        logger.info('.... can`t generate consensus, too few names in all trees.')
    # RUN UNCONSTRAINED: constraint == 3 means run both ways
    if 3 == constraint:
        logger.info('Repeating unconstrained ....')
        generator.phylogenies = []
        generator.constraint = False
        for i in range(ptools.countNPhylos(nphylos, outfile_unconstrained)):
            logger.info(".... Iteration [{0}]".format(i + 1))
            success = False
            while not success:
                success = generator.run()
            with open(outfile_unconstrained, "a") as file:
                counter += Phylo.write(generator.phylogenies[-1], file, 'newick')
    # FINISH MESSAGE
    logger.info('Stage finished. Generated [{0}] phylogenies.'.
                format(counter))
|
bigmlcom/bigmler | bigmler/tests/test_06_missing_splits.py | Python | apache-2.0 | 5,076 | 0.003152 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2015-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Testing predictions with missing splits
"""
from bigmler.tests.world import (world, common_setup_module,
common_teardown_module, teardown_class)
import bigmler.tests.basic_tst_prediction_steps as test_pred
def setup_module():
    """Setup for the module
    """
    # Delegates to the shared world fixture bootstrap.
    common_setup_module()
def teardown_module():
    """Teardown for the module
    """
    # Delegates to the shared world fixture cleanup.
    common_teardown_module()
class TestMissingSplits(object):
    # Integration tests for BigMLer predictions built with missing-splits
    # models (models that can split on whether a field value is missing).
    # NOTE: the scenario docstrings below are printed at runtime via
    # self.test_scenarioN.__doc__, so they must not be reworded casually.

    def teardown(self):
        """Calling generic teardown for every method
        """
        print("\nEnd of tests in: %s\n-------------------\n" % __name__)
        teardown_class()

    def setup(self):
        """
        Debug information
        """
        print("\n-------------------\nTests in: %s\n" % __name__)

    def test_scenario1(self):
        """
        Scenario: Successfully building test predictions with missing-splits model:
            Given I create BigML resources uploading train "<data>" file to test "<test>" with a missing-splits model and log predictions in "<output>"
            And I check that the source has been created
            And I check that the dataset has been created
            And I check that the model has been created
            And I check that the predictions are ready
            Then the local prediction file is like "<predictions_file>"

            Examples:
            | data | test | output |predictions_file |
            | ../data/iris_missing.csv | ../data/test_iris_missing.csv | ./scenario_mspl_1/predictions.csv | ./check_files/predictions_iris_missing.csv |
        """
        print(self.test_scenario1.__doc__)
        # Each example: [train data, test data, output path, expected file].
        examples = [
            ['data/iris_missing.csv', 'data/test_iris_missing.csv', 'scenario_mspl_1/predictions.csv', 'check_files/predictions_iris_missing.csv']]
        for example in examples:
            print("\nTesting with:\n", example)
            test_pred.i_create_all_resources_missing_splits(self, data=example[0], test=example[1], output=example[2])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self, suffix=None)
            test_pred.i_check_create_model(self)
            test_pred.i_check_create_predictions(self)
            test_pred.i_check_predictions(self, example[3])

    def test_scenario2(self):
        """
        Scenario: Successfully building test predictions from scratch:
            Given I create BigML resources uploading train "<data>" file to test "<test>" remotely with a missing-splits model and log predictions in "<output>"
            And I check that the source has been created
            And I check that the dataset has been created
            And I check that the model has been created
            And I check that the source has been created from the test file
            And I check that the dataset has been created from the test file
            And I check that the batch prediction has been created
            And I check that the predictions are ready
            Then the local prediction file is like "<predictions_file>"

            Examples:
            | data | test | output |predictions_file |
            | ../data/iris_missing.csv | ../data/test_iris_missing.csv | ./scenario_mspl_2/predictions.csv | ./check_files/predictions_iris_missing.csv
        """
        print(self.test_scenario2.__doc__)
        # Remote (batch) variant of scenario 1.
        examples = [
            ['data/iris_missing.csv', 'data/test_iris_missing.csv', 'scenario_mspl_2/predictions.csv', 'check_files/predictions_iris_missing.csv']]
        for example in examples:
            print("\nTesting with:\n", example)
            test_pred.i_create_all_resources_remote_missing_splits(self, data=example[0], test=example[1], output=example[2])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self, suffix=None)
            test_pred.i_check_create_model(self)
            test_pred.i_check_create_test_source(self)
            test_pred.i_check_create_test_dataset(self)
            test_pred.i_check_create_batch_prediction(self)
            test_pred.i_check_create_predictions(self)
            test_pred.i_check_predictions(self, example[3])
|
tigeorgia/fixmystreet | apps/mainapp/migrations/0007_auto_20150202_1422.py | Python | gpl-2.0 | 487 | 0 | # -*- coding: utf-8 -*-
fr | om __future__ import unicode_literals
from django.db import models, migrations
from django.conf impor | t settings
class Migration(migrations.Migration):
    # Auto-generated: re-declares Ward.councillor as a ForeignKey to the
    # project's configured user model (settings.AUTH_USER_MODEL).

    dependencies = [
        ('mainapp', '0006_auto_20141207_2112'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ward',
            name='councillor',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
    ]
|
NicovincX2/Python-3.5 | Analyse (mathématiques)/Analyse numérique/Équations différentielles numériques/Collocation method/orthopoly.py | Python | gpl-3.0 | 11,766 | 0.005439 | # -*- coding: utf-8 -*-
"""
orthopoly.py - A suite of functions for generating orthogonal polynomials
and quadrature rules.
Copyright (c) 2014 Greg von Winckel
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Last updated on Wed Jan 1 14:29:25 MST 2014
"""
from __future__ import division
import numpy as np
import scipy as sp
import scipy.linalg
def gauss(alpha, beta):
    """
    Compute the Gauss nodes and weights from the recursion
    coefficients associated with a set of orthogonal polynomials

    Inputs:
    alpha - recursion coefficients
    beta - recursion coefficients

    Outputs:
    x - quadrature nodes
    w - quadrature weights

    Adapted from the MATLAB code by Walter Gautschi
    http://www.cs.purdue.edu/archives/2002/wxg/codes/gauss.m
    """
    from scipy.linalg import eig_banded
    # Jacobi matrix in symmetric banded (upper) storage: row 0 holds
    # sqrt(beta) (first entry unused by eig_banded), row 1 the diagonal.
    A = np.vstack((np.sqrt(beta), alpha))
    x, V = eig_banded(A, lower=False)
    # Weights come from the first component of each normalized eigenvector.
    # Use numpy here: scipy no longer re-exports numpy functions such as
    # the former sp.real / sp.power top-level aliases.
    w = beta[0] * np.real(np.power(V[0, :], 2))
    return x, w
def radau(alpha, beta, xr):
    """
    Compute the Radau nodes and weights with the preassigned node xr

    Inputs:
    alpha - recursion coefficients
    beta - recursion coefficients
    xr - assigned node location

    Outputs:
    x - quadrature nodes
    w - quadrature weights

    Based on the section 7 of the paper
    "Some modified matrix eigenvalue problems"
    by Gene Golub, SIAM Review Vol 15, No. 2, April 1973, pp.318--334
    """
    from scipy.linalg import solve_banded
    n = len(alpha) - 1
    f = np.zeros(n)
    f[-1] = beta[-1]
    A = np.vstack((np.sqrt(beta), alpha - xr))
    J = np.vstack((A[:, 0:-1], A[0, 1:]))
    delta = solve_banded((1, 1), J, f)
    # Modify a copy of alpha: the previous code aliased the caller's array
    # and overwrote its last entry in place, corrupting the input.
    alphar = np.copy(alpha)
    alphar[-1] = xr + delta[-1]
    x, w = gauss(alphar, beta)
    return x, w
def lobatto(alpha, beta, xl1, xl2):
    """
    Compute the Lobatto nodes and weights with the preassigned
    nodes xl1,xl2

    Inputs:
    alpha - recursion coefficients
    beta - recursion coefficients
    xl1 - assigned node location
    xl2 - assigned node location

    Outputs:
    x - quadrature nodes
    w - quadrature weights

    Based on the section 7 of the paper
    "Some modified matrix eigenvalue problems"
    by Gene Golub, SIAM Review Vol 15, No. 2, April 1973, pp.318--334
    """
    from scipy.linalg import solve_banded, solve
    n = len(alpha) - 1
    en = np.zeros(n)
    en[-1] = 1
    A1 = np.vstack((np.sqrt(beta), alpha - xl1))
    J1 = np.vstack((A1[:, 0:-1], A1[0, 1:]))
    A2 = np.vstack((np.sqrt(beta), alpha - xl2))
    J2 = np.vstack((A2[:, 0:-1], A2[0, 1:]))
    g1 = solve_banded((1, 1), J1, en)
    g2 = solve_banded((1, 1), J2, en)
    C = np.array(((1, -g1[-1]), (1, -g2[-1])))
    xl = np.array((xl1, xl2))
    ab = solve(C, xl)
    # Modify copies: the previous code aliased the caller's alpha and beta
    # and overwrote their last entries in place, corrupting the inputs.
    alphal = np.copy(alpha)
    alphal[-1] = ab[0]
    betal = np.copy(beta)
    betal[-1] = ab[1]
    x, w = gauss(alphal, betal)
    return x, w
def rec_jacobi(N, a, b):
    """
    Generate the recursion coefficients alpha_k, beta_k

    P_{k+1}(x) = (x-alpha_k)*P_{k}(x) - beta_k P_{k-1}(x)

    for the Jacobi polynomials which are orthogonal on [-1,1]
    with respect to the weight w(x)=[(1-x)^a]*[(1+x)^b]

    Inputs:
    N - polynomial order
    a - weight parameter
    b - weight parameter

    Outputs:
    alpha - recursion coefficients
    beta - recursion coefficients

    Adapted from the MATLAB code by Dirk Laurie and Walter Gautschi
    http://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi.m
    """
    from scipy.special import gamma
    # First coefficients follow directly from the weight's moments.
    nu = (b - a) / float(a + b + 2)
    mu = 2**(a + b + 1) * gamma(a + 1) * gamma(b + 1) / gamma(a + b + 2)
    if N == 1:
        # Single-coefficient case returns scalars, matching the MATLAB code.
        return nu, mu
    k = np.arange(1.0, N)
    kab = 2 * k + a + b
    alpha = np.hstack((nu, (b**2 - a**2) / (kab * (kab + 2))))
    k = k[1:]
    kab = kab[1:]
    B1 = 4 * (a + 1) * (b + 1) / float((a + b + 2)**2 * (a + b + 3))
    B = 4 * (k + a) * (k + b) * k * (k + a + b) / \
        (kab**2 * (kab + 1) * (kab - 1))
    beta = np.hstack((mu, B1, B))
    return alpha, beta
def rec_jacobi01(N, a, b):
    """
    Recursion coefficients alpha_k, beta_k for the Jacobi polynomials
    orthogonal on [0, 1]; obtained by mapping the [-1, 1] coefficients
    from rec_jacobi through the affine change of variable x -> (1+x)/2.

    Inputs:
    N - polynomial order
    a - weight parameter
    b - weight parameter
    Outputs:
    alpha - recursion coefficients
    beta - recursion coefficients

    Adapted from the MATLAB implementation:
    https://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi01.m
    """
    # Validate in the same order as the reference implementation.
    if a <= -1 or b <= -1:
        raise ValueError('''Jacobi coefficients are defined only
                        for alpha,beta > -1''')
    if not isinstance(N, int):
        raise TypeError('N must be an integer')
    if N < 1:
        raise ValueError('N must be at least 1')
    alpha_m11, beta_m11 = rec_jacobi(N, a, b)
    # Affine map [-1, 1] -> [0, 1]: shift/scale alpha, scale beta by 1/4,
    # except beta[0], which carries the rescaled zeroth moment.
    alpha = (1 + alpha_m11) / 2
    beta = beta_m11 / 4
    beta[0] = beta_m11[0] / 2 ** (a + b + 1)
    return alpha, beta
def polyval(alpha, beta, x):
    """
    Evaluate the monic orthogonal polynomials P_0 .. P_N defined by the
    recursion coefficients ``alpha`` and ``beta`` on the points ``x``.

    Inputs:
    alpha - recursion coefficients (length N >= 1)
    beta - recursion coefficients (length N)
    x - evaluation points (array-like)
    Outputs:
    P - (len(x), N+1) array; column k holds P_k evaluated at x
    """
    # Accept plain Python sequences for x as well as ndarrays.
    x = np.asarray(x)
    N = len(alpha)
    m = len(x)
    P = np.zeros((m, N + 1))
    P[:, 0] = 1
    P[:, 1] = (x - alpha[0]) * P[:, 0]
    # `range` instead of `xrange`: keeps this working on Python 3.
    for k in range(1, N):
        P[:, k + 1] = (x - alpha[k]) * P[:, k] - beta[k] * P[:, k - 1]
    return P
def jacobi(N, a, b, x, NOPT=1):
    """
    JACOBI computes the Jacobi polynomials which are orthogonal on [-1,1]
    with respect to the weight w(x)=[(1-x)^a]*[(1+x)^b] and evaluates them
    on the given grid up to P_N(x). Setting NOPT=2 returns the
    L2-normalized polynomials.

    Returns an (len(x), N+1) array whose column k holds P_k(x).
    """
    # Accept plain Python sequences for x as well as ndarrays.
    x = np.asarray(x)
    m = len(x)
    P = np.zeros((m, N + 1))
    apb = a + b
    a1 = a - 1
    b1 = b - 1
    c = apb * (a - b)
    P[:, 0] = 1
    if N > 0:
        P[:, 1] = 0.5 * (a - b + (apb + 2) * x)
    if N > 1:
        # `range` instead of `xrange`: keeps this working on Python 3.
        for k in range(2, N + 1):
            k2 = 2 * k
            g = k2 + apb
            g1 = g - 1
            g2 = g - 2
            d = 2.0 * (k + a1) * (k + b1) * g
            P[:, k] = (g1 * (c + g2 * g * x) * P[:, k - 1] -
                       d * P[:, k - 2]) / (k2 * (k + apb) * g2)
    if NOPT == 2:
        from scipy.special import gamma
        # Squared L2 norms of the Jacobi polynomials under the weight.
        k = np.arange(N + 1)
        pnorm = 2**(apb + 1) * gamma(k + a + 1) * gamma(k + b + 1) / \
            ((2 * k + a + b + 1) * (gamma(k + 1) * gamma(k + a + b + 1)))
        P *= 1 / np.sqrt(pnorm)
    return P
def jacobiD(N, a, b, x, NOPT=1):
    """
    JACOBID computes the first derivatives of the normalized Jacobi
    polynomials which are orthogonal on [-1,1] with respect
    to the weight w(x)=[(1-x)^a]*[(1+x)^b] and evaluates them
    on the given grid up to P_N(x). Setting NOPT=2 returns
    the derivatives of the L2-normalized polynomials.

    Uses d/dx P_k^(a,b)(x) = ((k + a + b + 1) / 2) * P_{k-1}^(a+1,b+1)(x),
    so column 0 (derivative of the constant P_0) is identically zero.
    """
    # Fixed corrupted tokens from the original ("le | n" in the zeros call
    # and a broken docstring line).
    z = np.zeros((len(x), 1))
    if N == 0:
        Px = z
    else:
        Px = 0.5 * np.hstack((z, jacobi(N - 1, a + 1, b + 1, x, NOPT) *
                              ((a + b + 2 + np.arange(N)))))
    return Px
|
snickl/buildroot-iu | support/testing/tests/package/test_dropbear.py | Python | gpl-2.0 | 1,055 | 0 | import os
import infra.basetest
class TestDropbear(infra.basetest.BRTest):
    """Boot a minimal rootfs with dropbear and check that sshd listens on 22."""
    config = infra.basetest.BASIC_TOOLCHAIN_CONFIG + \
        """
        BR2_TARGET_GENERIC_ROOT_PASSWD="testpwd"
        BR2_SYSTEM_DHCP="eth0"
        BR2_PACKAGE_DROPBEAR=y
        BR2_TARGET_ROOTFS_CPIO=y
        # BR2_TARGET_ROOTFS_TAR is not set
        """

    def test_run(self):
        img = os.path.join(self.builddir, "images", "rootfs.cpio")
        # Forward host port 2222 to guest port 22 so SSH would be reachable.
        self.emulator.boot(arch="armv5",
                           kernel="builtin",
                           options=["-initrd", img,
                                    "-net", "nic",
                                    "-net", "user,hostfwd=tcp::2222-:22"])
        self.emulator.login("testpwd")
        # Fixed corrupted command string: check dropbear listens on :22.
        cmd = "netstat -ltn 2>/dev/null | grep 0.0.0.0:22"
        _, exit_code = self.emulator.run(cmd)
        self.assertEqual(exit_code, 0)
        # Would be useful to try to login through SSH here, through
        # localhost:2222, though it is not easy to pass the ssh
        # password on the command line.
|
GUR9000/Deep_MRI_brain_extraction | NNet_Core/NN_Analyzer.py | Python | mit | 4,154 | 0.016129 | """
This software is an implementation of
Deep MRI brain extraction: A 3D convolutional neural network for skull stripping
You can download the paper at http://dx.doi.org/10.1016/j.neuroimage.2016.01.024
If you use this software for your projects please cite:
Kleesiek and Urban et al, Deep MRI brain extraction: A 3D convolutional neural network for skull stripping,
NeuroImage, Volume 129, April 2016, Pages 460-469.
The MIT License (MIT)
Copyright (c) 2016 Gregor Urban, Jens Kleesiek
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import theano
import theano.tensor as T
import numpy as np
class Analyzer(object):
    """Inspect a compiled CNN: per-layer activation statistics and the
    statistics of the parameter gradients, via lazily-compiled Theano
    functions."""

    def __init__(self, cnn):
        self._cnn = cnn
        # Lazy-compilation flags for the two Theano functions below.
        self._ranonce = False
        self._ranonce2 = False

    ####################
    def _runonce(self):
        """Compile (once) a function returning every layer's output."""
        if self._ranonce:
            return
        print(self,'compiling...')
        self._output_function = theano.function([self._cnn.layers[0].input], [lay.output for lay in self._cnn.layers])
        self._ranonce=True

    ####################
    def _runonce2(self):
        """Compile (once) a function returning the loss gradients w.r.t.
        all parameters."""
        if self._ranonce2:
            return
        print(self,'compiling...')
        output_layer_Gradients = T.grad(self._cnn.output_layer_Loss, self._cnn.params, disconnected_inputs="warn")
        self._output_function2 = theano.function([self._cnn.x, self._cnn.y], [x for x in output_layer_Gradients], on_unused_input='warn')
        self._ranonce2=True

    def analyze_forward_pass(self, *input):
        """ input should be a list of all inputs. ((DO NOT INCLUDE labels/targets!))"""
        self._runonce()
        outputs = self._output_function(*input)
        print()
        print( 'Analyzing internal outputs of network',self._cnn,' (I am',self,') ... ')
        for lay,out in zip(self._cnn.layers, outputs):
            mi,ma = np.min(out), np.max(out)
            mea,med = np.mean(out),np.median(out)
            std = np.std(out)
            print( '{:^100}: {:^30}, min/max = [{:9.5f}, {:9.5f}], mean/median = ({:9.5f}, {:9.5f}), std = {:9.5f}'.format(lay,out.shape,mi,ma,mea,med,std))
        print()
        return outputs

    def analyze_gradients(self, *input):
        """ input should be a list of all inputs and labels/targets"""
        # Fixed corrupted token: "self._ | runonce2()" -> "self._runonce2()".
        self._runonce2()
        outputs = self._output_function2(*input)
        print()
        print( 'Analyzing internal gradients of network',self._cnn,' (I am',self,') ... ')
        # Gradients come back as one flat list; walk it in per-layer slices
        # of len(lay.params) entries each.
        i = 0
        j = 0
        for lay in self._cnn.layers:
            try:
                j = len(lay.params)
            except:
                j = 0
            if j:
                for out in outputs[i:i+j]:
                    mi,ma = np.min(out), np.max(out)
                    # Fixed corrupted token: "np. | mean" -> "np.mean".
                    mea,med = np.mean(out),np.median(out)
                    std = np.std(out)
                    print('{:^100}: {:^30}, min/max = [{:9.5f}, {:9.5f}], mean/median = ({:9.5f}, {:9.5f}), std = {:9.5f}'.format(lay,out.shape,mi,ma,mea,med,std))
            else:
                print( '{:^100}: no parameters'.format(lay))
            i+=j
        print()
        return outputs
|
CollabQ/CollabQ | vendor/django/contrib/gis/geos/collections.py | Python | apache-2.0 | 4,663 | 0.004932 | """
This module houses the Geometry Collection objects:
GeometryCollection, MultiPoint, MultiLineString, and MultiPolygon
"""
from ctypes import c_int, c_uint, byref
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.libgeos import get_pointer_arr, GEOM_PTR, GEOS_PREPARE
from django.contrib.gis.geos.linestring import LineString, LinearRing
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.contrib.gis.geos import prototypes as capi
class GeometryCollection(GEOSGeometry):
    "Generic GEOS geometry collection; base class for the Multi* geometries."
    _typeid = 7

    def __init__(self, *args, **kwargs):
        "Initializes a Geometry Collection from a sequence of Geometry objects."
        # Checking the arguments
        if not args:
            # Parenthesized raise: valid on both Python 2 and 3
            # (the original `raise TypeError, ...` is Python-2-only syntax).
            raise TypeError('Must provide at least one Geometry to initialize %s.' % self.__class__.__name__)
        if len(args) == 1:
            # If only one geometry provided or a list of geometries is provided
            # in the first argument.
            if isinstance(args[0], (tuple, list)):
                init_geoms = args[0]
            else:
                init_geoms = args
        else:
            init_geoms = args

        # Ensuring that only the permitted geometries are allowed in this collection
        # this is moved to list mixin super class
        self._check_allowed(init_geoms)

        # Creating the geometry pointer array.
        collection = self._create_collection(len(init_geoms), iter(init_geoms))
        super(GeometryCollection, self).__init__(collection, **kwargs)

    def __iter__(self):
        "Iterates over each Geometry in the Collection."
        # `range` works on Python 2 and 3 (was `xrange`).
        for i in range(len(self)):
            yield self[i]

    def __len__(self):
        "Returns the number of geometries in this Collection."
        return self.num_geom

    ### Methods for compatibility with ListMixin ###
    def _create_collection(self, length, items):
        # Creating the geometry pointer array.
        geoms = get_pointer_arr(length)
        for i, g in enumerate(items):
            # this is a little sloppy, but makes life easier
            # allow GEOSGeometry types (python wrappers) or pointer types
            geoms[i] = capi.geom_clone(getattr(g, 'ptr', g))
        return capi.create_collection(c_int(self._typeid), byref(geoms), c_uint(length))

    def _get_single_internal(self, index):
        # Fixed corrupted token: "self | .ptr" -> "self.ptr".
        return capi.get_geomn(self.ptr, index)

    def _get_single_external(self, index):
        "Returns the Geometry from this Collection at the given index (0-based)."
        # Checking the index and returning the corresponding GEOS geometry.
        return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)

    def _set_list(self, length, items):
        "Create a new collection, and destroy the contents of the previous pointer."
        prev_ptr = self.ptr
        srid = self.srid
        self.ptr = self._create_collection(length, items)
        if srid: self.srid = srid
        capi.destroy_geom(prev_ptr)

    _set_single = GEOSGeometry._set_single_rebuild
    _assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild

    @property
    def kml(self):
        "Returns the KML for this Geometry Collection."
        return '<MultiGeometry>%s</MultiGeometry>' % ''.join([g.kml for g in self])

    @property
    def tuple(self):
        "Returns a tuple of all the coordinates in this Geometry Collection"
        return tuple([g.tuple for g in self])
    coords = tuple
# MultiPoint, MultiLineString, and MultiPolygon class definitions.
class MultiPoint(GeometryCollection):
    # Collection restricted to Point members (GEOS geometry type id 4).
    _allowed = Point
    _typeid = 4
class MultiLineString(GeometryCollection):
    # Collection restricted to LineString/LinearRing members
    # (GEOS geometry type id 5).
    _allowed = (LineString, LinearRing)
    _typeid = 5

    @property
    def merged(self):
        """
        Returns a LineString representing the line merge of this
        MultiLineString.
        """
        return self._topology(capi.geos_linemerge(self.ptr))
class MultiPolygon(GeometryCollection):
    # Collection restricted to Polygon members (GEOS geometry type id 6).
    _allowed = Polygon
    _typeid = 6

    @property
    def cascaded_union(self):
        "Returns a cascaded union of this MultiPolygon."
        # GEOS_PREPARE signals a GEOS 3.1+ library, which introduced
        # the cascaded-union operation.
        if GEOS_PREPARE:
            return GEOSGeometry(capi.geos_cascaded_union(self.ptr), self.srid)
        else:
            raise GEOSException('The cascaded union operation requires GEOS 3.1+.')
# Setting the allowed types here since GeometryCollection is defined before
# its subclasses.  This widens the base class's membership check to every
# concrete geometry type, including the Multi* collections themselves.
GeometryCollection._allowed = (Point, LineString, LinearRing, Polygon, MultiPoint, MultiLineString, MultiPolygon)
|
doordash/realms-wiki | realms/lib/model.py | Python | gpl-2.0 | 10,966 | 0.000821 | import json
from sqlalchemy import not_, and_
from datetime import datetime
from realms import db
class Model(db.Model):
"""Base SQLAlchemy Model for automatic serialization and
deserialization of columns and nested relationships.
Source: https://gist.github.com/alanhamlett/6604662
Usage::
>>> class User(Model):
>>> id = db.Column(db.Integer(), primary_key=True)
>>> email = db.Column(db.String(), index=True)
>>> name = db.Column(db.String())
>>> password = db.Column(db.String())
>>> posts = db.relationship('Post', backref='user', lazy='dynamic')
>>> ...
>>> default_fields = ['email', 'name']
>>> hidden_fields = ['password']
>>> readonly_fields = ['email', 'password']
>>>
>>> class Post(Model):
>>> id = db.Column(db.Integer(), primary_key=True)
>>> user_id = db.Column(db.String(), db.ForeignKey('user.id'), nullable=False)
>>> title = db.Column(db.String())
>>> ...
>>> default_fields = ['title']
>>> readonly_fields = ['user_id']
>>>
>>> model = User(email='john@localhost')
>>> db.session.add(model)
>>> db.session.commit()
>>>
>>> # update name and create a new post
>>> validated_input = {'name': 'John', 'posts': [{'title':'My First Post'}]}
>>> model.set_columns(**validated_input)
>>> db.session.commit()
>>>
>>> print(model.to_dict(show=['password', 'posts']))
>>> {u'email': u'john@localhost', u'posts': [{u'id': 1, u'title': u'My First Post'}], u'name': u'John', u'id': 1}
"""
__abstract__ = True
# Stores changes made to this model's attributes. Can be retrieved
# with model.changes
_changes = {}
def __init__(self, **kwargs):
kwargs['_force'] = True
self._set_columns(**kwargs)
    def filter_by(self, **kwargs):
        # NOTE(review): ``key == value`` compares the *string* keyword name
        # against the value (yielding a plain bool), not a column expression;
        # a column-based filter would read ``getattr(cls, key) == value``.
        # Confirm intended semantics with the call sites before changing.
        clauses = [key == value
                   for key, value in kwargs.items()]
        return self.filter(and_(*clauses))
def _set_columns(self, **kwargs):
force = kwargs.get('_force')
readonly = []
if hasattr(self, 'readonly_fields'):
readonly = self.readonly_fields
if hasattr(self, 'hidden_fields'):
readonly += self.hidden_fields
readonly += [
'id',
'created',
'updated',
'modified',
'created_at',
'updated_at',
'modified_at',
]
changes = {}
columns = self.__table__.columns.keys()
relationships = self.__mapper__.relationships.keys()
for key in columns:
allowed = True if force or key not in readonly else False
exists = True if key in kwargs else False
if allowed and exists:
val = getattr(self, key)
if val != kwargs[key]:
changes[key] = {'old': val, 'new': kwargs[key]}
setattr(self, key, kwargs[key])
for rel in relationships:
allowed = True if force or rel not in readonly else False
exists = True if rel in kwargs else False
if allowed and exists:
is_list = self.__mapper__.relationships[rel].uselist
if is_list:
valid_ids = []
query = getattr(self, rel)
cls = self.__mapper__.relationships[rel].argument()
for item in kwargs[rel]:
if 'id' in item and query.filter_by(id=item['id']).limit(1).count() == 1:
obj = cls.query.filter_by(id=item['id']).first()
col_changes = obj.set_columns(**item)
if col_changes:
col_changes['id'] = str(item['id'])
if rel in changes:
changes[rel].append(col_changes)
else:
changes.update({rel: [col_changes]})
valid_ids.append(str(item['id']))
else:
col = cls()
col_changes = col.set_columns(**item)
query.append(col)
db.session.flush()
if col_changes:
col_changes['id'] = str(col.id)
if rel in changes:
changes[rel].append(col_changes)
else:
changes.update({rel: [col_changes]})
valid_ids.append(str(col.id))
# delete related rows that were not in kwargs[rel]
for item in query.filter(not_(cls.id.in_(valid_ids))).all():
col_changes = {
'id': str(item.id),
'deleted': True,
}
if rel in changes:
changes[rel].append(col_changes)
else:
changes.update({rel: [col_changes]})
db.session.delete(item)
else:
val = getattr(self, rel)
if self.__mapper__.relationships[rel].query_class is not None:
if val is not None:
col_changes = val.set_columns(**kwargs[rel])
if col_changes:
changes.update({rel: col_changes})
else:
i | f val != kwargs[rel]:
setattr(self, rel, kwargs[rel])
changes[rel] = {'old': val, 'new': kwargs[rel]}
return changes
def set_columns(self, **kwargs):
self._changes = self._set_columns(**kwargs)
if 'modified' in self.__table__.columns:
self.modified = datetime.utcnow()
if 'updated' in self.__table__.columns:
self.updated = datetime.utcnow()
if | 'modified_at' in self.__table__.columns:
self.modified_at = datetime.utcnow()
if 'updated_at' in self.__table__.columns:
self.updated_at = datetime.utcnow()
return self._changes
    def __repr__(self):
        """Short ``ClassName(id)`` repr, or a JSON dump of all columns for
        models without an ``id`` column (datetimes rendered as ISO-8601 Z)."""
        if 'id' in self.__table__.columns.keys():
            return '%s(%s)' % (self.__class__.__name__, self.id)
        data = {}
        for key in self.__table__.columns.keys():
            val = getattr(self, key)
            if type(val) is datetime:
                val = val.strftime('%Y-%m-%dT%H:%M:%SZ')
            data[key] = val
        # NOTE(review): ``use_decimal`` is a simplejson-only keyword; this
        # assumes ``json`` resolves to simplejson here -- confirm the import.
        return json.dumps(data, use_decimal=True)
    @property
    def changes(self):
        # Change log recorded by the most recent set_columns() call.
        return self._changes
    def reset_changes(self):
        # Discard the recorded change log for this instance.
        self._changes = {}
def to_dict(self, show=None, hide=None, path=None, show_all=None):
""" Return a dictionary representation of this model.
"""
if not show:
show = []
if not hide:
hide = []
hidden = []
if hasattr(self, 'hidden_fields'):
hidden = self.hidden_fields
default = []
if hasattr(self, 'default_fields'):
default = self.default_fields
ret_data = {}
if not path:
path = self.__tablename__.lower()
def prepend_path(item):
item = item.lower()
if item.split('.', 1)[0] == path:
return item
if len(item) == 0:
return item
if item[0] != '.':
item = '.%s' % item
item = '%s%s' % (path, item)
return item
show[:] = [prepend_path(x) for x in show]
hide[:] = [prepend_path(x) for x in hide]
columns = self.__table__.columns.keys()
relations |
pjaehrling/finetuneAlexVGG | preprocessing/inception/resize.py | Python | apache-2.0 | 375 | 0.005333 | import tensorflow as tf
from preprocessing import resize
def preprocess_image(image, output_height, output_width, is_training=False):
    """Inception-style preprocessing: resize, then rescale pixels to [-1, 1].

    Args:
        image: input image tensor (integer or float dtype).
        output_height: target height passed to the shared resize step.
        output_width: target width passed to the shared resize step.
        is_training: forwarded to resize.preprocess_image (default False;
            the original def line was corrupted as "Fal | se").
    Returns:
        A float32 tensor with values in [-1, 1].
    """
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    image = resize.preprocess_image(image, output_height, output_width, is_training)
    # Map [0, 1] -> [-1, 1], as expected by Inception-style networks.
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image
|
zheminzhou/GrapeTree | tests/test_bsa.py | Python | gpl-3.0 | 1,304 | 0.029141 | from grapetree import app
import pytest
import os
#def test_bsa_asymetric():
#print os.getcwd()
#app_test = app.test_client()
#with open(os.path.join('examples','simulated_data.profile')) as f:
#test_profile = f.read()
#with open(os.path.join('examples','simulated_data.global.nwk')) as f:
#test_results = f.read()
#tree = app_test.post('/maketree', data=dict(profile=test_profile))
#assert str(tree.data).strip() == test_results
#def test_bsa_symetric():
#app_test = app.test_client()
#with open(os.path.join('examples','simulated_data.profile')) as f:
#test_profile = f.read()
#with open(os.path.join('examples','simulated_data.BSA.nwk')) as f:
#test_results = f.read()
#tree = app_test.post('/maketree', data=dict(profile=test_profile, matrix_type='symmetric'))
#assert str(tree.data).strip() == test_results
def test_405():
    """/maketree only accepts POST (payloads can be large), so GET gives 405."""
    client = app.test_client()
    response = client.get('/maketree')
    assert response.status_code == 405
def test_params():
    """BSA params must be configured on the app (cannot be null)."""
    # The test client created in the corrupted original was unused; this
    # check only needs to inspect the application config.
    assert app.config.get('PARAMS') is not None
if __name__ == "__main__":
t | est_bsa_asymetric()
test_bad_profile() |
t-wissmann/qutebrowser | qutebrowser/extensions/loader.py | Python | gpl-3.0 | 5,652 | 0 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2018-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Loader for qutebrowser extensions."""
import importlib.abc
import pkgutil
import types
import typing
import sys
import pathlib
import attr
from PyQt5.QtCore import pyqtSlot
from qutebrowser import components
from qutebrowser.config import config
from qutebrowser.utils import log, standarddir
from qutebrowser.misc import objects
if typing.TYPE_CHECKING:
import argparse
# ModuleInfo objects for all loaded plugins
_module_infos = []
@attr.s
class InitContext:

    """Context an extension gets in its init hook."""

    # Per-user data directory (standarddir.data()).
    data_dir = attr.ib()  # type: pathlib.Path
    # Per-user config directory (standarddir.config()).
    config_dir = attr.ib()  # type: pathlib.Path
    # Parsed command-line arguments of this qutebrowser run.
    args = attr.ib()  # type: argparse.Namespace
@attr.s
class ModuleInfo:

    """Information attached to an extension module.

    This gets used by qutebrowser.api.hook.
    """

    _ConfigChangedHooksType = typing.List[typing.Tuple[typing.Optional[str],
                                                       typing.Callable]]

    # When True, all hooks of this module are skipped on dispatch.
    skip_hooks = attr.ib(False)  # type: bool
    # Callable run once at startup with an InitContext, if registered.
    init_hook = attr.ib(None)  # type: typing.Optional[typing.Callable]
    # (option-name-or-None, callback) pairs run when the config changes.
    config_changed_hooks = attr.ib(
        attr.Factory(list))  # type: _ConfigChangedHooksType
@attr.s
class ExtensionInfo:

    """Information about a qutebrowser extension."""

    # Fully-qualified module name of the extension.
    name = attr.ib()  # type: str
def add_module_info(module: types.ModuleType) -> ModuleInfo:
    """Add ModuleInfo to a module (if not added yet)."""
    # pylint: disable=protected-access
    attr_name = '__qute_module_info'
    if not hasattr(module, attr_name):
        setattr(module, attr_name, ModuleInfo())
    return getattr(module, attr_name)
def load_components(*, skip_hooks: bool = False) -> None:
    """Load everything from qutebrowser.components.

    Reconstructed from a corrupted loop body; walks all component modules
    and loads each one, optionally skipping their hooks.
    """
    for info in walk_components():
        _load_component(info, skip_hooks=skip_hooks)
def walk_components() -> typing.Iterator[ExtensionInfo]:
    """Yield ExtensionInfo objects for all modules."""
    # Frozen (PyInstaller) builds need a TOC-based walk; everything else
    # can use pkgutil directly.
    walker = _walk_pyinstaller if hasattr(sys, 'frozen') else _walk_normal
    yield from walker()
def _on_walk_error(name: str) -> None:
    """Turn a module-walk failure into an ImportError."""
    message = "Failed to import {}".format(name)
    raise ImportError(message)
def _walk_normal() -> typing.Iterator[ExtensionInfo]:
    """Walk extensions when not using PyInstaller."""
    prefix = components.__name__ + '.'
    # Only packages have a __path__ attribute,
    # but we're sure this is one.
    pkg_path = components.__path__  # type: ignore
    walker = pkgutil.walk_packages(path=pkg_path, prefix=prefix,
                                   onerror=_on_walk_error)
    for _finder, name, ispkg in walker:
        if not ispkg:
            yield ExtensionInfo(name=name)
def _walk_pyinstaller() -> typing.Iterator[ExtensionInfo]:
    """Walk extensions when using PyInstaller.

    See https://github.com/pyinstaller/pyinstaller/issues/1905
    Inspired by:
    https://github.com/webcomics/dosage/blob/master/dosagelib/loader.py
    """
    # Merge the tables of contents of every frozen importer.
    modules = set()  # type: typing.Set[str]
    for importer in pkgutil.iter_importers('qutebrowser'):
        if hasattr(importer, 'toc'):
            modules |= importer.toc
    prefix = components.__name__ + '.'
    for name in modules:
        if name.startswith(prefix):
            yield ExtensionInfo(name=name)
def _get_init_context() -> InitContext:
    """Get an InitContext object."""
    data_dir = pathlib.Path(standarddir.data())
    config_dir = pathlib.Path(standarddir.config())
    return InitContext(data_dir=data_dir,
                       config_dir=config_dir,
                       args=objects.args)
def _load_component(info: ExtensionInfo, *,
                    skip_hooks: bool = False) -> types.ModuleType:
    """Load the given extension and run its init hook (if any).

    Args:
        skip_hooks: Whether to skip all hooks for this module.
            This is used to only run @cmdutils.register decorators.

    Returns:
        The imported module object.
    """
    log.extensions.debug("Importing {}".format(info.name))
    mod = importlib.import_module(info.name)

    # Importing the module runs its decorators, which register hooks on
    # the attached ModuleInfo.
    mod_info = add_module_info(mod)
    if skip_hooks:
        mod_info.skip_hooks = True

    if mod_info.init_hook is not None and not skip_hooks:
        log.extensions.debug("Running init hook {!r}"
                             .format(mod_info.init_hook.__name__))
        mod_info.init_hook(_get_init_context())

    # Remember the module so _on_config_changed can dispatch to its hooks.
    _module_infos.append(mod_info)

    return mod
@pyqtSlot(str)
def _on_config_changed(changed_name: str) -> None:
    """Call config_changed hooks if the config changed."""
    for mod_info in _module_infos:
        if mod_info.skip_hooks:
            continue
        for option, hook in mod_info.config_changed_hooks:
            # A hook registered without an option fires on every change.
            if option is None:
                hook()
                continue
            cfilter = config.change_filter(option)
            cfilter.validate()
            if cfilter.check_match(changed_name):
                hook()
def init() -> None:
    """Connect extension config-change dispatch to the config instance."""
    config.instance.changed.connect(_on_config_changed)
|
zenefits/sentry | src/sentry/testutils/cases.py | Python | bsd-3-clause | 14,473 | 0.000691 | """
sentry.testutils.cases
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = (
'TestCase', 'TransactionTestCase', 'APITestCase', 'AuthProviderTestCase',
'RuleTestCase', 'PermissionTestCase', 'PluginTestCase', 'CliTestCase',
'AcceptanceTestCase',
)
import base64
import os
import os.path
import pytest
import six
import types
from click.testing i | mport CliRunner
from contextlib import contextmanager
from django.conf import settings
from django.contrib.auth import login
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test import TestCase, TransactionTestCase
from django.utils.importlib import import_module
from exam import before, fixture, Exam
from pkg_resources im | port iter_entry_points
from rest_framework.test import APITestCase as BaseAPITestCase
from six.moves.urllib.parse import urlencode
from sentry import auth
from sentry.auth.providers.dummy import DummyProvider
from sentry.constants import MODULE_ROOT
from sentry.models import GroupMeta, ProjectOption
from sentry.plugins import plugins
from sentry.rules import EventState
from sentry.utils import json
from sentry.utils.auth import SSO_SESSION_KEY
from .fixtures import Fixtures
from .helpers import AuthProvider, Feature, get_auth_header, TaskRunner, override_options
DEFAULT_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
class BaseTestCase(Fixtures, Exam):
    """Shared helpers for Sentry test cases: auth/session setup, feature
    and option overrides, and low-level event-store POST helpers."""

    urls = 'sentry.web.urls'

    def assertRequiresAuthentication(self, path, method='GET'):
        """Assert that ``path`` redirects anonymous requests to the login page."""
        resp = getattr(self.client, method.lower())(path)
        assert resp.status_code == 302
        assert resp['Location'].startswith('http://testserver' + reverse('sentry-login'))

    @before
    def setup_dummy_auth_provider(self):
        # Registered for every test; unregistered automatically on cleanup.
        auth.register('dummy', DummyProvider)
        self.addCleanup(auth.unregister, 'dummy', DummyProvider)

    @before
    def setup_session(self):
        # A saved (persisted) session is required before login_as can work.
        engine = import_module(settings.SESSION_ENGINE)

        session = engine.SessionStore()
        session.save()

        self.session = session

    def tasks(self):
        """Context manager that runs celery tasks eagerly inside the block."""
        return TaskRunner()

    def feature(self, name, active=True):
        """
        >>> with self.feature('feature:name')
        >>>     # ...
        """
        return Feature(name, active)

    def auth_provider(self, name, cls):
        """
        >>> with self.auth_provider('name', Provider)
        >>>     # ...
        """
        return AuthProvider(name, cls)

    def save_session(self):
        """Persist self.session and copy its key into the test client cookie."""
        self.session.save()

        cookie_data = {
            'max-age': None,
            'path': '/',
            'domain': settings.SESSION_COOKIE_DOMAIN,
            'secure': settings.SESSION_COOKIE_SECURE or None,
            'expires': None,
        }

        session_cookie = settings.SESSION_COOKIE_NAME
        self.client.cookies[session_cookie] = self.session.session_key
        self.client.cookies[session_cookie].update(cookie_data)

    def login_as(self, user, organization_id=None):
        """Log ``user`` into the test session; optionally mark an SSO org."""
        user.backend = settings.AUTHENTICATION_BACKENDS[0]

        request = HttpRequest()
        request.session = self.session

        login(request, user)
        request.user = user

        if organization_id:
            request.session[SSO_SESSION_KEY] = six.text_type(organization_id)

        # Save the session values.
        self.save_session()

    def load_fixture(self, filepath):
        """Return the raw bytes of a file under tests/fixtures/."""
        filepath = os.path.join(
            MODULE_ROOT,
            'tests',
            'fixtures',
            filepath,
        )
        with open(filepath, 'rb') as fp:
            return fp.read()

    def _pre_setup(self):
        # Clear process-local caches so tests don't leak state into each other.
        super(BaseTestCase, self)._pre_setup()

        cache.clear()
        ProjectOption.objects.clear_local_cache()
        GroupMeta.objects.clear_local_cache()

    def _post_teardown(self):
        super(BaseTestCase, self)._post_teardown()

    def _makeMessage(self, data):
        # Event payload as UTF-8 JSON bytes.
        return json.dumps(data).encode('utf-8')

    def _makePostMessage(self, data):
        # Event payload base64-encoded, as the store endpoint expects.
        return base64.b64encode(self._makeMessage(data))

    def _postWithHeader(self, data, key=None, secret=None, protocol=None):
        """POST an event to the store endpoint with an X-Sentry-Auth header.

        NOTE(review): ``secret`` is only defaulted when ``key`` is None;
        passing a key without a secret sends secret=None -- confirm intended.
        """
        if key is None:
            key = self.projectkey.public_key
            secret = self.projectkey.secret_key

        message = self._makePostMessage(data)
        with self.tasks():
            resp = self.client.post(
                reverse('sentry-api-store'), message,
                content_type='application/octet-stream',
                HTTP_X_SENTRY_AUTH=get_auth_header(
                    '_postWithHeader/0.0.0',
                    key,
                    secret,
                    protocol,
                ),
            )
        return resp

    def _postCspWithHeader(self, data, key=None, **extra):
        """POST a CSP report for self.project.

        NOTE(review): ``body`` is unbound if ``data`` is neither a dict nor
        a string -- callers must pass one of those two types.
        """
        if isinstance(data, dict):
            body = json.dumps({'csp-report': data})
        elif isinstance(data, six.string_types):
            body = data
        path = reverse('sentry-api-csp-report', kwargs={'project_id': self.project.id})
        path += '?sentry_key=%s' % self.projectkey.public_key
        with self.tasks():
            return self.client.post(
                path, data=body,
                content_type='application/csp-report',
                HTTP_USER_AGENT=DEFAULT_USER_AGENT,
                **extra
            )

    def _getWithReferer(self, data, key=None, referer='sentry.io', protocol='4'):
        """GET-store an event (raven-js style), optionally with a Referer."""
        if key is None:
            key = self.projectkey.public_key

        headers = {}
        if referer is not None:
            headers['HTTP_REFERER'] = referer

        message = self._makeMessage(data)
        qs = {
            'sentry_version': protocol,
            'sentry_client': 'raven-js/lol',
            'sentry_key': key,
            'sentry_data': message,
        }
        with self.tasks():
            resp = self.client.get(
                '%s?%s' % (reverse('sentry-api-store', args=(self.project.pk,)), urlencode(qs)),
                **headers
            )
        return resp

    def _postWithReferer(self, data, key=None, referer='sentry.io', protocol='4'):
        """POST-store an event (raven-js style), optionally with a Referer."""
        if key is None:
            key = self.projectkey.public_key

        headers = {}
        if referer is not None:
            headers['HTTP_REFERER'] = referer

        message = self._makeMessage(data)
        qs = {
            'sentry_version': protocol,
            'sentry_client': 'raven-js/lol',
            'sentry_key': key,
        }
        with self.tasks():
            resp = self.client.post(
                '%s?%s' % (reverse('sentry-api-store', args=(self.project.pk,)), urlencode(qs)),
                data=message,
                content_type='application/json',
                **headers
            )
        return resp

    def options(self, options):
        """
        A context manager that temporarily sets a global option and reverts
        back to the original value when exiting the context.
        """
        return override_options(options)

    @contextmanager
    def dsn(self, dsn):
        """
        A context manager that temporarily sets the internal client's DSN
        """
        from raven.contrib.django.models import client

        try:
            client.set_dsn(dsn)
            yield
        finally:
            client.set_dsn(None)

    # Legacy aliases kept for older call sites.
    _postWithSignature = _postWithHeader
    _postWithNewSignature = _postWithHeader
class TestCase(BaseTestCase, TestCase):
    """Sentry helpers layered on Django's transactional TestCase."""
    pass
class TransactionTestCase(BaseTestCase, TransactionTestCase):
    """Sentry helpers layered on Django's TransactionTestCase."""
    pass
class APITestCase(BaseTestCase, BaseAPITestCase):
    """Sentry helpers layered on DRF's APITestCase."""
    pass
class AuthProviderTestCase(TestCase):
provider = DummyProvider
provider_name = 'dummy'
def setUp(self):
super(AuthProviderTestCase, self).setUp()
# TestCase automatically sets up dummy provider
if self.provider_name != 'dummy' or self.provider != DummyProvider:
auth.register(self.provider_name, self.provider)
self.addCleanup(au |
criteo-forks/carbon | lib/carbon/tests/test_aggregator_methods.py | Python | apache-2.0 | 1,107 | 0.000903 | import unittest
from carbon.aggregator.rules import AGGREGATION_METHODS
PERCENTILE_METHODS = ['p999', 'p99', 'p95', 'p90', 'p80', 'p75', 'p50']
VALUES = [4, 8, 15, 16, 23, 42]
def almost_equal(a, b):
    """Return True when a and b differ by less than 1e-10 (absolute)."""
    return abs(a - b) < 1e-10
class AggregationMethodTest(unittest.TestCase):
    """Checks the percentile aggregation methods exposed by carbon's rules."""

    def test_percentile_simple(self):
        # A single-element series: every percentile equals that element.
        # (Reconstructed corrupted loop header.)
        for method in PERCENTILE_METHODS:
            self.assertTrue(almost_equal(AGGREGATION_METHODS[method]([1]), 1))

    def test_percentile_order(self):
        # Percentile results must not depend on the input ordering.
        for method in PERCENTILE_METHODS:
            a = AGGREGATION_METHODS[method]([1, 2, 3, 4, 5])
            b = AGGREGATION_METHODS[method]([3, 2, 1, 4, 5])
            self.assertTrue(almost_equal(a, b))

    def test_percentile_values(self):
        # Expected interpolated percentiles of VALUES = [4, 8, 15, 16, 23, 42].
        # (Reconstructed corrupted p999 row.)
        examples = [
            ('p999', 41.905, ),
            ('p99', 41.05, ),
            ('p95', 37.25, ),
            ('p90', 32.5, ),
            ('p80', 23, ),
            ('p75', 21.25, ),
            ('p50', 15.5, ),
        ]
        for (method, result) in examples:
            self.assertTrue(almost_equal(AGGREGATION_METHODS[method](VALUES), result))
|
ntymtsiv/tempest | tempest/api/compute/v3/servers/test_instance_actions.py | Python | apache-2.0 | 2,647 | 0 | # Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required | by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import exceptions
from tempest.test import attr
class InstanceActionsV3Test(base.BaseV3ComputeTest):
    """Exercise the instance-actions API against a freshly booted server."""
    _interface = 'json'

    @classmethod
    def setUpClass(cls):
        super(InstanceActionsV3Test, cls).setUpClass()
        cls.client = cls.servers_client
        # Remember the create request id so the 'create' action can be
        # fetched back later.
        response, created_server = cls.create_test_server(wait_until='ACTIVE')
        cls.request_id = response['x-compute-request-id']
        cls.server_id = created_server['id']

    @attr(type='gate')
    def test_list_instance_actions(self):
        # Reboot so the server has both a 'create' and a 'reboot' action,
        # then list and verify both are present.
        self.client.reboot(self.server_id, 'HARD')
        self.client.wait_for_server_status(self.server_id, 'ACTIVE')
        resp, actions = self.client.list_instance_actions(self.server_id)
        self.assertEqual(200, resp.status)
        self.assertTrue(len(actions) == 2, str(actions))
        self.assertTrue(any(entry['action'] == 'create' for entry in actions))
        self.assertTrue(any(entry['action'] == 'reboot' for entry in actions))

    @attr(type='gate')
    def test_get_instance_action(self):
        # The 'create' action recorded at setUpClass time is retrievable
        # by its request id.
        resp, action = self.client.get_instance_action(self.server_id,
                                                       self.request_id)
        self.assertEqual(200, resp.status)
        self.assertEqual(self.server_id, action['instance_uuid'])
        self.assertEqual('create', action['action'])

    @attr(type=['negative', 'gate'])
    def test_list_instance_actions_invalid_server(self):
        # Listing actions for a bogus server id must raise NotFound.
        self.assertRaises(exceptions.NotFound,
                          self.client.list_instance_actions, 'server-999')

    @attr(type=['negative', 'gate'])
    def test_get_instance_action_invalid_request(self):
        # A bogus request id for a valid server must raise NotFound.
        self.assertRaises(exceptions.NotFound, self.client.get_instance_action,
                          self.server_id, '999')
|
ccauet/scikit-optimize | skopt/tests/test_callbacks.py | Python | bsd-3-clause | 832 | 0 | import pytest
from collections import namedtuple
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from skopt import dummy_minimize
from skopt.benchmarks import bench1
from skopt.callbacks import TimerCallback
from skopt.callbacks import Delta | YStopper
@pytest.mark.fast_test
def test_timer_callback():
callback = TimerCallback()
dummy_minimize(bench1, [(-1.0, 1.0)], callback=callback, n_calls=10)
assert_equal(len(callback.iter_time), 10)
assert_less(0.0, sum(callback.iter_time))
@pytest.mark.fast_test
def test_deltay_stopper():
deltay = DeltaYStopper(0.2, 3)
Result = namedtuple('Result', ['func_vals'])
assert deltay(Result([0, 1, 2, 3, 4, 0.1 | , 0.19]))
assert not deltay(Result([0, 1, 2, 3, 4, 0.1]))
assert deltay(Result([0, 1])) is None
|
hy-2013/scrapy | scrapy/contrib/linkextractors/sgml.py | Python | bsd-3-clause | 5,233 | 0.00344 | """
SGMLParser-based Link extractors
"""
from six.moves.ur | llib.parse import urljoin
import warnings
from sgmllib import SGMLParser
from w3lib.url import safe_url_string
from scrapy.selector import Selector
from scrapy.link import Link
from scrapy.linkextractor import FilteringLinkExtractor
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.python import unique as unique_list, str_to_unicode
from scrapy.uti | ls.response import get_base_url
from scrapy.exceptions import ScrapyDeprecationWarning
class BaseSgmlLinkExtractor(SGMLParser):
def __init__(self, tag="a", attr="href", unique=False, process_value=None):
warnings.warn(
"BaseSgmlLinkExtractor is deprecated and will be removed in future releases. "
"Please use scrapy.contrib.linkextractors.LinkExtractor",
ScrapyDeprecationWarning
)
SGMLParser.__init__(self)
self.scan_tag = tag if callable(tag) else lambda t: t == tag
self.scan_attr = attr if callable(attr) else lambda a: a == attr
self.process_value = (lambda v: v) if process_value is None else process_value
self.current_link = None
self.unique = unique
def _extract_links(self, response_text, response_url, response_encoding, base_url=None):
""" Do the real extraction work """
self.reset()
self.feed(response_text)
self.close()
ret = []
if base_url is None:
base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
for link in self.links:
if isinstance(link.url, unicode):
link.url = link.url.encode(response_encoding)
link.url = urljoin(base_url, link.url)
link.url = safe_url_string(link.url, response_encoding)
link.text = str_to_unicode(link.text, response_encoding, errors='replace').strip()
ret.append(link)
return ret
def _process_links(self, links):
""" Normalize and filter extracted links
The subclass should override it if necessary
"""
links = unique_list(links, key=lambda link: link.url) if self.unique else links
return links
def extract_links(self, response):
# wrapper needed to allow to work directly with text
links = self._extract_links(response.body, response.url, response.encoding)
links = self._process_links(links)
return links
def reset(self):
SGMLParser.reset(self)
self.links = []
self.base_url = None
self.current_link = None
def unknown_starttag(self, tag, attrs):
if tag == 'base':
self.base_url = dict(attrs).get('href')
if self.scan_tag(tag):
for attr, value in attrs:
if self.scan_attr(attr):
url = self.process_value(value)
if url is not None:
link = Link(url=url, nofollow=True if dict(attrs).get('rel') == 'nofollow' else False)
self.links.append(link)
self.current_link = link
def unknown_endtag(self, tag):
if self.scan_tag(tag):
self.current_link = None
def handle_data(self, data):
if self.current_link:
self.current_link.text = self.current_link.text + data
def matches(self, url):
"""This extractor matches with any url, since
it doesn't contain any patterns"""
return True
class SgmlLinkExtractor(FilteringLinkExtractor):
def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),
tags=('a', 'area'), attrs=('href',), canonicalize=True, unique=True, process_value=None,
deny_extensions=None):
warnings.warn(
"SgmlLinkExtractor is deprecated and will be removed in future releases. "
"Please use scrapy.contrib.linkextractors.LinkExtractor",
ScrapyDeprecationWarning
)
tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))
tag_func = lambda x: x in tags
attr_func = lambda x: x in attrs
with warnings.catch_warnings(record=True):
lx = BaseSgmlLinkExtractor(tag=tag_func, attr=attr_func,
unique=unique, process_value=process_value)
super(SgmlLinkExtractor, self).__init__(lx, allow, deny,
allow_domains, deny_domains, restrict_xpaths, canonicalize,
deny_extensions)
# FIXME: was added to fix a RegexLinkExtractor testcase
self.base_url = None
def extract_links(self, response):
base_url = None
if self.restrict_xpaths:
sel = Selector(response)
base_url = get_base_url(response)
body = u''.join(f
for x in self.restrict_xpaths
for f in sel.xpath(x).extract()
).encode(response.encoding, errors='xmlcharrefreplace')
else:
body = response.body
links = self._extract_links(body, response.url, response.encoding, base_url)
links = self._process_links(links)
return links
|
mammadori/pyglet | pyglet/window/cocoa/pyglet_view.py | Python | bsd-3-clause | 13,557 | 0.004352 | from pyglet.window import key, mouse
from pyglet.libs.darwin.quartzkey import keymap, charmap
from pyglet.libs.darwin.cocoapy import *
NSTrackingArea = ObjCClass('NSTrackingArea')
# Event data helper functions.
def getMouseDelta(nsevent):
dx = nsevent.deltaX()
dy = nsevent.deltaY()
return int(dx), int(dy)
def getMousePosition(self, nsevent):
in_window = nsevent.locationInWindow()
in_window = self.convertPoint_fromView_(in_window, None)
x = int(in_window.x)
y = int(in_window.y)
# Must record mouse position for BaseWindow.draw_mouse_cursor to work.
self._window._mouse_x = x
self._window._mouse_y = y
return x, y
def getModifiers(nsevent):
modifiers = 0
modifierFlags = nsevent.modifierFlags()
if modifierFlags & NSAlphaShiftKeyMask:
modifiers |= key.MOD_CAPSLOCK
if modifierFlags & NSShiftKeyMask:
modifiers |= key.MOD_SHIFT
if modifierFlags & NSControlKeyMask:
modifiers |= key.MOD_CTRL
if modifierFlags & NSAlternateKeyMask:
modifiers |= key.MOD_ALT
modifiers |= key.MOD_OPTION
if modifierFlags & NSCommandKeyMask:
modifiers |= key.MOD_COMMAND
if modifierFlags & NSFunctionKeyMask:
modifiers |= key.MOD_FUNCTION
return modifiers
def getSymbol(nsevent):
keycode = nsevent.keyCode()
return keymap[keycode]
class PygletView_Implementation(object):
PygletView = ObjCSubclass('NSView', 'PygletView')
@PygletView.method(b'@'+NSRectEncoding+PyObjectEncoding)
def initWithFrame_cocoaWindow_(self, frame, window):
# The tracking area is used to get mouseEntered, mouseExited, and cursorUpdate
# events so that we can custom set the mouse cursor within the view.
self._tracking_area = None
self = ObjCInstance(send_super(self, 'initWithFrame:', frame, argtypes=[NSRect]))
if not self:
return None
# CocoaWindow object.
self._window = window
self.updateTrackingAreas()
# Create an instance of PygletTextView to handle text events.
# We must do this because NSOpenGLView doesn't conform to the
# NSTextInputClient protocol by default, and the insertText: method will
# not do the right thing with respect to translating key sequences like
# "Option-e", "e" if the protocol isn't implemented. So the easiest
# thing to do is to subclass NSTextView which *does* implement the
# protocol and let it handle text input.
PygletTextView = ObjCClass('PygletTextView')
self._textview = PygletTextView.alloc().initWithCocoaWindow_(window)
# Add text view to the responder chain.
self.addSubview_(self._textview)
return self
@PygletView.method('v')
def dealloc(self):
self._window = None
#send_message(self.objc_self, 'removeFromSuperviewWithoutNeedingDisplay')
self._textview.release()
self._textview = None
self._tracking_area.release()
self._tracking_area = None
send_super(self, 'dealloc')
@PygletView.method('v')
def updateTrackingAreas(self):
# This method is called automatically whenever the tracking areas need to be
# recreated, for example when window resizes.
if self._tracking_area:
self.removeTrackingArea_(self._tracking_area)
self._tracking_area.release()
self._tracking_area = None
tracking_options = NSTrackingMouseEnteredAndExited | NSTrackingActiveInActiveApp | NSTrackingCursorUpdate
frame = self.frame()
self._tracking_area = NSTrackingArea.alloc().initWithRect_options_owner_userInfo_(
frame, # rect
tracking_options, # options
self, # owner
None) # userInfo
self.addTrackingArea_(self._tracking_area)
@PygletView.method('B')
def canBecomeKeyView(self):
return True
@PygletView.method('B')
def isOpaque(self):
return True
## Event responders.
# This method is called whenever the view changes size.
@PygletView.method(b'v'+NSSizeEncoding)
def setFrameSize_(self, size):
send_super(self, 'setFrameSize:', size, argtypes=[NSSize])
# This method is called when view is first installed as the
# contentView of window. Don't do anything on first call.
# This also helps ensure correct window creation event ordering.
if not self._window.context.canvas:
return
width, height = int(size.width), int(size.height)
self._window.switch_to()
self._window.context.update_geometry()
self._window.dispatch_event("on_resize", width, height)
self._window.dispatch_event("on_expose")
# Can't get app.event_loop.enter_blocking() working with Cocoa, because
# when mouse clicks on the window's resize control, Cocoa enters into a
# mini-event loop that only responds to mouseDragged and mouseUp events.
# This means that using NSTimer to call idle() won't work. Our kludge
# is to override NSWindow's nextEventMatchingMask_etc method and call
# idle() from there.
if self.inLiveResize():
from pyglet import app
if app.event_loop is not None:
app.event_loop.idle()
@PygletView.method('v@')
def pygletKeyDown_(self, nsevent):
symbol = getSymbol(nsevent)
modifiers = getModifiers(nsevent)
self._window.dispatch_event('on_key_press', symbol, modifiers)
@PygletView.method('v@')
def pygletKeyUp_(self, n | sevent):
symbol = getSymbol(nsevent)
modifiers = getModifiers(nsevent)
self._window.dispatch_event('on_key_release', symbol, modifiers)
@PygletView.method('v@')
def pygletFlagsChanged_(self, nsevent):
| # Handles on_key_press and on_key_release events for modifier keys.
# Note that capslock is handled differently than other keys; it acts
# as a toggle, so on_key_release is only sent when it's turned off.
# TODO: Move these constants somewhere else.
# Undocumented left/right modifier masks found by experimentation:
NSLeftShiftKeyMask = 1 << 1
NSRightShiftKeyMask = 1 << 2
NSLeftControlKeyMask = 1 << 0
NSRightControlKeyMask = 1 << 13
NSLeftAlternateKeyMask = 1 << 5
NSRightAlternateKeyMask = 1 << 6
NSLeftCommandKeyMask = 1 << 3
NSRightCommandKeyMask = 1 << 4
maskForKey = { key.LSHIFT : NSLeftShiftKeyMask,
key.RSHIFT : NSRightShiftKeyMask,
key.LCTRL : NSLeftControlKeyMask,
key.RCTRL : NSRightControlKeyMask,
key.LOPTION : NSLeftAlternateKeyMask,
key.ROPTION : NSRightAlternateKeyMask,
key.LCOMMAND : NSLeftCommandKeyMask,
key.RCOMMAND : NSRightCommandKeyMask,
key.CAPSLOCK : NSAlphaShiftKeyMask,
key.FUNCTION : NSFunctionKeyMask }
symbol = getSymbol(nsevent)
# Ignore this event if symbol is not a modifier key. We must check this
# because e.g., we receive a flagsChanged message when using CMD-tab to
# switch applications, with symbol == "a" when command key is released.
if symbol not in maskForKey:
return
modifiers = getModifiers(nsevent)
modifierFlags = nsevent.modifierFlags()
if symbol and modifierFlags & maskForKey[symbol]:
self._window.dispatch_event('on_key_press', symbol, modifiers)
else:
self._window.dispatch_event('on_key_release', symbol, modifiers)
# Overriding this method helps prevent system beeps for unhandled events.
@PygletView.method('B@')
def performKeyEquivalent_(self, nsevent):
# Let arrow keys and certain function keys pass through the responder
# chain so that the textview can handle on_text_motion events.
modifierFlags = nsevent.mo |
vinaypost/multiuploader | multiuploader/models.py | Python | mit | 541 | 0.001848 | from __future__ import unicode_literals
from django.conf import settings
from django.db impo | rt models
from django.utils.translation import ugettext_lazy as _
class MultiuploaderFile(models.Model):
file = models.FileField(upload_to=settings.MULTIUPLOADER_FILES_FOLDER, max_len | gth=255)
filename = models.CharField(max_length=255, blank=False, null=False)
upload_date = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = _('multiuploader file')
verbose_name_plural = _('multiuploader files')
|
keenlabs/KeenClient-Python | keen/__init__.py | Python | mit | 31,368 | 0.005324 | import os
from keen.client import KeenClient
from keen.exceptions import InvalidEnvironmentError
__author__ = 'dkador'
_client = None
project_id = None
write_key = None
read_key = None
master_key = None
base_url = None
def _initialize_client_from_environment():
''' Initialize a KeenClient instance using environment variables. '''
global _client, project_id, write_key, read_key, master_key, base_url
if _client is None:
# check environment for project ID and keys
project_id = project_id or os.environ.get("KEEN_PROJECT_ID")
write_key = write_key or os.environ.get("KEEN_WRITE_KEY")
read_key = read_key or os.environ.get("KEEN_READ_KEY")
master_key = master_key or os.environ.get("KEEN_MASTER_KEY")
base_url = base_url or os.environ.get("KEEN_BASE_URL")
if not project_id:
raise InvalidEnvironmentError("Please set the KEEN_PROJECT_ID environment variable or set keen.project_id!")
_client = KeenClient(project_id,
write_key=write_key,
read_key=read_key,
master_key=master_key,
base_url=base_url)
def add_event(event_collection, body, timestamp=None):
""" Adds an event.
Depending on the persistence strategy of the client,
this will either result in the event being uploaded to Keen
immediately or will result in saving the event to some local cache.
:param event_collection: the name of the collection to insert the
event to
:param body: dict, the body of the event to insert the event to
:param timestamp: datetime, optional, the timestamp of the event
"""
_initialize_client_from_environment()
_client.add_event(event_collection, body, timestamp=timestamp)
def add_events(events):
""" Adds a batch of events.
Depending on the persistence strategy of the client,
| this will either result in the event being uploaded to Keen
immediately or will result in saving the eve | nt to some local cache.
:param events: dictionary of events
"""
_initialize_client_from_environment()
return _client.add_events(events)
def generate_image_beacon(event_collection, body, timestamp=None):
""" Generates an image beacon URL.
:param event_collection: the name of the collection to insert the
event to
:param body: dict, the body of the event to insert the event to
:param timestamp: datetime, optional, the timestamp of the event
"""
_initialize_client_from_environment()
return _client.generate_image_beacon(event_collection, body, timestamp=timestamp)
def count(event_collection, timeframe=None, timezone=None, interval=None, filters=None, group_by=None, order_by=None,
max_age=None, limit=None):
""" Performs a count query
Counts the number of events that meet the given criteria.
:param event_collection: string, the name of the collection to query
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would
like to group you results by. example: "customer.id" or ["browser","operating_system"]
:param order_by: dictionary or list of dictionary objects containing the property_name(s)
to order by and the desired direction(s) of sorting.
Example: {"property_name":"result", "direction":keen.direction.DESCENDING}
May not be used without a group_by specified.
:param limit: positive integer limiting the displayed results of a query using order_by
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
return _client.count(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by, order_by=order_by,
max_age=max_age, limit=limit)
def sum(event_collection, target_property, timeframe=None, timezone=None, interval=None, filters=None,
group_by=None, order_by=None, max_age=None, limit=None):
""" Performs a sum query
Adds the values of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
:param target_property: string, the name of the event property you would like use
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would
like to group you results by. example: "customer.id" or ["browser","operating_system"]
:param order_by: dictionary or list of dictionary objects containing the property_name(s)
to order by and the desired direction(s) of sorting.
Example: {"property_name":"result", "direction":keen.direction.DESCENDING}
May not be used without a group_by specified.
:param limit: positive integer limiting the displayed results of a query using order_by
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
return _client.sum(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by, order_by=order_by,
target_property=target_property, max_age=max_age, limit=limit)
def minimum(event_collection, target_property, timeframe=None, timezone=None, interval=None, filters=None,
group_by=None, order_by=None, max_age=None, limit=None):
""" Performs a minimum query
Finds the minimum value of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
:param target_property: string, the name of the event property you would like use
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would
like to group you results by. example: "customer.id" or ["browser","operating_system"]
:param order_by: dictionary or list of dictionary objects containing the property_name(s)
to order by and the desired direction(s) of sorting.
Example: {"property_name":"result", "direction":keen.direction.DESCENDING}
May not be used without a group_by specified.
:param limit: positive integer limiting the displayed results of a query using order_by
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
r |
tellesnobrega/sahara | sahara/plugins/fake/edp_engine.py | Python | apache-2.0 | 1,219 | 0 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service.edp import base_ | engine
from sahara.utils import edp
| class FakeJobEngine(base_engine.JobEngine):
def cancel_job(self, job_execution):
pass
def get_job_status(self, job_execution):
pass
def run_job(self, job_execution):
return 'engine_job_id', edp.JOB_STATUS_SUCCEEDED, None
def run_scheduled_job(self, job_execution):
pass
def validate_job_execution(self, cluster, job, data):
pass
@staticmethod
def get_possible_job_config(job_type):
return None
@staticmethod
def get_supported_job_types():
return edp.JOB_TYPES_ALL
|
mathLab/RBniCS | rbnics/reduction_methods/base/rb_reduction.py | Python | lgpl-3.0 | 31,214 | 0.003844 | # Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import os
from math import sqrt
from logging import DEBUG, getLogger
from rbnics.backends import GramSchmidt
from rbnics.utils.decorators import PreserveClassName, RequiredBaseDecorators, snapshot_links_to_cache
from rbnics.utils.io import (ErrorAnalysisTable, GreedySelectedParametersList, GreedyErrorEstimatorsList,
OnlineSizeDict, SpeedupAnalysisTable, TextBox, TextLine, Timer)
logger = getLogger("rbnics/reduction_methods/base/rb_reduction.py")
@RequiredBaseDecorators(None)
def RBReduction(DifferentialProblemReductionMethod_DerivedClass):
@PreserveClassName
class RBReduction_Class(DifferentialProblemReductionMethod_DerivedClass):
"""
The folders used to store the snapshots and for the post processing data, the parameters
for the greedy algorithm and the error estimator evaluations are initialized.
:param truth_problem: class of the truth problem to be solved.
:return: reduced RB class.
"""
def __init__(self, truth_problem, **kwargs):
# Call the parent initialization
DifferentialProblemReductionMethod_DerivedClass.__init__(self, truth_problem, **kwargs)
# Declare a GS object
# GramSchmidt (for problems with one component) or dict of GramSchmidt (for problem
# with several components)
self.GS = None
# I/O
self.folder["snapshots"] = os.path.join(self.folder_prefix, "snapshots")
self.folder["post_processing"] = os.path.join(self.folder_prefix, "post_processing")
self.greedy_selected_parameters = GreedySelectedParametersList()
self.greedy_error_estimators = GreedyErrorEstimatorsList()
self.label = "RB"
def _init_offline(self):
# Call parent to initialize inner product and reduced problem
output = DifferentialProblemReductionMethod_DerivedClass._init_offline(self)
# Declare a new GS for each basis component
if len(self.truth_problem.components) > 1:
self.GS = dict()
for component in self.truth_problem.components:
assert len(self.truth_problem.inner_product[component]) == 1
inner_product = self.truth_problem.inner_product[component][0]
self.GS[component] = GramSchmidt(self.truth_problem.V, inner_product)
else:
assert len(self.truth_problem.inner_product) == 1
inner_product = self.truth_problem.inner_product[0]
self.GS = GramSchmidt(self.truth_problem.V, inner_product)
# Return
return output
def offline(self):
"""
It performs the offline phase of the reduced order model.
:return: reduced_problem where all offline data are stored.
"""
need_to_do_offline_stage = self._init_offline()
if need_to_do_offline_stage:
self._offline()
self._finalize_offline()
return self.reduced_problem
@snapshot_links_to_cache
def _offline(self):
print(TextBox(self.truth_problem.name() + " " + self.label + " offline phase begins", fill="="))
print("")
# Initialize first parameter to be used
self.reduced_problem.build_reduced_operators()
self.reduced_problem.build_error_estimation_operators()
(absolute_error_estimator_max, relative_error_estimator_max) = self.greedy()
print("initial maximum absolute error estimator over training set =", absolute_error_estimator_max)
print("initial maximum relative error estimator over training set =", relative_error_estimator_max)
print("")
iteration = 0
while self.reduced_problem.N < self.Nmax and relative_error_estimator_max >= self.tol:
print(TextLine("N = " + str(self.reduced_problem.N), fill="#"))
print("truth solve for mu =", self.truth_problem.mu)
snapshot = self.truth_problem.solve()
self.truth_problem.export_solution(self.folder["snapshots"], "truth_" + str(iteration), snapshot)
snapshot = self.postprocess_snapshot(snapshot, iteration)
print("update basis | matrix")
self.update_basis_matrix(snapshot)
iteration += 1
print("build reduced operators")
self.reduced_problem.build_reduced_operators()
print("reduced order solve")
| self.reduced_problem.solve()
print("build operators for error estimation")
self.reduced_problem.build_error_estimation_operators()
(absolute_error_estimator_max, relative_error_estimator_max) = self.greedy()
print("maximum absolute error estimator over training set =", absolute_error_estimator_max)
print("maximum relative error estimator over training set =", relative_error_estimator_max)
print("")
print(TextBox(self.truth_problem.name() + " " + self.label + " offline phase ends", fill="="))
print("")
def update_basis_matrix(self, snapshot):
"""
It updates basis matrix.
:param snapshot: last offline solution calculated.
"""
if len(self.truth_problem.components) > 1:
for component in self.truth_problem.components:
new_basis_function = self.GS[component].apply(
snapshot, self.reduced_problem.basis_functions[component][
self.reduced_problem.N_bc[component]:], component=component)
self.reduced_problem.basis_functions.enrich(new_basis_function, component=component)
self.reduced_problem.N[component] += 1
self.reduced_problem.basis_functions.save(self.reduced_problem.folder["basis"], "basis")
else:
new_basis_function = self.GS.apply(snapshot, self.reduced_problem.basis_functions[
self.reduced_problem.N_bc:])
self.reduced_problem.basis_functions.enrich(new_basis_function)
self.reduced_problem.N += 1
self.reduced_problem.basis_functions.save(self.reduced_problem.folder["basis"], "basis")
def greedy(self):
"""
It chooses the next parameter in the offline stage in a greedy fashion:
wrapper with post processing of the result (in particular, set greedily selected parameter
and save to file)
:return: max error estimator and the comparison with the first one calculated.
"""
(error_estimator_max, error_estimator_argmax) = self._greedy()
self.truth_problem.set_mu(self.training_set[error_estimator_argmax])
self.greedy_selected_parameters.append(self.training_set[error_estimator_argmax])
self.greedy_selected_parameters.save(self.folder["post_processing"], "mu_greedy")
self.greedy_error_estimators.append(error_estimator_max)
self.greedy_error_estimators.save(self.folder["post_processing"], "error_estimator_max")
return (error_estimator_max, error_estimator_max / self.greedy_error_estimators[0])
def _greedy(self):
"""
It chooses the next parameter in the offline stage in a greedy fashion. Internal method.
:return: max error estimator and the respective parameter.
"""
if self.reduced_problem.N > 0: # skip during initialization
# Print some additional information on the consistency of the reduced basis
print("absolute error for current mu =", self.reduced_problem.compute_error())
print("absolute error estimator for current mu =", self.reduced_problem.estimate |
tristan-hunt/UVaProblems | AcceptedUVa/uva_challenging/uva_10407.py | Python | gpl-3.0 | 1,675 | 0.030448 | # /* UVa problem: 10407
# * Simple Division
# * Topic: Number Theory
# *
# * Level: challenging
# *
# * Brief problem description:
# * Given a list of numbers, a1, a2, a3.... an compute a number m such that
# * ai mod m = x for some arbitrary x for all ai.
# * In other words, find a congruence class modulo m to which each number belongs
# * Solution Summary:
# * Compute the differences of each of the numbers, then find the gcd
# * of all of the differences.
# * Used Resources:
# *
# * Textbook: Competitive Programming 3
# * Hints given on 'Spanish Problem Archive'
# *
# * I hereby certify that I have produced the following solution myself
# * using only the resources listed above in accordance with the CMPUT
# * 403 collaboration policy.
# *
# * Tristan Hunt
# */
import sys
def gcd(a, b):
if b== 0:
return a
return gcd(b, a%b)
def lcm(a, b):
return (a* (b/gcd(a, b)))
def load():
while(1):
line = next(sys.stdin).split()
line = [int(x) for x in line]
line.pop(-1)
if len(line) == 0:
break
yield(line)
for (sequence) in load():
n = len(sequence)
diff = list()
for i in range(0, n-1):
# Now find gcd of all the differences:
diff.append(abs(sequence[i+1] - sequence[i])) #compute the differences
if n == 2:
sys.stdout.write("{}\n".format(diff[0]))
else:
# Compute gcd of the differences
#print(diff)
#sys.stdout.write("gcd({}, {}) = {}\n".format(diff[0], diff[1], gcd(diff[0], diff[1])))
m = gcd( | diff[0], diff[1])
for i in range(2, n-1):
#sys.stdout.write("gcd({}, {}) = {}\n".format(m, diff[i], gcd( | m, diff[i])))
m = gcd(m, diff[i])
sys.stdout.write("{}\n".format(m))
|
Onager/plaso | plaso/parsers/dsv_parser.py | Python | apache-2.0 | 10,932 | 0.006678 | # -*- coding: utf-8 -*-
"""Delimiter separated values (DSV) parser interface."""
import abc
import csv
import os
from dfvfs.helpers import text_file
from plaso.lib import errors
from plaso.lib import specification
from plaso.parsers import interface
class DSVParser(interface.FileObjectParser):
"""Delimiter separated values (DSV) parser interface."""
# A list that contains the names of all the fields in the log file. This
# needs to be defined by each DSV parser.
COLUMNS = []
# The default delimiter is a comma, but a tab, pipe or other character are
# known to be used. Note the delimiter must be a byte string otherwise csv
# module can raise a TypeError indicating that "delimiter" must be a single
# character string.
DELIMITER = ','
# If there is a header before the lines start it can be defined here, and
# the number of header lines that need to be skipped before the parsing
# starts.
NUMBER_OF_HEADER_LINES = 0
# If there is a special escape character used inside the structured text
# it can be defined here.
ESCAPE_CHARACTER = ''
# If there is a special quote character used inside the structured text
# it can be defined here.
QUOTE_CHAR = '"'
# The maximum size of a single field in the parser
FIELD_SIZE_LIMIT = csv.field_size_limit()
# Value that should not appear inside the file, made to test the actual
# file to see if it confirms to standards.
_MAGIC_TEST_STRING = 'RegnThvotturMeistarans'
_ENCODING = None
def __init__(self):
"""Initializes a delimiter separated values (DSV) parser."""
super(DSVParser, self).__init__()
self._encoding = self._ENCODING
self._end_of_line = '\n'
self._maximum_line_length = (
len(self._end_of_line) +
len(self.COLUMNS) * (self.FIELD_SIZE_LIMIT + len(self.DELIMITER)))
def _CreateDictReader(self, line_reader):
"""Returns a reader that processes each row and yields dictionaries.
csv.DictReader does this job well for single-character delimiters; parsers
that need multi-character delimiters need to override this method.
Args:
line_reader (iter): yields lines from a file-like object.
Returns:
iter: a reader of dictionaries, as returned by csv.DictReader().
"""
# Note that doublequote overrules ESCAPE_CHARACTER and needs to be set
# to False if an escape character is used.
if self.ESCAPE_CHARACTER:
csv_dict_reader = csv.DictReader(
line_reader, delimiter=self.DELIMITER, doublequote=False,
escapechar=self.ESCAPE_CHARACTER, fieldnames=self.COLUMNS,
restkey=self._MAGIC_TEST_STRING, restval=self._MAGIC_TEST_STRING)
else:
csv_dict_reader = csv.DictReader(
line_reader, delimiter=self.DELIMITER, fieldnames=self.COLUMNS,
quotechar=self.QUOTE_CHAR, restkey=self._MAGIC_TEST_STRING,
restval=self._MAGIC_TEST_STRING)
return csv_dict_reader
def _CreateLineReader(self, file_object, encoding=None):
"""Creates an object that reads lines from a text file.
The line reader is advanced to the beginning of the DSV content, skipping
any header lines.
Args:
file_object (dfvfs.FileIO): file-like object.
encoding (Optional[str]): encoding used in the DSV file, where None
indicates the codepage of the parser mediator should be used.
Returns:
TextFile: an object that implements an iterator over lines in a text file.
Raises:
UnicodeDecodeError: if the file cannot be read with the specified
encoding.
"""
line_reader = text_file.TextFile(
file_object, encoding=encoding, end_of_line=self._end_of_line)
# pylint: disable=protected-access
maximum_read_buffer_size = line_reader._MAXIMUM_READ_BUFFER_SIZE
# Line length is one less than the maximum read buffer size so that we
# tell if there's a line that doesn't end at the end before the end of
# the file.
if self._maximum_line_length > maximum_read_buffer_size:
self._maximum_line_length = maximum_read_buffer_size - 1
# If we specifically define a number of lines we should skip, do that here.
for _ in range(0, self.NUMBER_OF_HEADER_LINES):
line_reader.readline(self._maximum_line_length)
return line_reader
def _HasExpectedLineLength(self, file_object, encoding=None):
"""Determines if a file begins with lines of the expected length.
As we know the maximum length of valid lines in the DSV file, the presence
of lines longer than this indicates that the file will not be parsed
successfully, without reading excessive data from a large file.
Args:
file_object (dfvfs.FileIO): file-like object.
encoding (Optional[str]): encoding used in the DSV file, where None
indicates the codepage of the parser mediator should be used.
Returns:
bool: True if the file has lines of the expected length.
"""
original_file_position = file_object.tell()
result = True
# Attempt to read a line that is longer than any line that should be in
# the file.
line_reader = self._CreateLineReader(file_object, encoding=encoding)
for _ in range(0, 20):
sample_line = line_reader.readline(self._maximum_line_length + 1)
if len(sample_line) > self._maximum_line_length:
result = False
break
file_object.seek(original_file_position, os.SEEK_SET)
return result
@classmethod
def GetFormatSpecification(cls):
"""Retrieves the format specification.
Returns:
FormatSpecification: format specification.
"""
return specification.FormatSpecification(cls.NAME, text_format=True)
def _CheckForByteOrderMark(self, file_object):
"""Check if the file contains a byte-order-mark (BOM).
Also see:
https://en.wikipedia.org/wiki/Byte_order_mark#Byte_order_marks_by_encoding
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
tuple: contains:
str: encoding determined based on BOM or None if no BOM was found.
int: offset of the text after the BOM of 0 if no BOM was found.
"""
file_object.seek(0, os.SEEK_SET)
file_data = file_object.read(4)
# Look the for a match with the longest byte-order-mark first.
if file_data[0:4] == b'\x00\x00\xfe\xff':
return 'utf-32-be', 4
if file_data[0:4] == b'\xff\xfe\x00\x00':
return 'utf-32-le', 4
if file_data[0 | :3] == b'\xef\xbb\xbf':
return 'utf-8', 3
if file_data[0:2] == b'\xfe\xff':
return 'utf-16-be', 2
if file_data[0:2 | ] == b'\xff\xfe':
return 'utf-16-le', 2
return None, 0
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a DSV text file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
encoding, text_offset = self._CheckForByteOrderMark(file_object)
if encoding and self._encoding and encoding != self._encoding:
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile((
'[{0:s}] Unable to parse DSV file: {1:s} encoding does not match the '
'one required by the parser.').format(self._encoding, display_name))
if not encoding:
# Fallback to UTF-8 as a last resort otherwise the creation of
# text_file.TextFile will fail if no encoding is set.
encoding = self._encoding or parser_mediator.codepage or 'utf-8'
file_object.seek(text_offset, os.SEEK_SET)
try:
if not self._HasExpectedLineLength(file_object, encoding=encoding):
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile((
'[{0:s}] Unable to parse DSV file: {1:s} with error: '
'unexpected line length.').format(self.NAME, display_name))
except UnicodeDecodeError as exception:
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile(
'[{0:s}] Unable to pa |
architecture-building-systems/CEAforArcGIS | bin/create_trace_graphviz.py | Python | mit | 1,081 | 0.006475 | """
Read in the output from the trace-inputlocator script and create a GraphViz file.
Pass as input the path to the yaml output of the trace-inputlocator script via config file.
The output is written to the trace-inputlocator location.
WHY? because the trace-inputlocator | only has the GraphViz output of the last call to the script. This
version re-creates the trace-data from the (merged) yaml file (the yaml output is merged if pre-existing in the output
file).
"""
import yaml
import cea.config
from cea.tests.trace_inputlocator import create_graphviz_output
def main(config):
with open(config.trace_inputlocator.yaml_outpu | t_file, 'r') as f:
yaml_data = yaml.safe_load(f)
trace_data = []
for script in yaml_data.keys():
for direction in ('input', 'output'):
for locator, file in yaml_data[script][direction]:
trace_data.append((direction, script, locator, file))
create_graphviz_output(trace_data, config.trace_inputlocator.graphviz_output_file)
if __name__ == '__main__':
main(cea.config.Configuration()) |
treasure-data/td-client-python | tdclient/models.py | Python | apache-2.0 | 457 | 0 | #!/usr/bin/env python
from tdclient import (
bulk_import_model,
database_model,
| job_model,
result_model,
schedule_model,
table_model,
user_model,
)
BulkImport = bulk_import_model.BulkImport
Database = database_model.Database
Schema = job_model.Schema
Job = job_model.Job
Result = result_model.Result
ScheduledJob = schedule_model.Sch | eduledJob
Schedule = schedule_model.Schedule
Table = table_model.Table
User = user_model.User
|
gersakbogdan/fsnd-conference | models.py | Python | apache-2.0 | 5,554 | 0 | #!/usr/bin/env python
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
__author__ = 'wesc+api@google.com (Wesley Chun)'
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty()
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.StringProperty(repeated=True)
sessionKeysWishList = ndb.KeyProperty(kind='Session', repeated=True)
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class Conference(ndb.Model):
"""Conference -- Conference object"""
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty()
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty()
month = ndb.IntegerProperty()
endDate = ndb.DateProperty()
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6)
month = messages.IntegerField(7)
maxAttendees = messages.IntegerField(8)
seatsAvailable = messages.IntegerField(9)
endDate = messages.StringField(10)
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -i multi ConferenceQueryForm inbound form msg"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True)
class Speaker(ndb.Model):
"""Speaker -- Speaker object"""
name = ndb.StringProperty(required=True)
about = ndb.StringProperty()
class SpeakerForm(messages.Message):
"""SpeakerForms -- Speaker outbound form message"""
name = messages.StringField(1)
about = messages.StringField(2)
sessions = messages.StringField(3, repeated=True)
websafeKey = messages.StringField(4)
class SpeakerForms(messages.Message):
"""SpeakerForms -- multiple Speaker outbound form message"""
items = messages.MessageField(SpeakerForm, 1, repeated=True)
class Session(n | db.Model):
"""Session -- Session object"""
name = ndb.StringProperty(required=True)
highlights = ndb.StringProperty()
speakerKey = ndb.KeyProperty(kind='Speaker', required=True)
duration = ndb.IntegerProperty()
typeOfSession = ndb.StringProperty()
date = ndb.DateProperty()
startTime = ndb.TimeProperty()
class | SessionForm(messages.Message):
"""SessionForm -- Session outbound form message"""
name = messages.StringField(1)
highlights = messages.StringField(2)
websafeSpeakerKey = messages.StringField(3)
duration = messages.IntegerField(4)
typeOfSession = messages.StringField(5)
date = messages.StringField(6)
startTime = messages.StringField(7)
websafeKey = messages.StringField(8)
class SessionForms(messages.Message):
"""SessionForms -- multiple Session outbound form message"""
items = messages.MessageField(SessionForm, 1, repeated=True)
class QueryForm(messages.Message):
"""QueryForm -- Session/Speaker query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class QueryForms(messages.Message):
"""QueryForms -- multiple QueryForm inbound form message"""
filters = messages.MessageField(QueryForm, 1, repeated=True)
|
patrikja/SyntheticPopulations | python/activity_assignment.py | Python | bsd-3-clause | 11,301 | 0.010353 | import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
import copy
from common import *
# The following must be identical to the corresponding values in the genertion
# script (if the files produced there are to be used).
survey_attributes_csv = 'survey_attributes.csv'
survey_activities_csv = 'survey_activities.csv'
synthetic_people_csv = 'synthetic_people.csv'
synthetic_activities_csv = 'synthetic_activities.csv'
attribute_names = ['attr_a', 'attr_b', 'attr_c']
activity_names = ['act0', 'act1', 'act2']
bin_names = [['a0','a1', 'a2'], ['b0', 'b1', 'b2'], ['c0', 'c1', 'c2']]
bin_names_flat =[val for sublist in bin_names for val in sublist[1:]]
n_attribute_names = len(attribute_names)
n_activity_names = len(activity_names)
n_bins = map(lambda e: len(e), bin_names)
n_bin_names_flat = len(bin_names_flat)
class Activity:
def __init__(self, name, starttime, duration, location_id):
self.name = name
self.starttime = starttime
self.duration = duration
self.location_id = location_id
def __repr__(self):
return self.name
#return 'Activity name: {0}. Start time: {1}. Duration: {2}.'.format(self.name, self.starttime, self.duration)
class Person:
def __init__(self, person_id, household_id, attributes, activities):
self.person_id = person_id
self.household_id = household_id #household this person belongs to
self.attributes = attributes #list with the bin value for each attribute
# Array with ones ond zeros where all bin values, exept the first, for
# all attributes are represented. One means that that bin value is the
# matching one for this person. If there are only zeros for an attribute
# it means that the first bin value is used.
# Example: [a0, b1, c2] transforms to [0, 0, 1, 0, 0, 1]:
# a1 a2 b1 b2 c1 c2
# [0, 0, 1, 0, 0, 1]
self.bins = np.zeros(n_bin_names_flat, dtype=np.int)
for attribute in attributes:
if attribute in bin_names_flat:
self.bins[bin_names_flat.index(attribute)] = 1
self.activities = activities
#Sum total time for each activity
self.survey_activity_times = np.zeros(n_activity_names, dtype=np.int)
for activity in activities:
self.survey_activity_times[activity_names.index(activity.name)] += activity.duration
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.person_id == other.person_id
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return 'Person id: {0}. Household id: {1}. Attributes: {2}. Activities: {3}'\
.format(self.person_id, self.household_id, self.attributes, self.activities)
# Fitted activity time is y = x*b, where y is a vector of times for different
# categories, x is a vector of ones and zeros, representing the presence of
# attributes (a 1 is added for the interception) and b is a matrix with the
# linear coefficients.
def assign_fitted_time(self, beta):
self.fitted_activity_times = np.matmul(beta, np.hstack((1, self.bins)))
# Calculate the distance between two persons as the (Euclidian) distance
# between their fitted activity time vectors.
# TODO: Replace with Mahalanobis distance instead of Euclidian
def distance(self, other_person):
return np.linalg.norm( self.fitted_activity_times -
other_person.fitted_activity_times)
class Household:
def __init__(self, household_id):
self.household_id = household_id
self.persons = []
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.household_id == other.household_id
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def addPerson(self, person):
self.persons.append(person)
# The houshold-houshold distance is defined as follows:
# For every person in one of the households, take the smallest distance
# between it and any person in the other household.
# The household distance is the biggest of the person-person distances
# for all persons in the household.
def distance(self, other_household):
max_person_dist = 0
for my_person in self.persons:
min_person_dist = float("inf")
for other_person in other_household.persons:
min_person_dist = min(min_person_dist, my_person.distance(other_person))
max_person_dist = max(max_person_dist, min_person_dist)
return max_person_dist
#print pd.merge(pd.read_csv(survey_attributes_csv), pd.read_csv(survey_activities_csv), left_on='person_id', right_on='person_id')
# Read survey files and construct list of survey persons
survey_attributes_df = pd.read_csv(survey_attributes_csv)
survey_activities_df = pd.read_csv(survey_activities_csv)
# Add dummy row to be able to use while construction below
empty_row = pd.DataFrame(columns=survey_activities_df.columns.values.squeeze().tolist())
empty_row.set_value(len(survey_activities_df), 'person_id', -1)
empty_row.set_value(len(survey_activities_df), 'household_id', -1)
empty_row.set_value(len(survey_activities_df), 'activity_type', '')
empty_row.set_value(len(survey_activities_df), 'start_time', 0)
empty_row.set_value(len(survey_activities_df), 'duration', 0)
empty_row.set_value(len(survey_activities_df), 'location', 0)
survey_activities_df = survey_activities_df.append(empty_row)
survey_persons = []
activities = []
activity_row_no = 0
for index, attribute_row in survey_attributes_df.iterrows():
while survey_activities_df['person_id'].iloc[activity_row_no] < attribute_row['person_id']: activity_row_no += 1
activities = []
while survey_activities_df['person_id'].iloc[activity_row_no] == attribute_row['person_id']:
activities.append(Activity(survey_activities_df['activity_type'].iloc[activity_row_no],
survey_activities_df['start_time'].iloc[activity_row_no],
survey_activities_df['duration'].iloc[activity_row_no],
survey_activities_df['location'].iloc[activity_row_no]))
activity_row_no += 1
attributes = map(lambda a: attribute_row[a], attribute_names)
survey_persons.append(Person(attribute_row['person_id'],
attribute_row['household_id'],
attributes,
activities))
# Create list of survey households and associate survey persons with them
survey_households = []
for person in survey_persons:
hh_temp = Household(person.household_id)
if not hh_temp in survey_households:
survey_households.append(hh_temp)
for person in survey_persons:
survey_households[survey_households.index(Household(person.household_id))].addPerson(person)
# Rea | d synthetic people file and construct list of synthetic persons. They have no activities.
synthetic_people_df = pd.read_csv(synthetic_people_csv)
synthetic_persons = []
for index, row in synthetic_people_df.iterrows():
attributes = map(lambda a: row[a], attribute_names)
synthetic_persons.append(Person(row['person_id'], row['household_id'], map(lambda a: row[a], attribute_names), []))
# Create list of synthetic households a | nd associate synthetic persons with them
synthetic_households = []
for person in synthetic_persons:
hh_temp = Household(person.household_id)
if not hh_temp in synthetic_households:
synthetic_households.append(hh_temp)
for person in synthetic_persons:
synthetic_households[synthetic_households.index(Household(person.household_id))].addPerson(person)
# Create a dataframe with activity times and attributes. The attributes are
# represented as dummy variables by the vector of zeros and ones created above.
act_df = pd.DataFrame(columns=activity_names+bin_names_flat)
for person in survey_persons:
row = pd.DataFrame(columns=activity_names+bin_names_flat)
for activity_id in range(0, n_activity_names):
row.set_value(person.person_id, activity_names[activity_id], person.survey_activity_times[activity_id])
for bin_no in range(0, n_bin_names_flat):
row.set_value(person.person_id, bin_names_flat[bin_no], person.bins[bin_no])
act_df = act_df.app |
deddokatana/PyMiningCalc | PyMiningCalc/setup.py | Python | gpl-3.0 | 436 | 0.004587 | from distutils.core import setup
setup(
name | ='PyMiningCalc',
version='1.0-Alpha',
packages=['PyMiningCalc'],
url='https://sites.google.com/site/working4coins/calculator, h | ttps://github.com/deddokatana/PyMiningCalc',
license='GPL3',
author='Twitter : @deddokatana - @WorkingForCoins',
author_email='deddokatana@gmail.com',
description='Python3 Output Calculator for Bitcoin/Litecoin Based Currencies'
)
|
xuender/test | testAdmin/itest/migrations/0012_auto__chg_field_conclusion_title__chg_field_answer_title__chg_field_qu.py | Python | apache-2.0 | 5,840 | 0.007021 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Conc | lusion.title'
db.alter_column(u'itest_conclusion', 'title', self.gf('django.db.models.fields.CharField')(max_length=250, null=True))
# Changing field 'Answer.title'
db.alter_column(u'itest_answer', 'title', self.gf('django.db.models.fields.CharField')(max_length=250, null=True))
# Changing field 'Question.title'
db.alter_column(u'itest_question', 'title', self.gf('django.db.models.fields.CharField')(max_length=250, null=True))
| # Changing field 'Test.title'
db.alter_column(u'itest_test', 'title', self.gf('django.db.models.fields.CharField')(max_length=250, null=True))
def backwards(self, orm):
# Changing field 'Conclusion.title'
db.alter_column(u'itest_conclusion', 'title', self.gf('django.db.models.fields.CharField')(default='', max_length=250))
# Changing field 'Answer.title'
db.alter_column(u'itest_answer', 'title', self.gf('django.db.models.fields.CharField')(default=1, max_length=250))
# Changing field 'Question.title'
db.alter_column(u'itest_question', 'title', self.gf('django.db.models.fields.CharField')(default=1, max_length=250))
# Changing field 'Test.title'
db.alter_column(u'itest_test', 'title', self.gf('django.db.models.fields.CharField')(default='a', max_length=250))
models = {
'itest.answer': {
'Meta': {'object_name': 'Answer'},
'conclusion': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['itest.Conclusion']"}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '5850', 'null': 'True', 'blank': 'True'}),
'create_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jump': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['itest.Question']"}),
'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['itest.Question']"}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '450', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'itest.conclusion': {
'Meta': {'ordering': "['num']", 'object_name': 'Conclusion'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '5850', 'null': 'True', 'blank': 'True'}),
'create_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '450', 'null': 'True', 'blank': 'True'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'conclusions'", 'to': "orm['itest.Test']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'itest.question': {
'Meta': {'ordering': "['num']", 'object_name': 'Question'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '5850', 'null': 'True', 'blank': 'True'}),
'create_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '450', 'null': 'True', 'blank': 'True'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['itest.Test']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'itest.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'word': ('django.db.models.fields.CharField', [], {'max_length': '35'})
},
'itest.test': {
'Meta': {'ordering': "['num']", 'object_name': 'Test'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '5850', 'null': 'True', 'blank': 'True'}),
'create_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '450', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'tests'", 'symmetrical': 'False', 'to': "orm['itest.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['itest'] |
ebagdasa/tempest | tempest/api/network/admin/test_quotas.py | Python | apache-2.0 | 3,707 | 0 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF | ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import test
class QuotasTest(base.BaseAdminNetworkTest):
_interface = 'json'
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
list quotas for tenants who have non-default quot | a values
show quotas for a specified tenant
update quotas for a specified tenant
reset quotas to default values for a specified tenant
v2.0 of the API is assumed.
It is also assumed that the per-tenant quota extension API is configured
in /etc/neutron/neutron.conf as follows:
quota_driver = neutron.db.quota_db.DbQuotaDriver
"""
@classmethod
def resource_setup(cls):
super(QuotasTest, cls).resource_setup()
if not test.is_extension_enabled('quotas', 'network'):
msg = "quotas extension not enabled."
raise cls.skipException(msg)
cls.identity_admin_client = cls.os_adm.identity_client
def _check_quotas(self, new_quotas):
# Add a tenant to conduct the test
test_tenant = data_utils.rand_name('test_tenant_')
test_description = data_utils.rand_name('desc_')
tenant = self.identity_admin_client.create_tenant(
name=test_tenant,
description=test_description)
tenant_id = tenant['id']
self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
# Change quotas for tenant
quota_set = self.admin_client.update_quotas(tenant_id,
**new_quotas)
self.addCleanup(self.admin_client.reset_quotas, tenant_id)
for key, value in new_quotas.iteritems():
self.assertEqual(value, quota_set[key])
# Confirm our tenant is listed among tenants with non default quotas
non_default_quotas = self.admin_client.list_quotas()
found = False
for qs in non_default_quotas['quotas']:
if qs['tenant_id'] == tenant_id:
found = True
self.assertTrue(found)
# Confirm from API quotas were changed as requested for tenant
quota_set = self.admin_client.show_quotas(tenant_id)
quota_set = quota_set['quota']
for key, value in new_quotas.iteritems():
self.assertEqual(value, quota_set[key])
# Reset quotas to default and confirm
self.admin_client.reset_quotas(tenant_id)
non_default_quotas = self.admin_client.list_quotas()
for q in non_default_quotas['quotas']:
self.assertNotEqual(tenant_id, q['tenant_id'])
@test.attr(type='gate')
def test_quotas(self):
new_quotas = {'network': 0, 'security_group': 0}
self._check_quotas(new_quotas)
@test.requires_ext(extension='lbaas', service='network')
@test.attr(type='gate')
def test_lbaas_quotas(self):
new_quotas = {'vip': 1, 'pool': 2,
'member': 3, 'health_monitor': 4}
self._check_quotas(new_quotas)
|
pypa/virtualenv | src/virtualenv/seed/embed/via_app_data/pip_install/symlink.py | Python | mit | 2,362 | 0.001693 | from __future__ import absolute_import, unicode_literals
import os
import subprocess
from stat import S_IREAD, S_IRGRP, S_IROTH
from virtualenv.util.path import safe_delete, set_tree
from virtualenv.util.six import ensure_text
from virtualenv.util.subprocess import Popen
from .base import PipInstall
class SymlinkPipInstall(PipInstall):
    """Seed mechanism that symlinks the wheel image into the environment
    instead of copying it (faster, and the image is shared read-only)."""

    def _sync(self, src, dst):
        # Link instead of copying the tree.
        src_str = ensure_text(str(src))
        dest_str = ensure_text(str(dst))
        os.symlink(src_str, dest_str)

    def _generate_new_files(self):
        # create the pyc files, as the build image will be R/O
        process = Popen(
            [ensure_text(str(self._creator.exe)), "-m", "compileall", ensure_text(str(self._image_dir))],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        process.communicate()
        # the root pyc is shared, so we'll not symlink that - but still add the pyc files to the RECORD for close
        root_py_cache = self._image_dir / "__pycache__"
        new_files = set()
        if root_py_cache.exists():
            new_files.update(root_py_cache.iterdir())
            new_files.add(root_py_cache)
            safe_delete(root_py_cache)
        core_new_files = super(SymlinkPipInstall, self)._generate_new_files()
        # remove files that are within the image folder deeper than one level (as these will be not linked directly)
        for file in core_new_files:
            try:
                rel = file.relative_to(self._image_dir)
                if len(rel.parts) > 1:
                    continue
            except ValueError:
                # file is outside the image dir - keep it
                pass
            new_files.add(file)
        return new_files

    def _fix_records(self, new_files):
        # rewrite RECORD so it reflects the linked layout
        new_files.update(i for i in self._image_dir.iterdir())
        extra_record_data_str = self._records_text(sorted(new_files, key=str))
        with open(ensure_text(str(self._dist_info / "RECORD")), "wb") as file_handler:
            file_handler.write(extra_record_data_str.encode("utf-8"))

    def build_image(self):
        super(SymlinkPipInstall, self).build_image()
        # protect the image by making it read only
        set_tree(self._image_dir, S_IREAD | S_IRGRP | S_IROTH)

    def clear(self):
        if self._image_dir.exists():
            safe_delete(self._image_dir)
        super(SymlinkPipInstall, self).clear()
|
stefanseefeld/numba | numba/lowering.py | Python | bsd-2-clause | 36,946 | 0.000271 | from __future__ import print_function, division, absolute_import
from collections import namedtuple
import sys
from functools import partial
from llvmlite.ir import Value
from llvmlite.llvmpy.core import Constant, Type, Builder
from . import (_dynfunc, cgutils, config, funcdesc, generators, ir, types,
typing, utils)
from .errors import LoweringError, new_error_context
from .targets import imputils
class Environment(_dynfunc.Environment):
    """Python-level environment attached to compiled functions; picklable
    via the module name and the constant pool."""
    __slots__ = ()

    @classmethod
    def from_fndesc(cls, fndesc):
        # Build an environment from the function's defining module globals.
        return cls(fndesc.lookup_module().__dict__)

    def __reduce__(self):
        return _rebuild_env, (self.globals['__name__'], self.consts)
def _rebuild_env(modname, consts):
    """Unpickle helper: recreate an Environment for module *modname* and
    restore its constant pool."""
    from . import serialize
    mod = serialize._rebuild_module(modname)
    environment = Environment(mod.__dict__)
    environment.consts[:] = consts
    return environment
_VarArgItem = namedtuple("_VarArgItem", ("vararg", "index"))
class BaseLower(object):
    """
    Lower IR to LLVM
    """
    # If true, then can't cache LLVM module accross process calls
    has_dynamic_globals = False

    def __init__(self, context, library, fndesc, interp):
        self.context = context
        self.library = library
        self.fndesc = fndesc
        self.blocks = utils.SortedMap(utils.iteritems(interp.blocks))
        self.interp = interp
        self.call_conv = context.call_conv
        self.generator_info = self.interp.generator_info

        # Initialize LLVM
        self.module = self.library.create_ir_module(self.fndesc.unique_name)

        # Python execution environment (will be available to the compiled
        # function).
        self.env = Environment.from_fndesc(self.fndesc)

        # Internal states
        self.blkmap = {}
        self.varmap = {}
        self.firstblk = min(self.blocks.keys())
        self.loc = -1

        # Subclass initialization
        self.init()

    def init(self):
        # Optional subclass hook, run at the end of __init__().
        pass

    def init_pyapi(self):
        """
        Init the Python API and Environment Manager for the function being
        lowered.
        """
        if self.pyapi is not None:
            return
        self.pyapi = self.context.get_python_api(self.builder)

        # Store environment argument for later use
        self.envarg = self.call_conv.get_env_argument(self.function)
        # Sanity check
        with cgutils.if_unlikely(self.builder,
                                 cgutils.is_null(self.builder, self.envarg)):
            self.pyapi.err_set_string(
                "PyExc_SystemError",
                "Numba internal error: object mode function called "
                "without an environment")
            self.call_conv.return_exc(self.builder)
        self.env_body = self.context.get_env_body(self.builder, self.envarg)
        self.pyapi.emit_environment_sentry(self.envarg)
        self.env_manager = self.pyapi.get_env_manager(self.env, self.env_body,
                                                      self.envarg)

    def pre_lower(self):
        """
        Called before lowering all blocks.
        """
        # A given Lower object can be used for several LL functions
        # (for generators) and it's important to use a new API and
        # EnvironmentManager.
        self.pyapi = None

    def post_lower(self):
        """
        Called after all blocks are lowered
        """

    def pre_block(self, block):
        """
        Called before lowering a block.
        """

    def return_exception(self, exc_class, exc_args=None):
        # Emit code making the compiled function return with a user exception.
        self.call_conv.return_user_exc(self.builder, exc_class, exc_args)

    def lower(self):
        # Dispatch between plain functions and generators.
        if self.generator_info is None:
            self.genlower = None
            self.lower_normal_function(self.fndesc)
        else:
            self.genlower = self.GeneratorLower(self)
            self.gentype = self.genlower.gentype

            self.genlower.lower_init_func(self)
            self.genlower.lower_next_func(self)
            if self.gentype.has_finalizer:
                self.genlower.lower_finalize_func(self)

        if config.DUMP_LLVM:
            print(("LLVM DUMP %s" % self.fndesc).center(80, '-'))
            print(self.module)
            print('=' * 80)

        # Run target specific post lowering transformation
        self.context.post_lowering(self.module, self.library)

        # Materialize LLVM Module
        self.library.add_ir_module(self.module)

    def extract_function_arguments(self):
        self.fnargs = self.call_conv.decode_arguments(self.builder,
                                                      self.fndesc.argtypes,
                                                      self.function)
        return self.fnargs

    def lower_normal_function(self, fndesc):
        """
        Lower non-generator *fndesc*.
        """
        self.setup_function(fndesc)

        # Init argument values
        self.extract_function_arguments()
        entry_block_tail = self.lower_function_body()

        # Close tail of entry block
        self.builder.position_at_end(entry_block_tail)
        self.builder.branch(self.blkmap[self.firstblk])

    def lower_function_body(self):
        """
        Lower the current function's body, and return the entry block.
        """
        # Init Python blocks
        for offset in self.blocks:
            bname = "B%s" % offset
            self.blkmap[offset] = self.function.append_basic_block(bname)

        self.pre_lower()
        # pre_lower() may have changed the current basic block
        entry_block_tail = self.builder.basic_block

        self.debug_print("# function begin: {0}".format(
            self.fndesc.unique_name))
        # Lower all blocks
        for offset, block in self.blocks.items():
            bb = self.blkmap[offset]
            self.builder.position_at_end(bb)
            self.lower_block(block)
        self.post_lower()
        return entry_block_tail

    def lower_block(self, block):
        """
        Lower the given block.
        """
        self.pre_block(block)
        for inst in block.body:
            self.loc = inst.loc
            defaulterrcls = partial(LoweringError, loc=self.loc)
            with new_error_context('lowering "{inst}" at {loc}', inst=inst,
                                   loc=self.loc, errcls_=defaulterrcls):
                self.lower_inst(inst)

    def create_cpython_wrapper(self, release_gil=False):
        """
        Create CPython wrapper(s) around this function (or generator).
        """
        if self.genlower:
            self.context.create_cpython_wrapper(self.library,
                                                self.genlower.gendesc,
                                                self.env, self.call_helper,
                                                release_gil=release_gil)
        self.context.create_cpython_wrapper(self.library, self.fndesc,
                                            self.env, self.call_helper,
                                            release_gil=release_gil)

    def setup_function(self, fndesc):
        # Setup function
        self.function = self.context.declare_function(self.module, fndesc)
        self.entry_block = self.function.append_basic_block('entry')
        self.builder = Builder(self.entry_block)
        self.call_helper = self.call_conv.init_call_helper(self.builder)

    def typeof(self, varname):
        return self.fndesc.typemap[varname]

    def debug_print(self, msg):
        if config.DEBUG_JIT:
            self.context.debug_print(self.builder, "DEBUGJIT: {0}".format(msg))
class Lower(BaseLower):
GeneratorLower = generators.GeneratorLower
def lower_inst(self, inst):
self.debug_print(str(inst))
if isinstance(inst, ir.Assign):
ty = self.typeof(inst.target.name)
val = self.lower_assign(ty, inst)
self.storevar(val, inst.target.name)
elif isinstance(inst, ir.Branch):
cond = self.loadvar(inst.cond.name)
tr = self.blkmap[inst.truebr]
fl = self.blkmap[inst.falsebr]
condty = self.typeof(inst.cond.name)
pred = self.context.cast(self.bu |
sfowl/fowllanguage | content/migrations/0010_auto_20151019_1410.py | Python | mit | 1,608 | 0.003731 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.timezone import utc
import datetime
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the Message model and refreshes
    # the pub_date defaults on event/post (defaults are the generation time).

    dependencies = [
        ('content', '0009_auto_20150829_1417'),
    ]

    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('msg_subject', models.CharField(max_length=255, verbose_name='Subject')),
                ('msg_text', models.TextField(verbose_name='Text')),
                ('msg_author', models.EmailField(max_length=75, verbose_name='From')),
                ('recv_date', models.DateTimeField(editable=False, verbose_name='Date Received', default=datetime.datetime(2015, 10, 19, 4, 10, 29, 712166, tzinfo=utc))),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AlterField(
            model_name='event',
            name='pub_date',
            field=models.DateTimeField(editable=False, verbose_name='Date Published', default=datetime.datetime(2015, 10, 19, 4, 10, 29, 711232, tzinfo=utc)),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='post',
            name='pub_date',
            field=models.DateTimeField(editable=False, verbose_name='Date Published', default=datetime.datetime(2015, 10, 19, 4, 10, 29, 711716, tzinfo=utc)),
            preserve_default=True,
        ),
    ]
|
dimorinny/ufed-modules | ya-maps.py | Python | apache-2.0 | 7,341 | 0.002052 | # -*- coding: utf8 -*-
# Extracted data:
# Favorite places
# Bookmarks
# Search queries history
from physical import *
import SQLiteParser
from System.Convert import IsDBNull
__author__ = "Dmitry Merkuryev"


def chromiumTimestampParse(timestamp):
    """Convert a Chromium/WebKit timestamp (microseconds since 1601-01-01)
    to a TimeStamp by rebasing onto the Unix epoch."""
    return TimeStamp.FromUnixTime(timestamp / 1000000 - 11644473600)


def commonTimestampParse(timestamp):
    """Convert a millisecond-resolution Unix timestamp to a TimeStamp."""
    return TimeStamp.FromUnixTime(timestamp / 1000)
class YandexMapsLabel(object):
    """A saved map bookmark ("label") row read from labels.db."""

    def __init__(self, databaseRecord):
        self.labelName = databaseRecord['label_name'].Value
        self.lat = databaseRecord['lat'].Value
        self.lon = databaseRecord['lon'].Value
        self.timestamp = databaseRecord['date'].Value

    def toModel(self):
        """Build a Location model for this bookmark."""
        locationModel = Location()
        locationModel.Position.Value = self.parsePosition()
        locationModel.TimeStamp.Value = commonTimestampParse(self.timestamp)
        locationModel.Name.Value = self.labelName
        locationModel.Type.Value = "Закладка"
        locationModel.Category.Value = "Yandex Maps (Закладки)"
        locationModel.Deleted = DeletedState.Intact
        return locationModel

    def parseAddress(self):
        # Bookmarks carry no address data; intentionally returns None.
        pass

    def parsePosition(self):
        coordinateModel = Coordinate()
        coordinateModel.Longitude.Value = self.lon
        coordinateModel.Latitude.Value = self.lat
        return coordinateModel
class YandexMapsRoute(object):
    """A route-history row read from routehistory.db."""

    def __init__(self, databaseRecord):
        self.geocodeName = databaseRecord['geocode_name'].Value
        self.geocodeSubname = databaseRecord['geocode_subname'].Value
        self.lat = databaseRecord['lat'].Value
        self.lon = databaseRecord['lon'].Value
        self.timestamp = databaseRecord['date'].Value

    def toModel(self):
        """Build a Location model for this route destination."""
        locationModel = Location()
        locationModel.Address.Value = self.parseAddress()
        locationModel.Position.Value = self.parsePosition()
        locationModel.TimeStamp.Value = commonTimestampParse(self.timestamp)
        locationModel.Name.Value = self.geocodeName
        locationModel.Type.Value = "Маршрут"
        locationModel.Deleted = DeletedState.Intact
        locationModel.Category.Value = "Yandex Maps (Маршруты)"
        return locationModel

    def parseAddress(self):
        """Split the geocode strings into a StreetAddress.

        Name looks like "street, ...", subname like "city, country, ...";
        only the leading components are used.
        """
        streetAddressModel = StreetAddress()
        name_parts = self.geocodeName.split(", ")
        subname_parts = self.geocodeSubname.split(", ")
        # split() always yields at least one element, so index 0 is safe.
        streetAddressModel.Street1.Value = name_parts[0]
        streetAddressModel.City.Value = subname_parts[0]
        if len(subname_parts) > 1:
            streetAddressModel.Country.Value = subname_parts[1]
        return streetAddressModel

    def parsePosition(self):
        coordinateModel = Coordinate()
        coordinateModel.Longitude.Value = self.lon
        coordinateModel.Latitude.Value = self.lat
        return coordinateModel
class YandexMapsParser(object):
    """Decodes Yandex Maps app data: search history, bookmarks and routes.

    args:
        root: filesystem node of the app's data directory
        extractDeleted: also carve deleted SQLite records
        extractSource: trace extracted fields back to their source bytes
    """

    def __init__(self, root, extractDeleted, extractSource):
        self.root = root
        self.extractDeleted = extractDeleted
        self.extractSource = extractSource
        self.models = []
        self.source = 'Yandex Maps'

    def parse(self):
        """Run all sub-parsers and return the accumulated models."""
        self.mainDir = self.root.GetByPath('/databases')
        if self.mainDir is None:
            return []
        self.parseSearchHistory()
        self.parseLabels()
        self.parseRoutes()
        return self.models

    def parseLabels(self):
        """Extract bookmarks from labels.db (table 'mylabels')."""
        dbNode = self.mainDir.GetByPath('labels.db')
        if dbNode is None or dbNode.Data is None:
            return
        db = SQLiteParser.Database.FromNode(dbNode)
        if db is None:
            return
        if 'mylabels' not in db.Tables:
            return
        ts = SQLiteParser.TableSignature('mylabels')
        if self.extractDeleted:
            # Column signatures enable carving of deleted rows.
            SQLiteParser.Tools.AddSignatureToTable(ts, 'label_name', SQLiteParser.Tools.SignatureType.Text)
            SQLiteParser.Tools.AddSignatureToTable(ts, 'lat', SQLiteParser.Tools.SignatureType.Long)
            SQLiteParser.Tools.AddSignatureToTable(ts, 'lon', SQLiteParser.Tools.SignatureType.Long)
            SQLiteParser.Tools.AddSignatureToTable(ts, 'date', SQLiteParser.Tools.SignatureType.Int48)
        for rec in db.ReadTableRecords(ts, self.extractDeleted):
            self.models.append(YandexMapsLabel(rec).toModel())

    def parseRoutes(self):
        """Extract route history from routehistory.db."""
        dbNode = self.mainDir.GetByPath('routehistory.db')
        if dbNode is None or dbNode.Data is None:
            return
        db = SQLiteParser.Database.FromNode(dbNode)
        if db is None:
            return
        if 'routehistory' not in db.Tables:
            return
        ts = SQLiteParser.TableSignature('routehistory')
        if self.extractDeleted:
            SQLiteParser.Tools.AddSignatureToTable(ts, 'geocode_name', SQLiteParser.Tools.SignatureType.Text)
            SQLiteParser.Tools.AddSignatureToTable(ts, 'geocode_subname', SQLiteParser.Tools.SignatureType.Text)
            SQLiteParser.Tools.AddSignatureToTable(ts, 'lat', SQLiteParser.Tools.SignatureType.Long)
            SQLiteParser.Tools.AddSignatureToTable(ts, 'lon', SQLiteParser.Tools.SignatureType.Long)
            SQLiteParser.Tools.AddSignatureToTable(ts, 'date', SQLiteParser.Tools.SignatureType.Int48)
        for rec in db.ReadTableRecords(ts, self.extractDeleted):
            self.models.append(YandexMapsRoute(rec).toModel())

    def parseSearchHistory(self):
        """Extract search queries from yandexsuggest_history.db."""
        dbNode = self.mainDir.GetByPath('yandexsuggest_history.db')
        if dbNode is None or dbNode.Data is None:
            return
        db = SQLiteParser.Database.FromNode(dbNode)
        if db is None:
            return
        if 'suggest_content' not in db.Tables:
            return
        ts = SQLiteParser.TableSignature('suggest_content')
        if self.extractDeleted:
            SQLiteParser.Tools.AddSignatureToTable(ts, 'c0suggest_text_1',
                                                   SQLiteParser.Tools.SignatureType.Null,
                                                   SQLiteParser.Tools.SignatureType.Text)
            SQLiteParser.Tools.AddSignatureToTable(ts, 'c3time',
                                                   SQLiteParser.Tools.SignatureType.Null,
                                                   SQLiteParser.Tools.SignatureType.Int48)
        for rec in db.ReadTableRecords(ts, self.extractDeleted):
            vp = SearchedItem()
            vp.Source.Value = self.source
            vp.Deleted = DeletedState.Intact
            SQLiteParser.Tools.ReadColumnToField(rec, 'c0suggest_text_1', vp.Value, self.extractSource)
            SQLiteParser.Tools.ReadColumnToField[TimeStamp](rec, 'c3time',
                                                            vp.TimeStamp, self.extractSource, commonTimestampParse)
            # Skip rows whose query text is empty/NULL.
            if rec['c0suggest_text_1'].Value:
                self.models.append(vp)
# Locate the Yandex Maps app data directory on the first extracted filesystem.
node = ds.FileSystems[0]['/data/data/ru.yandex.yandexmaps']
# Parse bookmarks, routes and search history (deleted-record carving and
# source tracing both enabled via the two True flags).
results = YandexMapsParser(node, True, True).parse()
# Attach the decoded models to the results tree.
ds.Models.AddRange(results)
|
hryamzik/ansible | lib/ansible/modules/cloud/cloudstack/cs_router.py | Python | gpl-3.0 | 10,470 | 0.000573 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_router
short_description: Manages routers on Apache CloudStack based clouds.
description:
- Start, restart, stop and destroy routers.
- C(state=present) is not able to create routers, use M(cs_network) instead.
version_added: "2.2"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the router.
required: true
service_offering:
description:
- Name or id of the service offering of the router.
domain:
description:
- Domain the router is related to.
account:
description:
- Account the router is related to.
project:
description:
- Name of the project the router is related to.
zone:
description:
- Name of the zone the router is deployed in.
- If not set, all zones are used.
version_added: "2.4"
state:
description:
- State of the router.
default: 'present'
choices: [ 'present', 'absent', 'started', 'stopped', 'restarted' ]
poll_async:
description:
- Poll async jobs until job has finished.
default: yes
type: bool
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure the router has the desired service offering, no matter if
# the router is running or not.
- local_action:
module: cs_router
name: r-40-VM
service_offering: System Offering for Software Router
# Ensure started
- local_action:
module: cs_router
name: r-40-VM
state: started
# Ensure started with desired service offering.
# If the service offerings changes, router will be rebooted.
- local_action:
module: cs_router
name: r-40-VM
service_offering: System Offering for Software Router
state: started
# Ensure stopped
- local_action:
module: cs_router
name: r-40-VM
state: stopped
# Remove a router
- local_action:
module: cs_router
name: r-40-VM
state: absent
'''
RETURN = '''
---
id:
description: UUID of the router.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the router.
returned: success
type: string
sample: r-40-VM
created:
description: Date of the router was created.
returned: success
type: string
sample: 2014-12-01T14:57:57+0100
template_version:
description: Version of the system VM template.
returned: success
type: string
sample: 4.5.1
requires_upgrade:
description: Whether the router needs to be upgraded to the new template.
returned: success
type: bool
sample: false
redundant_state:
description: Redundant state of the router.
returned: success
type: string
sample: UNKNOWN
role:
description: Role of the router.
returned: success
type: string
sample: VIRTUAL_ROUTER
zone:
description: Name of zone the router is in.
returned: success
type: string
sample: ch-gva-2
service_offering:
description: Name of the service offering the router has.
returned: success
type: string
sample: System Offering For Software Router
state:
description: State of the router.
returned: success
type: string
sample: Active
domain:
description: Domain the router is related to.
returned: success
type: string
sample: ROOT
account:
description: Account the router is related to.
returned: success
type: string
sample: admin
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackRouter(AnsibleCloudStack):
    """CloudStack router lifecycle management (start/stop/restart/destroy)."""

    def __init__(self, module):
        super(AnsibleCloudStackRouter, self).__init__(module)
        # Map CloudStack API result keys to the module's return value names.
        self.returns = {
            'serviceofferingname': 'service_offering',
            'version': 'template_version',
            'requiresupgrade': 'requires_upgrade',
            'redundantstate': 'redundant_state',
            'role': 'role'
        }
        # Memoized router lookup result (see get_router()).
        self.router = None
def get_service_offering_id(self):
service_offering = self.module.params.get('service_offering')
if not service_offering:
return None
args = {
'issystem': True
}
service_offerings = self.query_api('listServiceOfferings', **args)
if service_offerings:
for s in service_offerings['serviceoffering']:
if service_offering in [s['name'], s['id']]:
return s['id']
self.module.fail_json(msg="Service offering '%s' not found" % service_offering)
def get_router(self):
if not self.router:
router = self.module.params.get('name')
args = {
'projectid': self.get_project(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'listall': True,
'fetch_list': True,
}
if self.module.params.get('zone'):
args['zoneid'] = self.get_zone(key='id')
routers = self.query_api('listRouters', **args)
if routers:
for r in routers:
if router.lower() in [r['name'].lower(), r['id']]:
self.router = r
break
return self.router
def start_router(self):
router = self.get_router()
if not router:
self.module.fail_json(msg="Router not found")
if router['state'].lower() != "running":
self.result['changed'] = True
args = {
'id': router['id'],
}
if not self.module.check_mode:
res = self.query_api('startRouter', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
router = self.poll_job(res, 'router')
return router
def stop_router(self):
router = self.get_router()
if not router:
self.module.fail_json(msg="Router not found")
if router['state'].lower() != "stopped":
self.result['changed'] = True
args = {
'id': router['id'],
}
if not self.module.check_mode:
res = self.query_api('stopRouter', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
router = self.poll_job(res, 'router')
return router
def reboot_router(self):
router = self.get_router()
if not router:
self.module.fail_json(msg="Router not found")
self.result['changed'] = True
args = {
'id': router['id'],
}
if not self.module.check_mode:
res = self.query_api('rebootRouter', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
router = self.poll_job(res, 'router')
return router
def absent_router(self):
router = self.get_router()
if router:
self.result['changed'] = True
args = {
'id': router['id'],
}
if not self.module.check_mode:
res = self.query_api('destroyRouter', **args)
poll_ |
zaneswafford/songaday_searcher | songaday_searcher/celery.py | Python | bsd-3-clause | 352 | 0 | import o | s
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'songaday_searcher.settings')
app = Celery('songaday_searcher')
app.config_from_object('django.conf:settings')
ap | p.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
|
puttarajubr/commcare-hq | corehq/apps/receiverwrapper/models.py | Python | bsd-3-clause | 16,267 | 0.002336 | import base64
from collections import defaultdict, namedtuple
from datetime import datetime, timedelta
import logging
import urllib
import urlparse
from dimagi.ext.couchdbkit import *
from couchdbkit.exceptions import ResourceNotFound
from django.core.cache import cache
import socket
import hashlib
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.xml import V2, LEGAL_VERSIONS
from corehq.apps.receiverwrapper.exceptions import DuplicateFormatException, IgnoreDocument
from couchforms.models import XFormInstance
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.parsing import json_format_datetime
from dimagi.utils.mixins import UnicodeMixIn
from dimagi.utils.post import simple_post
from dimagi.utils.couch import LockableMixIn
# Registry mapping repeater class names to their classes (filled by the
# register_repeater_type decorator below).
repeater_types = {}


def register_repeater_type(cls):
    """Class decorator: register *cls* in repeater_types under its name."""
    repeater_types[cls.__name__] = cls
    return cls
def simple_post_with_cached_timeout(data, url, expiry=60 * 60, *args, **kwargs):
    """POST *data* to *url*, short-circuiting endpoints that recently failed.

    A cache entry keyed on the url remembers 'timeout'/'error' outcomes for
    *expiry* seconds, so a repeatedly failing endpoint is not hammered.
    Raises socket.timeout when the endpoint is in its back-off window.
    """
    # no control characters (e.g. '/') in keys
    key = hashlib.md5(
        '{0} timeout {1}'.format(__name__, url)
    ).hexdigest()
    cache_value = cache.get(key)

    if cache_value == 'timeout':
        raise socket.timeout('recently timed out, not retrying')
    elif cache_value == 'error':
        raise socket.timeout('recently errored, not retrying')

    try:
        resp = simple_post(data, url, *args, **kwargs)
    except socket.timeout:
        cache.set(key, 'timeout', expiry)
        raise

    # Remember non-2xx responses as errors for the back-off window.
    if not 200 <= resp.status < 300:
        cache.set(key, 'error', expiry)
    return resp
DELETED = "-Deleted"

# (name, user-facing label, payload generator class) for one repeater format.
FormatInfo = namedtuple('FormatInfo', 'name label generator_class')


class GeneratorCollection(object):
    """Collection of format_name to Payload Generators for a Repeater class

    args:
        repeater_class: A valid child class of Repeater class
    """

    def __init__(self, repeater_class):
        self.repeater_class = repeater_class
        self.default_format = ''
        self.format_generator_map = {}

    def add_new_format(self, format_name, format_label, generator_class, is_default=False):
        """Adds a new format->generator mapping to the collection

        args:
            format_name: unique name to identify the format
            format_label: label to be displayed to the user
            generator_class: child class of .repeater_generators.BasePayloadGenerator

        kwargs:
            is_default: True if the format_name should be default format

        exceptions:
            raises DuplicateFormatException if format is added with is_default while other
            default exists
            raises DuplicateFormatException if format_name already exists in the collection
        """
        if is_default and self.default_format:
            raise DuplicateFormatException("A default format already exists for this repeater.")
        elif is_default:
            self.default_format = format_name
        if format_name in self.format_generator_map:
            raise DuplicateFormatException("There is already a Generator with this format name.")

        self.format_generator_map[format_name] = FormatInfo(
            name=format_name,
            label=format_label,
            generator_class=generator_class
        )

    def get_default_format(self):
        """returns default format"""
        return self.default_format

    def get_default_generator(self):
        """returns generator class for the default format"""
        # BUG FIX: upstream used `raise` here, which raised a TypeError
        # (raising a non-exception) instead of returning the class.
        return self.format_generator_map[self.default_format].generator_class

    def get_all_formats(self, for_domain=None):
        """returns all the formats added to this repeater collection"""
        # .items() instead of py2-only .iteritems(): identical behavior on 2 and 3.
        return [(name, format.label) for name, format in self.format_generator_map.items()
                if not for_domain or format.generator_class.enabled_for_domain(for_domain)]

    def get_generator_by_format(self, format):
        """returns generator class given a format"""
        return self.format_generator_map[format].generator_class
class RegisterGenerator(object):
    """Decorator to register new formats and Payload generators for Repeaters

    args:
        repeater_cls: A child class of Repeater for which the new format is being added
        format_name: unique identifier for the format
        format_label: description for the format

    kwargs:
        is_default: whether the format is default to the repeater_cls
    """

    # Class-level registry: repeater class -> GeneratorCollection.
    generators = {}

    def __init__(self, repeater_cls, format_name, format_label, is_default=False):
        self.format_name = format_name
        self.format_label = format_label
        self.repeater_cls = repeater_cls
        self.label = format_label
        self.is_default = is_default

    def __call__(self, generator_class):
        # `not in` instead of `not x in y` (idiomatic, same semantics).
        if self.repeater_cls not in RegisterGenerator.generators:
            RegisterGenerator.generators[self.repeater_cls] = GeneratorCollection(self.repeater_cls)
        RegisterGenerator.generators[self.repeater_cls].add_new_format(
            self.format_name,
            self.format_label,
            generator_class,
            is_default=self.is_default
        )
        return generator_class

    @classmethod
    def generator_class_by_repeater_format(cls, repeater_class, format_name):
        """Return generator class given a Repeater class and format_name"""
        generator_collection = cls.generators[repeater_class]
        return generator_collection.get_generator_by_format(format_name)

    @classmethod
    def all_formats_by_repeater(cls, repeater_class, for_domain=None):
        """Return all formats for a given Repeater class"""
        generator_collection = cls.generators[repeater_class]
        return generator_collection.get_all_formats(for_domain=for_domain)

    @classmethod
    def default_format_by_repeater(cls, repeater_class):
        """Return default format_name for a Repeater class"""
        generator_collection = cls.generators[repeater_class]
        return generator_collection.get_default_format()
class Repeater(Document, UnicodeMixIn):
    """Base couch document describing a forwarding endpoint configuration."""
    base_doc = 'Repeater'

    domain = StringProperty()
    url = StringProperty()
    format = StringProperty()

    use_basic_auth = BooleanProperty(default=False)
    username = StringProperty()
    password = StringProperty()

    def format_or_default_format(self):
        """Return the configured format, falling back to the class default."""
        return self.format or RegisterGenerator.default_format_by_repeater(self.__class__)

    def get_payload_generator(self, payload_format):
        """Instantiate the payload generator registered for *payload_format*."""
        gen = RegisterGenerator.generator_class_by_repeater_format(self.__class__, payload_format)
        return gen(self)

    def payload_doc(self, repeat_record):
        # Subclasses return the document the payload is generated from.
        raise NotImplementedError

    def get_payload(self, repeat_record):
        generator = self.get_payload_generator(self.format_or_default_format())
        return generator.get_payload(repeat_record, self.payload_doc(repeat_record))

    def register(self, payload, next_check=None):
        """Create and save a RepeatRecord scheduling delivery of *payload*.

        *payload* may be a couch document (its get_id is used) or a raw id.
        """
        try:
            payload_id = payload.get_id
        except Exception:
            # Deliberate duck-typing: fall back to treating payload as an id.
            payload_id = payload
        repeat_record = RepeatRecord(
            repeater_id=self.get_id,
            repeater_type=self.doc_type,
            domain=self.domain,
            next_check=next_check or datetime.utcnow(),
            payload_id=payload_id
        )
        repeat_record.save()
        return repeat_record
@classmethod
def by_domain(cls, domain):
key = [domain]
if repeater_types.has_key(cls.__name__):
key.append(cls.__name__)
elif cls.__name__ == Repeater.__name__:
# In this case the wrap fun | ction delegates to the
# appropriate sub-repeater types.
pass
else:
# Any repeater type can be posted to the API, and the installed apps
# determine whether we actually know about it.
# But if we do not know about it, then may as well return nothing now
return []
raw_docs = cls.view('receiverwrap | per/repeaters',
startkey=key,
endkey=key + [{}],
include_docs=True,
reduce=False,
wrap_doc=False
)
|
mudiarto/django-mailer | mailer/__init__.py | Python | mit | 3,442 | 0.003777 | VERSION = (0, 2, 1, "f", 1) # following PEP 386
DEV_N = None
def get_version():
version = "%s.%s" % (VERSION[0], VERSION[1])
if VERSION[2]:
version = "%s.%s" % (version, VERSION[2])
if VERSION[3] != "f":
version = "%s%s%s" % (version, VERSION[3], VERSION[4])
if DEV_N:
version = "%s.dev%s" % (version, DEV_N)
return version
__version__ = get_version()
# Human-readable priority names -> queue priority codes used by mailer.models.
PRIORITY_MAPPING = {
    "high": "1",
    "medium": "2",
    "low": "3",
    "deferred": "4",
}


# replacement for django.core.mail.send_mail
def send_mail(subject, message, from_email, recipient_list, priority="medium",
              fail_silently=False, auth_user=None, auth_password=None, headers=None):
    """Queue a plain-text message instead of sending it immediately.

    Mirrors django.core.mail.send_mail's signature; always reports 1 queued.
    """
    from django.utils.encoding import force_unicode
    from mailer.models import make_message

    priority = PRIORITY_MAPPING[priority]

    # need to do this in case subject used lazy version of ugettext
    subject = force_unicode(subject)
    message = force_unicode(message)

    make_message(subject=subject,
                 body=message,
                 from_email=from_email,
                 to=recipient_list,
                 headers=headers,
                 priority=priority).save()
    return 1
def send_html_mail(subject, message, message_html, from_email, recipient_list,
                   priority="medium", fail_silently=False, auth_user=None,
                   auth_password=None, headers=None):
    """
    Function to queue HTML e-mails

    *message* is the plain-text body; *message_html* is attached as the
    text/html alternative. Always reports 1 queued.
    """
    from django.utils.encoding import force_unicode
    from django.core.mail import EmailMultiAlternatives
    from mailer.models import make_message

    priority = PRIORITY_MAPPING[priority]

    # need to do this in case subject used lazy version of ugettext
    subject = force_unicode(subject)
    message = force_unicode(message)

    msg = make_message(subject=subject,
                       body=message,
                       from_email=from_email,
                       to=recipient_list,
                       headers=headers,
                       priority=priority)
    email = msg.email
    # Rebuild as a multipart message so the HTML alternative can be attached.
    email = EmailMultiAlternatives(email.subject, email.body, email.from_email, email.to)
    email.attach_alternative(message_html, "text/html")
    msg.email = email
    msg.save()
    return 1
def send_mass_mail(datatuple, fail_silently=False, auth_user=None,
                   auth_password=None, connection=None):
    """Queue one message per (subject, message, sender, recipient) tuple and
    return the number queued."""
    from mailer.models import make_message  # kept for parity with upstream import
    return sum(
        send_mail(subject, message, sender, recipient)
        for subject, message, sender, recipient in datatuple
    )
def mail_admins(subject, message, fail_silently=False, connection=None, priority="medium"):
    """Queue *message* to every address in settings.ADMINS."""
    from django.conf import settings
    from django.utils.encoding import force_unicode
    return send_mail(settings.EMAIL_SUBJECT_PREFIX + force_unicode(subject),
                     message,
                     settings.SERVER_EMAIL,
                     [a[1] for a in settings.ADMINS])
def mail_managers(subject, message, fail_silently=False, connection=None, priority="medium"):
    """Queue *message* to every address in settings.MANAGERS."""
    from django.conf import settings
    from django.utils.encoding import force_unicode
    return send_mail(settings.EMAIL_SUBJECT_PREFIX + force_unicode(subject),
                     message,
                     settings.SERVER_EMAIL,
                     [a[1] for a in settings.MANAGERS])
|
tuxology/bcc | examples/tracing/bitehist.py | Python | apache-2.0 | 1,181 | 0.00508 | #!/usr/bin/python
#
# bitehist.py Block I/O size histogram.
# For Linux, uses BCC, eBPF. Embedded C.
#
# Written as a basic example of using histograms to show a distribution.
#
# A Ctrl-C will print the gathered histogram then exit.
#
# Copyright (c) 2015 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 15-Aug-2015 Brendan Gregg Created this.
# 03-Feb-2019 Xiaozhou Liu added linear histogram.
from __future__ import print_function
from bcc import BPF
from time import sleep
# load BPF program
b = BPF(text="""
#include <uapi/linux/ptrace.h>
#include <linux/blkdev.h>
BPF_HISTOGRAM(dist);
BPF_HISTOGRAM(dist_linear);
int kprobe__blk_account_io_done(struct pt_regs *ctx, struct request *req)
{
dist.increment(bpf_log2l(req->__data_len / 1024));
dist_linear.increment(req->__data_len / 1024);
return 0;
}
""")
# header
print("Tracing... Hit Ctrl-C to end.")
# trace until Ctrl-C
try:
sleep(99999999)
except KeyboardInterrupt:
print()
# output
print("log2 histogram")
| print("~~~~~~~~~~~~~~")
b["dist"].print_log2_hist("kbytes")
print("\nlinear histogram")
print("~~~~~~ | ~~~~~~~~~~")
b["dist_linear"].print_linear_hist("kbytes")
|
bzyx/precious | precious/models/Build.py | Python | bsd-3-clause | 763 | 0.001311 | # -*- coding: utf-8 -*-
from precious import db
from datetime import datetime
class Build(db.Model):
__tablename__ = 'builds'
id = db.Column(db.Integer, primary_key=True, unique=True)
project_id = db.Column(db.Integer | , db.ForeignKey('projects.id'))
date = db.Column(db.DateTime)
revision = db.Column(db.LargeBinary) |
stdout = db.Column(db.UnicodeText)
success = db.Column(db.Boolean)
def __init__(self, project_id, revision, stdout=u"", success=True, date=datetime.now()):
self.project_id = project_id
self.date = date
self.revision = revision
self.stdout = stdout
self.success = success
def __repr__(self):
return '<Build id:%r project_id:%r>' % (self.id, self.project_id)
|
ict-felix/stack | modules/resource/orchestrator/src/monitoring/utils.py | Python | apache-2.0 | 4,165 | 0.00048 | import core
logger = core.log.getLogger("monitoring-utils")
class MonitoringUtils(object):
def __init__(self):
pass
@staticmethod
def check_existing_tag_in_topology(root, node, node_type, node_urns,
domain=None):
tag_exists = False
try:
elements = []
if not isinstance(node_urns, list):
node_urns = [node_urns]
try:
for node_urn in node_urns:
if node == "link":
elements.extend(
MonitoringUtils.
check_existing_link_tag_in_topology(
root, node_type, node_urn))
else:
node_elements = MonitoringUtils.\
check_existing_generic_tag_in_topology(
root, node, node_type, node_urn, domain)
if len(node_elements) > 0:
elements = node_elements
except:
pass
if len(elements) > 0:
tag_exists = True
except:
pass
return tag_exists
@staticmethod
def check_existing_generic_tag_in_topology(root, node, node_type, node_urn,
domain=None):
elements = []
if node_type == "tn":
if domain is not None:
domain = domain if "urn" in domain else \
"urn:publicid:IDN+ocf:" + domain
if node_type is None:
elements = root.xpath(
"//topology[@name='%s']//%s[@id='%s']" %
(domain, node, node_urn))
elements = root.xpath(
"//topology[@name='%s']//%s[@type='%s'][@id='%s']" %
(domain, node, node_type, node_urn))
else:
elements = root.xpath(
"//%s[@type='%s'][@id='%s']" %
(node, node_type, node_urn))
if node_type is None:
elements = root.xpath("//%s[@id='%s']" % (node, node_urn))
return elements
@staticmethod
def check_existing_link_tag_in_topology(root, node_type, node_urn):
elements = []
interfaces_same_link = True
elem = root.xpath(
"//link[@type='%s']//interface_ref[@client_id='%s']" %
(node_type, node_urn))
if node_type is None:
elem = root.xpath(
"//link//interface_ref[@client_id='%s']" % node_urn)
for element in elements:
if element.getparent() == elem[0].getparent():
interfaces_same_link &= True
else:
interfaces_same_link &= False
if interfaces_same_link:
elements.extend(elem)
return elements
@staticmethod
def find_virtual_link_end_to_end(hybrid_links):
# Retrieve the endpoints of the slice ("abstract link" in M/MS)
e2e_link_urns = set()
# 1) Check for SDNRM-SDNRM end-paths
for se_link in hybrid_links:
# 1) Check for SDN-SDN end paths
# 2) Check for SDN-TN end paths
for link_end in [":ofam", ":tnrm"]:
if link_end in se_link["source"]:
e2e_link_urns.add(se_link["source"])
if link_end in se_link["destination"]:
e2e_link_urns.add(se_link | ["destination"])
return list(e2e_link_urns)
@staticmethod
def find_virtual_links(topology_root):
links_ids = []
for link_id in topology_root.xpath("//topology//link[@id]"):
links_ids.append(link_id.attrib["id"])
return links_ids
@staticmethod
def find_slice_name(topology_roo | t):
slice_name = ""
try:
slice_name = topology_root.xpath("//topology")[0].attrib["name"]
except Exception as e:
logger.warning("Unable to retrieve slice name for topology. \
Details: %s" % e)
return slice_name
|
CamoFPV/Relay-Website-with-Raspberry-Pi | py/14on.py | Python | mit | 254 | 0.011811 | #!/usr/bin/env python
# IMPORT NECESSARY LIBRARIES
import RPi.GPIO as GPIO
import time |
# INITIALIZE THE GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(26,GPIO.OUT)
|
# TURN ON RELAY 1
GPIO.output(26,0)
# CLEAN UP GPIO
# GPIO.cleanup()
|
maisilex/Lets-Begin-Python | list.py | Python | mit | 52 | 0 | a = | [int(i) for i in input().split()]
print(sum | (a))
|
jroivas/odt-html5 | odt.py | Python | mit | 23,990 | 0.002126 | import zipfile
import os
import xml.etree.ElementTree as etree
import re
import copy
class ODTPage:
def __init__(self, name, odt=None, pagename='page', indexname='index'):
self.pagename = pagename
self.indexname = indexname
if odt is None:
self.odt = ODT(name, pagename=pagename)
else:
self.odt = odt
self.index = []
def pages(self):
return self.odt.pageCount()
def getTitle(self):
for i in self.odt.titles:
return (i, self.odt.titles[i][0])
return (0, '')
def solveCurrentPage(self, page):
pages = self.odt.pageCount()
if page > pages:
page = pages
if page < 1:
page = 1
return page
def handlePageTitle(self, page):
(level, page_title) = self.getTitle()
if page_title:
self.index.append((level, page, page_title))
return page_title
def getPage(self, page=1, title="ODT", prev_page=True):
self.odt.reset()
styles = ''
page = self.solveCurrentPage(page)
content = self.getContent(self.odt, page)
body = self.getBody(self.odt, page, content, prev_page, title)
page_title = self.handlePageTitle(page)
if page_title is not None:
title += ' - ' + page_title
head = self.getHeader(title, styles)
foot = self.getFooter()
return page_title, content, head + body + foot
def genIndex(self, title, extra):
res = '<body>\n'
res += extra
res += '<div class="page">\n'
for level, page, target in self.index:
res += '<div>%s<a href="%s_%s.html">%s</a></div>\n' % (' ' * 2 * int(level), self.pagename, page, target)
res += '</div>\n'
res += '</body>\n'
head = self.getHeader(title, '')
foot = | self.getFooter()
return head + res + foot
def getHeader(self, title, extra=""):
return """<html>
<head>
<title>%s</title>
<link rel="stylesheet" type="text/css" title="styles" href= | "odt.css"/>
<meta charset="UTF-8">
<script type="text/javascript" src="jquery.min.js"></script>
<script type="text/javascript" src="odt.js"></script>
%s
</head>
""" % (title, extra)
def getContent(self, odt, page):
res = odt.read()
tmp = ''
if not res:
return "<p>Invalid file</p>"
tmp = odt.parseContent(page=page)
return """
<!-- START -->
<div class="page">
%s
</div>
<!-- END -->
""" % (''.join(tmp))
def getBody(self, odt, page, content, prev_page, title):
cntx = ''
cntx += '<a href="%s.html"><div id="top_left">%s</div></a>\n' % (self.indexname, title)
if prev_page and page > 1:
if prev_page == True:
prev_page = "%s_%s.html" % (self.pagename, page - 1)
cntx += """
<!-- PREV --><a href="%s">
<div id='prevPage'>
<<
</div></a>
""" % (prev_page)
cntx += """
<input type='hidden' id='pagenum' name='pagenum' value='%s'></input>
<input type='hidden' id='pagecnt' name='pagecnt' value='%s'></input>
""" % (page, odt.pageCount())
cntx += "<div id='pageDiv'>\n"
cntx += content
cntx += "</div>\n"
if page < odt.pageCount():
cntx += """
<!-- NEXT --><a href="%s_%s.html">
<div id='nextPage'>
>>
</div>
</a>
""" % (self.pagename, page + 1)
return """
<body>
%s
</body>
""" % (cntx)
def getFooter(self):
return """</html>"""
class ODT:
def __init__(self, name, pagename):
self._name = name
self._pagename = pagename
self._page = 1
self._zip = None
self._styles = {}
self._styles_xml = None
self._content_xml = None
self._stylename = None
self._read = False
self._read = self.read()
self._pagecnt = None
self._lists = {}
self._hlevels = {}
self._localtargets = {}
self._framedata = None
self._listname = None
self._tab = None
self._stylestack = []
self._imageframe1 = ''
self._imageframe1_end = ''
self._imageframe2 = ''
self._imageframe2_end = ''
self.images = []
self.titles = {}
#self._pagedata = {}
self.rendered_width = 0
def reset(self):
self.titles = {}
self._page = 1
def open(self):
if not os.path.isfile(self._name):
self._zip = None
return False
try:
self._zip = zipfile.ZipFile(self._name, 'r')
except zipfile.BadZipfile:
self._zip = None
return False
return True
def close(self):
self._zip.close()
def extract(self, file):
if self._zip == None:
return None
try:
return self._zip.read(file)
except KeyError:
return None
def cleanTag(self, tag):
return re.sub("{[^}]+}","",tag).strip()
def findElement(self, root, name):
res = []
#if self.cleanTag(root.tag) == name:
if root.tag == name:
res.append(root)
for child in root:
if child.tag == name:
res.append(child)
tmp = self.findElement(child, name)
for item in tmp:
if item not in res:
res.append(item)
return res
def parseStyleTag(self, styles):
res = {}
for style in styles:
tmp = self.getAttrib(style, "name")
if tmp is not None:
res[tmp] = {}
self._stylename = tmp
elif self._stylename not in res:
res[self._stylename] = {}
pstyle = self.getAttrib(style, "parent-style-name")
if pstyle is not None and res is not None:
res[self._stylename]["parent"] = pstyle
text_prop = self.parseTextProperties(style)
if text_prop:
res[self._stylename]["text-prop"] = text_prop
para_prop = self.parseParagraphProperties(style)
if para_prop:
res[self._stylename]["para-prop"] = para_prop
return res
def filterAttributes(self, props, keep):
res = []
for prop in props:
style = {}
for val in prop.attrib:
if val in keep:
style[val] = prop.attrib[val]
if style:
res.append(style)
if len(res) == 1:
return res[0]
return res
def parseTextPropertyTag(self, props):
valid_text_attrs = ["font-size", "color", "background-color", "font-weight",
"font-style", "text-underline-style", "text-underline-color",
"text-overline-style", "text-line-through-style" ]
return self.filterAttributes(props, valid_text_attrs)
def parseParagraphPropertyTag(self, props):
valid_para_attrs = [ "break-before", "text-align", "color", "background-color",
"text-indent", "margin-left", "margin-right", "margin-top", "margin-bottom" ]
return self.filterAttributes(props, valid_para_attrs)
def getAttrib(self, tag, name):
for attrib in tag.attrib:
#if self.cleanTag(attrib)==name:
if attrib == name:
return tag.attrib[attrib]
return None
def stripNamespace(self, root):
for el in root.getiterator():
if el.tag[0] == "{":
el.tag = el.tag.split('}', 1)[1]
tmp = {}
for attr in el.attrib:
if attr[0] == "{":
tmp[attr.split('}', 1)[1]] = el.attrib[attr]
else:
tmp[attr] = el.attrib[attr]
el.attrib = tmp
def parseStyleXML(self):
if sel |
sql-machine-learning/sqlflow | python/runtime/tensorflow/__init__.py | Python | apache-2.0 | 679 | 0 | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of | the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# dis | tributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from runtime.tensorflow.get_tf_model_type import is_tf_estimator # noqa: F401
|
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_11_01/operations/_load_balancer_backend_address_pools_operations.py | Python | mit | 8,928 | 0.004368 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerBackendAddressPoolsOperations(object):
"""LoadBalancerBackendAddressPoolsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.LoadBalancerBackendAddressPoolListResult"]
"""Gets all the load balancer backed address pools.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerBackendAddressPoolListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_11_01.models.LoadBalancerBackendAddressPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerBackendAddressPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_versio | n, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerBackendAddressPoolListResult', pipeline_response)
l | ist_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'} # type: ignore
def get(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
backend_address_pool_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.BackendAddressPool"
"""Gets load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackendAddressPool, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.BackendAddressPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendAddressPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response |
pantheon-systems/etl-framework | etl_framework/utilities/DatetimeConverter.py | Python | mit | 889 | 0.001125 | """class to convert datetime values"""
import datetime
class DatetimeConverter(object):
"""stuff"""
_EPOCH_0 = datetime.datetime(1970, 1, 1)
def __init__(self):
"""stuff"""
pass
@staticmethod
def get_tomorrow():
"""stuff"""
return datetime.datetime.today() + datetime.timedelta(days=1)
@staticmethod
def get_yesterday():
return date | time.datetime.today() - datetime.timedelta(days=1)
@classmethod
def get_timestamp(cls, datetime_obj):
"""helper method to return timestamp fo datetime object"""
return (datetime_obj - cls._EPOCH_0).total_seconds()
@classmethod
def get_tomorrow_timestamp(cls):
| """stuff"""
return cls.get_timestamp(cls.get_tomorrow())
@classmethod
def get_yesterday_timestamp(cls):
return cls.get_timestamp(cls.get_yesterday())
|
markshao/pagrant | pagrant/provisioners/health/http.py | Python | mit | 1,669 | 0.002996 | __author__ = ['Xiaobo']
import time
import httplib
from pagrant.exceptions import VirtualBootstrapError
from pagrant.provisioners import BaseProvisioner
CHECK_TIMEOUT = 60 * 5
class HttpCheckerPrivisioner(BaseProvisioner):
def __init__(self, machine, logger, provision_info, provider_info):
super(HttpCheckerPrivisioner, self).__init__(machine, logger, provision_info, provider_in | fo)
self.port = self.provision_info.get("port", None)
self.url = self.provision_info.get("url", None)
def do_provision(self):
self.check_health()
def check_health(self):
time.sleep(5)
start_time = time.time()
self.logger.start_progress("start to check the %s for application to be ready" % self.machine.machine_info['name'])
| while True:
self.logger.info("Wait for the application to be ready on the %s ..." % self.machine.machine_info['name'])
con = httplib.HTTPConnection(self.machine.host, self.port)
con.request("GET", self.url)
res = con.getresponse()
if res.status == 200 or res.status == 401:
self.logger.info("The url %s could be accessed normally on the %s" % (self.url, self.machine.machine_info['name']))
self.logger.end_progress()
break
else:
duration = time.time() - start_time
if duration > CHECK_TIMEOUT:
raise VirtualBootstrapError("The url %s could not be accessed normally on the %s" % (self.url, self.machine.machine_info['name']))
else:
time.sleep(5)
continue
|
dseuss/mpnum | tests/special_test.py | Python | bsd-3-clause | 6,072 | 0.001318 | # encoding: utf-8
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest as pt
from numpy.testing import (assert_almost_equal, assert_array_almost_equal)
import mpnum.factory as factory
import mpnum.mparray as mp
import mpnum.special as mpsp
from mpnum._testing import assert_mpa_identical
from mpnum.utils import truncated_svd
MP_INNER_PARAMETERS = [(10, 10, 5), (20, 2, 10)]
MP_SUMUP_PARAMETERS = [(6, 2, 5000, 10, 200), (10, 2, 5000, 5, 20)]
####### | #####################
# special.inner_prod_mps #
############################
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_inner_prod_ | mps(nr_sites, local_dim, rank, dtype, rgen):
mpa1 = factory.random_mpa(nr_sites, local_dim, 1, dtype=dtype,
randstate=rgen, normalized=True)
mpa2 = factory.random_mpa(nr_sites, local_dim, rank, dtype=dtype,
randstate=rgen, normalized=True)
res_slow = mp.inner(mpa1, mpa2)
res_fast = mpsp.inner_prod_mps(mpa1, mpa2)
assert_almost_equal(res_slow, res_fast)
try:
mpsp.inner_prod_mps(mpa2, mpa1)
except AssertionError:
pass
else:
if rank > 1:
raise AssertionError(
"inner_prod_mps should only accept r=1 in first argument")
mpa1 = factory.random_mpo(nr_sites, local_dim, 1)
try:
mpsp.inner_prod_mps(mpa1, mpa1)
except AssertionError:
pass
else:
raise AssertionError("inner_prod_mps should only accept ndims=1")
@pt.mark.benchmark(group="inner")
@pt.mark.parametrize('nr_sites, local_dim, rank', MP_INNER_PARAMETERS)
def test_inner_fast(nr_sites, local_dim, rank, benchmark, rgen):
mpa1 = factory.random_mpa(nr_sites, local_dim, 1, dtype=np.float_,
randstate=rgen, normalized=True)
mpa2 = factory.random_mpa(nr_sites, local_dim, rank, dtype=np.float_,
randstate=rgen, normalized=True)
benchmark(mpsp.inner_prod_mps, mpa1, mpa2)
@pt.mark.benchmark(group="inner")
@pt.mark.parametrize('nr_sites, local_dim, rank', MP_INNER_PARAMETERS)
def test_inner_slow(nr_sites, local_dim, rank, benchmark, rgen):
mpa1 = factory.random_mpa(nr_sites, local_dim, 1, dtype=np.float_,
randstate=rgen)
mpa2 = factory.random_mpa(nr_sites, local_dim, rank, dtype=np.float_,
randstate=rgen)
benchmark(mp.inner, mpa1, mpa2)
########################
# special.sumup_prod #
########################
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_sumup(nr_sites, local_dim, rank, rgen):
rank = rank if rank is not np.nan else 1
mpas = [factory.random_mpa(nr_sites, local_dim, 1, dtype=np.float_,
randstate=rgen)
for _ in range(10 * rank)]
weights = rgen.randn(len(mpas))
# parameters chosen such that only one round of compression occurs
summed_fast = mpsp.sumup(mpas, rank, weights=weights, svdfunc=truncated_svd)
# summed_slow = mp.sumup(mpa * w for mpa, w in zip(mpas, weights))
summed_slow = mp.sumup(mpas, weights=weights)
summed_slow.compress('svd', rank=rank, direction='right',
canonicalize=False)
assert_mpa_identical(summed_fast, summed_slow)
try:
mpsp.sumup(mpas, rank, weights=np.ones(rank))
except AssertionError:
pass
else:
raise AssertionError("sumup did not catch unbalanced arguments")
# @pt.mark.long
# @pt.mark.benchmark(group="sumup", max_time=10)
# @pt.mark.parametrize('nr_sites, local_dim, samples, target_r, max_r', MP_SUMUP_PARAMETERS)
# def test_sumup_fast(nr_sites, local_dim, samples, target_, max_, rgen, benchmark):
# mpas = [factory.random_mpa(nr_sites, local_dim, 1, dtype=np.float_, randstate=rgen)
# for _ in range(samples)]
# weights = rgen.randn(len(mpas))
# benchmark(mpsp.sumup, mpas, weights=weights, target_=target_,
# max_=max_)
# @pt.mark.long
# @pt.mark.benchmark(group="sumup", max_time=10)
# @pt.mark.parametrize('nr_sites, local_dim, samples, target_, _', MP_SUMUP_PARAMETERS)
# def test_sumup_slow(nr_, local_dim, samples, target_, _, rgen, benchmark):
# mpas = [factory.random_mpa(nr_sites, local_dim, 1, dtype=np.float_, randstate=rgen)
# for _ in range(samples)]
# weights = rgen.randn(len(mpas))
#
# benchmark(mpsp.sumup, mpas, weights=weights, target_bdim=target_bdim,
# max_bdim=max_bdim)
#
#
# @pt.mark.long
# @pt.mark.benchmark(group="sumup", max_time=10)
# @pt.mark.parametrize('nr_sites, local_dim, samples, target_bdim, _',
# MP_SUMUP_PARAMETERS)
# def test_sumup_slow(nr_, local_dim, samples, target_bdim, _, rgen, benchmark):
# mpas = [factory.random_mpa(nr_sites, local_dim, 1, dtype=np.float_,
# randstate=rgen)
# for _ in range(samples)]
# weights = rgen.randn(len(mpas))
#
# @benchmark
# def sumup_slow():
# summed = mp.sumup(mpa * w for w, mpa in zip(weights, mpas))
# summed.compress('svd', =target_)
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_local_add_sparse(nr_sites, local_dim, rank, dtype, rgen):
# Just get some random number of summands, these parameters arent used
# anyway later on
nr_summands = nr_sites if rank is np.nan else nr_sites * rank
summands = [factory.random_mpa(1, local_dim, 1, dtype=dtype,
randstate=rgen).lt[0]
for _ in range(nr_summands)]
sum_slow = mp._local_add(summands).reshape((nr_summands,
nr_summands * local_dim))
sum_fast = mpsp._local_add_sparse([s.ravel() for s in summands]).toarray() \
assert_array_almost_equal(sum_slow, sum_fast)
|
WatchPeopleCode/WatchPeopleCode | migrations/versions/44c3becf9745_.py | Python | mit | 884 | 0.00905 | """empty message
Revision ID: 44c3becf9745
Revises: 34d183116728
Create Date: 2015-02-01 17:18:35.172244
"""
# revision identifiers, used by Alembic.
revision = '44c3becf9745'
down_revision = '34d183116728'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('stream', sa.Column('is_c | ompleted', sa.Boolean(), nullable=True))
op.add_column('stream', sa.Column('is_live', sa.Boolean(), nullable=True))
op.add_column('stream', sa.Column('schelduled_start_time', sa.DateTime(), nullable=True))
### end Alembic co | mmands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('stream', 'schelduled_start_time')
op.drop_column('stream', 'is_live')
op.drop_column('stream', 'is_completed')
### end Alembic commands ###
|
nilgoyyou/dipy | dipy/reconst/tests/test_vec_val_vect.py | Python | bsd-3-clause | 1,302 | 0 | import numpy as np
from numpy.random import randn
from numpy.testing import assert_almost_equal, dec
from dipy.reconst.vec_val_sum import vec_val_vect
def make_vecs_vals(shape):
return randn(*(shape)), randn(*(shape[:-2] + shape[-1:]))
try:
np.einsum
except AttributeError:
with_einsum = dec.skipif(True, "Need einsum for benchmark")
else:
def with_einsum(f): return f
@with_einsum
def test_vec_val_vect():
for shape0 in ((10,), (100,), (10, 12), (12, 10, 5)):
for shape1 in ((3, 3), (4, 3), (3, 4)):
shape | = shape0 + s | hape1
evecs, evals = make_vecs_vals(shape)
res1 = np.einsum('...ij,...j,...kj->...ik', evecs, evals, evecs)
assert_almost_equal(res1, vec_val_vect(evecs, evals))
def dumb_sum(vecs, vals):
N, rows, cols = vecs.shape
res2 = np.zeros((N, rows, rows))
for i in range(N):
Q = vecs[i]
L = vals[i]
res2[i] = np.dot(Q, np.dot(np.diag(L), Q.T))
return res2
def test_vec_val_vect_dumber():
for shape0 in ((10,), (100,)):
for shape1 in ((3, 3), (4, 3), (3, 4)):
shape = shape0 + shape1
evecs, evals = make_vecs_vals(shape)
res1 = dumb_sum(evecs, evals)
assert_almost_equal(res1, vec_val_vect(evecs, evals))
|
Forage/Gramps | gramps/gen/filters/rules/media/_hassourcecount.py | Python | gpl-2.0 | 1,807 | 0.005534 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#--------------------------------------------- | ----------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().get | text
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hassourcecountbase import HasSourceCountBase
#-------------------------------------------------------------------------
# "People having sources"
#-------------------------------------------------------------------------
class HasSourceCount(HasSourceCountBase):
"""Media with sources"""
name = _('Media with <count> sources')
description = _("Matches media with a certain number of sources connected to it")
|
css-lucas/GAT | gat/core/sna/updateSNA.py | Python | mit | 35,903 | 0.003231 | import tempfile
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import xlrd
from networkx.algorithms import bipartite as bi
from networkx.algorithms import centrality
from itertools import product
from collections import defaultdict
import pandas as pd
import datetime
from gat.core.sna import propensities
from gat.core.sna import resilience
from gat.core.sna import cliques
from gat.core.sna import ergm
class SNA():
def __init__(self, excel_file, nodeSheet, attrSheet=None):
self.subAttrs = ["W", "SENT", "SZE", "AMT"]
self.header, self.list = self.readFile(excel_file, nodeSheet)
if attrSheet != None:
self.attrHeader, self.attrList = self.readFile(excel_file, attrSheet)
self.G = nx.DiGraph()
self.nodes = []
self.edges = []
self.nodeSet = []
self.clustering_dict = {}
self.latapy_clustering_dict = {}
self.closeness_centrality_dict = {}
self.betweenness_centrality_dict = {}
self.degree_centrality_dict = {}
self.eigenvector_centrality_dict = {}
self.katz_centraltiy_dict = {}
self.load_centrality_dict = {}
self.communicability_centrality_dict = {}
self.communicability_centrality_exp_dict = {}
self.node_attributes_dict = {}
self.nodeSet = []
self.attrSheet = attrSheet
# Read xlsx file and save the header and all the cells, each a dict with value and header label
# Input: xlsx file, sheet
def readFile(self, excel_file, sheet):
workbook = xlrd.open_workbook(excel_file)
sh = workbook.sheet_by_name(sheet)
header = [str(sh.cell(0, col).value).strip("\n") for col in range(sh.ncols)]
New_ncols = sh.ncols - 1
# If any, delete all the empty features in the header
while header[New_ncols] == '':
header.remove(header[New_ncols])
New_ncols -= 1
# a list of nodes
list = []
for row in range(1, sh.nrows):
tempList = []
for col in range(New_ncols + 1):
feature = str(sh.cell(0, col).value).strip("\n")
cell = sh.cell(row, col).value
if type(cell) == type(""):
val = cell.strip("\n")
else:
val = str(cell)
if val != "": # handle empty cells
# Make each node a dict with node name and node header, to assign later
tempList.append({'val': val, 'header': feature}) # need to define attributes later
list.append(tempList)
# remove repeated column titles
consolidatedHeader = []
for feature in header:
if (feature not in consolidatedHeader) and (feature not in self.subAttrs):
consolidatedHeader.append(feature)
return consolidatedHeader, list
# create set of nodes for multipartite graph
# name = names of the node. This is defined by the header. ex: Abbasi-Davani.F: Name or Abbasi-Davani.F: Faction leader
# nodeSet = names that define a set of node. For example, we can define Person, Faction Leader, and Party Leader as ".['agent']"
# note: len(name) = len(nodeSet), else code fails
def createNodeList(self, nodeSet):
for row in self.list:
for node in row:
if node['header'] in nodeSet and node['val'] != "":
# strip empty cells
self.G.add_node(node['val'], block=node['header'])
self.nodeSet = nodeSet
self.nodes = nx.nodes(self.G)
def loadOntology(self, source, classAssignments):
# Creating an edge list and setting its length for the conditional iterations:
b = self.attrList
y = len(b)
# Creating master edge list, and empty lists to fill from each ontology class
classLists = defaultdict(list) # creates a dictionary with default list values, no need to initialize - nifty!
edgeList = []
# iterating through ontology classes to add them to the network as nodes connected by weighted
# edge attributes to other ontological entities
for x in range(0, y):
for q in range(0, len(b[x])):
nodeHeader = b[x][q]['header']
nodeClass = classAssignments.get(nodeHeader)
if nodeHeader == source and b[x][q]['val'] is not None:
classLists['actor'].append(b[x][q]['val'])
if nodeClass == 'Belief' and b[x][q]['val'] is not None:
classLists['belief'].append(b[x][q]['val'])
if nodeClass == 'Symbols' and b[x][q]['val'] is not None:
classLists['symbol'].append(b[x][q]['val'])
if nodeClass == 'Resource' and b[x][q]['val'] is not None:
classLists['resource'].append(b[x][q]['val'])
if nodeClass == 'Agent' and b[x][q]['val'] is not None:
classLists['agent'].append(b[x][q]['val'])
if nodeClass == 'Organization' and b[x][q]['val'] is not None:
classLists['org'].append(b[x][q]['val'])
if nodeClass == 'Event' and b[x][q]['val'] is not None:
classLists['event'].append(b[x][q]['val'])
if nodeClass == 'Audience' and b[x][q]['val'] is not None:
classLists['aud'].append(b[x][q]['val'])
# removing duplicates from each list
# (this does not remove the effect that numerous connections to one node have on the network)
classLists = {key: set(val) for key, val in classLists.items()} # dict comprehension method
# adding ontological class to each node as node attribute
color_map = []
stringDict = {
'actor': 'Actor',
'belief': 'Belief',
'symbol': 'Symbol',
'resource': 'Resource',
'agent': 'Agent',
'org': 'Organization',
'aud': 'Audience',
'event': 'Event',
'role': 'Role',
'know': 'Knowledge',
'taskModel': 'Task Model',
'location': 'Location',
'title': 'Title',
'position': 'position',
}
for x in nx.nodes(self.G):
for key, entityList in classLists.items():
if x in entityList:
self.G.node[x]['ontClass'] = stringDict[key]
# Input: header list and list of attributes with header label from attribute sheet
# Output: updated list of nodes with attributes
def loadAttributes(self):
for row in self.attrList:
nodeID = row[0]['val']
for cell in row[1:]:
if cell['val'] != '':
if nodeID in self.nodes:
attrList = []
node = self.G.node[nodeID]
if cell['header'] in self.subAttrs: # handle subattributes, e.g. weight
prevCell = row[row.index(cell) - 1]
key = {}
while prevCell['header'] in self.subAttrs:
key[prevCell['header']] = prevCell['val']
prevCell = row[row.index(prevCell) - 1]
key[cell['header']] = cell['val']
for value in node[prevCell['header']]:
if prevCell['val'] in value:
| listFlag = True if type(value) is list else False
| attrList.append([value[0], key] if listFlag else [value, key]) # weighted attributes take the form [value, weight]
else:
attrList.append(value)
attrID = prevCell['header']
else: # if the attribute is not a subattribute
if cell['header'] in self.G.node[nodeID]:
attrList = (node[cell['header']])
att |
mitenjain/nanopore | nanopore/analyses/hmm.py | Python | mit | 4,792 | 0.01586 | import os
from nanopore.analyses.abstractAnalysis import AbstractAnalysis
from nanopore.analyses.utils import AlignedPair, getFastaDictionary, getFastqDictionary, samIterator
import xml.etree.cElementTree as ET
from jobTree.src.bioio import *
class Hmm(AbstractAnalysis):
"""Calculates stats on indels.
"""
def run(self):
#Call base method to do some logging
AbstractAnalysis.run(self)
#Hmm file
hmmFile = os.path.join(os.path.split(self.samFile)[0], "hmm.txt.xml")
if os.path.exists(hmmFile):
#Load the hmm
hmmsNode = ET.parse(hmmFile).getroot()
#Plot graphviz version of nanopore hmm, showing transitions and variances.
fH = open(os.path.join(self.outputDir, "hmm.dot"), 'w')
setupGraphFile(fH)
#Make states
addNodeToGraph("n0n", fH, "match")
addNodeToGraph("n1n", fH, "short delete")
addNodeToGraph("n2n", fH, "short insert")
addNodeToGraph("n3n", fH, "long insert")
addNodeToGraph("n4n", fH, "long delete")
#Make edges with labelled transition probs.
for transition in hmmsNode.findall("transition"):
if float(transition.attrib["avg"]) > 0.0:
addEdgeToGraph("n%sn" % transition.attrib["from"],
"n%sn" % transition.attrib["to"],
fH, dir="arrow", style='""',
label="%.3f,%.3f" % (float(transition.attrib["avg"]), float(transition.attrib["std"])))
#Finish up
finishGraphFile(fH)
fH.close()
#Plot match emission data
emissions = dict([ ((emission.attrib["x"], emission.attrib["y"]), emission.attrib["avg"]) \
for emission in hmmsNode.findall("emission") if emission.attrib["state"] == '0' ])
matchEmissionsFile = os.path.join(self.outputDir, "matchEmissions.tsv")
outf = open(matchEmissionsFile, "w")
bases = "ACGT"
outf.write("\t".join(bases) + "\n")
for base in bases:
outf.write("\t".join([ base] + map(lambda x : emission | s[(base, x)], bases)) + "\n")
outf.close()
system("Rscript nanopore/analyses/substitution_plot.R %s %s %s" % (matchEmissionsFile, os.path.join(self.outputDir, "substitution_plot.pdf"), "Per-Base Substitutions after HMM"))
#Plot indel info
| #Get the sequences to contrast the neutral model.
refSequences = getFastaDictionary(self.referenceFastaFile) #Hash of names to sequences
readSequences = getFastqDictionary(self.readFastqFile) #Hash of names to sequences
#Need to do plot of insert and deletion gap emissions
#Plot of insert and deletion gap emissions
insertEmissions = { "A":0.0, 'C':0.0, 'G':0.0, 'T':0.0 }
deleteEmissions = { "A":0.0, 'C':0.0, 'G':0.0, 'T':0.0 }
for emission in hmmsNode.findall("emission"):
if emission.attrib["state"] == '2':
insertEmissions[emission.attrib["x"]] += float(emission.attrib["avg"])
elif emission.attrib["state"] == '1':
deleteEmissions[emission.attrib["y"]] += float(emission.attrib["avg"])
#PLot insert and delete emissions
indelEmissionsFile = os.path.join(self.outputDir, "indelEmissions.tsv")
outf = open(indelEmissionsFile, "w")
outf.write("\t".join(bases) + "\n")
outf.write("\t".join(map(lambda x : str(insertEmissions[x]), bases)) + "\n")
outf.write("\t".join(map(lambda x : str(deleteEmissions[x]), bases)) + "\n")
outf.close()
###Here's where we do the plot..
system("Rscript nanopore/analyses/emissions_plot.R {} {}".format(indelEmissionsFile, os.path.join(self.outputDir, "indelEmissions_plot.pdf")))
#Plot convergence of likelihoods
outf = open(os.path.join(self.outputDir, "runninglikelihoods.tsv"), "w")
for hmmNode in hmmsNode.findall("hmm"): #This is a loop over trials
runningLikelihoods = map(float, hmmNode.attrib["runningLikelihoods"].split()) #This is a list of floats ordered from the first iteration to last.
outf.write("\t".join(map(str, runningLikelihoods))); outf.write("\n")
outf.close()
system("Rscript nanopore/analyses/running_likelihood.R {} {}".format(os.path.join(self.outputDir, "runninglikelihoods.tsv"), os.path.join(self.outputDir, "running_likelihood.pdf")))
self.finish() #Indicates the batch is done |
google/starthinker | starthinker/task/cm_to_dv/cm_placement_group.py | Python | apache-2.0 | 2,931 | 0.008188 | ###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from starthinker.util.bigquery import table_create
from starthinker.util.data import get_rows
from starthinker.util.data import put_rows
from starthinker.util.google_api import API_DCM
from starthinker.util.cm import get_profile_for_api
from starthinker.util.discovery_to_bigquery import Discovery_To_BigQuery
from starthinker.util.regexp import lookup_id
def cm_placement_group_clear(config, task):
table_create(
config,
task['auth_bigquery'],
config.project,
task['dataset'],
'CM_PlacementGroups',
Discovery_To_BigQuery(
'dfareporting',
'v3.4'
).method_schema(
'placementGroups.list',
iterate=True
)
)
def cm_placement_group_load(config, task):
# load multiple partners from user defined sheet
def load_multiple():
campaigns = [str(lookup_id(r)) for r in set(get_rows(
config,
task['auth_cm'],
{ 'sheets': {
'sheet': task['sheet'],
'tab': 'CM Campaigns',
'header':False,
'range': 'A2:A'
}},
unnest=True
))]
for row in get_rows(
config,
task['auth_sheets'],
{ 'sheets': {
'sheet': task['sheet'],
'tab': 'CM Accounts',
'header':False,
'range': 'A2:A'
}}
):
if row:
account_id = lookup_id(row[0])
is_superuser, profile_id = get_profile_for_api(config, task['auth_cm'], account_id)
kwargs = { 'profileId': profile_id, 'campaignIds':campaigns, 'archived':False }
if is_superuser:
kwargs['accountId'] = account_id
yield from API_DCM(
config,
task['auth_cm'],
| iterate=True,
internal=is_superuser
).placementGroups().list( **kwargs).execute()
cm_placement_group_clear(config, task)
# write placement_groups to database
put_rows(
config,
task['auth_bigquery'],
{ 'bigquery': {
'dataset': task['dataset'],
| 'table': 'CM_PlacementGroups',
'schema': Discovery_To_BigQuery(
'dfareporting',
'v3.4'
).method_schema(
'placementGroups.list',
iterate=True
),
'format':'JSON'
}},
load_multiple()
)
|
kmee/pySerasa | pyserasa/crednet.py | Python | agpl-3.0 | 7,131 | 0.002664 | # -*- coding: utf-8 -*-
from erros import BlocoInexistenteError
from blocosDados import pendenciasInternas
from blocosDados import pendenciasFinanceiras
from blocosDados import protestosEstados
from blocosDados import chequesSemFundos
class Crednet(object):
def __init__(self):
self.blocos = []
self.blocos.append(pendenciasInternas())
self.blocos.append(pen | denciasFinanceiras())
self.blocos.append(protestosEstados())
self.blocos.append(chequesSemFundos())
def __getattr__(self, name):
bloco = ([c for c in self.blocos if c.nome == name] or [None])[0]
if not bloco:
print BlocoInexistenteError().exibirErro(name)
else:
if bloco.nome == 'pendenciasInternas':
print bloco.nome_bloco + "\n"
for registro in bloco.blocos:
for campos | in registro.campos.campos:
print campos._nome,
print ": ",
print campos._valor
print " "
if bloco.nome == 'pendenciasFinanceiras':
print bloco.nome_bloco + "\n"
for registro in bloco.blocos:
for campos in registro.campos.campos:
print campos._nome,
print ": ",
print campos._valor
print " "
if bloco.nome == 'protestosEstados':
print bloco.nome_bloco + "\n"
for registro in bloco.blocos:
for campos in registro.campos.campos:
print campos._nome,
print ": ",
print campos._valor
print " "
if bloco.nome == 'chequesSemFundos':
print bloco.nome_bloco + "\n"
for registro in bloco.blocos:
for campos in registro.campos.campos:
print campos._nome,
print ": ",
print campos._valor
print " "
else:
return bloco
def get_string(self, bloco=None):
string_retorno = ""
if bloco is None:
for bloco in self.blocos:
if bloco.nome == 'pendenciasInternas':
string_retorno += "Pendencias Internas\n" + "------------" \
"-----------------------------------------------\n"
for registro in bloco.blocos:
for campos in registro.campos.campos:
string_retorno += campos._nome + ": " + str(
campos._valor) + "\n"
elif bloco.nome == 'pendenciasFinanceiras':
string_retorno += "Pendencias Financeiras\n" + "---------" \
"--------------------------------------------------\n"
for registro in bloco.blocos:
for campos in registro.campos.campos:
string_retorno += campos._nome + ": " + str(
campos._valor) + "\n"
elif bloco.nome == 'protestosEstados':
string_retorno += "Protestos dos Estados\n" + "---------" \
"--------------------------------------------------\n"
for registro in bloco.blocos:
for campos in registro.campos.campos:
string_retorno += campos._nome + ": " + str(
campos._valor) + "\n"
elif bloco.nome == 'chequesSemFundos':
string_retorno += "Cheques sem Fundos\n" + "-------------" \
"----------------------------------------------\n"
for registro in bloco.blocos:
for campos in registro.campos.campos:
string_retorno += campos._nome + ": " + str(
campos._valor) + "\n"
else:
string_retorno += bloco.nome_bloco + "-------------------" \
"----------------------------------------\n"
for campo in bloco.campos.campos:
string_retorno += campo._nome + ": " + str(
campo._valor) + "\n"
else:
if bloco.nome == 'pendenciasInternas':
string_retorno += "Pendencias Internas\n" + "------------" \
"-----------------------------------------------\n"
for registro in bloco.blocos:
for campos in registro.campos.campos:
string_retorno += campos._nome + ": " + str(
campos._valor) + "\n"
string_retorno += "\n"
elif bloco.nome == '\npendenciasFinanceiras':
string_retorno += "Pendencias Financeiras\n" + "---------" \
"--------------------------------------------------\n"
for registro in bloco.blocos:
for campos in registro.campos.campos:
string_retorno += campos._nome + ": " + str(
campos._valor) + "\n"
string_retorno += "\n"
elif bloco.nome == 'protestosEstados':
string_retorno += "\nProtestos dos Estados\n" + "--------" \
"---------------------------------------------------\n"
for registro in bloco.blocos:
for campos in registro.campos.campos:
string_retorno += campos._nome + ": " + str(
campos._valor) + "\n"
string_retorno += "\n"
elif bloco.nome == 'chequesSemFundos':
string_retorno += "\nCheques sem Fundos\n" + "-----------" \
"------------------------------------------------\n"
for registro in bloco.blocos:
for campos in registro.campos.campos:
string_retorno += campos._nome + ": " + str(
campos._valor) + "\n"
string_retorno += "\n"
elif bloco is not None:
string_retorno += "\n" + bloco.nome_bloco + "\n----------" \
"-------------------------------------------------\n"
for campo in bloco.campos.campos:
string_retorno += campo._nome + ": " + str(
campo._valor) + "\n"
string_retorno += "\n"
return string_retorno
def get_bloco_de_registros(self, nome):
for bloco in self.blocos:
if bloco.nome == nome:
return bloco
return None
|
gdbdzgd/aptly | system/t04_mirror/update.py | Python | mit | 5,713 | 0.002626 | import string
import re
from lib import BaseTest
class UpdateMirror1Test(BaseTest):
"""
update mirrors: regular update
"""
longTest = False
fixtureCmds = [
"aptly -architectures=i386,amd64 mirror create --ignore-signatures varnish http://repo.varnish-cache.org/debian/ wheezy varnish-3.0",
]
runCmd = "aptly mirror update --ignore-signatures varnish"
def output_processor(self, output):
return "\n".join(sorted(output.split("\n")))
class UpdateMirror2Test(BaseTest):
"""
update mirrors: no such repo
"""
runCmd = "aptly mirror update mirror-xyz"
expectedCode = 1
class UpdateMirror3Test(BaseTest):
"""
update mirrors: wrong checksum in release file
"""
fixtureCmds = [
"aptly mirror create --ignore-signatures failure ${url} hardy main",
]
fixtureWebServer = "test_release"
runCmd = "aptly mirror update --ignore-signatures failure"
expectedCode = 1
def gold_processor(self, gold):
return string.Template(gold).substitute({'url': self.webServerUrl})
class UpdateMirror4Test(BaseTest):
"""
update mirrors: wrong checksum in release file, but ignore
"""
fixtureCmds = [
"aptly mirror create --ignore-signatures failure ${url} hardy main",
]
fixtureWebServer = "test_release"
runCmd = "aptly mirror update -ignore-checksums --ignore-signatures failure"
expectedCode = 1
def gold_processor(self, gold):
return string.Template(gold).substitute({'url': self.webServerUrl})
class UpdateMirror5Test(BaseTest):
"""
update mirrors: wrong checksum in package
"""
fixtureCmds = [
"aptly mirror create --ignore-signatures failure ${url} hardy main",
]
fixtureWebServer = "test_release2"
runCmd = "aptly mirror update --ignore-signatures failure"
expectedCode = 1
def gold_processor(self, gold):
return string.Template(gold).substitute({'url': self.webServerUrl})
class UpdateMirr | or6Test(BaseTest):
"""
update mirrors: wrong checksum in package, but ignore
"""
fixtureCmds = [
"aptly mirror create --ignore-signatures failure ${url} hardy main",
]
fixtureWebServer = "test_release2"
runCmd = "aptly mirror update -ignore-checksums --ignore-signatures failure"
def gold_processor(self, gold):
return string.Template(gold).substitute({'url': self.webServerUrl})
class UpdateMirror7Test(BaseTest):
"""
update mirrors: flat repository
| """
fixtureGpg = True
fixtureCmds = [
"aptly mirror create --keyring=aptlytest.gpg flat http://download.opensuse.org/repositories/Apache:/MirrorBrain/Debian_7.0/ ./",
]
runCmd = "aptly mirror update --keyring=aptlytest.gpg flat"
outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using', '', s)
def output_processor(self, output):
return "\n".join(sorted(output.split("\n")))
class UpdateMirror8Test(BaseTest):
"""
update mirrors: with sources (already in pool)
"""
fixtureGpg = True
fixturePool = True
fixtureCmds = [
"aptly mirror create --keyring=aptlytest.gpg gnuplot-maverick-src http://ppa.launchpad.net/gladky-anton/gnuplot/ubuntu/ maverick",
]
runCmd = "aptly mirror update --keyring=aptlytest.gpg gnuplot-maverick-src"
outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using', '', s)
class UpdateMirror9Test(BaseTest):
"""
update mirrors: flat repository + sources
"""
fixtureGpg = True
fixtureCmds = [
"aptly mirror create --keyring=aptlytest.gpg -with-sources flat-src http://download.opensuse.org/repositories/Apache:/MirrorBrain/Debian_7.0/ ./",
]
runCmd = "aptly mirror update --keyring=aptlytest.gpg flat-src"
outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using', '', s)
def output_processor(self, output):
return "\n".join(sorted(output.split("\n")))
class UpdateMirror10Test(BaseTest):
"""
update mirrors: filtered
"""
fixtureGpg = True
fixtureCmds = [
"aptly mirror create -keyring=aptlytest.gpg -with-sources -filter='!(Name (% libapache2-*)), !($$PackageType (source))' flat-src http://download.opensuse.org/repositories/Apache:/MirrorBrain/Debian_7.0/ ./",
]
runCmd = "aptly mirror update --keyring=aptlytest.gpg flat-src"
outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using', '', s)
def output_processor(self, output):
return "\n".join(sorted(output.split("\n")))
class UpdateMirror11Test(BaseTest):
"""
update mirrors: update over FTP
"""
longTest = False
fixtureGpg = True
fixtureCmds = [
"aptly mirror create -keyring=aptlytest.gpg -filter='Priority (required), Name (% s*)' -architectures=i386 wheezy-main ftp://ftp.ru.debian.org/debian/ wheezy main",
]
outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using', '', s)
runCmd = "aptly mirror update -keyring=aptlytest.gpg wheezy-main"
def output_processor(self, output):
return "\n".join(sorted(output.split("\n")))
class UpdateMirror12Test(BaseTest):
"""
update mirrors: update with udebs
"""
longTest = False
fixtureGpg = True
fixtureCmds = [
"aptly -architectures=i386,amd64 mirror create -keyring=aptlytest.gpg -filter='$$Source (dmraid)' -with-udebs squeeze http://mirror.yandex.ru/debian/ squeeze main non-free",
]
runCmd = "aptly mirror update -keyring=aptlytest.gpg squeeze"
outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using', '', s)
def output_processor(self, output):
return "\n".join(sorted(output.split("\n")))
|
tuomas2/automate-rpc | setup.py | Python | gpl-3.0 | 1,901 | 0.002104 | #!/usr/bin/env python
from setuptools import setup, find_packages
def get_version(filename):
import re
with open(filename) as fh:
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", fh.read()))
return metadata['version']
setupopts = dict(
name="automate-rpc",
version=get_version('automate_rpc/__init__.py'),
packages=find_packages(),
install_requires=[
"automate>=0.9.2,<0.10",
"automate-wsgi>=0.9.2,<0.10 ",
"wsgi-xmlrpc==0.2.8"],
author="Tuomas Airaksinen",
author_email="tuomas.airaksinen@gmail.com",
description="Remote Procedure Call Support for Automate",
long_description=open('README.rst').read(),
download_url='https://pypi.python.org/pypi/automate-rpc',
platforms = ['any'],
license="GPL",
keywords="automation, GPIO, Raspberry Pi, RPIO, trait | s",
url="http://github.com/tuomas2/automate-rpc",
entry_points={'automate.extension': [
'rpc = automate_rpc:extension_classes'
]},
classifiers=["Development Status :: 4 - Beta",
"Environment :: Console",
"Environment :: Web Environment",
"Intended Audience :: Education",
"Intended Audience :: | Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.7",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries"]
)
if __name__ == "__main__":
setup(**setupopts)
|
geberl/droppy-workspace | Tasks/Image.RenameByExif/test_task.py | Python | mit | 1,551 | 0.001289 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import py
import os
import shutil
import task
files_dir = py.path.local(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'Test', 'files'))
def test_input_empty(tmpdir):
input_dir = tmpdir.join('0')
os.makedirs('%s' % input_dir)
output_dir = tmpdir.join('1')
os.makedirs('%s' % output_dir)
t = task.Task(input_dir='%s' % input_dir,
output_dir='%s' % output_dir)
assert i | sinstance(t, object)
def test_input_f | ile(tmpdir):
input_dir = tmpdir.join('0')
os.makedirs('%s' % input_dir)
shutil.copyfile('%s' % files_dir.join('IMG_1248.JPG'),
'%s' % input_dir.join('IMG_1248.JPG'))
output_dir = tmpdir.join('1')
os.makedirs('%s' % output_dir)
t = task.Task(input_dir='%s' % input_dir,
output_dir='%s' % output_dir)
assert output_dir.join('20170914_102630.jpg').check() is True
def test_input_folder(tmpdir):
input_dir = tmpdir.join('0')
os.makedirs('%s' % input_dir)
input_sub_folder = input_dir.join('some subdir')
os.makedirs('%s' % input_sub_folder)
shutil.copyfile('%s' % files_dir.join('IMG_1248.JPG'),
'%s' % input_sub_folder.join('IMG_1248.JPG'))
output_dir = tmpdir.join('1')
os.makedirs('%s' % output_dir)
t = task.Task(input_dir='%s' % input_dir,
output_dir='%s' % output_dir)
assert output_dir.join('some subdir', '20170914_102630.jpg').check() is True
|
LosFuzzys/CTFd | CTFd/auth.py | Python | apache-2.0 | 20,121 | 0.001342 | import base64
import requests
from flask import Blueprint
from flask import current_app as app
from flask import redirect, render_template, request, session, url_for
from itsdangerous.exc import BadSignature, BadTimeSignature, SignatureExpired
from CTFd.cache import clear_team_session, clear_user_session
from CTFd.models import Teams, UserFieldEntries, UserFields, Users, db
from CTFd.utils import config, email, get_app_config, get_config
from CTFd.utils import user as current_user
from CTFd.utils import validators
from CTFd.utils.config import is_teams_mode
from CTFd.utils.config.integrations import mlc_registration
from CTFd.utils.config.visibility import registration_visible
from CTFd.utils.crypto import verify_password
from CTFd.utils.decorators import ratelimit
from CTFd.utils.decorators.visibility import check_registration_visibility
from CTFd.utils.helpers import error_for, get_errors, markup
from CTFd.utils.logging import log
from CTFd.utils.modes import TEAMS_MODE
from CTFd.utils.security.auth import login_user, logout_user
from CTFd.utils.security.signing import unserialize
from CTFd.utils.validators import ValidationError
auth = Blueprint("auth", __name__)
@auth.route("/confirm", methods=["POST", "GET"])
@auth.route("/confirm/<data>", methods=["POST", "GET"])
@ratelimit(method="POST", limit=10, interval=60)
def confirm(data=None):
if not get_config("verify_emails"):
# If the CTF doesn't care about confirming email addresses then redierct to challenges
return redirect(url_for("challenges.listing"))
# User is confirming email account
if data and request.method == "GET":
try:
user_email = unserialize(data, max_age=1800)
except (BadTimeSignature, SignatureExpired):
return render_template(
"confirm.html", errors=["Your confirmation link has expired"]
)
except (BadSignature, TypeError, base64.binascii.Error):
return render_template(
"confirm.html", errors=["Your confirmation token is invalid"]
)
user = Users.query.filter_by(email=user_email).first_or_404()
if user.verified:
return redirect(url_for("views.settings"))
user.verified = True
log(
"registrations",
format="[{date}] {ip} - successful confirmation for {name}",
name=user.name,
)
db.session.commit()
clear_user_session(user_id=user.id)
email.successful_registration_notification(user.email)
db.session.close()
if current_user.authed():
return redirect(url_for("challenges.listing"))
return redirect(url_for("auth.login"))
# User is trying to start or restart the confirmation flow
if current_user.authed() is False:
return redirect(url_for("auth.login"))
user = Users.query.filter_by(id=session["id"]).first_or_404()
if user.verified:
return redirect(url_for("views.settings"))
if data is None:
if request.method == "POST":
# User wants to resend their confirmation email
email.verify_email_address(user.email)
log(
"registrations",
format="[{date}] {ip} - {name} initiated a confirmation email resend",
)
return render_template(
"confirm.html", infos=[f"Confirmation email sent to {user.email}!"]
)
elif request.method == "GET":
# User has been directed to the confirm page
return render_template("confirm.html")
@auth.route("/reset_password", methods=["POST", "GET"])
@auth.route("/reset_password/<data>", methods=["POST", "GET"])
@ratelimit(method="POST", limit=10, interval=60)
def reset_password(data=None):
if config.can_send_mail() is False:
return render_template(
"reset_password.html",
errors=[
markup(
"This CTF is not configured to send email.<br> Please contact an organizer to have your password reset."
)
],
)
if data is not None:
try:
email_address = unserialize(data, max_age=1800)
except (BadTimeSignature, SignatureExpired):
return render_template(
"reset_password.html", errors=["Your link has expired"]
)
except (BadSignature, TypeError, base64.binascii.Error):
return render_template(
"reset_password.html", errors=["Your reset token is invalid"]
)
if request.method == "GET":
return render_template("reset_password.html", mode="set")
if request.method == "POST":
password = request.form.get("password", "").strip()
user = Users.query.filter_by(email=email_address).first_or_404()
if user.oauth_id:
return render_template(
"reset_password.html",
infos=[
"Your account was registered via an authentication provider and does not have an associated password. Please login via your authentication provider."
],
)
pass_short = len(password) == 0
if pass_short:
return render_template(
"reset_password.html", errors=["Please pick a longer password"]
)
user.password = password
db.session.commit()
clear_user_session(user_id=user.id)
log(
"logins",
format="[{date}] {ip} - successful password reset for {name}",
name=user.name,
)
db.session.close()
email.password_change_alert(user.email)
return redirect(url_for("auth.login"))
if request.method == "POST":
email_address = request.form["email"].strip()
user = Users.query.filter_by(email=email_address).first()
get_errors()
if not user:
return render_template(
"reset_password.html",
infos=[
"If that account exists you will receive an email, please check your inbox"
],
)
if user.oauth_id:
return render_template(
"reset_password.html",
infos=[
"The email address associated with this account was registered via an authentication provider and does not have an associated password. Please login via your authentication provider."
],
)
email.forgot_password(email_address)
return render_template(
"reset_password.html",
infos=[
"If that account exists you will receive an email, please check your inbox"
],
)
return render_template("reset_password.html")
@auth.route("/register", methods=["POST", "GET"])
@check_registration_visibility
@ratelimit(method="POST", limit=10, interval=5)
def register():
errors = get_errors()
if request.method == "POST":
name = request.form.get("name", "").strip()
email_address = request.form.get("email", "").strip().lower()
password = request.form.get("password", "").s | trip()
website = request.form.get("website")
affiliation = request.form.get("affiliation")
country = request.form.get("country")
name_len = len(name) == 0
names = Users.query.add_columns("name", "id").filter_by(name=name).first()
emails = (
Users.query.add_columns("email", "id")
.filter_by(email=email_address)
| .first()
)
pass_short = len(password) == 0
pass_long = len(password) > 128
valid_email = validators.validate_email(email_address)
team_name_email_check = validators.validate_email(name)
# Process additional user fields
fields = {}
for field in UserFields.query.all():
fields[field.id] = field
entries = {}
for field_id, field in fields.items( |
CvvT/crawler_sqlmap | crawler/util/__init__.py | Python | apache-2.0 | 120 | 0 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = | 'CwT'
from .global_state import State
Global = S | tate()
|
deKupini/erp | addons/payment/models/payment_acquirer.py | Python | agpl-3.0 | 25,888 | 0.003515 | # -*- coding: utf-'8' "-*-"
import logging
from openerp.osv import osv, fields
from openerp.tools import float_round, float_repr
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
def _partner_format_address(address1=False, address2=False):
return ' '.join((address1 or '', address2 or '')).strip()
def _partner_split_name(partner_name):
return [' '.join(partner_name.split()[-1:]), ' '.join(partner_name.split()[:-1])]
class ValidationError(ValueError):
""" Used for value error when validating transaction data coming from acquirers. """
pass
class PaymentAcquirer(osv.Model):
    """ Acquirer Model. Each specific acquirer can extend the model by adding
    its own fields, using the acquirer_name as a prefix for the new fields.
    Using the required_if_provider='<name>' attribute on fields it is possible
    to have required fields that depend on a specific acquirer.
    Each acquirer has a link to an ir.ui.view record that is a template of
    a button used to display the payment form. See examples in ``payment_ogone``
    and ``payment_paypal`` modules.
    Methods that should be added in an acquirer-specific implementation:
    - ``<name>_form_generate_values(self, cr, uid, id, reference, amount, currency,
      partner_id=False, partner_values=None, tx_custom_values=None, context=None)``:
      method that generates the values used to render the form button template.
    - ``<name>_get_form_action_url(self, cr, uid, id, context=None):``: method
      that returns the url of the button form. It is used for example in
      ecommerce application, if you want to post some data to the acquirer.
    - ``<name>_compute_fees(self, cr, uid, id, amount, currency_id, country_id,
      context=None)``: computed the fees of the acquirer, using generic fields
      defined on the acquirer model (see fields definition).
    Each acquirer should also define controllers to handle communication between
    OpenERP and the acquirer. It generally consists in return urls given to the
    button form and that the acquirer uses to send the customer back after the
    transaction, with transaction details given as a POST request.
    """
    _name = 'payment.acquirer'
    _description = 'Payment Acquirer'
    _order = 'sequence'  # list acquirers by their 'sequence' column

    def _get_providers(self, cr, uid, context=None):
        # Base implementation: empty provider list. Acquirer modules
        # (payment_ogone, payment_paypal, ...) extend this to register theirs.
        return []

    # indirection to ease inheritance
    _provider_selection = lambda self, *args, **kwargs: self._get_providers(*args, **kwargs)
_columns = {
'name': fields.char('Name', required=True),
'provider': fields.selection(_provider_selection, string='Prov | ider', required=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
| 'pre_msg': fields.html('Message', translate=True,
help='Message displayed to explain and help the payment process.'),
'post_msg': fields.html('Thanks Message', help='Message displayed after having done the payment process.'),
'validation': fields.selection(
[('manual', 'Manual'), ('automatic', 'Automatic')],
string='Process Method',
help='Static payments are payments like transfer, that require manual steps.'),
'view_template_id': fields.many2one('ir.ui.view', 'Form Button Template', required=True),
'environment': fields.selection(
[('test', 'Test'), ('prod', 'Production')],
string='Environment', oldname='env'),
'website_published': fields.boolean(
'Visible in Portal / Website', copy=False,
help="Make this payment acquirer available (Customer invoices, etc.)"),
'auto_confirm': fields.selection(
[('none', 'No automatic confirmation'),
('at_pay_confirm', 'At payment confirmation'),
('at_pay_now', 'At payment')],
string='Order Confirmation', required=True),
# Fees
'fees_active': fields.boolean('Compute fees'),
'fees_dom_fixed': fields.float('Fixed domestic fees'),
'fees_dom_var': fields.float('Variable domestic fees (in percents)'),
'fees_int_fixed': fields.float('Fixed international fees'),
'fees_int_var': fields.float('Variable international fees (in percents)'),
'sequence': fields.integer('Sequence', help="Determine the display order"),
}
_defaults = {
'company_id': lambda self, cr, uid, obj, ctx=None: self.pool['res.users'].browse(cr, uid, uid).company_id.id,
'environment': 'test',
'validation': 'automatic',
'website_published': True,
'auto_confirm': 'at_pay_confirm',
}
    def _check_required_if_provider(self, cr, uid, ids, context=None):
        """ If the field has 'required_if_provider="<provider>"' attribute, then it
        required if record.provider is <provider>. """
        for acquirer in self.browse(cr, uid, ids, context=context):
            # A record fails the constraint when any field is tagged for this
            # acquirer's provider but holds no value on the record.
            if any(getattr(f, 'required_if_provider', None) == acquirer.provider and not acquirer[k] for k, f in self._fields.items()):
                return False
        return True

    # Model-level constraint: (checker, error message, fields shown in the error).
    _constraints = [
        (_check_required_if_provider, 'Required fields not filled', ['required for this provider']),
    ]
def get_form_action_url(self, cr, uid, id, context=None):
""" Returns the form action URL, for form-based acquirer implementations. """
acquirer = self.browse(cr, uid, id, context=context)
if hasattr(self, '%s_get_form_action_url' % acquirer.provider):
return getattr(self, '%s_get_form_action_url' % acquirer.provider)(cr, uid, id, context=context)
return False
def form_preprocess_values(self, cr, uid, id, reference, amount, currency_id, tx_id, partner_id, partner_values, tx_values, context=None):
""" Pre process values before giving them to the acquirer-specific render
methods. Those methods will receive:
- partner_values: will contain name, lang, email, zip, address, city,
country_id (int or False), country (browse or False), phone, reference
- tx_values: will contain reference, amount, currency_id (int or False),
currency (browse or False), partner (browse or False)
"""
acquirer = self.browse(cr, uid, id, context=context)
if tx_id:
tx = self.pool.get('payment.transaction').browse(cr, uid, tx_id, context=context)
tx_data = {
'reference': tx.reference,
'amount': tx.amount,
'currency_id': tx.currency_id.id,
'currency': tx.currency_id,
'partner': tx.partner_id,
}
partner_data = {
'name': tx.partner_name,
'lang': tx.partner_lang,
'email': tx.partner_email,
'zip': tx.partner_zip,
'address': tx.partner_address,
'city': tx.partner_city,
'country_id': tx.partner_country_id.id,
'country': tx.partner_country_id,
'phone': tx.partner_phone,
'reference': tx.partner_reference,
'state': None,
}
else:
if partner_id:
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context)
partner_data = {
'name': partner.name,
'lang': partner.lang,
'email': partner.email,
'zip': partner.zip,
'city': partner.city,
'address': _partner_format_address(partner.street, partner.street2),
'country_id': partner.country_id.id,
'country': partner.country_id,
'phone': partner.phone,
'state': partner.state_id,
}
else:
partner, partner_data = False, {}
partner_data.update(partner_values)
if currency_id:
currency = self.pool['res.currency'].browse(cr, uid, currency_id, context=context)
els |
sunlightlabs/sarahs_inbox | mail_dedupe/views.py | Python | bsd-3-clause | 2,307 | 0.01257 | from settings import *
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.core.paginator import Paginator
from django.http import HttpResponse, HttpResponseRedirect
from urllib import unquote
from mail.models impor | t *
from django.core.urlresolvers import reverse
from django.core.cache import cache
import re
import jellyfish
from mail.management.commands.mail_combine_people import Command as CombineCommand
def index(request):
if not DEBUG:
return
DEFAULT_DISTANCE = 0
person_into = request.GET.get('into', False)
victims = map(lambda x: int(x), request.GET.getlist('combine'))
| if person_into is not False:
victims.remove(int(person_into))
args_array = [person_into] + victims
# call_command('mail_combine_people', *args_array)
combcomm = CombineCommand()
print person_into, victims
result = combcomm.merge(person_into, victims, noprint=True)
people = []
for p in Person.objects.filter(merged_into=None).order_by('name_hash'):
people.append({'obj': p, 'dist': DEFAULT_DISTANCE})
target_person = None
target_id = request.GET.get('id', False)
if target_id is not False:
target_person = Person.objects.get(id=target_id)
if target_person:
for (i,p) in enumerate(people):
people[i]['dist'] = jellyfish.jaro_distance(target_person.name_hash, p['obj'].name_hash)
people.sort(key=lambda x: x['dist'], reverse=True)
total = len(people)
template_vars = {
'people': people,
'total': total
}
return render_to_response('dedupe.html', template_vars, context_instance=RequestContext(request))
def emails(request):
    """Render every e-mail a person (given by ?id=) created, received or was CC'd on."""
    person = Person.objects.get(id=request.GET.get('id'))
    template_vars = {
        'from_emails': Email.objects.filter(creator=person),
        'to_emails': Email.objects.filter(to=person),
        'cc_emails': Email.objects.filter(cc=person),
    }
    return render_to_response('dedupe_emails.html', template_vars, context_instance=RequestContext(request))
|
drJfunk/gbmgeometry | gbmgeometry/gbm.py | Python | mit | 10,575 | 0.00104 | from collections import OrderedDict
import astropy.coordinates as coord
import astropy.units as u
import matplotlib.pyplot as plt
#import mpl_toolkits.basemap as bm
import numpy as np
import spherical_geometry.polygon as sp
from astropy.table import Table
import astropy.time as time
from .gbm_detector import BGO0, BGO1
from .gbm_detector import NaI0, NaI1, NaI2, NaI3, NaI4, NaI5
from .gbm_detector import NaI6, NaI7, NaI8, NaI9, NaIA, NaIB
from .gbm_frame import GBMFrame
from gbmgeometry.utils.gbm_time import GBMTime
import seaborn as sns
# 12 evenly spaced values in [0, 1] (presumably one colour per NaI
# detector -- TODO confirm; the GBM carries 12 NaIs and 2 BGOs).
_det_color_cycle = np.linspace(0, 1, 12)
class GBM(object):
def __init__(self, quaternion, sc_pos=None, gbm_time=None):
"""
Parameters
----------
quaternion : Fermi GBM quarternion array
"""
if gbm_time is not None:
if isinstance(gbm_time, str):
self._gbm_time = GBMTime.from_UTC_fits(gbm_time)
else:
# assuming MET
self._gbm_time = GBMTime.from_MET(gbm_time)
else:
self._gbm_time = None
if self._gbm_time is not None:
self.n0 = NaI0(quaternion, sc_pos, self._gbm_time.time)
self.n1 = NaI1(quaternion, sc_pos, self._gbm_time.time)
self.n2 = NaI2(quaternion, sc_pos, self._gbm_time.time)
self.n3 = NaI3(quaternion, sc_pos, self._gbm_time.time)
self.n4 = NaI4(quaternion, sc_pos, self._gbm_time.time)
self.n5 = NaI5(quaternion, sc_pos, self._gbm_time.time)
self.n6 = NaI6(quaternion, sc_pos, self._gbm_time.time)
self.n7 = NaI7(quaternion, sc_pos, self._gbm_time.time)
self.n8 = NaI8(quaternion, sc_pos, self._gbm_time.time)
self.n9 = NaI9(quaternion, sc_pos, self._gbm_time.time)
self.na = NaIA(quaternion, sc_pos, self._gbm_time.time)
self.nb = NaIB(quaternion, sc_pos, self._gbm_time.time)
self.b0 = BGO0(quaternion, sc_pos, self._gbm_time.time)
self.b1 = BGO1(quaternion, sc_pos, self._gbm_time.time)
else:
self.n0 = NaI0(quaternion, sc_pos, None)
self.n1 = NaI1(quaternion, sc_pos, None)
self.n2 = NaI2(quaternion, sc_pos, None)
self.n3 = NaI3(quaternion, sc_pos, None)
self.n4 = NaI4(quaternion, sc_pos, None)
self.n5 = NaI5(quaternion, sc_pos, None)
self.n6 = NaI6(quaternion, sc_pos, None)
self.n7 = NaI7(quaternion, sc_pos, None)
self.n8 = NaI8(quaternion, sc_pos, None)
self.n9 = NaI9(quaternion, sc_pos, None)
self.na = NaIA(quaternion, sc_pos, None)
self.nb = NaIB(quaternion, sc_pos, None)
self.b0 = BGO0(quaternion, sc_pos, None)
self.b1 = BGO1(quaternion, sc_pos, None)
self._detectors = OrderedDict(n0=self.n0,
n1=self.n1,
n2=self.n2,
n3=self.n3,
n4=self.n4,
n5=self.n5,
n6=self.n6,
n7=self.n7,
n8=self.n8,
n9=self.n9,
na=self.na,
nb=self.nb,
b0=self.b0,
b1=self.b1)
self._quaternion = quaternion
self._sc_pos = sc_pos
def set_quaternion(self, quaternion):
"""
Parameters
----------
quaternion
"""
for key in self._detectors.keys():
self._detectors[key].set_quaternion(quaternion)
self._quaternion = quaternion
def set_sc_pos(self, sc_pos):
"""
Parameters
----------
sc_pos
"""
for key in self._detectors.keys():
self._detectors[key].set_sc_pos(sc_pos)
self._sc_pos = sc_pos
def get_good_detectors(self, point, fov):
"""
Returns a list of detectors containing the point in the FOV
Parameters
----------
point
fov
Returns
-------
"""
good_detectors = self._contains_point(point, fov)
return good_detectors
def get_fov(self, radius, fermi_frame=False):
"""
Parameters
----------
fermi_frame
radius
"""
polys = []
for key in self._detectors.keys():
if key[0] == 'b':
this_rad = 90
else:
this_rad = radius
polys.append(self._detectors[key].get_fov(this_rad, fermi_frame))
polys = np.array(polys)
return polys
def get_good_fov(self, point, radius, fermi_frame=False):
"""
Returns the detectors that contain the given point
for the given angular radius
Parameters
----------
point
radius
"""
good_detectors = self._contains_point(point, radius)
polys = []
for key in good_detectors:
polys.append(self._detectors[key].get_fov(radius, fermi_frame))
return [polys, good_detectors]
def get_sun_angle(self, keys=None):
"""
Returns
-------
"""
angles = []
if keys is None:
for key in self._detectors.keys():
angles.append(self._detectors[key].sun_angle)
else:
for key in keys:
angles.append(self._detectors[key].sun_angle)
return angles
def get_centers(self, keys=None):
"""
Returns
-------
"""
centers = []
if keys is None:
for key in self._detectors.keys():
centers.append(self._detectors[key].get_center())
else:
for key in keys:
centers.append(self._detectors[key].get_center())
return centers
def get_separation(self, source):
"""
Get the andular separation of the detectors from a point
Parameters
----------
source
Returns
-------
"""
tab = Table(names=["Detector", "Separation"], dtype=["|S2", np.float64])
for key in self._detectors.keys():
sep = self._detectors[key].get_center().separation(source)
tab.add_row([key, sep])
tab['Separation'].unit = u.degree
tab.sort("Separation")
return tab
def get_earth_points(self, fermi_frame=False):
"""
Returns
-------
"""
if self._sc_pos is not None:
self._calc_earth_points(fermi_frame)
return self._earth_points
else:
print("No spacecraft position set")
def _calc_earth_points(self, fermi_frame):
xyz_position = coord.SkyCoord(x=self._sc_pos[0],
y=self._sc_pos[1],
z=self._sc_pos[2],
frame='icrs',
representation='cartesian')
earth_radius = 6371. * u.km
fermi_radius = np.sqrt((self._sc_pos ** 2).sum())
horizon_angle = 90 - np.rad2deg(np.arccos((earth_radius / fermi_radius).to(u.dimensionless_unscaled)).value)
horizon_angle = (180 - horizon_angle) | * u.degree
num_points = 300
ra_grid_tmp = np.linspace(0, 360, num_points)
dec_range = [-90, 90]
cosdec_min = np.cos(np.deg2rad(90.0 + dec_range[0]))
cosdec_max = np.cos(np.deg2rad(90.0 + dec_range[1]))
v = np.linspace(cosdec_min, cosdec_max, nu | m_points)
v = np.arccos(v)
v = np.rad2deg(v)
v -= 90.
dec_grid_tmp = v
ra_grid = np.zeros(num_points ** 2)
dec_grid = np.zeros(num_points ** 2)
itr = 0
for ra in ra_grid_tmp:
for |
google-research/pyreach | pyreach/gyms/oracle_element.py | Python | apache-2.0 | 1,448 | 0.004144 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reach oracle element used for configuration."""
import dataclasses
from pyreach.gyms import reach_element
@dataclasses.dataclass(frozen=True)
class ReachOracle(reach_element.ReachElement):
  """A Reach Oracle configuration class.

  Attributes:
    reach_name: The name of the Oracle.
    task_code: The task code string.
    intent: The intention of the task. This argument is optional and defaults
      to an empty string.
    success_type: The type of success. This argument is optional and defaults
      to an empty string.
    is_synchronous: If True, the next Gym observation will synchronize all
      observation elements that have this flag set otherwise the next
      observation is asynchronous. This argument is optional and defaults to
      False.
  """
  # NOTE: the extracted source had 'task_code' split by a stray ' | ' marker;
  # restored per the attribute documentation above.
  task_code: str
  intent: str = ""
  success_type: str = ""
  is_synchronous: bool = False
|
msabramo/PyHamcrest | src/hamcrest/library/text/stringcontains.py | Python | bsd-3-clause | 953 | 0 | from hamcrest.library.text.substringmatcher import SubstringMatcher
from hamcrest.core.helpers.ha | smethod import hasmethod
__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
class StringContains(SubstringMatcher):
    """Matcher that passes when the evaluated string contains the substring."""

    def __init__(self, substring):
        super(StringContains, self).__init__(substring)

    def _matches(self, item):
        # Only string-like objects expose a usable ``find`` method.
        if not hasmethod(item, 'find'):
            return False
        # (Restored from a garbled 'return | item.find...' extraction line.)
        return item.find(self.substring) >= 0

    def relationship(self):
        return 'containing'
def contains_string(substring):
    """Matches if object is a string containing a given string.

    :param string: The string to search for.

    The evaluated object must be a string, and it must contain ``string``.

    Example::

        contains_string("def")

    will match "abcdefg".
    """
    matcher = StringContains(substring)
    return matcher
|
neubot/neubot-client | neubot/utils_nt.py | Python | gpl-3.0 | 2,253 | 0.002663 | # neubot/utils_nt.py
#
# Copyright (c) 2013
# Nexa Center for Internet & Society, Politecnico di Torino (DAUIN)
# and Simone Basso <bassosimone@gmail.com>
#
# This file is part of Neubot <http://www.neubot.org/>.
#
# Neubot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Neubot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General | Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Neubot. If not, see <http://www.gnu.org/licenses/>.
#
""" NT utils """
import os
# For python3 portability
MODE_755 = int('755', 8)  # rwxr-xr-x, for directories
MODE_644 = int('644', 8)  # rw-r--r--, for regular files
class PWEntry(object):
    """Stand-in for a password database entry on NT (no ``pwd`` module)."""
    # Pretend everything is owned by uid/gid 0.
    pw_uid = 0
    pw_gid = 0


def getpwnam(uname):
    """Return a fake password entry for any user name."""
    return PWEntry()
#
# I copied the following two | functions from utils_posix.py, and I
# also commented out the code that couldn't run on Windows NT.
#
def mkdir_idempotent(curpath, uid=None, gid=None):
    ''' Idempotent mkdir with 0755 permissions'''
    if os.path.exists(curpath):
        if not os.path.isdir(curpath):
            raise RuntimeError('%s: Not a directory' % curpath)
    else:
        os.mkdir(curpath, MODE_755)
    # uid/gid are accepted for POSIX API parity; the original chown calls
    # are disabled because they cannot run on Windows NT.
    os.chmod(curpath, MODE_755)
def touch_idempotent(curpath, uid=None, gid=None):
    ''' Idempotent touch with 0644 permissions '''
    if os.path.exists(curpath):
        if not os.path.isfile(curpath):
            raise RuntimeError('%s: Not a file' % curpath)
    else:
        # Plain open() replaces the POSIX os.open() variant on NT.
        open(curpath, "w").close()
    # uid/gid are accepted for POSIX API parity; the original chown calls
    # are disabled because they cannot run on Windows NT.
    os.chmod(curpath, MODE_644)
|
nouiz/fredericbastien-ipaddr-py-speed-up | tags/2.1.0/ipaddr.py | Python | apache-2.0 | 58,769 | 0.00051 | #!/usr/bin/python
#
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License | at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language g | overning
# permissions and limitations under the License.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
__version__ = 'trunk'
import struct
class AddressValueError(ValueError):
    """Raised when an IP address value cannot be interpreted."""


class NetmaskValueError(ValueError):
    """Raised when a netmask value cannot be interpreted."""
def IPAddress(address, version=None):
    """Take an IP string/int and return an object of the correct type.

    Args:
        address: A string or integer, the IP address. Either IPv4 or
          IPv6 addresses may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.
        version: An Integer, 4 or 6. If set, don't try to automatically
          determine what the IP address type is. important for things
          like IPAddress(1), which could be IPv4, '0.0.0.0.1', or IPv6,
          '::1'.

    Returns:
        An IPv4Address or IPv6Address object.

    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
          address.
    """
    if version == 4:
        return IPv4Address(address)
    if version == 6:
        return IPv6Address(address)
    # No (usable) version forced: try IPv4 first, then IPv6.
    for address_class in (IPv4Address, IPv6Address):
        try:
            return address_class(address)
        except (AddressValueError, NetmaskValueError):
            pass
    raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
                     address)
def IPNetwork(address, version=None, strict=False):
    """Take an IP string/int and return an object of the correct type.

    Args:
        address: A string or integer, the IP address. Either IPv4 or
          IPv6 addresses may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.
        version: An Integer, if set, don't try to automatically
          determine what the IP address type is. important for things
          like IPNetwork(1), which could be IPv4, '0.0.0.1/32', or IPv6,
          '::1/128'.

    Returns:
        An IPv4Network or IPv6Network object.

    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
          address. Or if a strict network was requested and a strict
          network wasn't given.
    """
    if version == 4:
        return IPv4Network(address, strict)
    if version == 6:
        return IPv6Network(address, strict)
    # No (usable) version forced: try IPv4 first, then IPv6.
    for network_class in (IPv4Network, IPv6Network):
        try:
            return network_class(address, strict)
        except (AddressValueError, NetmaskValueError):
            pass
    raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
                     address)
def _find_address_range(addresses):
"""Find a sequence of addresses.
Args:
addresses: a list of IPv4 or IPv6 addresses.
Returns:
A tuple containing the first and last IP addresses in the sequence.
"""
first = last = addresses[0]
for ip in addresses[1:]:
if ip._ip == last._ip + 1:
last = ip
else:
break
return (first, last)
def _get_prefix_length(number1, number2, bits):
"""Get the number of leading bits that are same for two numbers.
Args:
number1: an integer.
number2: another integer.
bits: the maximum number of bits to compare.
Returns:
The number of leading bits that are the same for two numbers.
"""
for i in range(bits):
if number1 >> i == number2 >> i:
return bits - i
return 0
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
for i in range(bits):
if (number >> i) % 2:
return i
def summarize_address_range(first, last):
    """Summarize a network range given the first and last IP addresses.

    Example:
        >>> summarize_address_range(IPv4Address('1.1.1.0'),
            IPv4Address('1.1.1.130'))
        [IPv4Network('1.1.1.0/25'), IPv4Network('1.1.1.128/31'),
        IPv4Network('1.1.1.130/32')]

    Args:
        first: the first IPv4Address or IPv6Address in the range.
        last: the last IPv4Address or IPv6Address in the range.

    Returns:
        The address range collapsed to a list of IPv4Network's or
        IPv6Network's.

    Raise:
        TypeError:
            If the first and last objects are not IP addresses.
            If the first and last objects are not the same version.
        ValueError:
            If the last object is not greater than the first.
            If the version is not 4 or 6.
    """
    if not (isinstance(first, _BaseIP) and isinstance(last, _BaseIP)):
        raise TypeError('first and last must be IP addresses, not networks')
    if first.version != last.version:
        # BUG FIX: this branch previously formatted str(self)/str(other),
        # two undefined names in a module-level function, so it raised
        # NameError instead of the intended TypeError.
        raise TypeError("%s and %s are not of the same version" % (
            str(first), str(last)))
    if first > last:
        raise ValueError('last IP address must be greater than first')
    networks = []

    if first.version == 4:
        ip = IPv4Network
    elif first.version == 6:
        ip = IPv6Network
    else:
        raise ValueError('unknown IP version')

    ip_bits = first._max_prefixlen
    first_int = first._ip
    last_int = last._ip
    while first_int <= last_int:
        # Grow the candidate block from the largest aligned size downwards
        # until it fits inside the remaining range.
        nbits = _count_righthand_zero_bits(first_int, ip_bits)
        current = None
        while nbits >= 0:
            addend = 2**nbits - 1
            current = first_int + addend
            nbits -= 1
            if current <= last_int:
                break
        prefix = _get_prefix_length(first_int, current, ip_bits)
        net = ip('%s/%d' % (str(first), prefix))
        networks.append(net)
        if current == ip._ALL_ONES:
            # Stop at the all-ones address to avoid wrapping around zero.
            break
        first_int = current + 1
        first = IPAddress(first_int, version=first._version)
    return networks
def _collapse_address_list_recursive(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network'1.1.0.0/24')
ip2 = IPv4Network'1.1.1.0/24')
ip3 = IPv4Network'1.1.2.0/24')
ip4 = IPv4Network'1.1.3.0/24')
ip5 = IPv4Network'1.1.4.0/24')
ip6 = IPv4Network'1.1.0.1/22')
_collapse_address_list_recursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
[IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]
This shouldn't be called directly; it is called via
collapse_address_list([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
ret_array = []
optimized = False
for cur_addr in addresses:
if not ret_array:
ret_array.append(cur_addr)
continue
if cur_addr in ret_array[-1]:
optimized = True
elif cur_addr == ret_array[-1].supernet().subnet()[1]:
ret_array.append(ret_array.pop().supernet())
optimized = True
else:
ret_array.append(cur_addr)
if optimized:
return _collapse_ad |
digling/sinotibetan | datasets/Sharma2003/prepare-byangsi.py | Python | gpl-2.0 | 1,355 | 0.002974 | from lingpy import *
from pystdb import load_stedt, stdb_concepts
# Map Byangsi reflexes from a STEDT export onto concept rows and emit a
# LingPy wordlist (byangsi.tsv).
concepts = stdb_concepts()
rong = csv2list('Byangsi.mapped.tsv', strip_lines=False)
wl = load_stedt('SRS-TBLUP.csv')
# reflex number (rn) -> wordlist key, for joining the two sources
rn2k = {wl[k, 'rn']: k for k in wl}
out = {0: ['language', 'concept', 'conceptid', 'concepticon_id', 'tbl_id',
           'gloss_in_source', 'rgen', 'tokens', 'ipa']}
idxx = 1
for line in rong[1:]:
    if line[-2].strip():
        rn = line[-2].strip()
        print(rn)
        idx = rn2k[rn]
        entry = wl[idx, 'reflex']
        # Source-orthography to IPA substitutions (capitals are retroflexes).
        st = [
            ('N', 'ɳ'),
            ('D', 'ɖ'),
            ('T', 'ʈ'),
            ('R', 'ɽ'),
            # NOTE(review): the ' | ' inside this literal looks like
            # extraction corruption -- verify against the upstream repo.
            ('5', '◌̺ | '.replace('◌', '')),
            (':', 'ː'),
        ]
        for s, t in st:
            entry = entry.replace(s, t)
        # Keep only the first comma-separated / '~'-alternating variant.
        entry = entry.split(',')[0]
        entry = entry.split('~')[0].strip()
        entry = entry.replace(' ', '_')
        if wl[idx, 'language'] == 'Byangsi':
            # NOTE(review): stray ' | ' inside semi_diacritics as well --
            # verify against upstream (likely just "shz").
            tokens = ipa2tokens(entry.replace(' ', '_'), semi_diacritics="shz | ",
                                merge_vowels=False)
            out[idxx] = ['Byangsi', line[1], line[0], line[2], line[3], wl[idx,
                'concept'], line[4], ' '.join(tokens), wl[idx, 'reflex']]
            idxx += 1
wl2 = Wordlist(out)
wl2.output('tsv', filename='byangsi', ignore='all', prettify=False)
|
raphaelamorim/fbthrift | thrift/compiler/test/fixtures/namespace/gen-py/my/namespacing/test/ttypes.py | Python | apache-2.0 | 5,135 | 0.013437 | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import six
from thrift.util.Recursive import fix_spec
from thrift.Thrift import *
from thrift.protocol.TProtocol import TProtocolException
import pprint
import warnings
from thrift import Thrift
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import THeaderProtocol
try:
from thrift.protocol import fastbinary
if fastbinary.version < 2:
fastbinary = None
warnings.warn("Disabling fastbinary, need at least version 2")
except:
fastbinary = None
try:
from thrift.protocol import fastproto
except:
fastproto = None
all_structs = []
# Decode Thrift strings as UTF-8 under Python 3 (bool(0) keeps a bool type
# on Python 2). Repairs a garbled 'bool | (0)' extraction artifact.
UTF8STRINGS = bool(0) or sys.version_info.major >= 3
class Foo:
  """
  Attributes:
   - MyInt
  """
  # Generated Thrift struct; keep in sync with the IDL, do not hand-edit.
  # (A stray '|' line that made the class body a syntax error was removed.)

  thrift_spec = None
  thrift_field_annotations = None
  thrift_struct_annotations = None
  # __init__ is attached after the class definition by the generator.
  __init__ = None

  @staticmethod
  def isUnion():
    return False

  def read(self, iprot):
    # Fast paths: accelerated C decoders when the transport/protocol allow.
    if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocol) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS)
      return
    if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocol) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
      fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
      return
    if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocol) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
      fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
      return
    # Slow path: generic field-by-field protocol loop.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.MyInt = iprot.readI64()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast paths mirror read(); fall back to the generic protocol loop.
    if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocol) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS))
      return
    if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocol) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
      oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
      return
    if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocol) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
      oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
      return
    oprot.writeStructBegin('Foo')
    if self.MyInt != None:
      oprot.writeFieldBegin('MyInt', TType.I64, 1)
      oprot.writeI64(self.MyInt)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def __repr__(self):
    L = []
    for key, value in six.iteritems(self.__dict__):
      padding = ' ' * (len(key) + 1)
      value = pprint.pformat(value)
      value = padding.join(value.splitlines(True))
      L.append('    %s=%s' % (key, value))
    return "%s(\n%s)" % (self.__class__.__name__, ",\n".join(L))

  def __eq__(self, other):
    if not isinstance(other, self.__class__):
      return False
    return self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
all_structs.append(Foo)
# Field spec consumed by the accelerated (de)serializers; index 0 is unused.
# NOTE(review): tuple layout follows the fbthrift generated-code convention
# -- verify against the generator before relying on positions.
Foo.thrift_spec = (
  None, # 0
  (1, TType.I64, 'MyInt', None, None, 2, ), # 1
)

Foo.thrift_struct_annotations = {
}
Foo.thrift_field_annotations = {
}

def Foo__init__(self, MyInt=None,):
  self.MyInt = MyInt

# Attach the generated constructor to the class.
Foo.__init__ = Foo__init__

fix_spec(all_structs)
del all_structs
|
danhuss/faker | faker/providers/internet/bg_BG/__init__.py | Python | mit | 1,900 | 0.006044 | from .. import Provider as InternetProvider
class Provider(InternetProvider):
user_name_formats = (
'{{last_name_female}}.{{first_name_female}}',
'{{last_name_male}}.{{first_name_male}}',
'{{last_name_male}}.{{first_name_male}}',
'{{first_name_male}}.{{last_name_male}}',
'{{first_name}}##',
'?{{last_name}}',
'{{first_name}}{{year}}',
)
email_formats = (
'{{user_name}}@{{free_email_domain}}',
'{{user_name}}@{{domain_name}}')
free_email_domains = (
'gmail.com', 'yahoo.com', 'hotmail.com', 'mail.bg', 'abv.bg', 'dir.bg',
)
tlds = ('bg', 'com', 'biz', 'info', 'net', 'org', 'edu')
replacements = (
('Б', 'b'), ('Г', 'r'), ('Д', 'd'), ('Ж', 'zh'), ('З', 'z'), ('И', 'i'),
| ('Й', 'i'), ('Л', 'l'), ('П', 'p'), ('Ф', 'f'), ('Ц', 'ts'), ('Ч', 'ch'),
('Ш', 'sh'), ('Щ', 'sht'), ('Ъ', 'u'), ('Ь', ''), ('Ю', 'yu'), ('Я', 'ya'),
('б', 'b'), ('в', 'v'), ('д', 'd'), ('ж', 'zh'), ('з', 'z'), ('и', 'i'),
('й', 'i'), ('к', 'k'), ('л', 'l'), ('м', 'm'), ('н', 'n'), ('п', 'p'),
('т', 't'), ('ф', 'f'), ('ц', 't | s'), ('ч', 'ch'), ('ш', 'sh'), ('щ', 'sht'),
('ъ', 'u'), ('ь', ''), ('ю', 'yu'), ('я', 'ya'), ('Б', 'b'), ('Г', 'r'),
('Д', 'd'), ('Ж', 'zh'), ('З', 'z'), ('И', 'i'), ('Й', 'i'), ('Л', 'l'),
('П', 'p'), ('Ф', 'f'), ('Ц', 'ts'), ('Ч', 'ch'), ('Ш', 'sh'), ('Щ', 'sht'),
('Ъ', 'u'), ('Ь', ''), ('Ю', 'yu'), ('Я', 'ya'), ('б', 'b'), ('в', 'v'),
('д', 'd'), ('ж', 'zh'), ('з', 'z'), ('и', 'i'), ('й', 'i'), ('к', 'k'),
('л', 'l'), ('м', 'm'), ('н', 'n'), ('п', 'p'), ('т', 't'), ('ф', 'f'),
('ц', 'ts'), ('ч', 'ch'), ('ш', 'sh'), ('щ', 'sht'), ('ъ', 'u'), ('ь', ''),
('ю', 'yu'), ('я', 'ya'),
)
|
alphacsc/alphacsc | alphacsc/other/sporco/sporco/admm/cbpdntv.py | Python | bsd-3-clause | 43,694 | 0.002243 | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2017 by Brendt Wohlberg <brendt@ieee.org>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""Classes for ADMM algorithm for the variants of the Convolutional BPDN
problem with Total Variation regularisation terms"""
from __future__ import division
from __future__ import print_function
from builtins import range
from builtins import object
import numpy as np
from scipy import linalg
import copy
from sporco.admm import admm
import sporco.cnvrep as cr
from sporco.admm import cbpdn
import sporco.linalg as sl
from sporco.util import u
__author__ = """Brendt Wohlberg <brendt@ieee.org>"""
class ConvBPDNScalarTV(admm.ADMM):
r"""**Class inheritance structure**
.. inheritance-diagram:: ConvBPDNScalarTV
:parts: 2
|
ADMM algorithm for an extension of Convolutional BPDN including
terms penalising the total variation of each coefficient map
:cite:`wohlberg-2017-convolutional`.
Solve the optimisation problem
.. math::
\mathrm{argmin}_\mathbf{x} \; \frac{1}{2}
\left\| \sum_m \mathbf{d}_m * \mathbf{x}_m - \mathbf{s}
\right\|_2^2 + \lambda \sum_m \| \mathbf{x}_m \|_1 +
\mu \sum_m \left\| \sqrt{\sum_i (G_i \mathbf{x}_m)^2} \right\|_1
\;\;,
where :math:`G_i` is an operator computing the derivative along index
:math:`i`, via the ADMM problem
.. math::
\mathrm{argmin}_\mathbf{x} \; (1/2) \left\| D \mathbf{x} -
\mathbf{s} \right\|_2^2 + \lambda
\| \mathbf{y}_L \|_1 + \mu \sum_m \left\| \sqrt{\sum_{i=0}^{L-1}
\mathbf{y}_i^2} \right\|_1 \quad \text{ such that } \quad
\left( \begin{array}{c} \Gamma_0 \\ \Gamma_1 \\ \vdots \\ I
\end{array} \right) \mathbf{x} =
\left( \begin{array}{c} \mathbf{y}_0 \\
\mathbf{y}_1 \\ \vdots \\ \mathbf{y}_L \end{array}
\right) \;\;,
where
.. math::
D = \left( \begin{array}{ccc} D_0 & D_1 & \ldots \end{array} \right)
\qquad
\mathbf{x} = \left( \begin{array}{c} \mathbf{x}_0 \\ \mathbf{x}_1 \\
\vdots \end{array} \right) \qquad
\Gamma_i = \left( \begin{array}{ccc}
G_i & 0 & \ldots \\ 0 & G_i & \ldots \\ \vdots & \vdots & \ddots
\end{array} \right) \;\;.
For multi-channel signals with a single-channel dictionary, scalar TV is
applied independently to each coefficient map for channel :math:`c` and
filter :math:`m`. Since multi-channel signals with a multi-channel
dictionary also have one coefficient map per filter, the behaviour is
the same as for single-channel signals.
After termination of the :meth:`solve` method, attribute :a | ttr:`itstat`
is a list of tuples representing statistics of each iteration. The
fields of the named tuple ``Iteration | Stats`` are:
``Iter`` : Iteration number
``ObjFun`` : Objective function value
``DFid`` : Value of data fidelity term :math:`(1/2) \| \sum_m
\mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \|_2^2`
``RegL1`` : Value of regularisation term :math:`\sum_m \|
\mathbf{x}_m \|_1`
``RegTV`` : Value of regularisation term :math:`\sum_m \left\|
\sqrt{\sum_i (G_i \mathbf{x}_m)^2} \right\|_1`
``PrimalRsdl`` : Norm of primal residual
``DualRsdl`` : Norm of dual residual
``EpsPrimal`` : Primal residual stopping tolerance
:math:`\epsilon_{\mathrm{pri}}`
``EpsDual`` : Dual residual stopping tolerance
:math:`\epsilon_{\mathrm{dua}}`
``Rho`` : Penalty parameter
``XSlvRelRes`` : Relative residual of X step solver
``Time`` : Cumulative run time
"""
class Options(cbpdn.ConvBPDN.Options):
r"""ConvBPDNScalarTV algorithm options
Options include all of those defined in
:class:`.admm.cbpdn.ConvBPDN.Options`, together with additional
options:
``TVWeight`` : An array of weights :math:`w_m` for the term
penalising the gradient of the coefficient maps. If this
option is defined, the regularization term is :math:`\sum_m w_m
\left\| \sqrt{\sum_i (G_i \mathbf{x}_m)^2} \right\|_1`
where :math:`w_m` is the weight for filter index :math:`m`. The
array should be an :math:`M`-vector where :math:`M` is the number
of filters in the dictionary.
"""
defaults = copy.deepcopy(cbpdn.ConvBPDN.Options.defaults)
defaults.update({'TVWeight' : 1.0})
def __init__(self, opt=None):
"""Initialise ConvBPDNScalarTV algorithm options object"""
if opt is None:
opt = {}
cbpdn.ConvBPDN.Options.__init__(self, opt)
itstat_fields_objfn = ('ObjFun', 'DFid', 'RegL1', 'RegTV')
itstat_fields_extra = ('XSlvRelRes',)
hdrtxt_objfn = ('Fnc', 'DFid', u('Regℓ1'), u('RegTV'))
hdrval_objfun = {'Fnc': 'ObjFun', 'DFid': 'DFid',
u('Regℓ1'): 'RegL1', u('RegTV'): 'RegTV'}
def __init__(self, D, S, lmbda, mu=0.0, opt=None, dimK=None, dimN=2):
"""
Initialise a ConvBPDNScalarTV object with problem parameters.
|
**Call graph**
.. image:: _static/jonga/cbpdnstv_init.svg
:width: 20%
:target: _static/jonga/cbpdnstv_init.svg
|
Parameters
----------
D : array_like
Dictionary matrix
S : array_like
Signal vector or matrix
lmbda : float
Regularisation parameter (l1)
mu : float
Regularisation parameter (gradient)
opt : :class:`ConvBPDNScalarTV.Options` object
Algorithm options
dimK : 0, 1, or None, optional (default None)
Number of dimensions in input signal corresponding to multiple
independent signals
dimN : int, optional (default 2)
Number of spatial dimensions
"""
if opt is None:
opt = ConvBPDNScalarTV.Options()
# Infer problem dimensions and set relevant attributes of self
self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)
# Call parent class __init__
Nx = np.product(self.cri.shpX)
yshape = self.cri.shpX + (len(self.cri.axisN)+1,)
super(ConvBPDNScalarTV, self).__init__(Nx, yshape, yshape,
S.dtype, opt)
# Set l1 term scaling and weight array
self.lmbda = self.dtype.type(lmbda)
self.Wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)
self.Wl1 = self.Wl1.reshape(cr.l1Wshape(self.Wl1, self.cri))
self.mu = self.dtype.type(mu)
if hasattr(opt['TVWeight'], 'ndim') and opt['TVWeight'].ndim > 0:
self.Wtv = np.asarray(opt['TVWeight'].reshape((1,)*(dimN+2) +
opt['TVWeight'].shape), dtype=self.dtype)
else:
# Wtv is a scalar: no need to change shape
self.Wtv = self.dtype.type(opt['TVWeight'])
# Set penalty parameter
self.set_attr('rho', opt['rho'], dval=(50.0*self.lmbda + 1.0),
dtype=self.dtype)
# Set rho_xi attribute
self.set_attr('rho_xi', opt['AutoRho', 'RsdlTarget'], dval=1.0,
dtype=self.dtype)
# Reshape D and S to standard layout
self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)
self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)
# Compute signal in DFT domain
self.Sf = sl.rfftn(self.S, None, self.cri.axisN)
self.Gf, GHGf = sl.GradientFilters(self.cri.dimN+3, self.cri.axisN,
self.cri.Nv, dtype=self.dtype)
self.GHGf = self.Wtv**2 * GHGf
# Initialise byte-aligned arrays for pyfftw
self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
xfshp = list(self.cri.shpX)
xfshp[dimN-1] = xfshp[dimN-1]//2 + 1
self.Xf = sl.pyfftw_empty_aligned(xfshp,
|
ver228/tierpsy-tracker | tierpsy/analysis/compress/processVideo.py | Python | mit | 5,963 | 0.013584 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 14 15:30:11 2016
@author: worm_rig
"""
import json
import os
import tables
from tierpsy.analysis.compress.compressVideo import compressVideo, initMasksGroups
from tierpsy.analysis.compress.selectVideoReader import selectVideoReader
from tierpsy.helper.misc import TimeCounter, print_flush
#default parameters if wormencoder.ini does not exist
DFLT_SAVE_FULL_INTERVAL = 5000
DFLT_BUFFER_SIZE = 5
DFLT_MASK_PARAMS = {'min_area' : 50,
'max_area' : 500000000,
'thresh_C' : 15,
'thresh_block_size' : 61,
'dilation_size' : 7
}
def _getWormEnconderParams(fname):
def numOrStr(x):
x = x.strip()
try:
return int(x)
except:
return x
if os.path.exists(fname):
with open(fname, 'r') as fid:
dd = fid.read().split('\n')
plugin_params = {a.strip() : numOrStr(b) for a,b in
[x.split('=') for x in dd if x and x[0].isalpha()]}
else:
plugin_params = {}
return plugin_params
def _getReformatParams(plugin_params):
if plugin_params:
save_full_interval = plugin_params['UNMASKEDFRAMES']
buffer_size = plugin_params['MASK_RECALC_RATE']
mask_params = {'min_area' : plugin_params['MINBLOBSIZE'],
'max_area' : plugin_params['MAXBLOBSIZE'],
'thresh_C' : plugin_params['THRESHOLD_C'],
'thresh_block_size' : plugin_params['THRESHOLD_BLOCK_SIZE'],
'dilation_size' : plugin_params['DILATION_KERNEL_SIZE']}
else:
#if an empty dictionary was given return default values
save_full_interval = DFLT_SAVE_FULL_INTERVAL
buffer_size = DFLT_BUFFER_SIZE
mask_params = DFLT_MASK_PARAMS
return save_full_interval, buffer_size, mask_params
def _isValidSource(original_file):
try:
with tables.File(original_file, 'r') as fid:
fid.get_node('/mask')
return True
except tables.exceptions.HDF5ExtError:
return False
def reformatRigMaskedVideo(original_file, new_file, plugin_param_file, expected_fps, microns_per_pixel):
plugin_params = _getWormEnconderParams(plugin_param_file)
base_name = original_file.rpartition('.')[0].rpartition(os.sep)[-1]
if not _isValidSource(original_file):
print_flush(new_file + ' ERROR. File might be corrupt. ' + original_file)
return
save_full_interval, buffer_size, mask_params = _getReformatParams(plugin_params)
with tables.File(original_file, 'r') as fid_old, \
tables.File(new_file, 'w') as fid_new:
mask_old = fid_old.get_node('/mask')
tot_frames, im_height, im_width = mask_old.shape
progress_timer = TimeCounter('Reformating Gecko plugin hdf5 video.', tot_frames)
attr_params = dict(
expected_fps = expected_fps,
microns_per_pixel = microns_per_pixel,
is_light_background = True
)
mask_new, full_new, _ = initMasksGroups(fid_new, tot_frames, im_height, im_width,
attr_params, save_full_interval, is_expandable=False)
mask_new.attrs['plugin_params'] = json.dumps(plugin_params)
img_buff_ini = mask_old[:buffer_size]
full_new[0] = img_buff_ini[0]
mask_new[:buffer_size] = img_buff_ini*(mask_old[buffer_size] != 0)
for frame in range(buffer_size, tot_frames):
if frame % save_full_interval != 0:
mask_new[frame] = mask_old[frame]
else:
full_frame_n = frame //save_full_interval
img = mask_old[frame]
full_new[full_frame_n] = img
mask_new[frame] = img*(mask_old[frame-1] != 0)
if frame % 500 == 0:
# calculate the progress and put it in a string
progress_str = progress_timer.get_str(frame)
print_flush(base_name + ' ' + progress_str)
print_flush(
base_name +
' Compressed video done. Total time:' +
progress_timer.get_time_str())
def isGoodVideo(video_file):
try:
vid = selectVideoReader(video_file)
# i have problems with corrupt videos that can create infinite loops...
#it is better to test it before start a large taks
vid.release()
return True
except OSError:
# corrupt file, cannot read the size
return False
def processVideo(video_file, masked_image_file, compress_vid_param):
if video_file.endswith('hdf5'):
plugin_param_file = os.path.join(os.path.dirname(video_file), 'wormencoder.ini')
expected_fps = compress_vid_param['expected_fps']
microns_per_pixel = compress_vid_param['microns_per_pixel']
reformatRigMaskedVideo(video_file, masked_image_file, plugin_param_file, expected_fps=expected_fps, microns_per_pixel=microns_per_pixel)
else:
compressVideo(video_file, masked_image_file, **compress_vid_param)
if __name__ == '__main__':
import argparse
fname_wenconder = os.path.join(os.path.dirname(__file__), 'wormencoder.ini')
parser = argparse. | ArgumentParser(description='Reformat the files produced by the Gecko plugin in to the format of tierpsy.')
parser.add_argument('original_file', help='path of the original file produced by the plugin')
parser.add_argument('new_file', help='new file name')
parser.add_argument | (
'--plugin_param_file',
default = fname_wenconder,
help='wormencoder file used by the Gecko plugin.')
parser.add_argument(
'--expected_fps',
default=25,
help='Expected recording rate in frame per seconds.')
args = parser.parse_args()
reformatRigMaskedVideo(**vars(args))
|
ibc/MediaSoup | worker/deps/gyp/test/ninja/action-rule-hash/gyptest-action-rule-hash.py | Python | isc | 938 | 0.001066 | #!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style lic | ense that can be
# found in the LICENSE file.
"""
Verify that running gyp in a different directory does not | cause actions and
rules to rerun.
"""
import os
import sys
import TestGyp
test = TestGyp.TestGyp(formats=['ninja'])
# The xcode-ninja generator handles gypfiles which are not at the
# project root incorrectly.
# cf. https://code.google.com/p/gyp/issues/detail?id=460
if test.format == 'xcode-ninja':
test.skip_test()
test.run_gyp('subdir/action-rule-hash.gyp')
test.build('subdir/action-rule-hash.gyp', test.ALL)
test.up_to_date('subdir/action-rule-hash.gyp')
# Verify that everything is still up-to-date when we re-invoke gyp from a
# different directory.
test.run_gyp('action-rule-hash.gyp', '--depth=../', chdir='subdir')
test.up_to_date('subdir/action-rule-hash.gyp')
test.pass_test()
|
yanjianlong/server_cluster | public/global_manager.py | Python | bsd-3-clause | 1,046 | 0.000962 | # coding:utf-8
"""
Created by 捡龙眼
3/4/2016
"""
from __future__ import absolute_import, unicode_literals, print_function
import public.special_exception
LOG_THREAD = "log_thread"
TIME_THREAD = "time_thread"
GLOBAL_THREAD = {}
def add_thread(key, thread_object):
if key in GLOBAL_THREAD:
raise public.special_exception.KeyExistError("%s is exist" % (key))
GLOBAL_THREAD[key] = thread_object
def get_thread(key):
return GLOBAL_THREAD.get(key)
def clear_thread():
for thread_object | in GLOBAL_THREAD.values():
try:
thread_object.stop_thread()
except BaseException as e:
print(e)
HTTP_REQUEST_MANAGER = "http_request_manager"
WAITE_CONNECT_MANAGER = "wait | e_client_manager"
AUTH_CONNECT_MANAGER = "auth_connect_manager"
GLOBAL_OBJECT = {}
def add_object(key, object):
if key in GLOBAL_OBJECT:
raise public.special_exception.KeyExistError("%s is exist" % (key))
GLOBAL_OBJECT[key] = object
def get_object(key):
return GLOBAL_OBJECT.get(key) |
difio/difio | grabber.py | Python | apache-2.0 | 1,858 | 0.003229 | #!/usr/bin/env python
################################################################################
#
# Copyright (c) 2012, Alexander Todorov <atodorov@nospam.dif.io>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on | an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
import os
from urlgrabber.grabber import URLGrabber
def download_file(url, dirname):
"""
Download @url and save to @dirname.
@return - filename of saved file
"""
# pycurl is picky abou | t Unicode URLs, see rhbz #515797
url = url.encode('ascii', 'ignore')
if not os.path.exists(dirname):
os.makedirs(dirname)
basename = os.path.basename(url)
filename = "%s/%s" % (dirname, basename)
if os.path.exists(filename):
raise Exception("File %s already exists! Not downloading!" % filename)
g = URLGrabber(reget=None)
local_filename = g.urlgrab(url, filename)
return local_filename
def remove_file(filename):
"""
Remove @filename.
"""
os.remove(filename)
if __name__ == "__main__":
import tar
####### GEM
dirname = '/tmp/newdir'
f = download_file('https://rubygems.org/gems/columnize-0.3.5.gem', dirname)
print "Downloaded ", f
# tar.extract_gem(f, "%s/columnize" % dirname)
# remove_file(f)
# print "Removed ", f
|
takaakiaoki/PyFoam | PyFoam/Infrastructure/RunHooks/PrintMessageHook.py | Python | gpl-2.0 | 443 | 0.018059 | """A simple hook that only prints a user-specified message"""
from PyFoam.Infrastructure.RunHook import RunHook
from PyFoam.ThirdParty.six import print_
class PrintMessageHook(RunHook):
"""Print a small message"""
def __init__(self,runner,name):
RunHook.__init__(self,runner,name)
self.message=self.conf().get("message")
def __call__(self):
print_(self.message)
# Should work with Python3 and | Python2
| |
emijrp/pywikibot-core | pywikibot/__init__.py | Python | mit | 27,152 | 0.000332 | # -*- coding: utf-8 -*-
"""The initialization file for the Pywikibot framework."""
#
# (C) Pywikibot team, 2008-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__release__ = '2.0b3'
__version__ = '$Id$'
import datetime
import math
import re
import sys
import threading
import json
if sys.version_info[0] > 2:
from queue import Queue
long = int
else:
from Queue import Queue
from warnings import warn
# Use pywikibot. prefix for all in-package imports; this is to prevent
# confusion with similarly-named modules in version 1 framework, for users
# who want to continue using both
from pywikibot import config2 as config
from pywikibot.bot import (
output, warning, error, critical, debug, stdout, exception,
input, input_choice, input_yn, inputChoice, handle_args, showHelp, ui, log,
calledModuleName, Bot, CurrentPageBot, WikidataBot,
# the following are flagged as deprecated on usage
handleArgs,
)
from pywikibot.exceptions import (
Error, InvalidTitle, BadTitle, NoPage, NoMoveTarget, SectionError,
SiteDefinitionError, NoSuchSite, UnknownSite, UnknownFamily,
UnknownExtension,
NoUsername, UserBlocked,
PageRelatedError, IsRedirectPage, IsNotRedirectPage,
PageSaveRelatedError, PageNotSaved, OtherPageSaveError,
LockedPage, CascadeLockedPage, LockedNoPage, NoCreateError,
EditConflict, PageDeletedConflict, PageCreatedConflict,
ServerError, FatalServerError, Server504Error,
CaptchaError, SpamfilterError, CircularRedirect, InterwikiRedirectPage,
WikiBaseError, CoordinateGlobeUnknownException,
)
from pywikibot.tools import UnicodeMixin, redirect_func
from pywikibot.i18n import translate
from pywikibot.data.api import UploadWarning
from pywikibot.diff import PatchManager
import pywikibot.textlib as textlib
import pywikibot.tools
textlib_methods = (
'unescape', 'replaceExcept', 'removeDisabledParts', 'removeHTMLParts',
'isDisabled', 'interwikiFormat', 'interwikiSort',
'getLanguageLinks', 'replaceLanguageLinks',
'removeLanguageLinks', 'removeLanguageLinksAndSeparator',
'getCategoryLinks', 'categoryFormat', 'replaceCategoryLinks',
'removeCategoryLinks', 'removeCategoryLinksAndSeparator',
'replaceCategoryInPlace', 'compileLinkR', 'extract_templates_and_params',
)
# pep257 doesn't understand when the first entry is on the next line
__all__ = ('config', 'ui', 'UnicodeMixin', 'translate',
'Page', 'FilePage', 'Category', 'Link', 'User',
'ItemPage', 'PropertyPage', 'Claim', 'TimeStripper',
'html2unicode', 'url2unicode', 'unicode2html',
'stdout', 'output', 'warning', 'error', 'critical', 'debug',
'exception', 'input_choice', 'input', 'input_yn', 'inputChoice',
'handle_args', 'handleArgs', 'showHelp', 'ui', 'log',
'calledModuleName', 'Bot', 'CurrentPageBot', 'WikidataBot',
'Error', 'InvalidTitle', 'BadTitle', 'NoPage', 'NoMoveTarget',
'SectionError',
'SiteDefinitionError', 'NoSuchSite', 'UnknownSite', 'UnknownFamily',
'UnknownExtension',
'NoUsername', 'UserBlocked', 'UserActionRefuse',
'PageRelatedError', 'IsRedirectPage', 'IsNotRedirectPage',
'PageSaveRelatedError', 'PageNotSaved', 'OtherPageSaveError',
'LockedPage', 'CascadeLockedPage', 'LockedNoPage', 'NoCreateError',
'EditConflict', 'PageDeletedConflict', 'PageCreatedConflict',
'UploadWarning',
'ServerError', 'FatalServerError', 'Server504Error',
'CaptchaError', 'SpamfilterError', 'CircularRedirect',
'InterwikiRedirectPage',
'WikiBaseError', 'CoordinateGlobeUnknownException',
'QuitKeyboardInterrupt',
)
# flake8 is unable to detect concatenation in the same operation
# like:
# ) + textlib_methods
# pep257 also doesn't support __all__ multiple times in a document
# so instead use this trick
globals()['__all__'] = globals()['__all__'] + textlib_methods
for _name in textlib_methods:
target = getattr(textlib, _name)
wrapped_func = redirect_func(target)
globals()[_name] = wrapped_func
deprecated = redirect_func(pywikibot.tools.deprecated)
deprecate_arg = redirect_func(pywikibot.tools.deprecate_arg)
class Timestamp(datetime.datetime):
"""Class for handling MediaWiki timestamps.
This inherits from datetime.datetime, so it can use all of the methods
and operations of a datetime object. To ensure that the results of any
operation are also a Timestamp object, be sure to use only Timestamp
objects (and datetime.timedeltas) in any operation.
Use Timestamp.fromISOformat() and Timestamp.fromtimestampformat() to
create Timestamp objects from MediaWiki string formats.
As these constructors are typically used to create objects using data
passed provided by site and page methods, some of which return a Timestamp
when previously they returned a MediaWiki string representation, these
methods also accept a Timestamp object, in which case they return a clone.
Use Site.getcurrenttime() for the current time; this is more reliable
than using Timestamp.utcnow().
"""
mediawikiTSFormat = "%Y%m%d%H%M%S"
ISO8601Format = "%Y-%m-%dT%H:%M:%SZ"
def clone(self):
"""Clone this instance."""
return self.replace(microsecond=self.microsecond)
@classmethod
def fromISOformat(cls, ts):
"""Convert an ISO 8601 timestamp to a Timestamp object."""
# If inadvertantly passed a Timestamp object, use replace()
# to create a clone.
if isinstance(ts, cls):
return ts.clone()
return cls.strptime(ts, cls.ISO8601Format)
@classmethod
def fromtimestampformat(cls, ts):
"""Convert a M | ediaWiki internal timestamp to a Timestamp object."""
# If inadvertantly passed a Timestamp object, use replace()
# to create a clone.
if isinstance(ts, cls):
return ts.clone()
return cls.strptime(ts, cls.mediawikiTSFormat)
def isoformat(self):
"""
Convert object to an ISO 8601 timestamp accepted by MediaWiki.
datetime.datetime.isoformat does not postfix the ISO formatted date
with a 'Z' unless a timezone | is included, which causes MediaWiki
~1.19 and earlier to fail.
"""
return self.strftime(self.ISO8601Format)
toISOformat = redirect_func(isoformat, old_name='toISOformat',
class_name='Timestamp')
def totimestampformat(self):
"""Convert object to a MediaWiki internal timestamp."""
return self.strftime(self.mediawikiTSFormat)
def __str__(self):
"""Return a string format recognized by the API."""
return self.isoformat()
def __add__(self, other):
"""Perform addition, returning a Timestamp instead of datetime."""
newdt = super(Timestamp, self).__add__(other)
if isinstance(newdt, datetime.datetime):
return Timestamp(newdt.year, newdt.month, newdt.day, newdt.hour,
newdt.minute, newdt.second, newdt.microsecond,
newdt.tzinfo)
else:
return newdt
def __sub__(self, other):
"""Perform substraction, returning a Timestamp instead of datetime."""
newdt = super(Timestamp, self).__sub__(other)
if isinstance(newdt, datetime.datetime):
return Timestamp(newdt.year, newdt.month, newdt.day, newdt.hour,
newdt.minute, newdt.second, newdt.microsecond,
newdt.tzinfo)
else:
return newdt
class Coordinate(object):
"""
Class for handling and storing Coordinates.
For now its just being used for DataSite, but
in the future we can use it for the GeoData extension.
"""
def __init__(self, lat, lon, alt=None, precision=None, globe='earth',
typ="", name="", dim=None, site=None, entity=''):
"""
Represent a geo coordinate.
@param lat: Latitude
|
beekpr/wsgiservice | docs/conf.py | Python | bsd-2-clause | 6,526 | 0.006436 | # -*- coding: utf-8 -*-
#
# WsgiService documentation build configuration file, created by
# sphinx-quickstart on Fri May 1 16:34:26 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import wsgiservice
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'WsgiService'
copyright = u'2009-2014, Patrice Neff'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = wsgiservice.__version__
if 'pre' in version:
version = version[:version.index('pre')]
# The full version, including alpha/beta/rc tags.
release = wsgiservice.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_us | e_index = True
# If tr | ue, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'WsgiServicedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'WsgiService.tex', u'WsgiService Documentation',
u'Patrice Neff', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
todo_include_todos = True
|
kcsry/wurst | wurst/core/utils/schema_import.py | Python | mit | 3,317 | 0.000603 | import sys
from collections import defaultdict, OrderedDict
import toml
from wurst.core.models import IssueType, Priority, Status, Transition
def sift(iterable, predicate):
    """
    Sift an iterable into two lists, those which pass the predicate and those who don't.

    :param iterable: Any iterable of objects to partition.
    :param predicate: Callable returning a truthy value for the "pass" list.
    :return: (True-list, False-list)
    :rtype: tuple[list, list]
    """
    t_list = []
    f_list = []
    for obj in iterable:
        # Route each object to exactly one bucket based on the predicate.
        (t_list if predicate(obj) else f_list).append(obj)
    return (t_list, f_list)
class SchemaImporter:
    """
    An utility to import an issue type/priority/status/... schema.

    After the import is finished, the ``objects`` field will be populated
    with the imported objects.
    """
    # Streams are class attributes so callers/tests can swap them out.
    stderr = sys.stderr
    stdout = sys.stdout
    # Maps a schema section name to the model class it instantiates.
    # Ordered so objects that may be referenced by later sections
    # (e.g. statuses referenced by transitions) are imported first.
    type_to_class = OrderedDict([
        ("type", IssueType),
        ("status", Status),
        ("priority", Priority),
        ("transition", Transition),
    ])

    def __init__(self):
        # obj_type -> {slug-or-pk: model instance}
        self.objects = defaultdict(dict)

    def import_from_toml(self, fp):
        """
        Import from a file-like object where TOML markup can be read from.

        :param fp: A filelike object.
        :return: Naught.
        """
        data = toml.load(fp)
        self.import_from_data(data)

    def import_from_data(self, data):
        """
        Import objects into the database from the given data dictionary.

        :param data: Data dictionary
        :type data: dict[str,list[dict]]
        :return: Does not return a value, but the instance's
                 `.objects` dict will have been modified
        """
        for obj_type, klass in self.type_to_class.items():
            items = data.get(obj_type, [])
            if not isinstance(items, list):
                continue
            # Prefer a type-specific `import_<obj_type>` hook; fall back
            # to the generic importer when no such method exists.
            importer = getattr(self, "import_%s" % obj_type, None)
            if not importer:
                importer = self.generic_importer
            if not importer:
                # NOTE(review): effectively unreachable since
                # generic_importer is always truthy; kept for parity
                # with the original control flow.
                self.stderr.write("No importer for %r" % obj_type)
            for val in items:
                importer(obj_type, val)

    def generic_importer(self, obj_type, datum):
        """
        Import an object using the `type_to_class` mapping.

        As an added bonus, will not try reimporting objects if a slug
        is specified.

        :param obj_type: Object type string, e.g. "priority"
        :param datum: An object datum
        :type datum: dict[str,object]
        :return: The created object.
        """
        model_class = self.type_to_class[obj_type]
        if hasattr(model_class, "mangle_import_datum"):
            datum = model_class.mangle_import_datum(datum)
        obj = None
        if "slug" in datum:  # See if we already got one...
            obj = model_class.objects.filter(slug=datum["slug"]).first()
        if obj is None:  # Not found? Create it.
            # List-valued entries are assumed to be many-to-many
            # assignments and must be set after the row exists.
            m2m_fields, non_m2m_fields = sift(datum.items(), lambda item: isinstance(item[1], list))
            obj = model_class.objects.create(**dict(non_m2m_fields))
            for field, value in m2m_fields:
                setattr(obj, field, value)
        idfr = getattr(obj, "slug", obj.pk)
        self.objects[obj_type][idfr] = obj
        self.stdout.write("%s processed: %s" % (obj_type.title(), idfr))
        return obj
|
chipx86/reviewboard | reviewboard/diffviewer/tests/test_myersdiff.py | Python | mit | 1,475 | 0 | from __future__ import unicode_literals
from reviewboard.diffviewer.myersdiff import MyersDiffer
from reviewboard.testing import TestCase
class MyersDifferTest(TestCase):
    """Unit tests for MyersDiffer."""

    def test_equals(self):
        """Testing MyersDiffer with equal chunk"""
        self._test_diff(['1', '2', '3'],
                        ['1', '2', '3'],
                        [('equal', 0, 3, 0, 3), ])

    def test_delete(self):
        """Testing MyersDiffer with delete chunk"""
        self._test_diff(['1', '2', '3'],
                        [],
                        [('delete', 0, 3, 0, 0), ])

    def test_insert_before_lines(self):
        """Testing MyersDiffer with insert before existing lines"""
        self._test_diff('1\n2\n3\n',
                        '0\n1\n2\n3\n',
                        [('insert', 0, 0, 0, 2),
                         ('equal', 0, 6, 2, 8)])

    def test_replace_insert_between_lines(self):
        """Testing MyersDiffer with replace and insert between existing lines
        """
        self._test_diff('1\n2\n3\n7\n',
                        '1\n2\n4\n5\n6\n7\n',
                        [('equal', 0, 4, 0, 4),
                         ('replace', 4, 5, 4, 5),
                         ('insert', 5, 5, 5, 9),
                         ('equal', 5, 8, 9, 12)])

    def _test_diff(self, a, b, expected):
        # Compare the full opcode stream so ordering mistakes are caught too.
        opcodes = list(MyersDiffer(a, b).get_opcodes())
        self.assertEqual(opcodes, expected)
|
pronexo-odoo/odoo-argentina | l10n_ar_account_check_duo/account.py | Python | agpl-3.0 | 1,557 | 0.003213 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 OpenERP - Team de Localización Argentina.
# https://launchpad.net/~openerp-l10n-ar-localization
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class account_journal(osv.osv):
    """Extend ``account.journal`` with flags that control how checks
    (issued/third-party) may be used on vouchers tied to this journal."""
    _name = 'account.journal'
    _inherit = 'account.journal'
    _columns = {
        'use_issued_check': fields.boolean('Use Issued Checks', help='Allow to user Issued Checks in associated vouchers.'),
        'use_third_check': fields.boolean('Use Third Checks', help='Allow to user Third Checks in associated vouchers.'),
        'validate_only_checks': fields.boolean('Validate only Checks', help='If marked, when validating a voucher, verifies that the total amounth of the voucher is the same as the checks used.'),
    }

# Legacy OpenERP registration idiom; instantiating registers the model.
account_journal()
|
nmercier/linux-cross-gcc | win32/bin/Lib/idlelib/idle_test/test_calltips.py | Python | bsd-3-clause | 7,325 | 0.003549 | import unittest
import idlelib.CallTips as ct
CTi = ct.CallTips() # needed for get_entity test in 2.7
import textwrap
import types
import warnings
default_tip = ''
# Test Class TC is used in multiple get_argspec test methods
# Test Class TC is used in multiple get_argspec test methods.
# Each method carries a `.tip` attribute holding the calltip string
# that CallTips is expected to produce for it.
class TC(object):
    'doc'
    tip = "(ai=None, *args)"
    def __init__(self, ai=None, *b): 'doc'
    __init__.tip = "(self, ai=None, *args)"
    def t1(self): 'doc'
    t1.tip = "(self)"
    def t2(self, ai, b=None): 'doc'
    t2.tip = "(self, ai, b=None)"
    def t3(self, ai, *args): 'doc'
    t3.tip = "(self, ai, *args)"
    def t4(self, *args): 'doc'
    t4.tip = "(self, *args)"
    def t5(self, ai, b=None, *args, **kw): 'doc'
    t5.tip = "(self, ai, b=None, *args, **kwargs)"
    def t6(no, self): 'doc'
    t6.tip = "(no, self)"
    def __call__(self, ci): 'doc'
    __call__.tip = "(self, ci)"
    # attaching .tip to wrapped methods does not work
    @classmethod
    def cm(cls, a): 'doc'
    @staticmethod
    def sm(b): 'doc'
# Module-level fixtures shared by the test classes below.
tc = TC()
signature = ct.get_arg_text # 2.7 and 3.x use different functions
class Get_signatureTest(unittest.TestCase):
# The signature function must return a string, even if blank.
# Test a variety of objects to be sure that none cause it to raise
# (quite aside from getting as correct an answer as possible).
# The tests of builtins may break if the docstrings change,
# but a red buildbot is better than a user crash (as has happened).
# For a simple mismatch, change the expected output to the actual.
def test_builtins(self):
# 2.7 puts '()\n' where 3.x does not, other minor differences
# Python class that inherits builtin methods
class List(list): "List() doc"
# Simulate builtin with no docstring for default argspec test
class SB: __call__ = None
def gtest(obj, out):
self.assertEqual(signature(obj), out)
if List.__doc__ is not None:
gtest(List, '()\n' + List.__doc__)
gtest(list.__new__,
'T.__new__(S, ...) -> a new object with type S, a subtype of T')
gtest(list.__init__,
'x.__init__(...) initializes x; see help(type(x)) for signature')
append_doc = "L.append(object) -- append object to end"
gtest(list.append, append_doc)
gtest([].append, append_doc)
gtest(List.append, append_doc)
gtest(types.MethodType, '()\ninstancemethod(function, instance, class)')
gtest(SB(), default_tip)
def test_signature_wrap(self):
# This is also a test of an old-style class
if textwrap.TextWrapper.__doc__ is not None:
self.assertEqual(signature(textwrap.TextWrapper), '''\
(width=70, initial_indent='', subsequent_indent='', expand_tabs=True,
replace_whitespace=True, fix_sentence_endings=False, break_long_words=True,
drop_whitespace=True, break_on_hyphens=True)''')
def test_docline_truncation(self):
def f(): pass
f.__doc__ = 'a'*300
self.assertEqual(signature(f), '()\n' + 'a' * (ct._MAX_COLS-3) + '...')
def test_multiline_docstring(self):
# Test fewer lines than max.
self.assertEqual(signature(list),
"()\nlist() -> new empty list\n"
"list(iterable) -> new list initialized from iterable's items")
# Test max lines and line (currently) too long.
def f():
pass
s = 'a\nb\nc\nd\n'
f.__doc__ = s + 300 * 'e' + 'f'
self.assertEqual(signature(f),
'()\n' + s + (ct._MAX_COLS - 3) * 'e' + '...')
def test_functions(self):
def t1(): 'doc'
t1.tip = "()"
def t2(a, b=None): 'doc'
t2.tip = "(a, b=None)"
def t3(a, *args): 'doc'
t3.tip = "(a, *args)"
def t4(*args): 'doc'
t4.tip = "(*args)"
def t5(a, b=None, *args, **kwds): 'doc'
t5.tip = "(a, b=None, *args, **kwargs)"
doc = '\ndoc' if t1.__doc__ is not None else ''
for func in (t1, t2, t3, t4, t5, TC):
self.assertEqual(signature(func), func.tip + doc)
def test_methods(self):
doc = '\ndoc' if TC.__doc__ is not None else ''
for meth in (TC.t1, TC.t2, TC.t3, TC.t4, TC.t5, TC.t6, TC.__call__):
self.assertEqual(signature(meth), meth.tip + doc)
self.assertEqual(signature(TC.cm), "(a)" + doc)
self.assertEqual(signature(TC.sm), "(b)" + doc)
def test_bound_methods(self):
# test that first parameter is correctly removed from argspec
doc = '\ndoc' if TC.__doc__ is not None else ''
for meth, mtip in ((tc.t1, "()"), (tc.t4, "(*args)"), (tc.t6, "(self)"),
(tc.__call__, '(ci)'), (tc, '(ci)'), (TC.cm, "(a)"),):
self.assertEqual(signature(meth), mtip + doc)
def test_starred_paramete | r(self):
# test that starred first parameter is *not* removed from argspec
class C:
def m1(*args): pass
def m2(**kwds): pass
def f1(args, kwargs, *a, **k): | pass
def f2(args, kwargs, args1, kwargs1, *a, **k): pass
c = C()
self.assertEqual(signature(C.m1), '(*args)')
self.assertEqual(signature(c.m1), '(*args)')
self.assertEqual(signature(C.m2), '(**kwargs)')
self.assertEqual(signature(c.m2), '(**kwargs)')
self.assertEqual(signature(f1), '(args, kwargs, *args1, **kwargs1)')
self.assertEqual(signature(f2),
'(args, kwargs, args1, kwargs1, *args2, **kwargs2)')
def test_no_docstring(self):
def nd(s): pass
TC.nd = nd
self.assertEqual(signature(nd), "(s)")
self.assertEqual(signature(TC.nd), "(s)")
self.assertEqual(signature(tc.nd), "()")
def test_attribute_exception(self):
class NoCall(object):
def __getattr__(self, name):
raise BaseException
class Call(NoCall):
def __call__(self, ci):
pass
for meth, mtip in ((NoCall, '()'), (Call, '()'),
(NoCall(), ''), (Call(), '(ci)')):
self.assertEqual(signature(meth), mtip)
def test_non_callables(self):
for obj in (0, 0.0, '0', b'0', [], {}):
self.assertEqual(signature(obj), '')
class Get_entityTest(unittest.TestCase):
# In 3.x, get_entity changed from 'instance method' to module function
# since 'self' not used. Use dummy instance until change 2.7 also.
def test_bad_entity(self):
self.assertIsNone(CTi.get_entity('1//0'))
def test_good_entity(self):
self.assertIs(CTi.get_entity('int'), int)
class Py2Test(unittest.TestCase):
def test_paramtuple_float(self):
# 18539: (a,b) becomes '.0' in code object; change that but not 0.0
with warnings.catch_warnings():
# Suppess message of py3 deprecation of parameter unpacking
warnings.simplefilter("ignore")
exec "def f((a,b), c=0.0): pass"
self.assertEqual(signature(f), '(<tuple>, c=0.0)')
if __name__ == '__main__':
unittest.main(verbosity=2, exit=False)
|
kajgan/stbgui | lib/python/Plugins/Extensions/GraphMultiEPG/GraphMultiEpg.py | Python | gpl-2.0 | 57,664 | 0.031614 | from skin import parseColor, parseFont, parseSize
from Components.config import config, ConfigClock, ConfigInteger, ConfigSubsection, ConfigYesNo, ConfigSelection, ConfigSelectionNumber
from Components.Pixmap import Pixmap
from Components.Button import Button
from Components.ActionMap import HelpableActionMap
from Components.HTMLComponent import HTMLComponent
from Components.GUIComponent import GUIComponent
from Components.EpgList import Rect
from Components.Sources.Event import Event
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest, MultiContentEntryPixmapAlphaBlend
from Components.TimerList import TimerList
from Components.Renderer.Picon import getPiconName
from Components.Sources.ServiceEvent import ServiceEvent
import Screens.InfoBar
from Screens.Screen import Screen
from Screens.HelpMenu import HelpableScreen
from Screens.EventView import EventViewEPGSelect
from Screens.InputBox import PinInput
from Screens.TimeDateInput import TimeDateInput
from Screens.TimerEntry import TimerEntry
from Screens.EpgSelection import EPGSelection
from Screens.TimerEdit import TimerSanityConflict, TimerEditList
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from RecordTimer import RecordTimerEntry, parseEvent, AFTEREVENT
from ServiceReference import ServiceReference, isPlayableForCur
from Tools.LoadPixmap import LoadPixmap
from Tools.Alternatives import CompareWithAlternatives
from Tools.TextBoundary import getTextBoundarySize
from Tools import Notifications
from enigma import eEPGCache, eListbox, gFont, eListboxPythonMultiContent, RT_HALIGN_LEFT, RT_HALIGN_RIGHT, RT_HALIGN_CENTER,\
RT_VALIGN_CENTER, RT_WRAP, BT_SCALE, BT_KEEP_ASPECT_RATIO, eSize, eRect, eTimer, getBestPlayableServiceReference, loadPNG, eServiceReference
from GraphMultiEpgSetup import GraphMultiEpgSetup
from time import localtime, time, strftime, mktime
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
from Tools.BoundFunction import boundFunction
# Maximum number of time marker lines drawn across the EPG grid.
MAX_TIMELINES = 6

# Persistent user settings for the graphical multi-EPG.
config.misc.graph_mepg = ConfigSubsection()
config.misc.graph_mepg.prev_time = ConfigClock(default = time())
config.misc.graph_mepg.prev_time_period = ConfigInteger(default = 120, limits = (60, 300))
# Default prime time is 20:30 local time.
now_time = [x for x in localtime()]
now_time[3] = 20
now_time[4] = 30
config.misc.graph_mepg.prime_time = ConfigClock(default = mktime(now_time))
config.misc.graph_mepg.ev_fontsize = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -12, max = 12, wraparound = True)
config.misc.graph_mepg.items_per_page = ConfigSelectionNumber(min = 3, max = 40, stepwidth = 1, default = 6, wraparound = True)
config.misc.graph_mepg.items_per_page_listscreen = ConfigSelectionNumber(min = 3, max = 60, stepwidth = 1, default = 12, wraparound = True)
config.misc.graph_mepg.default_mode = ConfigYesNo(default = False)
config.misc.graph_mepg.overjump = ConfigYesNo(default = True)
config.misc.graph_mepg.center_timeline = ConfigYesNo(default = False)
config.misc.graph_mepg.servicetitle_mode = ConfigSelection(default = "picon+servicename", choices = [
	("servicename", _("Servicename")),
	("picon", _("Picon")),
	("picon+servicename", _("Picon and servicename")),
	("number+servicename", _("Channelnumber and servicename")),
	("number+picon", _("Channelnumber and picon")),
	("number+picon+servicename", _("Channelnumber, picon and servicename")) ])
config.misc.graph_mepg.roundTo = ConfigSelection(default = "900", choices = [("900", _("%d minutes") % 15), ("1800", _("%d minutes") % 30), ("3600", _("%d minutes") % 60)])
config.misc.graph_mepg.OKButton = ConfigSelection(default = "info", choices = [("info", _("Show detailed event info")), ("zap", _("Zap to selected channel"))])
# Text alignment flag combinations offered in the setup screen.
possibleAlignmentChoices = [
	( str(RT_HALIGN_LEFT | RT_VALIGN_CENTER ) , _("left")),
	( str(RT_HALIGN_CENTER | RT_VALIGN_CENTER ) , _("centered")),
	( str(RT_HALIGN_RIGHT | RT_VALIGN_CENTER ) , _("right")),
	( str(RT_HALIGN_LEFT | RT_VALIGN_CENTER | RT_WRAP) , _("left, wrapped")),
	( str(RT_HALIGN_CENTER | RT_VALIGN_CENTER | RT_WRAP) , _("centered, wrapped")),
	( str(RT_HALIGN_RIGHT | RT_VALIGN_CENTER | RT_WRAP) , _("right, wrapped"))]
config.misc.graph_mepg.event_alignment = ConfigSelection(default = possibleAlignmentChoices[0][0], choices = possibleAlignmentChoices)
config.misc.graph_mepg.show_timelines = ConfigSelection(default = "all", choices = [("nothing", _("no")), ("all", _("all")), ("now", _("actual time only"))])
config.misc.graph_mepg.servicename_alignment = ConfigSelection(default = possibleAlignmentChoices[0][0], choices = possibleAlignmentChoices)
config.misc.graph_mepg.extension_menu = ConfigYesNo(default = False)
config.misc.graph_mepg.show_record_clocks = ConfigYesNo(default = True)
config.misc.graph_mepg.zap_blind_bouquets = ConfigYesNo(default = False)

# Start in list mode if the user chose it as default.
listscreen = config.misc.graph_mepg.default_mode.value
class EPGList(HTMLComponent, GUIComponent):
def __init__(self, selChangedCB = None, timer = None, time_epoch = 120, overjump_empty = True, epg_bouquet=None):
GUIComponent.__init__(self)
self.cur_event = None
self.cur_service = None
self.offs = 0
self.timer = timer
self.last_time = time()
self.onSelChanged = [ ]
if selChangedCB is not None:
self.onSelChanged.append(selChangedCB)
self.l = eListboxPythonMultiContent()
self.l.setBuildFunc(self.buildEntry)
self.setOverjump_Empty(overjump_empty)
self.epg_bouquet = epg_bouquet
self.epgcache = eEPGCache.getInstance()
self.clocks = [ LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock.png')),
LoadP |
m-kiuchi/ouimeaux | setup.py | Python | bsd-3-clause | 1,705 | 0.002346 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys

# Resolve paths relative to this file so the script works from any CWD.
here = lambda *a: os.path.join(os.path.dirname(__file__), *a)

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Convenience shortcut: `python setup.py publish` uploads to PyPI.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

readme = open(here('README.rst')).read()
history = open(here('HISTORY.rst')).read().replace('.. :changelog:', '')
requirements = [x.strip() for x in open(here('requirements.txt')).readlines()]

setup(
    name='ouimeaux',
    version='0.7.9',
    description='Open source control for Belkin WeMo devices',
    long_description=readme + '\n\n' + history,
    author='Ian McCracken',
    author_email='ian.mccracken@gmail.com',
    url='https://github.com/iancmcc/ouimeaux',
    packages=[
        'ouimeaux',
    ],
    package_dir={'ouimeaux': 'ouimeaux'},
    include_package_data=True,
    install_requires=requirements,
    license="BSD",
    zip_safe=False,
    keywords='ouimeaux',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Topic :: Home Automation',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
    ],
    entry_points={
        'console_scripts': [
            'wemo = ouimeaux.cli:wemo'
        ]
    },
    extras_require = {
        'server': ["flask-restful", "gevent-socketio"],
    },
    test_suite='tests',
)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.