text stringlengths 4 1.02M | meta dict |
|---|---|
"""
Module where wpadmin dashboard classes are defined.
"""
# DJANGO IMPORTS
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django import forms
# WPADMIN IMPORTS
from wpadmin.dashboard import modules
from wpadmin.dashboard.utils import get_admin_site_name
class Dashboard(object):
    """
    Base class for dashboards.

    The Dashboard class is a simple python list that has three additional
    properties:

    ``title``
        The dashboard title, by default, it is displayed above the dashboard
        in a ``h2`` tag. Default value: 'Dashboard'.

    ``template``
        The template to use to render the dashboard.
        Default value: 'admin_tools/dashboard/dashboard.html'

    ``columns``
        An integer that represents the number of columns for the dashboard.
        Default value: 2.

    If you want to customize the look of your dashboard and it's modules, you
    can declare css stylesheets and/or javascript files to include when
    rendering the dashboard (these files should be placed in your
    media path), for example::

        from admin_tools.dashboard import Dashboard

        class MyDashboard(Dashboard):
            class Media:
                css = {
                    'all': (
                        'css/mydashboard.css',
                        'css/mystyles.css',
                    ),
                }
                js = (
                    'js/mydashboard.js',
                    'js/myscript.js',
                )

    Here's an example of a custom dashboard::

        from django.core.urlresolvers import reverse
        from django.utils.translation import ugettext_lazy as _
        from admin_tools.dashboard import modules, Dashboard

        class MyDashboard(Dashboard):
            # we want a 3 columns layout
            columns = 3

            def __init__(self, **kwargs):
                super(MyDashboard, self).__init__(**kwargs)
                # append an app list module for "Applications"
                self.children.append(modules.AppList(
                    title=_('Applications'),
                    exclude=('django.contrib.*',),
                ))
                # append an app list module for "Administration"
                self.children.append(modules.AppList(
                    title=_('Administration'),
                    models=('django.contrib.*',),
                ))
                # append a recent actions module
                self.children.append(modules.RecentActions(
                    title=_('Recent Actions'),
                    limit=5
                ))
    """
    # Using Django's Media meta class
    # (Python 2 syntax; lets subclasses declare a ``class Media`` whose
    # css/js entries are collected by Django's MediaDefiningClass.)
    __metaclass__ = forms.MediaDefiningClass

    def _media(self):
        # Base implementation exposes an empty Media bundle; the metaclass
        # above merges in whatever a subclass's ``class Media`` declares.
        return forms.Media()
    media = property(_media)

    # Class-level defaults; any of them can be overridden per instance by
    # passing a keyword of the same name to __init__.
    title = _('Dashboard')
    template = 'wpadmin/dashboard/dashboard.html'
    columns = 2
    children = None  # list of dashboard modules, created lazily in __init__

    def __init__(self, **kwargs):
        # Only accept keywords that match an existing class attribute;
        # unknown keywords are silently ignored.
        for key in kwargs:
            if hasattr(self.__class__, key):
                setattr(self, key, kwargs[key])
        # Give every instance its own children list (avoid sharing the
        # class-level default between instances).
        self.children = self.children or []

    def init_with_context(self, context):
        """
        Sometimes you may need to access context or request variables to build
        your dashboard, this is what the ``init_with_context()`` method is for.
        This method is called just before the display with a
        ``django.template.RequestContext`` as unique argument, so you can
        access to all context variables and to the ``django.http.HttpRequest``.
        """
        pass

    def get_id(self):
        """
        Internal method used to distinguish different dashboards in js code.
        """
        return 'wpadmin-dashboard'
class DefaultIndexDashboard(Dashboard):
    """
    The default dashboard displayed on the admin index page.

    To change the default dashboard you'll have to type the following from the
    commandline in your project root directory::

        python manage.py customdashboard

    And then set the ``WPADMIN_INDEX_DASHBOARD`` settings variable to
    point to your custom index dashboard class.
    """
    def init_with_context(self, context):
        """Build the default module set for the admin index page."""
        admin_site = get_admin_site_name(context)

        # "Quick links" module: always expanded.
        quick_links = modules.LinkList(
            _('Quick links'),
            collapsible=False,
            children=[
                [_('Return to site'), '/'],
                [_('Change password'),
                 reverse('%s:password_change' % admin_site)],
                [_('Log out'), reverse('%s:logout' % admin_site)],
            ]
        )

        # Two app lists: user applications first, then Django's own apps.
        user_apps = modules.AppList(
            _('Applications'),
            exclude=('django.contrib.*',),
        )
        admin_apps = modules.AppList(
            _('Administration'),
            models=('django.contrib.*',),
        )

        recent_actions = modules.RecentActions(_('Recent Actions'), 5)

        news_feed = modules.Feed(
            _('Latest Django News'),
            feed_url='http://www.djangoproject.com/rss/weblog/',
            limit=5
        )

        # External support links, opened outside the admin.
        support_links = modules.LinkList(
            _('Support'),
            children=[
                {
                    'title': _('Django documentation'),
                    'url': 'http://docs.djangoproject.com/',
                    'external': True,
                },
                {
                    'title': _('Django "django-users" mailing list'),
                    'url': 'http://groups.google.com/group/django-users',
                    'external': True,
                },
                {
                    'title': _('Django irc channel'),
                    'url': 'irc://irc.freenode.net/django',
                    'external': True,
                },
            ]
        )

        # Same ordering as the historical implementation.
        self.children.extend([
            quick_links,
            user_apps,
            admin_apps,
            recent_actions,
            news_feed,
            support_links,
        ])
| {
"content_hash": "f68913c8f6db508db947889a6cabf81f",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 79,
"avg_line_length": 32.357894736842105,
"alnum_prop": 0.5517241379310345,
"repo_name": "nwaxiomatic/django-wpadmin",
"id": "7b7d26d420a22edbcbb33f78f7afef194e235f10",
"size": "6165",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wpadmin/dashboard/dashboards.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "51789"
},
{
"name": "HTML",
"bytes": "49369"
},
{
"name": "JavaScript",
"bytes": "5244"
},
{
"name": "Python",
"bytes": "75785"
},
{
"name": "Shell",
"bytes": "742"
}
],
"symlink_target": ""
} |
"""
Kay test views.
:Copyright: (c) 2009 Takashi Matsuo <tmatsuo@candit.jp> All rights reserved.
:license: BSD, see LICENSE for more details.
"""
from werkzeug import (
unescape, redirect, Response,
)
from google.appengine.ext.ndb import context
from kay.utils import (
local, render_to_response, url_for,
)
from kay.handlers import BaseHandler
from kay.i18n import lazy_gettext as _
from kay.utils.decorators import maintenance_check, cron_only
@maintenance_check
def index(request):
    """Trivial test view guarded by the maintenance_check decorator."""
    return Response("test")
# NOTE(review): the decorator argument presumably names the endpoint shown
# while in maintenance mode — confirm against maintenance_check's signature.
@maintenance_check("tests/no_decorator")
def index2(request):
    """Test view using maintenance_check with an explicit endpoint argument."""
    return Response("test")
def no_decorator(request):
    """Plain test view with no maintenance guard (control case)."""
    return Response("test")
class MaintenanceCheck(BaseHandler):
    """Class-based counterpart of ``index``: maintenance_check on a method."""
    @maintenance_check
    def get(self):
        return Response("test")
class MaintenanceCheckWithArgument(BaseHandler):
    """Class-based counterpart of ``index2``: decorator with endpoint arg."""
    @maintenance_check("tests/no_decorator")
    def get(self):
        return Response("test")
def oldpage(request):
    """Test view for the "old" page.

    Bug fix: the original returned ``Resposne("Old")`` — a typo for
    ``Response`` — which raised NameError whenever this view was hit.
    """
    return Response("Old")
def newpage(request):
    """Test view for the "new" page."""
    return Response("New")
def countup(request):
    """Increment a per-session hit counter and return it as plain text."""
    new_count = request.session.get('count', 0) + 1
    request.session['count'] = new_count
    return Response(str(new_count))
@cron_only
def cron(request):
    """Test view reachable only via cron requests (see cron_only decorator)."""
    return Response("OK")
class CronOnly(BaseHandler):
    """Class-based counterpart of ``cron``: cron_only on a handler method."""
    @cron_only
    def get(self):
        return Response("OK")
| {
"content_hash": "68fd23f5580175447ad43b64f468ac8a",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 76,
"avg_line_length": 20.125,
"alnum_prop": 0.7251552795031055,
"repo_name": "yosukesuzuki/kay-template",
"id": "bfa4ee0a30e08b68ac4de17c2fcc609d3270ff6a",
"size": "1313",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "project/kay/tests/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1704"
},
{
"name": "HTML",
"bytes": "39770"
},
{
"name": "Python",
"bytes": "757509"
}
],
"symlink_target": ""
} |
"""Popup calendar widget module."""
import time
from gi.repository import (
GObject,
Gdk,
Gtk,
)
class InvalidDate(Exception):
    """Raised when a date string cannot be parsed.

    :param str date: the offending date string, embedded in the message
    """

    def __init__(self, date):
        """Build the exception message from the given date string."""
        super(InvalidDate, self).__init__()
        self.message = 'Invalid date "%s".' % (date,)

    def __str__(self):
        """The string form of the exception is just its message."""
        return self.message
class DateEntry(Gtk.Entry):
    """Clickable text box that launches a calendar for populating with a date.

    :param parent_window: Main window of the application
    :type parent_window: class:`Gtk.Window`
    """

    # Custom signal emitted whenever the stored timestamp actually changes
    # (see check_for_signal).
    __gsignals__ = dict(
        date_changed=(GObject.SignalFlags.RUN_FIRST, None, ()))

    # Format used to display dates in the entry.
    DEFAULT_DATE_FORMAT = '%e-%b-%Y'

    # Different data formats used to try to parse a text date
    DATE_FORMATS = (
        '%Y-%m-%d %H:%M:%S',
        '%d-%m-%Y',
        '%d-%b-%Y',
        '%d-%B-%Y',
        '%d-%m-%y',
        '%d-%b-%y',
        '%d-%B-%y',
        '%Y-%m-%d',
        '%Y-%b-%d',
        '%Y-%B-%d',
        '%d/%m/%Y',
        '%d/%b/%Y',
        '%d/%B/%Y',
        '%d/%m/%y',
        '%d/%b/%y',
        '%d/%B/%y',
        '%Y/%m/%d',
        '%Y/%b/%d',
        '%Y/%B/%d'
    )

    def __init__(self, parent_window):
        """Set up widget."""
        super(DateEntry, self).__init__()
        self.connect('focus_out_event', self.on_focus_out_event)
        self.connect('button_press_event', self.on_button_press_event)
        # <Enter> moves focus to the next widget instead of staying put.
        self.connect('activate', lambda widget: widget.get_toplevel()
                     .child_focus(Gtk.DirectionType.TAB_FORWARD))
        assert parent_window, 'Parent window needed'
        self.parent_window = parent_window
        self.set_width_chars(11)
        # True while the popup calendar is open; used to suppress the error
        # dialog that on_focus_out_event would otherwise show.
        self.calendar_dialog = False
        # Currently-held date as a time.struct_time, or None when empty.
        self.timestamp = None

    def popup_calendar(self):
        """Display the calendar dialog."""
        self.calendar_dialog = True
        dialog = Gtk.Dialog(
            None, self.parent_window,
            Gtk.DialogFlags.DESTROY_WITH_PARENT | Gtk.DialogFlags.MODAL,
            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
             Gtk.STOCK_OK, Gtk.ResponseType.OK)
        )
        # self.dialog.set_position(Gtk.WindowPosition.MOUSE)
        calendar = Gtk.Calendar()
        dialog.vbox.pack_start(calendar, expand=True, fill=True, padding=0)
        dialog.set_decorated(False)
        # Custom response code for the extra "Clear" button.
        response_clear = 99
        clear_btn = dialog.add_button('Clear', response_clear)
        action_area = dialog.get_action_area()
        action_area.reorder_child(clear_btn, 0)
        # "Clear" only makes sense when a date is currently set.
        clear_btn.set_sensitive(bool(self.get_date()))
        calendar.connect('day_selected_double_click',
                         self.on_day_selected, dialog)
        timestamp = self.timestamp
        if timestamp is None:
            # No stored date: preselect today.
            timestamp = time.localtime()
        if timestamp:
            # struct_time indices: [0]=year, [1]=month (1-12), [2]=day;
            # Gtk.Calendar months are 0-based, hence the "- 1".
            calendar.select_month(timestamp[1] - 1, timestamp[0])
            calendar.select_day(timestamp[2])
        dialog.show_all()
        result = dialog.run()
        if result == Gtk.ResponseType.OK:
            # on_day_selected destroys the dialog itself.
            self.on_day_selected(calendar, dialog)
        elif result == response_clear:
            self.set_date(None)
            dialog.destroy()
        else:
            dialog.destroy()
        self.calendar_dialog = False

    def set_today(self):
        """Set widget to today's date."""
        # round the current time into a date
        timestamp = time.strptime(
            time.strftime('%d-%b-%Y', time.localtime()), '%d-%b-%Y')
        self.check_for_signal(timestamp)
        super(DateEntry, self).set_text(self.get_date())

    def set_date(self, date, date_format=None):
        """Set the date in the widget.

        :param str date: date string
        :param str date_format: date format string
        :raises ValueError: if format is None
        """
        # Empty/None input clears the widget (and notifies listeners).
        if date is None or len(date.strip()) == 0:
            self.check_for_signal(None)
            super(DateEntry, self).set_text('')
            return
        if date_format is None:
            # Try all known formats; check_formats also updates the text
            # and emits the signal on success.
            date_format = self.check_formats(date)
        else:
            self.timestamp = time.strptime(date, date_format)
            super(DateEntry, self).set_text(self.get_date())
        if date_format is None:
            raise ValueError('Unknown date format - %s' % (date))

    def get_date(self, date_format=None):
        """Get the date currently in widget.

        :param str date_format: date format string
        :return: formatted date time
        :rtype: str
        """
        # check if the widget has the focus
        if self.is_focus():
            # the widget has the focus
            # we need to check if the current text is OK
            text = super(DateEntry, self).get_text()
            if len(text) > 0:
                timestamp = self.check_formats(text)
                if not timestamp:
                    raise InvalidDate(text)
            else:
                return None
        if self.timestamp is None:
            return None
        if date_format is None:
            date_format = self.DEFAULT_DATE_FORMAT
        return time.strftime(date_format, self.timestamp)

    def check_for_signal(self, current):
        """Emit date changed signal if changed."""
        if current != self.timestamp:
            self.timestamp = current
            self.emit('date_changed')

    def check_formats(self, text):
        """Try to parse *text* with all known formats.

        On success the entry text is normalised to DEFAULT_DATE_FORMAT,
        the date_changed signal is emitted if needed, and the parsed
        struct_time is returned; otherwise returns None.

        :param str text: the text of the date string
        """
        # try multiple formats for converting to a timestamp;
        # the display format is tried first as the common case.
        try:
            timestamp = time.strptime(text, self.DEFAULT_DATE_FORMAT)
        except ValueError:
            # Ignore parsing errors
            pass
        else:
            super(DateEntry, self).set_text(
                time.strftime(self.DEFAULT_DATE_FORMAT, timestamp))
            self.check_for_signal(timestamp)
            return timestamp
        for date_format in self.DATE_FORMATS:
            try:
                timestamp = time.strptime(text, date_format)
            except ValueError:
                # Ignore parsing errors
                pass
            else:
                super(DateEntry, self).set_text(
                    time.strftime(self.DEFAULT_DATE_FORMAT, timestamp))
                self.check_for_signal(timestamp)
                return timestamp
        return None

    def set_text(self, _text):
        """Disallow use of ``set_text``.

        :raises AttributeError: if method is used
        """
        raise AttributeError('Use set_date()')

    def get_text(self):
        """Get the text currently in the date widget."""
        return self.get_date()

    def clear_date(self):
        """Clear the date widget."""
        # Bypass the overridden set_text above; note this does NOT emit
        # date_changed (unlike set_date(None)).
        super(DateEntry, self).set_text('')
        self.timestamp = None

    ###
    # Signal handler callbacks
    ###
    def on_button_press_event(self, _widget, event):
        """Signal handler to launch calendar widget.

        :param widget: the widget that called the event
        :type widget: class:`Gtk.Widget`
        :param event: button press event
        :type event: :class:`Gdk.Event`
        """
        if (event.button == Gdk.BUTTON_PRIMARY and
                event.type == Gdk.EventType.BUTTON_PRESS):
            text = super(DateEntry, self).get_text()
            if text is None or len(text.strip()) == 0:
                # we don't want to emit a signal as the popup will do so when
                # needed
                self.timestamp = None
            self.popup_calendar()
            return True
        else:
            return False

    def on_focus_out_event(self, _widget, _event):
        """Validate date when dialog gets out of focus.

        Display error dialog if it's not possible to parse date.

        :param _widget: The widget that emitted the focus_out_event signal
        :type _widget: Gtk.Entry
        :param _event: The event that triggered the signal
        :type _event: Gdk.Event
        """
        text = super(DateEntry, self).get_text()
        if text is None or len(text.strip()) == 0:
            self.check_for_signal(None)
            return
        timestamp = self.check_formats(text)
        # Don't show the error dialog while the calendar popup is open.
        if timestamp is None and not self.calendar_dialog:
            dialog = Gtk.MessageDialog(
                self.parent_window,
                Gtk.DialogFlags.DESTROY_WITH_PARENT | Gtk.DialogFlags.MODAL,
                Gtk.MessageType.ERROR,
                Gtk.ButtonsType.CANCEL,
                'Unknown date format\n%s' % (text))
            dialog.connect('response', self.on_dialog_response)
            dialog.show()

    def on_day_selected(self, widget, dialog):
        """Update text when day is selected or OK button in dialog is clicked.

        :param widget: The calendar in which the date was selected
        :type widget: Gtk.Calendar
        :param dialog: A dialog that is used to display the calendar widget
        :type dialog: Gtk.Dialog
        """
        # Gtk.Calendar months are 0-based; strptime wants 1-based.
        (year, month, day) = widget.get_date()
        current = time.strptime(
            '%d-%d-%d' % (year, month + 1, day), '%Y-%m-%d')
        super(DateEntry, self).set_text(
            time.strftime(self.DEFAULT_DATE_FORMAT, current))
        self.check_for_signal(current)
        dialog.destroy()

    def on_dialog_response(self, dialog, _response):
        """Close error message dialog and grab focus on main one.

        :param dialog: The dialog used to display an error message
        :type dialog: Gtk.Dialog
        :param _response: The dialog response code
        :type _response: Gtk.ResponseType.*
        """
        # Defer grabbing focus until after the dialog has been destroyed.
        GObject.timeout_add(100, self.grab_focus)
        dialog.destroy()
| {
"content_hash": "63a3045954e5edb025777a9802752a00",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 78,
"avg_line_length": 32.64026402640264,
"alnum_prop": 0.560262891809909,
"repo_name": "jcollado/datagrid-gtk3",
"id": "12b88cc82d29ddb1ff6d5756009b5204870dd35c",
"size": "9890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datagrid_gtk3/ui/popupcal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2312"
},
{
"name": "Python",
"bytes": "213841"
}
],
"symlink_target": ""
} |
"""
History Handler
"""
from base import BaseHandler
from views import history_tpl as tpl
from models import *
__author__ = "Luke Southam <luke@devthe.com>"
__copyright__ = "Copyright 2012, DEVTHE.COM LIMITED"
__license__ = "The BSD 3-Clause License"
__status__ = "Development"
class Handler(BaseHandler):
    """Render the edit history of a wiki page, newest revision first."""

    def get(self, name=None):
        """Serve the history listing for page *name*.

        Falls back to FrontPage when no name is given, and redirects to
        the page URL itself when the page does not exist.
        """
        if not name:
            return self.redirect("/_history/FrontPage")
        page = get_page(name)
        if not page:
            return self.redirect("/" + name)
        revisions = get_edits(page)
        numbered = [(revision, index)
                    for index, revision in enumerate(revisions)]
        numbered.reverse()
        self.write(tpl.render(page=page, edits=numbered, user=self.user))
"content_hash": "f699d14e2603c81122632e0887ddea20",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 70,
"avg_line_length": 29.083333333333332,
"alnum_prop": 0.6318051575931232,
"repo_name": "o4dev/PythonicWiki",
"id": "acf15ed33487e9dec3e1a455debf432a7101fcf5",
"size": "2294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/handlers/history.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "6756"
},
{
"name": "Python",
"bytes": "120952"
},
{
"name": "Shell",
"bytes": "2898"
}
],
"symlink_target": ""
} |
from paillier import *
class Voter(object):
    """A registered voter holding a fresh Paillier keypair.

    Equality between voters is decided purely by voter ID.
    """

    def __init__(self, name, voterID):
        self.name = name
        self.voterID = voterID
        self.voted = False
        # Every voter gets their own 128-bit Paillier keypair.
        private, public = generate_keypair(128)
        self.private_key = private
        self.public_key = public

    def __repr__(self):
        return "Voter('%s',%s,voted:%s)" % (
            self.name, self.voterID, str(self.voted))

    def __eq__(self, other):
        return other.voterID == self.voterID
| {
"content_hash": "c42c57b36f87c3f792f0c583a46b6fb0",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 86,
"avg_line_length": 27.294117647058822,
"alnum_prop": 0.5668103448275862,
"repo_name": "kelleyb/CryptoProject",
"id": "24260e9be3b7b9cb484d8a0dc4070420a1777f0b",
"size": "464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20980"
}
],
"symlink_target": ""
} |
import scrapy
from lfcs_scraping.items import PersonItem
import re
class CurrentLfcs(scrapy.Spider):
    """Spider that scrapes the names of current LFCS members.

    Yields one :class:`PersonItem` per person found on the LFCS "people"
    page, with the person's first name(s), last name and the role heading
    the name appears under.
    """
    name = "currentlfcs"
    #allowed_domains = ["http://wcms.inf.ed.ac.uk/"]
    start_urls = [
        #"http://web.archive.org/web/20110319001651/http://wcms.inf.ed.ac.uk/lfcs/people"
        "http://wcms.inf.ed.ac.uk/lfcs/people"
    ]

    def parse(self, response):
        # Names appear under two different markup patterns on the page.
        for node in response.xpath('//a/strong/u | //a/strong[not(u)] | //strong/a'):
            # Bug fix: the original wrapped extract()[0] in a bare
            # ``except: pass`` and then tested ``m`` — on a text-less node
            # (false positive) ``m`` was either unbound (NameError on the
            # first node) or stale from the previous iteration, which could
            # yield a duplicate item.  Skip text-less nodes explicitly.
            texts = node.xpath('text()').extract()
            if not texts:
                continue
            # Split "First [Middle ...] Last" on the final space.
            m = re.search(r'(.+) (\S+)', texts[0])
            if m:
                item = PersonItem()
                #item['url'] = node.xpath('@href | ../@href | ../../@href').extract()
                item['role'] = node.xpath('preceding::h3[1]/text() | preceding::h3[1]/a/text()').extract()
                item['last'] = m.group(2)
                item['first'] = m.group(1)
                yield item
            # if no match then it isn't a name: nothing to yield
| {
"content_hash": "d133f40e7cb06981864f082ae27c5da5",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 106,
"avg_line_length": 34.82857142857143,
"alnum_prop": 0.4946677604593929,
"repo_name": "AWilcke/LFCS-History",
"id": "e60cd418db6502e3e11938391bc58e3086fef1df",
"size": "1219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scraping/lfcs_scraping/spiders/basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "790"
},
{
"name": "HTML",
"bytes": "87928"
},
{
"name": "JavaScript",
"bytes": "9236"
},
{
"name": "Python",
"bytes": "99394"
}
],
"symlink_target": ""
} |
"""
Renaming domains testcases
List of tested functions :
--------------------------
- [renameDomain] function
Test cases :
------------
- Nominal cases
- Renaming errors
- Special cases
"""
import os
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of Domains - Rename
class TestCases(PfwTestCase):
    # NOTE(review): every test follows the same pattern — snapshot the
    # domain listing to a temp file ("f_init_domains"), perform the
    # operation, re-list into "f_domains", and compare the two files line
    # by line.  The snapshot/compare steps are duplicated verbatim across
    # tests and could be factored into helpers.

    def setUp(self):
        # Tuning mode must be on to create/rename domains.
        self.pfw.sendCmd("setTuningMode", "on")
        self.domain_name = "domain_white"
        self.new_domain_name = "domain_black"
        self.renaming_iterations = 5

    def tearDown(self):
        self.pfw.sendCmd("setTuningMode", "off")

    def test_Nominal_Case(self):
        """
        Nominal case
        ------------
            Test case description :
            ~~~~~~~~~~~~~~~~~~~~~~~
                - Renaming a domain
            Tested commands :
            ~~~~~~~~~~~~~~~~~
                - [renameDomain] function
                - [createDomain] function
                - [listDomains] function
            Expected result :
            ~~~~~~~~~~~~~~~~~
                - domains correctly renamed
        """
        log.D(self.test_Nominal_Case.__doc__)
        # New domain creation
        log.I("New domain creation : %s" % (self.domain_name))
        log.I("command [createDomain]" )
        out, err = self.pfw.sendCmd("createDomain",self.domain_name, "")
        assert out == "Done", out
        assert err == None, "ERROR : command [createDomain] - ERROR while creating domain %s" % (self.domain_name)
        log.I("command [createDomain] correctly executed")
        log.I("Domain %s created" % (self.domain_name))
        # Initial domains listing using "listDomains" command
        log.I("Creating a domains listing backup")
        log.I("command [listDomains]")
        out, err = self.pfw.sendCmd("listDomains","","")
        assert err == None, "INFO : command [listDomains] - ERROR while listing domains"
        log.I("command [listDomains] correctly executed")
        # Saving initial domains names
        f_init_domains = open("f_init_domains", "w")
        f_init_domains.write(out)
        f_init_domains.close()
        log.I("Domains listing backup created")
        # Checking domains number
        f_init_domains = open("f_init_domains", "r")
        domains_nbr = 0
        line=f_init_domains.readline()
        while line!="":
            line=f_init_domains.readline()
            domains_nbr+=1
        f_init_domains.close()
        os.remove("f_init_domains")
        log.I("%s domains names saved" % domains_nbr)
        # Domain renaming iterations
        log.I("Checking domain renaming - %s iterations" % self.renaming_iterations)
        old_name = self.domain_name
        new_name = self.new_domain_name
        for iteration in range (self.renaming_iterations):
            log.I("Iteration %s" % (iteration))
            log.I("Renaming domain %s to %s" % (old_name,new_name))
            log.I("command [renameDomain]")
            out, err = self.pfw.sendCmd("renameDomain",old_name,new_name)
            assert out == "Done", out
            assert err == None, "ERROR : command [renameDomain] - ERROR while renaming domain %s" % (old_name)
            # Domains listing using "listDomains" command
            log.I("Creating a domains listing")
            log.I("command [listDomains]")
            out, err = self.pfw.sendCmd("listDomains","","")
            assert err == None, "ERROR : command [listDomains] - ERROR while listing domains"
            log.I("command [listDomains] correctly executed")
            # Saving domains names
            f_domains = open("f_domains", "w")
            f_domains.write(out)
            f_domains.close()
            log.I("Domains listing created")
            # Checking renaming
            log.I("Checking that renaming is correct in domains listing")
            f_domains = open("f_domains", "r")
            for line in range(domains_nbr):
                # The freshly created domain is the last line of the listing.
                if (line >= (domains_nbr - 1)):
                    domain_renamed = f_domains.readline().strip('\n')
                    assert domain_renamed==new_name, "ERROR : Error while renaming domain %s" % (old_name)
                else:
                    f_domains.readline()
            f_domains.close()
            log.I("New domain name %s conform to expected value" % (new_name))
            # Swap names so the next iteration renames the domain back.
            temp = old_name
            old_name = new_name
            new_name = temp
        os.remove("f_domains")

    def test_Renaming_Error(self):
        """
        Renaming errors
        ---------------
            Test case description :
            ~~~~~~~~~~~~~~~~~~~~~~~
                - renaming a non existent domain
                - renaming a domain with an already existent domain name
            Tested commands :
            ~~~~~~~~~~~~~~~~~
                - [renameDomain] function
                - [createDomain] function
                - [renameDomain] function
            Expected result :
            ~~~~~~~~~~~~~~~~~
                - error detected
                - domains names remain unchanged
        """
        log.D(self.test_Renaming_Error.__doc__)
        # New domains creation
        log.I("New domain creation : %s" % (self.domain_name))
        log.I("command [createDomain]")
        out, err = self.pfw.sendCmd("createDomain",self.domain_name, "")
        assert out == "Done", out
        assert err == None, "ERROR : command [createDomain] - Error while creating domain %s" % (self.domain_name)
        log.I("command [createDomain] - correctly executed")
        log.I("command Domain %s created" % (self.domain_name))
        # Initial domains listing using "listDomains" command
        log.I("Creating a domains listing backup")
        log.I("command [listDomains]")
        out, err = self.pfw.sendCmd("listDomains","","")
        assert err == None, "INFO : command [listDomains] - Error while listing domains"
        log.I("command [listDomains] correctly executed")
        # Saving initial domains names
        f_init_domains = open("f_init_domains", "w")
        f_init_domains.write(out)
        f_init_domains.close()
        log.I("Domains listing backup created")
        # Checking domains number
        f_init_domains = open("f_init_domains", "r")
        domains_nbr = 0
        line=f_init_domains.readline()
        while line!="":
            line=f_init_domains.readline()
            domains_nbr+=1
        f_init_domains.close()
        log.I("%s domains names saved" % domains_nbr)
        # Domain renaming error : renamed domain does not exist
        log.I("Renaming a non existent domain")
        log.I("Renaming domain FAKE to NEW_NAME")
        log.I("command [renameDomain]")
        out, err = self.pfw.sendCmd("renameDomain",'FAKE','NEW_NAME', expectSuccess=False)
        assert out != "Done", out
        assert err == None, "ERROR : command [renameDomain] - Error while renaming domain"
        log.I("command [renameDomain] - renaming error correctly detected")
        # Domains listing using "listDomains" command
        log.I("Creating a domains listing")
        log.I("command [listDomains]")
        out, err = self.pfw.sendCmd("listDomains","","")
        assert err == None, "ERROR : command [listDomains] - Error while listing domains"
        log.I("command [listDomains] correctly executed")
        # Saving domains names
        f_domains = open("f_domains", "w")
        f_domains.write(out)
        f_domains.close()
        log.I("Domains listing created")
        # Checking domains names integrity
        log.I("Checking domains names integrity")
        f_domains = open("f_domains", "r")
        f_init_domains = open("f_init_domains", "r")
        for line in range(domains_nbr):
            domain_name = f_domains.readline().strip('\n')
            domain_backup_name = f_init_domains.readline().strip('\n')
            assert domain_name==domain_backup_name, "ERROR : Domain name %s affected by the renaming error" % (domain_backup_name)
        f_domains.close()
        f_init_domains.close()
        log.I("Domains names not affected by the renaming error")
        os.remove("f_domains")
        # Domain renaming error : renaming a domain with an already existent domain name
        log.I("renaming a domain with an already existent domain name")
        log.I("Renaming domain %s to %s" % (self.domain_name,self.new_domain_name) )
        log.I("command [renameDomain]")
        out, err = self.pfw.sendCmd("renameDomain",self.domain_name,self.new_domain_name, expectSuccess=False)
        assert out != "Done", out
        assert err == None, "INFO : command [renameDomain] - Error while renaming domain"
        log.I("command [renameDomain] - renaming error correctly detected")
        # Domains listing using "listDomains" command
        log.I("Creating a domains listing")
        log.I("command [listDomains]")
        out, err = self.pfw.sendCmd("listDomains","","")
        assert err == None, "ERROR : command [listDomains] - Error while listing domains"
        log.I("command [listDomains] correctly executed")
        # Saving domains names
        f_domains = open("f_domains", "w")
        f_domains.write(out)
        f_domains.close()
        log.I("Domains listing created")
        # Checking domains names integrity
        log.I("Checking domains names integrity")
        f_domains = open("f_domains", "r")
        f_init_domains = open("f_init_domains", "r")
        for line in range(domains_nbr):
            domain_name = f_domains.readline().strip('\n')
            domain_backup_name = f_init_domains.readline().strip('\n')
            assert domain_name==domain_backup_name, "ERROR : domain name %s affected by the renaming error" % (domain_backup_name)
        f_domains.close()
        f_init_domains.close()
        log.I("Domains names not affected by the renaming error")
        os.remove("f_domains")
        os.remove("f_init_domains")

    def test_Special_Cases(self):
        """
        Special cases
        -------------
            Test case description :
            ~~~~~~~~~~~~~~~~~~~~~~~
                - renaming a domain with its own name
            Tested commands :
            ~~~~~~~~~~~~~~~~~
                - [renameDomain] function
                - [createDomain] function
                - [listDomains] function
            Expected result :
            ~~~~~~~~~~~~~~~~~
                - no error
                - domains names remain unchanged
        """
        log.D(self.test_Special_Cases.__doc__)
        # New domain creation
        # Already created in previous test
        # Initial domains listing using "listDomains" command
        log.I("Creating a domains listing backup")
        log.I("command [listDomains]")
        out, err = self.pfw.sendCmd("listDomains","","")
        assert err == None, "ERROR : command [listDomains] - Error while listing domains"
        log.I("command [listDomains] correctly executed")
        # Saving initial domains names
        f_init_domains = open("f_init_domains", "w")
        f_init_domains.write(out)
        f_init_domains.close()
        log.I("Domains listing backup created")
        # Checking domains number
        f_init_domains = open("f_init_domains", "r")
        domains_nbr = 0
        line=f_init_domains.readline()
        while line!="":
            line=f_init_domains.readline()
            domains_nbr+=1
        f_init_domains.close()
        log.I("%s domains names saved" % domains_nbr)
        # Domain renaming error : renaming a domain with its own name
        log.I("renaming a domain with its own name")
        log.I("Renaming domain %s to %s" % (self.domain_name,self.domain_name))
        log.I("command [renameDomain]")
        out, err = self.pfw.sendCmd("renameDomain",self.domain_name,self.domain_name)
        assert out == "Done", out
        assert err == None, "ERROR : command [renameDomain] - Error while renaming domain"
        log.I("command [renameDomain] correctly executed")
        # Domains listing using "listDomains" command
        log.I("Creating a domains listing")
        log.I("command [listDomains]")
        out, err = self.pfw.sendCmd("listDomains","","")
        assert err == None, "ERROR : command [listDomains] - Error while listing domains"
        log.I("command [listDomains] correctly executed")
        # Saving domains names
        f_domains = open("f_domains", "w")
        f_domains.write(out)
        f_domains.close()
        log.I("Domains listing created")
        # Checking domains names integrity
        log.I("Checking domains names integrity")
        f_domains = open("f_domains", "r")
        f_init_domains = open("f_init_domains", "r")
        for line in range(domains_nbr):
            domain_name = f_domains.readline().strip('\n')
            domain_backup_name = f_init_domains.readline().strip('\n')
            assert domain_name==domain_backup_name, "ERROR : domain name %s affected by the renaming" % (domain_backup_name)
        f_domains.close()
        f_init_domains.close()
        log.I("Domains names not affected by the renaming")
        os.remove("f_domains")
        os.remove("f_init_domains")
| {
"content_hash": "2fe364dd6d1014009e9319506ca4fe0b",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 130,
"avg_line_length": 43.14983713355049,
"alnum_prop": 0.5726579602928965,
"repo_name": "dawagner/parameter-framework",
"id": "2ee54148e875149324607adf8c174719e2ecca67",
"size": "14812",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "test/functional-tests-legacy/PfwTestCase/Domains/tDomain_rename.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "15260"
},
{
"name": "C++",
"bytes": "1342192"
},
{
"name": "CMake",
"bytes": "83634"
},
{
"name": "Python",
"bytes": "666061"
}
],
"symlink_target": ""
} |
"""Reader for a single worksheet."""
# Python stdlib imports
try:
from xml.etree.cElementTree import iterparse
except ImportError:
from xml.etree.ElementTree import iterparse
from itertools import ifilter
from StringIO import StringIO
# package imports
from openpyxl.cell import Cell, coordinate_from_string
from openpyxl.worksheet import Worksheet
def _get_xml_iter(xml_source):
if not hasattr(xml_source, 'name'):
return StringIO(xml_source)
else:
xml_source.seek(0)
return xml_source
def read_dimension(xml_source):
    """Read the worksheet dimensions from raw sheet XML.

    Returns ``(min_col, min_row, max_col, max_row)`` parsed from the
    sheet's ``<dimension ref="...">`` element, or None when no dimension
    element is found.
    """
    source = _get_xml_iter(xml_source)
    it = iterparse(source)
    for event, element in it:
        if element.tag == '{http://schemas.openxmlformats.org/spreadsheetml/2006/main}dimension':
            ref = element.get('ref')
            # ref is either a range ("A1:F5") or a single cell ("A1").
            if ':' in ref:
                min_range, max_range = ref.split(':')
            else:
                min_range = max_range = ref
            min_col, min_row = coordinate_from_string(min_range)
            max_col, max_row = coordinate_from_string(max_range)
            return min_col, min_row, max_col, max_row
        else:
            # Free memory held by elements we are not interested in.
            element.clear()
    return None
def filter_cells(item):
    """Predicate for iterparse events: keep only ``<c>`` (cell) elements.

    *item* is the ``(event, element)`` pair produced by ``iterparse``.
    The original used tuple parameter unpacking in the signature
    (``def filter_cells((event, element))``), which is Python 2-only
    (removed by PEP 3113); unpacking inside the body is equivalent and
    forward-compatible — the call pattern is unchanged.
    """
    _event, element = item
    return element.tag == '{http://schemas.openxmlformats.org/spreadsheetml/2006/main}c'
def fast_parse(ws, xml_source, string_table, style_table):
    """Populate worksheet *ws* from raw sheet XML.

    Iterates only over ``<c>`` (cell) elements, resolving shared-string
    indices through *string_table* and style ids through *style_table*.
    """
    source = _get_xml_iter(xml_source)
    it = iterparse(source)
    for event, element in ifilter(filter_cells, it):
        value = element.findtext('{http://schemas.openxmlformats.org/spreadsheetml/2006/main}v')
        if value is not None:
            coordinate = element.get('r')
            data_type = element.get('t', 'n')  # 'n' (numeric) is the default type
            style_id = element.get('s')
            if data_type == Cell.TYPE_STRING:
                # Shared-string cells store an index into the string table.
                value = string_table.get(int(value))
            ws.cell(coordinate).value = value
            if style_id is not None:
                ws._styles[coordinate] = style_table.get(int(style_id))
        # to avoid memory exhaustion, clear the item after use
        element.clear()
from openpyxl.reader.iter_worksheet import IterableWorksheet
def read_worksheet(xml_source, parent, preset_title, string_table,
                   style_table, workbook_name = None, sheet_codename = None):
    """Build a worksheet object from raw worksheet XML.

    When both *workbook_name* and *sheet_codename* are supplied, cells
    are exposed lazily through an IterableWorksheet; otherwise a fully
    populated Worksheet is parsed eagerly via fast_parse().
    """
    if workbook_name and sheet_codename:
        return IterableWorksheet(parent, preset_title, workbook_name,
                                 sheet_codename, xml_source)
    ws = Worksheet(parent, preset_title)
    fast_parse(ws, xml_source, string_table, style_table)
    return ws
| {
"content_hash": "e497afd3cc31a48f1907a06b1c2e96a9",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 97,
"avg_line_length": 29.021978021978022,
"alnum_prop": 0.6262779250283983,
"repo_name": "chronossc/openpyxl",
"id": "dd96ac896b8a2cef0a2d6ff07d7b5aa94d00338f",
"size": "3854",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openpyxl/reader/worksheet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "303430"
},
{
"name": "Shell",
"bytes": "4269"
}
],
"symlink_target": ""
} |
"""
Table of command codes is in Section 4.1, page 58.
"""
import re
from decimal import Decimal
def parse_command(s):
    """Dispatch a raw Gerber command string to the matching Command class
    and return the parsed command object."""
    if s[0] == '%':
        # Extended commands carry a two-letter code right after the '%'.
        return extended_commands[s[1:3]].from_string(s)
    # Ordinary commands are identified by the function code at the end
    # of the string (e.g. 'D01', 'G36', 'M02').
    code = s[-4:-1]
    if code in normal_commands:
        return normal_commands[code].from_string(s)
    # Anything unrecognised is a set-aperture command (Dnn*).
    return SetApertureCommand.from_string(s)
class Command(object):
    """
    Base class for Gerber commands.

    Subclasses implement from_string/to_string for parsing and
    serialization, and execute() to apply the command to plotter state.
    """
    # Commands taken from deprecated sections of the spec override this.
    deprecated = False

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self.__dict__)

    @classmethod
    def from_string(cls, s):
        # Default for argument-less commands; subclasses with payloads
        # override this to parse *s*.
        return cls()
class UnitCommand(Command):
    """
    Command Code MO - Extended
    Section 4.10, p98

    Selects the measurement unit ('IN' or 'MM') for coordinate data.
    """
    def __init__(self, unit):
        self.unit = unit

    @classmethod
    def from_string(cls, s):
        # The two-character unit name follows the '%MO' prefix.
        unit = s[3:5]
        assert unit in ('IN', 'MM'), "invalid unit %r" % unit
        return cls(unit=unit)

    def to_string(self):
        return '%MO' + self.unit + '*%'

    def execute(self, state, plane):
        state.set_unit(self.unit)
class CoordinateFormatCommand(Command):
    """
    Command Code FS - Extended
    Section 4.9, p96

    Declares how many integer and fractional digits make up each
    coordinate value. Only the symmetric leading-absolute form
    (%FSLAXnmYnm*%) is supported; X and Y formats must match.
    """
    def __init__(self, integer_digits, fractional_digits):
        self.integer_digits = integer_digits
        self.fractional_digits = fractional_digits

    @classmethod
    def from_string(cls, s):
        assert s.startswith('%FSLAX')
        xformat = s[6:8]
        yformat = s[9:11]
        assert xformat == yformat
        digits, decimals = int(xformat[0]), int(xformat[1])
        return cls(integer_digits=digits, fractional_digits=decimals)

    def to_string(self):
        pair = '%d%d' % (self.integer_digits, self.fractional_digits)
        return '%FSLAX' + pair + 'Y' + pair + '*%'

    def execute(self, state, plane):
        state.set_coordinate_format(integer_digits=self.integer_digits,
                                    fractional_digits=self.fractional_digits)
class OffsetCommand(Command):
    """
    Command Code OF - Extended, Deprecated
    Section 7.1.7, p163
    Syntax is like %OFA1.2B-1.0*%
    """
    deprecated = True

    def __init__(self, offset_a, offset_b):
        self.offset_a = offset_a
        self.offset_b = offset_b

    @classmethod
    def from_string(cls, s):
        assert s.startswith('%OFA')
        # Strip the '%OFA' prefix and '*%' suffix, leaving 'aaaBbbb'.
        raw_a, raw_b = s[4:-2].split('B')
        return cls(offset_a=Decimal(raw_a), offset_b=Decimal(raw_b))

    def to_string(self):
        return '%OFA' + str(self.offset_a) + 'B' + str(self.offset_b) + '*%'

    def execute(self, state, plane):
        # XXX offsets are parsed but not yet applied to the plane
        pass
class ImagePolarityCommand(Command):
    """
    Command Code IP - Extended, Deprecated
    Section 7.1.3, p160

    Sets the polarity of the whole image: 'POS' or 'NEG'.
    """
    deprecated = True

    def __init__(self, polarity):
        self.polarity = polarity

    @classmethod
    def from_string(cls, s):
        # Polarity name sits between the '%IP' prefix and '*%' suffix.
        polarity = s[3:-2]
        assert polarity in ('POS', 'NEG')
        return cls(polarity=polarity)

    def to_string(self):
        return '%IP' + self.polarity + '*%'

    def execute(self, state, plane):
        # XXX not implemented yet
        pass
class LevelPolarityCommand(Command):
    """
    Command Code LP - Extended
    Section 4.15.1, p132
    Syntax is like %LPD*% or %LPC*%
    C = clear
    D = dark
    """
    def __init__(self, polarity):
        self.polarity = polarity

    @classmethod
    def from_string(cls, s):
        # Single-character polarity code after the '%LP' prefix.
        polarity = s[3]
        assert polarity in ('D', 'C')
        return cls(polarity=polarity)

    def to_string(self):
        return '%LP' + self.polarity + '*%'

    def execute(self, state, plane):
        # Translate the one-letter code into the state's vocabulary.
        state.set_level_polarity('dark' if self.polarity == 'D' else 'clear')
class MacroApertureCommand(Command):
    """
    Command Code AM - Extended
    Section 4.13.1 - p106

    Only the macro template name is parsed; the raw command text is
    retained so serialization round-trips losslessly.
    """
    # XXX This is missing a lot of stuff
    def __init__(self, template_name, s):
        self.template_name = template_name
        self.s = s

    @classmethod
    def from_string(cls, s):
        assert s.startswith('%AM')
        # The template name is everything between '%AM' and the first '*'.
        first_block = s.split('*', 1)[0]
        return cls(template_name=first_block[3:], s=s)

    def to_string(self):
        # Emit the original text verbatim.
        return self.s

    def execute(self, state, plane):
        # XXX not implemented yet
        pass
class ApertureDefinitionCommand(Command):
    """
    Command Code AD - Extended
    Section 4.11.1 p p99
    Syntax is complex, return to this later

    Aperture definitions can either include relevant information directly, or
    can reference a named aperture macro created by a MacroApertureCommand.

    Only the aperture number and template name are parsed; the raw
    command text is retained so serialization round-trips losslessly.
    """
    # XXX This is missing a lot of stuff
    def __init__(self, aperture_number, template_name, s):
        self.aperture_number = aperture_number
        self.template_name = template_name
        self.s = s

    @classmethod
    def from_string(cls, s):
        assert s.startswith('%ADD')
        content = s[4:-2]
        # Raw string: '\d' in a plain literal is an invalid escape
        # sequence (DeprecationWarning on modern Python).
        m = re.match(r'^(\d+)([a-zA-Z_.]+)', content)
        aperture_number = int(m.group(1))
        template_name = m.group(2)
        return cls(aperture_number=aperture_number,
                   template_name=template_name,
                   s=s)

    def to_string(self):
        # Emit the original text verbatim.
        return self.s

    def execute(self, state, plane):
        # XXX not implemented yet
        pass
class SetApertureCommand(Command):
    """
    Command Code Dnnnn
    Section 4.3.1, p64
    Syntax is like Dnnn*

    Selects the current aperture; codes below 10 are reserved for the
    D01/D02/D03 operations, hence the assertion.
    """
    def __init__(self, aperture_number):
        assert aperture_number >= 10
        self.aperture_number = aperture_number

    @classmethod
    def from_string(cls, s):
        # Strip the leading 'D' and the trailing '*'.
        return cls(aperture_number=int(s[1:-1]))

    def to_string(self):
        return 'D%s*' % self.aperture_number

    def execute(self, state, plane):
        state.set_current_aperture(self.aperture_number)
class InterpolateCommand(Command):
    """
    Command Code D01
    Section 4.2.2, p61
    Syntax is like XnnnYnnnInnnJnnnD01* in circular interpolation modes
    Syntax is like XnnnYnnnD01* in linear interpolation mode
    XnnnYnnn indicates the end point
    InnnJnnn indicates the center point offsets in circular modes
    """
    def __init__(self, x_string, y_string, i_string=None, j_string=None):
        # Coordinates are stored as raw digit strings: their width
        # encodes the number format (see CoordinateFormatCommand).
        self.x_string = x_string
        self.y_string = y_string
        self.i_string = i_string
        self.j_string = j_string

    @classmethod
    def from_string(cls, s):
        # NOTE(review): '\d+' does not accept signed coordinates —
        # confirm inputs never carry negative offsets before relying on
        # this in circular mode.
        if 'I' in s:
            m = re.match(r'X(\d+)Y(\d+)I(\d+)J(\d+)', s)
            return cls(x_string=m.group(1), y_string=m.group(2),
                       i_string=m.group(3), j_string=m.group(4))
        else:
            m = re.match(r'X(\d+)Y(\d+)', s)
            return cls(x_string=m.group(1), y_string=m.group(2))

    def to_string(self):
        # Compare against None, not truthiness: an offset string of '0'
        # is falsy but is still a valid circular-mode offset and must be
        # serialized (the previous `if self.i_string:` silently dropped it).
        if self.i_string is not None:
            return 'X%sY%sI%sJ%sD01*' % (self.x_string, self.y_string,
                                         self.i_string, self.j_string)
        else:
            return 'X%sY%sD01*' % (self.x_string, self.y_string)

    def execute(self, state, plane):
        # XXX drawing is not implemented yet
        pass
class MoveCommand(Command):
    """
    Command Code D02
    Section 4.2.3, p62
    Syntax is like XnnnYnnnD02*

    Moves the current point without exposing (drawing) anything.
    """
    def __init__(self, x_string, y_string):
        self.x_string = x_string
        self.y_string = y_string

    @classmethod
    def from_string(cls, s):
        # Drop the leading 'X' and trailing 'D02*', then split on 'Y'.
        x_part, y_part = s[1:-4].split('Y')
        return cls(x_string=x_part, y_string=y_part)

    def to_string(self):
        return 'X' + self.x_string + 'Y' + self.y_string + 'D02*'

    def execute(self, state, plane):
        # XXX not implemented yet
        pass
class FlashCommand(Command):
    """
    Command Code D03
    Section 4.2.4, p62
    Syntax is like XnnnYnnnD03*

    Flashes the current aperture at the given point.
    """
    def __init__(self, x_string, y_string):
        self.x_string = x_string
        self.y_string = y_string

    @classmethod
    def from_string(cls, s):
        # Drop the leading 'X' and trailing 'D03*', then split on 'Y'.
        x_part, y_part = s[1:-4].split('Y')
        return cls(x_string=x_part, y_string=y_part)

    def to_string(self):
        return 'X' + self.x_string + 'Y' + self.y_string + 'D03*'

    def execute(self, state, plane):
        # XXX not implemented yet
        pass
class LinearInterpolationModeCommand(Command):
    """
    Command Code G01
    Section 4.4.1, p65
    No args

    Switches subsequent D01 operations to straight-line interpolation.
    """
    def to_string(self):
        return 'G01*'
    def execute(self, state, plane):
        state.set_interpolation_mode('linear')
class CWCircularInterpolationModeCommand(Command):
    """
    Command Code G02
    Section 4.5.3, p68
    No args

    Switches subsequent D01 operations to clockwise circular
    interpolation.
    """
    def to_string(self):
        return 'G02*'

    def execute(self, state, plane):
        # Fixed typo: was 'clockwie-circular', which is inconsistent with
        # the G03 command's 'counterclockwise-circular' and could never
        # match a consumer checking for the correctly-spelled mode name.
        state.set_interpolation_mode('clockwise-circular')
class CCWCircularInterpolationModeCommand(Command):
    """
    Command Code G03
    Section 4.5.4, p68
    No args

    Switches subsequent D01 operations to counterclockwise circular
    interpolation.
    """
    def to_string(self):
        return 'G03*'
    def execute(self, state, plane):
        state.set_interpolation_mode('counterclockwise-circular')
class SingleQuadrantCommand(Command):
    """
    Command Code G74
    Section 4.5.5, p68
    No args

    Arcs in circular modes may span at most one quadrant.
    """
    def to_string(self):
        return 'G74*'
    def execute(self, state, plane):
        state.set_quadrant_mode('single')
class MultiQuadrantCommand(Command):
    """
    Command Code G75
    Section 4.5.6, p68
    No args

    Arcs in circular modes may span up to a full circle.
    """
    def to_string(self):
        return 'G75*'
    def execute(self, state, plane):
        state.set_quadrant_mode('multi')
class EnableRegionModeCommand(Command):
    """
    Command Code G36
    Section 4.6.2, p76
    No args

    Starts region (filled-contour) mode.
    """
    def to_string(self):
        return 'G36*'
    def execute(self, state, plane):
        state.set_region_mode('on')
class DisableRegionModeCommand(Command):
    """
    Command Code G37
    Section 4.6.3, p76
    No args

    Ends region (filled-contour) mode.
    """
    def to_string(self):
        return 'G37*'
    def execute(self, state, plane):
        state.set_region_mode('off')
class CommentCommand(Command):
    """
    Command Code G04
    Section 4.7, p94

    Comment text is preserved verbatim between the 'G04' prefix and the
    trailing '*'; executing a comment has no effect on the image.
    """
    def __init__(self, comment):
        self.comment = comment

    @classmethod
    def from_string(cls, s):
        return cls(comment=s[3:-1])

    def to_string(self):
        return 'G04' + self.comment + '*'

    def execute(self, state, plane):
        # Comments do not affect plotter state.
        pass
class EOFCommand(Command):
    """
    Command Code M02
    No args

    Marks the end of the Gerber file; has no effect on plotter state.
    """
    def to_string(self):
        return 'M02*'
    def execute(self, state, plane):
        pass
# Dispatch table for extended ('%'-prefixed) commands, keyed by the
# two-letter code that follows the '%' (see parse_command).
extended_commands = {
    'MO': UnitCommand,
    'FS': CoordinateFormatCommand,
    'OF': OffsetCommand,
    'IP': ImagePolarityCommand,
    'LP': LevelPolarityCommand,
    'AM': MacroApertureCommand,
    'AD': ApertureDefinitionCommand,
    }
# Dispatch table for ordinary commands, keyed by the function code that
# terminates the command string; strings whose code is not listed here
# are treated as set-aperture commands (see parse_command).
normal_commands = {
    'D01': InterpolateCommand,
    'D02': MoveCommand,
    'D03': FlashCommand,
    'G01': LinearInterpolationModeCommand,
    'G02': CWCircularInterpolationModeCommand,
    'G03': CCWCircularInterpolationModeCommand,
    'G74': SingleQuadrantCommand,
    'G75': MultiQuadrantCommand,
    'G36': EnableRegionModeCommand,
    'G37': DisableRegionModeCommand,
    'G04': CommentCommand,
    'M02': EOFCommand,
    }
| {
"content_hash": "5d78b3bc6591678d24f4efcfcefad319",
"timestamp": "",
"source": "github",
"line_count": 485,
"max_line_length": 77,
"avg_line_length": 24.17938144329897,
"alnum_prop": 0.5774707938944317,
"repo_name": "storborg/regerberate",
"id": "a28d01245c67f4a2728d6e6dd3be13973604f44d",
"size": "11727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "regerberate/gerber/commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19408"
}
],
"symlink_target": ""
} |
from automlk.dataset import get_dataset_list
from automlk.store import get_key_store, set_key_store, exists_key_store
"""
update results with pct and cv
"""
missing = []
for dt in get_dataset_list():
res = get_key_store('dataset:%s:rounds' % dt.dataset_id)
flag = False
if res is not None:
for r in res:
if 'pct' not in r.keys():
r['pct'] = 1.
flag = True
if 'cv' not in r.keys():
r['cv'] = True
flag = True
if 'mode' not in r.keys():
r['mode'] = 'search'
flag = True
# update store
if flag:
set_key_store('dataset:%s:rounds' % dt.dataset_id, res)
print('updating results:', dt.name)
key = 'dataset:%s:best' % dt.dataset_id
if exists_key_store(key):
res = get_key_store(key)
flag = False
if res is not None:
for r in res:
if 'pct' not in r.keys():
r['pct'] = 1.
flag = True
if 'cv' not in r.keys():
r['cv'] = True
flag = True
if 'mode' not in r.keys():
r['mode'] = 'search'
flag = True
# update store
if flag:
set_key_store(key, res)
print('updating best results:', dt.name)
key = 'dataset:%s:best_pp' % dt.dataset_id
if exists_key_store(key):
res = get_key_store(key)
flag = False
if res is not None:
for cat, rr in res:
for r in rr:
if 'pct' not in r.keys():
r['pct'] = 1.
flag = True
if 'cv' not in r.keys():
r['cv'] = True
flag = True
if 'mode' not in r.keys():
r['mode'] = 'search'
flag = True
# update store
if flag:
set_key_store(key, res)
print('updating best results pp:', dt.name)
| {
"content_hash": "3e538105e4e6ecfa9ede3adbdf9dfc10",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 72,
"avg_line_length": 30.855072463768117,
"alnum_prop": 0.42743071864725224,
"repo_name": "pierre-chaville/automlk",
"id": "3657408b63803ca24a2f038247417b4d58fd5203",
"size": "2129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/update_pct_cv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "867"
},
{
"name": "CSS",
"bytes": "207943"
},
{
"name": "HTML",
"bytes": "108986"
},
{
"name": "Jupyter Notebook",
"bytes": "25275"
},
{
"name": "Python",
"bytes": "322808"
},
{
"name": "Shell",
"bytes": "337"
}
],
"symlink_target": ""
} |
from bomb_defusal.view.module import Module
from bomb_defusal.view.utils.helper import load_model
class Port(Module):
    def __init__(self, model):
        """
        Initializes a new instance of the :class:`Port` class.

        :param bomb_defusal.modules.Port model: Model of the port module
        """
        super().__init__(model, base=None)
        # Mesh names in ports.obj use underscores where the port enum
        # value contains spaces or slashes.
        mesh_library = load_model('resources/components/ports.obj')
        mesh_name = self._model.port.value.replace(' ', '_').replace('/', '_')
        self._static = mesh_library.meshes[mesh_name]
| {
"content_hash": "673d631038febb0faf96e93406578c9b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 33.9375,
"alnum_prop": 0.6243093922651933,
"repo_name": "leupibr/BombDefusal",
"id": "23ecab21d69ee67dfb4a82e2a459e19bcf17e1ef",
"size": "543",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bomb_defusal/view/port.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "200225"
}
],
"symlink_target": ""
} |
import pytest
import sys
import time
import cPickle as pickle
from test_base_class import TestBaseClass
aerospike = pytest.importorskip("aerospike")
# The aerospike exception classes (InvalidUser, ParamError, ...) are used
# throughout the tests below; abort early with a hint if the client
# library is missing. (Python 2 file: print statement, bare except.)
try:
    from aerospike.exception import *
except:
    print "Please install aerospike python client."
    sys.exit(1)
class SomeClass(object):
    # NOTE(review): appears unused within this module — looks like test
    # scaffolding; candidate for removal after checking external users.
    pass
class TestDropUser(TestBaseClass):
    """Integration tests for client.admin_drop_user() against a secured
    Aerospike cluster. (Python 2 file: long literals like 60L.)"""
    # Skip the whole class when no user is configured, i.e. the target
    # cluster is not security-enabled.
    pytestmark = pytest.mark.skipif(
        TestBaseClass().get_hosts()[1] == None,
        reason="No user specified, may be not secured cluster.")
    def setup_method(self, method):
        """
        Connect an admin client and make sure the fixture user
        "foo-test" does not pre-exist.
        """
        hostlist, user, password = TestBaseClass().get_hosts()
        config = {'hosts': hostlist}
        # NOTE(review): class-level back-reference to the current test
        # instance — purpose not visible here; confirm it is still used.
        TestDropUser.Me = self
        self.client = aerospike.client(config).connect(user, password)
        try:
            self.client.admin_drop_user("foo-test")
        except:
            # Best-effort cleanup: the user usually does not exist.
            pass
    def teardown_method(self, method):
        """
        Teardown method: close the client connection.
        """
        self.client.close()
    def test_drop_user_with_no_parameters(self):
        """
        Invoke drop_user() without any mandatory parameters.
        """
        with pytest.raises(TypeError) as typeError:
            self.client.admin_drop_user()
        assert "Required argument 'user' (pos 1) not found" in typeError.value
    def test_drop_user_with_policy_none(self):
        """
        Invoke drop_user() with policy none
        """
        policy = None
        user = "foo-test"
        password = "foo1"
        roles = ["read", "read-write", "sys-admin"]
        status = self.client.admin_create_user( user, password, roles, policy )
        # Give the cluster time to propagate the new user.
        time.sleep(2)
        assert status == 0
        user_details = self.client.admin_query_user( user, policy )
        assert user_details == ['read', 'read-write', 'sys-admin']
        status = self.client.admin_drop_user( user, policy )
        assert status == 0
        try:
            user_details = self.client.admin_query_user( user )
        except InvalidUser as exception:
            assert exception.code == 60L
            assert exception.msg == 'AEROSPIKE_INVALID_USER'
    def test_drop_user_with_user_none(self):
        """
        Invoke drop_user() with user as None; expects ParamError.
        """
        policy = {'timeout': 1000}
        try:
            self.client.admin_drop_user( None, policy )
        except ParamError as exception:
            assert exception.code == -2L
            assert exception.msg == 'Username should be a string'
    def test_drop_user_positive(self):
        """
        Invoke drop_user() with correct arguments.
        """
        policy = {'timeout': 1000}
        user = "foo-test"
        password = "foo1"
        roles = ["read", "read-write", "sys-admin"]
        status = self.client.admin_create_user( user, password, roles, policy )
        time.sleep(1)
        assert status == 0
        user_details = self.client.admin_query_user( user, policy )
        assert user_details == ['read', 'read-write', 'sys-admin']
        status = self.client.admin_drop_user( user, policy )
        assert status == 0
        time.sleep(1)
        try:
            user_details = self.client.admin_query_user( user, policy )
        except InvalidUser as exception:
            assert exception.code == 60L
            assert exception.msg == 'AEROSPIKE_INVALID_USER'
    def test_drop_user_positive_without_policy(self):
        """
        Invoke drop_user() with correct arguments and no policy argument.
        """
        policy = {
            'timeout': 1000
        }
        user = "foo-test"
        password = "foo1"
        roles = ["read", "read-write", "sys-admin"]
        status = self.client.admin_create_user( user, password, roles, policy )
        time.sleep(1)
        assert status == 0
        user_details = self.client.admin_query_user( user, policy )
        assert user_details == ['read', 'read-write', 'sys-admin']
        status = self.client.admin_drop_user( user )
        assert status == 0
        time.sleep(1)
        try:
            user_details = self.client.admin_query_user( user, policy )
        except InvalidUser as exception:
            assert exception.code == 60L
            assert exception.msg == 'AEROSPIKE_INVALID_USER'
    def test_drop_user_negative(self):
        """
        Invoke drop_user() with non-existent user.
        """
        policy = {}
        user = "foo-test"
        password = "foo1"
        roles = ["read", "read-write", "sys-admin"]
        try:
            user_details = self.client.admin_query_user( user, policy )
        except InvalidUser as exception:
            assert exception.code == 60L
            assert exception.msg == 'AEROSPIKE_INVALID_USER'
        try:
            status = self.client.admin_drop_user( user )
        except InvalidUser as exception:
            assert exception.code == 60L
            assert exception.msg == 'AEROSPIKE_INVALID_USER'
    def test_drop_user_policy_incorrect(self):
        """
        Invoke drop_user() with policy incorrect
        """
        policy = {'timeout': 1000}
        user = "incorrect-policy"
        password = "foo1"
        roles = ["read", "read-write", "sys-admin"]
        status = self.client.admin_create_user( user, password, roles, policy )
        time.sleep(1)
        assert status == 0
        user_details = self.client.admin_query_user( user, policy )
        assert user_details == ['read', 'read-write', 'sys-admin']
        # A fractional timeout is rejected by the client as invalid.
        policy = {
            'timeout': 0.2
        }
        try:
            status = self.client.admin_drop_user( user, policy )
        except ParamError as exception:
            assert exception.code == -2L
            assert exception.msg == 'timeout is invalid'
        # Cleanup: drop the user with a valid (default) policy.
        status = self.client.admin_drop_user( user )
    def test_drop_user_with_extra_argument(self):
        """
        Invoke drop_user() with extra argument.
        """
        policy = {'timeout': 1000}
        with pytest.raises(TypeError) as typeError:
            self.client.admin_drop_user( "foo-test", policy, "" )
        assert "admin_drop_user() takes at most 2 arguments (3 given)" in typeError.value
    def test_drop_user_with_too_long_username(self):
        """
        Invoke create/drop_user() with an over-long username; both are
        expected to raise InvalidUser.
        """
        policy = {}
        user = "user$" * 1000
        password = "user10"
        roles = ["sys-admin"]
        try:
            status = self.client.admin_create_user( user, password, roles, policy )
        except InvalidUser as exception:
            assert exception.code == 60
            assert exception.msg == "AEROSPIKE_INVALID_USER"
        try:
            status = self.client.admin_drop_user( user, policy )
        except InvalidUser as exception:
            assert exception.code == 60
            assert exception.msg == "AEROSPIKE_INVALID_USER"
    def test_drop_user_with_special_characters_in_username(self):
        """
        Invoke drop_user() for a username made of special characters.
        """
        policy = {}
        user = "!#Q#AEQ@#$%&^*((^&*~~~````"
        password = "user4"
        roles = ["read-write"]
        try:
            status = self.client.admin_create_user( user, password, roles, policy )
            assert status == 0
        except:
            # The user may already exist from a previous run.
            pass
        status = self.client.admin_drop_user( user )
        assert status == 0
| {
"content_hash": "35a24c5124f92c269a1a5e310be1a972",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 89,
"avg_line_length": 28.773809523809526,
"alnum_prop": 0.5639222176251552,
"repo_name": "arthurprs/aerospike-client-python",
"id": "1970d9461659337d7fa14cf147dd4d70ed27e598",
"size": "7276",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_admin_drop_user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "649569"
},
{
"name": "Lua",
"bytes": "6124"
},
{
"name": "Python",
"bytes": "523698"
},
{
"name": "Shell",
"bytes": "13148"
}
],
"symlink_target": ""
} |
import copy
from itertools import chain
from django import forms
from django.contrib.postgres.validators import (
ArrayMaxLengthValidator, ArrayMinLengthValidator,
)
from django.core.exceptions import ValidationError
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from ..utils import prefix_validation_error
class SimpleArrayField(forms.CharField):
    """Form field accepting a delimited string (or list) and cleaning it
    into a list by delegating each item to *base_field*."""

    default_error_messages = {
        'item_invalid': _('Item %(nth)s in the array did not validate: '),
    }

    def __init__(self, base_field, delimiter=',', max_length=None, min_length=None, *args, **kwargs):
        self.base_field = base_field
        self.delimiter = delimiter
        super(SimpleArrayField, self).__init__(*args, **kwargs)
        if min_length is not None:
            self.min_length = min_length
            self.validators.append(ArrayMinLengthValidator(int(min_length)))
        if max_length is not None:
            self.max_length = max_length
            self.validators.append(ArrayMaxLengthValidator(int(max_length)))

    def _prefixed_item_error(self, error, index):
        # Shared by to_python()/validate()/run_validators() so the three
        # item loops stay consistent in how per-item errors are labelled
        # with their position (previously triplicated inline).
        return prefix_validation_error(
            error,
            prefix=self.error_messages['item_invalid'],
            code='item_invalid',
            params={'nth': index},
        )

    def prepare_value(self, value):
        """Render a python list back into its delimited string form."""
        if isinstance(value, list):
            return self.delimiter.join(str(self.base_field.prepare_value(v)) for v in value)
        return value

    def to_python(self, value):
        """Split the raw input and convert each item via the base field,
        collecting per-item errors rather than stopping at the first."""
        if isinstance(value, list):
            items = value
        elif value:
            items = value.split(self.delimiter)
        else:
            items = []
        errors = []
        values = []
        for index, item in enumerate(items):
            try:
                values.append(self.base_field.to_python(item))
            except ValidationError as error:
                errors.append(self._prefixed_item_error(error, index))
        if errors:
            raise ValidationError(errors)
        return values

    def validate(self, value):
        """Run the base field's validate() on every item."""
        super(SimpleArrayField, self).validate(value)
        errors = []
        for index, item in enumerate(value):
            try:
                self.base_field.validate(item)
            except ValidationError as error:
                errors.append(self._prefixed_item_error(error, index))
        if errors:
            raise ValidationError(errors)

    def run_validators(self, value):
        """Run the base field's validators on every item."""
        super(SimpleArrayField, self).run_validators(value)
        errors = []
        for index, item in enumerate(value):
            try:
                self.base_field.run_validators(item)
            except ValidationError as error:
                errors.append(self._prefixed_item_error(error, index))
        if errors:
            raise ValidationError(errors)
class SplitArrayWidget(forms.Widget):
    """Widget rendering *size* copies of a sub-widget, one per array
    item, named '<name>_0' ... '<name>_<size-1>'.

    Most widget protocol methods simply delegate to the sub-widget.
    """
    def __init__(self, widget, size, **kwargs):
        # Accept either a widget class or an instance.
        self.widget = widget() if isinstance(widget, type) else widget
        self.size = size
        super(SplitArrayWidget, self).__init__(**kwargs)
    @property
    def is_hidden(self):
        return self.widget.is_hidden
    def value_from_datadict(self, data, files, name):
        # Collect one value per sub-widget from its indexed field name.
        return [self.widget.value_from_datadict(data, files, '%s_%s' % (name, index))
                for index in range(self.size)]
    def value_omitted_from_data(self, data, files, name):
        # Omitted only when every sub-widget's value is missing.
        return all(
            self.widget.value_omitted_from_data(data, files, '%s_%s' % (name, index))
            for index in range(self.size)
        )
    def id_for_label(self, id_):
        # See the comment for RadioSelect.id_for_label()
        if id_:
            id_ += '_0'
        return id_
    def render(self, name, value, attrs=None, renderer=None):
        """Render every sub-widget, padding missing items with None."""
        if self.is_localized:
            self.widget.is_localized = self.is_localized
        value = value or []
        output = []
        final_attrs = self.build_attrs(attrs)
        id_ = final_attrs.get('id')
        # Render at least self.size items, more if value is longer.
        for i in range(max(len(value), self.size)):
            try:
                widget_value = value[i]
            except IndexError:
                widget_value = None
            if id_:
                # Give every sub-widget its own indexed id.
                final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
            output.append(self.widget.render(name + '_%s' % i, widget_value, final_attrs, renderer))
        return mark_safe(self.format_output(output))
    def format_output(self, rendered_widgets):
        return ''.join(rendered_widgets)
    @property
    def media(self):
        return self.widget.media
    def __deepcopy__(self, memo):
        # Copy the sub-widget too so instances don't share state.
        obj = super(SplitArrayWidget, self).__deepcopy__(memo)
        obj.widget = copy.deepcopy(self.widget)
        return obj
    @property
    def needs_multipart_form(self):
        return self.widget.needs_multipart_form
class SplitArrayField(forms.Field):
    """Form field rendering *size* copies of *base_field* (via
    SplitArrayWidget) and cleaning the submitted values into a list."""
    default_error_messages = {
        'item_invalid': _('Item %(nth)s in the array did not validate: '),
    }
    def __init__(self, base_field, size, remove_trailing_nulls=False, **kwargs):
        # remove_trailing_nulls: drop empty values (and their errors) at
        # the tail of the cleaned list instead of keeping them.
        self.base_field = base_field
        self.size = size
        self.remove_trailing_nulls = remove_trailing_nulls
        widget = SplitArrayWidget(widget=base_field.widget, size=size)
        kwargs.setdefault('widget', widget)
        super(SplitArrayField, self).__init__(**kwargs)
    def clean(self, value):
        """Clean each item with the base field, collecting positional
        errors.

        NOTE(review): assumes len(value) >= self.size, which holds when
        the value comes from the paired SplitArrayWidget — value[index]
        would raise IndexError for a shorter list.
        """
        cleaned_data = []
        errors = []
        if not any(value) and self.required:
            raise ValidationError(self.error_messages['required'])
        max_size = max(self.size, len(value))
        for index in range(max_size):
            item = value[index]
            try:
                cleaned_data.append(self.base_field.clean(item))
            except ValidationError as error:
                errors.append(prefix_validation_error(
                    error,
                    self.error_messages['item_invalid'],
                    code='item_invalid',
                    params={'nth': index},
                ))
                # Keep cleaned_data and errors positionally aligned so
                # trailing-null trimming below can slice both together.
                cleaned_data.append(None)
            else:
                errors.append(None)
        if self.remove_trailing_nulls:
            null_index = None
            # Walk backwards to find where the trailing run of empty
            # values starts.
            for i, value in reversed(list(enumerate(cleaned_data))):
                if value in self.base_field.empty_values:
                    null_index = i
                else:
                    break
            if null_index is not None:
                cleaned_data = cleaned_data[:null_index]
                errors = errors[:null_index]
        errors = list(filter(None, errors))
        if errors:
            raise ValidationError(list(chain.from_iterable(errors)))
        return cleaned_data
| {
"content_hash": "776892e13aae9616ea6df3c659f91bfe",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 101,
"avg_line_length": 35.301507537688444,
"alnum_prop": 0.5639857651245551,
"repo_name": "twz915/django",
"id": "9a9e871a438dbf85231a4a5eb27b4803560ddfec",
"size": "7025",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/contrib/postgres/forms/array.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55929"
},
{
"name": "HTML",
"bytes": "182880"
},
{
"name": "JavaScript",
"bytes": "252645"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11852079"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from stagecraft.apps.datasets.models.data_group import DataGroup
from stagecraft.apps.datasets.models.data_set import DataSet
from stagecraft.apps.datasets.models.data_set import generate_data_set_name
from stagecraft.apps.datasets.models.data_type import DataType
from stagecraft.apps.datasets.models.oauth_user import OAuthUser
| {
"content_hash": "d6f306bc90be44e7d2b3e1d8487beeed",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 75,
"avg_line_length": 66,
"alnum_prop": 0.8545454545454545,
"repo_name": "alphagov/stagecraft",
"id": "2105dc6d511c2541f9bbb3a69e5746c30cdfbe2a",
"size": "330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stagecraft/apps/datasets/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "134"
},
{
"name": "HTML",
"bytes": "855"
},
{
"name": "JavaScript",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "622720"
},
{
"name": "Shell",
"bytes": "14467"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from django.apps import AppConfig
class TXMoneyRatesConfig(AppConfig):
    """Django application configuration for the txmoney.rates app."""
    name = 'txmoney.rates'
    # NOTE(review): label is 'txmoney' rather than the default 'rates' —
    # presumably to keep existing migrations/relations resolving; confirm
    # before changing.
    label = 'txmoney'
    verbose_name = "TXMoney Rates"
    def ready(self):
        # No startup hooks (signal receivers, checks) registered yet.
        pass
| {
"content_hash": "9ed239f83a6622dd383429a929b11dd5",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 56,
"avg_line_length": 20.833333333333332,
"alnum_prop": 0.692,
"repo_name": "txerpa/dj-txmoney",
"id": "fe8ed5d3a3c4bf5d89a49fe9ae8fb36fa2ba6908",
"size": "265",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "txmoney/rates/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "121003"
}
],
"symlink_target": ""
} |
import collections
import hashlib
import json
from github import MainClass
import six
from stackalytics.openstack.common import log as logging
from stackalytics.processor import normalizer
from stackalytics.processor import record_processor
from stackalytics.processor import utils
from stackalytics.processor import vcs
LOG = logging.getLogger(__name__)
def _check_default_data_change(runtime_storage_inst, default_data):
    """Return True when *default_data* differs from the version processed
    last time (and remember the new digest).

    The digest must be stable for equal data, so serialize with
    sort_keys=True — plain json.dumps dict ordering is not guaranteed to
    be identical across interpreter runs (hash randomization) — and
    encode to bytes, which hashlib requires on Python 3.
    """
    h = hashlib.new('sha1')
    h.update(json.dumps(default_data, sort_keys=True).encode('utf-8'))
    digest = h.hexdigest()

    p_digest = runtime_storage_inst.get_by_key('default_data_digest')
    if digest == p_digest:
        LOG.debug('No changes in default data, sha1: %s', digest)
        return False

    LOG.debug('Default data has changes, sha1: %s', digest)
    runtime_storage_inst.set_by_key('default_data_digest', digest)
    return True
def _retrieve_project_list_from_github(project_sources):
    """Fetch repo descriptors for every configured GitHub organization.

    Returns a list of repo dicts (branches/module/organization/uri/
    releases). NOTE(review): returns False — not an empty list — when any
    organization lookup fails; callers rely on falsiness only.
    """
    LOG.info('Retrieving project list from GitHub')
    github = MainClass.Github(timeout=60)
    repos = []
    for project_source in project_sources:
        organization = project_source['organization']
        LOG.debug('Get list of projects for organization %s', organization)
        try:
            github_repos = github.get_organization(organization).get_repos()
        except Exception as e:
            LOG.exception(e)
            LOG.warn('Fail to retrieve list of projects. Keep it unmodified')
            return False
        # Repo names listed under 'exclude' in the source are skipped.
        exclude = set(project_source.get('exclude', []))
        for repo in github_repos:
            if repo.name not in exclude:
                r = {
                    'branches': ['master'],
                    'module': repo.name,
                    'organization': organization,
                    'uri': repo.git_url,
                    'releases': []
                }
                repos.append(r)
                LOG.debug('Project is added to default data: %s', r)
    return repos
def _create_module_groups_for_project_sources(project_sources, repos):
    """Build one 'organization'-tagged module group per GitHub
    organization, collecting the modules of the repos that belong to it."""
    modules_by_org = collections.defaultdict(list)
    for repo in repos:
        modules_by_org[repo['organization']].append(repo['module'])

    # A project source may override the displayed group name; fall back
    # to the organization name itself.
    display_names = dict([(ps.get('organization'),
                           ps.get('module_group_name') or
                           ps.get('organization'))
                          for ps in project_sources])

    groups = []
    for org, modules in six.iteritems(modules_by_org):
        groups.append(utils.make_module_group(
            org, name=display_names.get(org, org), modules=modules,
            tag='organization'))
    return groups
def _update_project_list(default_data):
    """Merge GitHub-discovered repos and their per-organization module
    groups into *default_data* in place."""
    known_uris = set([r['uri'] for r in default_data['repos']])
    discovered = _retrieve_project_list_from_github(
        default_data['project_sources'])
    if discovered:
        # Only add repos that are not already configured explicitly.
        default_data['repos'] += [r for r in discovered
                                  if r['uri'] not in known_uris]
    default_data['module_groups'] += _create_module_groups_for_project_sources(
        default_data['project_sources'], default_data['repos'])
def _store_users(runtime_storage_inst, users):
    """Persist users, merging each one into any previously stored record
    for the same user_id."""
    for user in users:
        existing = utils.load_user(runtime_storage_inst, user['user_id'])
        if existing:
            # Default-data fields override stored ones; the rest of the
            # stored record is preserved.
            existing.update(user)
            user = existing
        utils.store_user(runtime_storage_inst, user)
def _store_companies(runtime_storage_inst, companies):
domains_index = {}
for company in companies:
for domain in company['domains']:
domains_index[domain] = company['company_name']
if 'aliases' in company:
for alias in company['aliases']:
domains_index[alias] = company['company_name']
runtime_storage_inst.set_by_key('companies', domains_index)
def _store_module_groups(runtime_storage_inst, module_groups):
    """Merge module group definitions into the stored module-group index,
    keyed by explicit id or, failing that, by group name."""
    index = runtime_storage_inst.get_by_key('module_groups') or {}
    for group in module_groups:
        name = group['module_group_name']
        group_id = group.get('id') or name
        index[group_id] = utils.make_module_group(
            group_id, name=name, modules=group['modules'],
            tag=group.get('tag', 'group'))
    runtime_storage_inst.set_by_key('module_groups', index)
# Maps default-data section names to their dedicated store routine; any
# section not listed here is written to runtime storage verbatim by
# _store_default_data().
STORE_FUNCS = {
    'users': _store_users,
    'companies': _store_companies,
    'module_groups': _store_module_groups,
}
def _store_default_data(runtime_storage_inst, default_data):
    """Normalize default data and push every section into runtime storage."""
    normalizer.normalize_default_data(default_data)
    LOG.debug('Update runtime storage with default data')
    for key, value in six.iteritems(default_data):
        # Sections with a dedicated handler go through it; the rest are
        # stored verbatim under their own key.
        store_func = STORE_FUNCS.get(key)
        if store_func:
            store_func(runtime_storage_inst, value)
        else:
            runtime_storage_inst.set_by_key(key, value)
def _update_records(runtime_storage_inst, sources_root):
    """Re-process stored records against a freshly-built release index."""
    LOG.debug('Update existing records')
    release_index = {}
    # Combine the release indexes of every configured repo.
    for repo in utils.load_repos(runtime_storage_inst):
        vcs_inst = vcs.get_vcs(repo, sources_root)
        release_index.update(vcs_inst.get_release_index())
    processor = record_processor.RecordProcessor(runtime_storage_inst)
    processor.update(release_index)
def _get_changed_member_records(runtime_storage_inst, record_processor_inst):
for record in runtime_storage_inst.get_all_records():
if record['record_type'] == 'member' and 'company_name' in record:
company_draft = record['company_draft']
company_name = record_processor_inst.domains_index.get(
company_draft) or company_draft
if company_name != record['company_name']:
record['company_name'] = company_name
yield record
def _update_members_company_name(runtime_storage_inst):
    """Propagate company renames to member records and their user profiles."""
    LOG.debug('Update company names for members')
    record_processor_inst = record_processor.RecordProcessor(
        runtime_storage_inst)
    changed_records = _get_changed_member_records(runtime_storage_inst,
                                                  record_processor_inst)
    for record in changed_records:
        company_name = record['company_name']
        user = utils.load_user(runtime_storage_inst, record['user_id'])
        # Members carry a single open-ended affiliation.
        user['companies'] = [{
            'company_name': company_name,
            'end_date': 0,
        }]
        user['company_name'] = company_name
        utils.store_user(runtime_storage_inst, user)
        LOG.debug('Company name changed for user %s', user)
        record_id = record['record_id']
        # NOTE(review): reaches into runtime storage internals to persist
        # just this record — confirm no public API exists for this.
        runtime_storage_inst.memcached.set(
            runtime_storage_inst._get_record_name(record_id), record)
        runtime_storage_inst._commit_update(record_id)
def process(runtime_storage_inst, default_data, sources_root, force_update):
    """Entry point: store default data and refresh derived records.

    Storage and record updates run only when the default data actually
    changed, or when `force_update` is set.
    """
    LOG.debug('Process default data')
    dd_changed = _check_default_data_change(runtime_storage_inst, default_data)
    if 'project_sources' in default_data:
        _update_project_list(default_data)
    needs_refresh = dd_changed or force_update
    if needs_refresh:
        _store_default_data(runtime_storage_inst, default_data)
        _update_records(runtime_storage_inst, sources_root)
        _update_members_company_name(runtime_storage_inst)
| {
"content_hash": "2dd3ca97075cd32acb270c9b72f40ee4",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 79,
"avg_line_length": 34.90865384615385,
"alnum_prop": 0.6344856080429693,
"repo_name": "knewstadt/stackalytics",
"id": "79212e0d23921ea9b7175b2c5f7d228a16fff084",
"size": "7843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stackalytics/processor/default_data_processor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import pytest
import tensorflow as tf
import numpy as np
from tfs.dataset.skdata import *
from tfs.data_processor import StandardScaler,MinMaxScaler,Normalizer
@pytest.fixture
def data():
  """Return a fresh blobs dataset (100 samples, 30% test split) per test."""
  return MakeBlobs(test_percent=0.3,n_samples=100)
class TestSKlearnProcessor:
    """Round-trip tests for the sklearn-backed data processors."""

    def test_standard_scaler(self, data):
        """Scaling gives unit std; inverse restores the original data."""
        processor = StandardScaler()
        original = data.train.data.copy()
        data.process(processor)
        np.testing.assert_approx_equal(np.std(data.train.data), 1)
        data.inv_process(processor)
        np.testing.assert_array_almost_equal(original, data.train.data)

    def test_minmax_scaler(self, data):
        """Scaling maps data into [0, 1]; inverse restores the original."""
        processor = MinMaxScaler()
        original = data.train.data.copy()
        data.process(processor)
        np.testing.assert_approx_equal(np.min(data.train.data), 0)
        np.testing.assert_approx_equal(np.max(data.train.data), 1)
        data.inv_process(processor)
        np.testing.assert_array_almost_equal(original, data.train.data)

    def test_normalizer(self, data):
        """Normalizing makes every sample a unit vector."""
        processor = Normalizer()
        data.process(processor)
        row_norms = np.linalg.norm(data.train.data, axis=1)
        np.testing.assert_approx_equal(np.min(row_norms), 1)
        np.testing.assert_approx_equal(np.max(row_norms), 1)
| {
"content_hash": "6f8fc9f5fa7756077d71b3b3c848e5d6",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 69,
"avg_line_length": 26.19047619047619,
"alnum_prop": 0.7081818181818181,
"repo_name": "crackhopper/TFS-toolbox",
"id": "ca42d54dc7a02ba45e55f3ee16aff9277c356021",
"size": "1100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/data_processor/processor_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1935"
},
{
"name": "Jupyter Notebook",
"bytes": "1820326"
},
{
"name": "Python",
"bytes": "99327"
},
{
"name": "Shell",
"bytes": "178"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
import warnings
import threading
from . import sigtools
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import linalg
from scipy.fftpack import (fft, ifft, ifftshift, fft2, ifft2, fftn,
ifftn, fftfreq)
from numpy.fft import rfftn, irfftn
from numpy import (allclose, angle, arange, argsort, array, asarray,
atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
iscomplexobj, mean, ndarray, newaxis, ones, pi,
poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
prod, product, r_, ravel, real_if_close, reshape,
roots, sort, sum, take, transpose, unique, where, zeros,
zeros_like)
import numpy as np
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
# Public API of this module.
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
           'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
           'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
           'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
           'residuez', 'resample', 'detrend', 'lfilter_zi', 'sosfilt_zi',
           'filtfilt', 'decimate', 'vectorstrength']
# Numeric flags for the output-size modes accepted by the C helpers.
_modedict = {'valid': 0, 'same': 1, 'full': 2}
# Numeric flags for the 2-D boundary conditions; synonyms share a value.
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
                 'symmetric': 1, 'reflect': 4}
# numpy.fft's rfftn/irfftn became thread-safe in NumPy 1.9; on older
# versions calls must be serialized through _rfft_lock (see fftconvolve).
_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
def _valfrommode(mode):
    """Translate a mode name ('valid'/'same'/'full') or flag (0/1/2)
    to the numeric flag expected by the compiled helpers."""
    try:
        return _modedict[mode]
    except KeyError:
        # Not a known name: accept the raw numeric flag, reject the rest.
        if mode not in [0, 1, 2]:
            raise ValueError("Acceptable mode flags are 'valid' (0),"
                             " 'same' (1), or 'full' (2).")
        return mode
def _bvalfromboundary(boundary):
    """Translate a boundary condition name or flag to its packed value.

    String names are looked up in ``_boundarydict``; integer flags 0, 1
    or 2 are passed through. Either way the result is shifted left by
    two bits, the packing ``sigtools._convolve2d`` expects.

    Raises
    ------
    ValueError
        If `boundary` is neither a known name nor one of 0, 1, 2.
    """
    try:
        val = _boundarydict[boundary] << 2
    except KeyError:
        # BUG FIX: the original tested `val not in [0, 1, 2]` here, but
        # `val` is unbound when the lookup fails, so invalid input raised
        # UnboundLocalError instead of the intended ValueError. Test the
        # argument itself.
        if boundary not in [0, 1, 2]:
            raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
                             " (or 'circular'), \n and 'symm'"
                             " (or 'symmetric').")
        val = boundary << 2
    return val
def _check_valid_mode_shapes(shape1, shape2):
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in "
"every dimension for 'valid' mode.")
def correlate(in1, in2, mode='full'):
    """
    Cross-correlate two N-dimensional arrays.
    Cross-correlate `in1` and `in2`, with the output size determined by the
    `mode` argument.
    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`;
        if sizes of `in1` and `in2` are not equal then `in1` has to be the
        larger array.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:
        ``full``
           The output is the full discrete linear cross-correlation
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    Returns
    -------
    correlate : array
        An N-dimensional array containing a subset of the discrete linear
        cross-correlation of `in1` with `in2`.
    Notes
    -----
    The correlation z of two d-dimensional arrays x and y is defined as:
      z[...,k,...] = sum[..., i_l, ...]
                         x[..., i_l,...] * conj(y[..., i_l + k,...])
    Examples
    --------
    Implement a matched filter using cross-correlation, to recover a signal
    that has passed through a noisy channel.
    >>> from scipy import signal
    >>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
    >>> sig_noise = sig + np.random.randn(len(sig))
    >>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
    >>> import matplotlib.pyplot as plt
    >>> clock = np.arange(64, len(sig), 128)
    >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
    >>> ax_orig.plot(sig)
    >>> ax_orig.plot(clock, sig[clock], 'ro')
    >>> ax_orig.set_title('Original signal')
    >>> ax_noise.plot(sig_noise)
    >>> ax_noise.set_title('Signal with noise')
    >>> ax_corr.plot(corr)
    >>> ax_corr.plot(clock, corr[clock], 'ro')
    >>> ax_corr.axhline(0.5, ls=':')
    >>> ax_corr.set_title('Cross-correlated with rectangular pulse')
    >>> ax_orig.margins(0, 0.1)
    >>> fig.tight_layout()
    >>> fig.show()
    """
    in1 = asarray(in1)
    in2 = asarray(in2)
    # Don't use _valfrommode, since correlate should not accept numeric modes
    try:
        val = _modedict[mode]
    except KeyError:
        raise ValueError("Acceptable mode flags are 'valid',"
                         " 'same', or 'full'.")
    # Scalar inputs: correlation degenerates to a plain product.
    if in1.ndim == in2.ndim == 0:
        return in1 * in2
    elif not in1.ndim == in2.ndim:
        raise ValueError("in1 and in2 should have the same dimensionality")
    if mode == 'valid':
        _check_valid_mode_shapes(in1.shape, in2.shape)
        # Output shrinks by (in2 extent - 1) along every axis.
        ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
        out = np.empty(ps, in1.dtype)
        z = sigtools._correlateND(in1, in2, out, val)
    else:
        # _correlateND is far slower when in2.size > in1.size, so swap them
        # and then undo the effect afterward
        swapped_inputs = (mode == 'full') and (in2.size > in1.size)
        if swapped_inputs:
            in1, in2 = in2, in1
        # 'full' output extent along every axis.
        ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
        # zero pad input
        in1zpadded = np.zeros(ps, in1.dtype)
        sc = [slice(0, i) for i in in1.shape]
        in1zpadded[sc] = in1.copy()
        if mode == 'full':
            out = np.empty(ps, in1.dtype)
        elif mode == 'same':
            out = np.empty(in1.shape, in1.dtype)
        z = sigtools._correlateND(in1zpadded, in2, out, val)
        # Reverse and conjugate to undo the effect of swapping inputs
        if swapped_inputs:
            slice_obj = [slice(None, None, -1)] * len(z.shape)
            z = z[slice_obj].conj()
    return z
def _centered(arr, newsize):
# Return the center newsize portion of the array.
newsize = asarray(newsize)
currsize = array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _next_regular(target):
"""
Find the next regular number greater than or equal to target.
Regular numbers are composites of the prime factors 2, 3, and 5.
Also known as 5-smooth numbers or Hamming numbers, these are the optimal
size for inputs to FFTPACK.
Target must be a positive integer.
"""
if target <= 6:
return target
# Quickly check if it's already a power of 2
if not (target & (target-1)):
return target
match = float('inf') # Anything found will be smaller
p5 = 1
while p5 < target:
p35 = p5
while p35 < target:
# Ceiling integer division, avoiding conversion to float
# (quotient = ceil(target / p35))
quotient = -(-target // p35)
# Quickly find next power of 2 >= quotient
try:
p2 = 2**((quotient - 1).bit_length())
except AttributeError:
# Fallback for Python <2.7
p2 = 2**(len(bin(quotient - 1)) - 2)
N = p2 * p35
if N == target:
return N
elif N < match:
match = N
p35 *= 3
if p35 == target:
return p35
if p35 < match:
match = p35
p5 *= 5
if p5 == target:
return p5
if p5 < match:
match = p5
return match
def fftconvolve(in1, in2, mode="full"):
    """Convolve two N-dimensional arrays using FFT.
    Convolve `in1` and `in2` using the fast Fourier transform method, with
    the output size determined by the `mode` argument.
    This is generally much faster than `convolve` for large arrays (n > ~500),
    but can be slower when only a few output values are needed, and can only
    output float arrays (int or object array inputs will be cast to float).
    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`;
        if sizes of `in1` and `in2` are not equal then `in1` has to be the
        larger array.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:
        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    Returns
    -------
    out : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.
    Examples
    --------
    Autocorrelation of white noise is an impulse. (This is at least 100 times
    as fast as `convolve`.)
    >>> from scipy import signal
    >>> sig = np.random.randn(1000)
    >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
    >>> ax_orig.plot(sig)
    >>> ax_orig.set_title('White noise')
    >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
    >>> ax_mag.set_title('Autocorrelation')
    >>> fig.tight_layout()
    >>> fig.show()
    Gaussian blur implemented using FFT convolution. Notice the dark borders
    around the image, due to the zero-padding beyond its boundaries.
    The `convolve2d` function allows for other types of image boundaries,
    but is far slower.
    >>> from scipy import misc
    >>> lena = misc.lena()
    >>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
    >>> blurred = signal.fftconvolve(lena, kernel, mode='same')
    >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(1, 3)
    >>> ax_orig.imshow(lena, cmap='gray')
    >>> ax_orig.set_title('Original')
    >>> ax_orig.set_axis_off()
    >>> ax_kernel.imshow(kernel, cmap='gray')
    >>> ax_kernel.set_title('Gaussian kernel')
    >>> ax_kernel.set_axis_off()
    >>> ax_blurred.imshow(blurred, cmap='gray')
    >>> ax_blurred.set_title('Blurred')
    >>> ax_blurred.set_axis_off()
    >>> fig.show()
    """
    in1 = asarray(in1)
    in2 = asarray(in2)
    if in1.ndim == in2.ndim == 0:  # scalar inputs
        return in1 * in2
    elif not in1.ndim == in2.ndim:
        raise ValueError("in1 and in2 should have the same dimensionality")
    elif in1.size == 0 or in2.size == 0:  # empty arrays
        return array([])
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, complex) or
                      np.issubdtype(in2.dtype, complex))
    # Extent of the full linear convolution along every axis.
    shape = s1 + s2 - 1
    if mode == "valid":
        _check_valid_mode_shapes(s1, s2)
    # Speed up FFT by padding to optimal size for FFTPACK
    fshape = [_next_regular(int(d)) for d in shape]
    fslice = tuple([slice(0, int(sz)) for sz in shape])
    # Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
    # sure we only call rfftn/irfftn from one thread at a time.
    if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
        try:
            # Real inputs: the cheaper real-valued FFT pair suffices.
            ret = irfftn(rfftn(in1, fshape) *
                         rfftn(in2, fshape), fshape)[fslice].copy()
        finally:
            if not _rfft_mt_safe:
                _rfft_lock.release()
    else:
        # If we're here, it's either because we need a complex result, or we
        # failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
        # is already in use by another thread). In either case, use the
        # (threadsafe but slower) SciPy complex-FFT routines instead.
        ret = ifftn(fftn(in1, fshape) * fftn(in2, fshape))[fslice].copy()
        if not complex_result:
            ret = ret.real
    # Trim the full-size result down to the requested output mode.
    if mode == "full":
        return ret
    elif mode == "same":
        return _centered(ret, s1)
    elif mode == "valid":
        return _centered(ret, s1 - s2 + 1)
    else:
        raise ValueError("Acceptable mode flags are 'valid',"
                         " 'same', or 'full'.")
def convolve(in1, in2, mode='full'):
    """
    Convolve two N-dimensional arrays.

    The convolution is computed as a correlation with the second input
    reversed along every axis (and conjugated when complex).

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`;
        if sizes of `in1` and `in2` are not equal then `in1` has to be the
        larger array.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output: the full discrete
        linear convolution (default), only the elements that do not rely
        on zero-padding ('valid'), or an output the same size as `in1`
        centered with respect to the 'full' output ('same').

    Returns
    -------
    convolve : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    See also
    --------
    numpy.polymul : performs polynomial multiplication (same operation, but
                    also accepts poly1d objects)
    """
    a = asarray(in1)
    kernel = asarray(in2)
    # Scalars: convolution is plain multiplication.
    if a.ndim == kernel.ndim == 0:
        return a * kernel
    # Flip the kernel along every axis; convolution equals correlation
    # with the reversed (conjugated, if complex) kernel.
    flipper = [slice(None, None, -1)] * kernel.ndim
    if np.iscomplexobj(kernel):
        return correlate(a, kernel[flipper].conj(), mode)
    return correlate(a, kernel[flipper], mode)
def order_filter(a, domain, rank):
    """
    Perform an order filter on an N-dimensional array.

    For every element of `a`, the non-zero entries of `domain` select a
    neighborhood; the selected values are sorted and the one at position
    `rank` becomes the output value.

    Parameters
    ----------
    a : ndarray
        The N-dimensional input array.
    domain : array_like
        A mask array with the same number of dimensions as `a`. Each
        dimension should have an odd number of elements.
    rank : int
        A non-negative integer selecting the element from the sorted
        neighborhood list (0 corresponds to the smallest element).

    Returns
    -------
    out : ndarray
        The results of the order filter in an array with the same shape
        as `a`.

    Examples
    --------
    >>> from scipy import signal
    >>> x = np.arange(25).reshape(5, 5)
    >>> domain = np.identity(3)
    >>> signal.order_filter(x, domain, 2)
    array([[  6.,   7.,   8.,   9.,   4.],
           [ 11.,  12.,  13.,  14.,   9.],
           [ 16.,  17.,  18.,  19.,  14.],
           [ 21.,  22.,  23.,  24.,  19.],
           [ 20.,  21.,  22.,  23.,  24.]])
    """
    domain = asarray(domain)
    # Each window dimension must be odd so the window has a unique center.
    for extent in domain.shape:
        if extent % 2 != 1:
            raise ValueError("Each dimension of domain argument "
                             " should have an odd number of elements.")
    return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
    """
    Perform a median filter on an N-dimensional array.

    Apply a median filter to the input array using a local window whose
    size is given by `kernel_size`.

    Parameters
    ----------
    volume : array_like
        An N-dimensional input array.
    kernel_size : array_like, optional
        A scalar or an N-length list giving the size of the median filter
        window in each dimension. Elements of `kernel_size` should be odd.
        A scalar is used as the size in every dimension. Default is 3 for
        each dimension.

    Returns
    -------
    out : ndarray
        An array the same size as input containing the median filtered
        result.
    """
    volume = atleast_1d(volume)
    if kernel_size is None:
        kernel_size = [3] * len(volume.shape)
    kernel_size = asarray(kernel_size)
    if kernel_size.shape == ():
        # Broadcast a scalar window size to every dimension.
        kernel_size = np.repeat(kernel_size.item(), volume.ndim)
    for dim in range(volume.ndim):
        if kernel_size[dim] % 2 != 1:
            raise ValueError("Each element of kernel_size should be odd.")
    # The median is the middle element of the sorted window: an order
    # filter of rank numels // 2 over an all-ones domain.
    domain = ones(kernel_size)
    numels = np.prod(kernel_size, axis=0)
    return sigtools._order_filterND(volume, domain, numels // 2)
def wiener(im, mysize=None, noise=None):
    """
    Perform a Wiener filter on an N-dimensional array.

    Apply a Wiener filter to the N-dimensional array `im`.

    Parameters
    ----------
    im : ndarray
        An N-dimensional array.
    mysize : int or arraylike, optional
        A scalar or an N-length list giving the size of the Wiener filter
        window in each dimension. Elements of mysize should be odd. A
        scalar is used as the size in every dimension.
    noise : float, optional
        The noise-power to use. If None, then noise is estimated as the
        average of the local variance of the input.

    Returns
    -------
    out : ndarray
        Wiener filtered result with the same shape as `im`.
    """
    im = asarray(im)
    if mysize is None:
        mysize = [3] * im.ndim
    mysize = asarray(mysize)
    if mysize.shape == ():
        mysize = np.repeat(mysize.item(), im.ndim)
    window = ones(mysize)
    numel = np.prod(mysize, axis=0)
    # Local first and second moments over the window.
    lMean = correlate(im, window, 'same') / numel
    lVar = correlate(im ** 2, window, 'same') / numel - lMean ** 2
    # Estimate the noise power as the mean local variance if not given.
    if noise is None:
        noise = mean(ravel(lVar), axis=0)
    res = (im - lMean)
    res *= (1 - noise / lVar)
    res += lMean
    # Where the local variance falls below the noise floor, fall back to
    # the local mean.
    out = where(lVar < noise, lMean, res)
    return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
    """
    Convolve two 2-dimensional arrays.

    Convolve `in1` and `in2` with output size determined by `mode`, and
    boundary conditions determined by `boundary` and `fillvalue`.

    Parameters
    ----------
    in1, in2 : array_like
        Two-dimensional input arrays to be convolved.
    mode : str {'full', 'valid', 'same'}, optional
        Size of the output: the full discrete linear convolution
        (default), only the elements that do not rely on zero-padding
        ('valid'), or an output the same size as `in1` centered with
        respect to the 'full' output ('same').
    boundary : str {'fill', 'wrap', 'symm'}, optional
        How to handle boundaries: pad input arrays with `fillvalue`
        (default), circular boundary conditions ('wrap'), or symmetrical
        boundary conditions ('symm').
    fillvalue : scalar, optional
        Value to fill pad input arrays with. Default is 0.

    Returns
    -------
    out : ndarray
        A 2-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.
    """
    in1 = asarray(in1)
    in2 = asarray(in2)
    if mode == 'valid':
        _check_valid_mode_shapes(in1.shape, in2.shape)
    mode_flag = _valfrommode(mode)
    boundary_flag = _bvalfromboundary(boundary)
    with warnings.catch_warnings():
        # FIXME: some cast generates a warning here
        warnings.simplefilter('ignore', np.ComplexWarning)
        # Third argument 1 selects convolution (vs. correlation).
        out = sigtools._convolve2d(in1, in2, 1, mode_flag, boundary_flag,
                                   fillvalue)
    return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
    """
    Cross-correlate two 2-dimensional arrays.

    Cross correlate `in1` and `in2` with output size determined by `mode`,
    and boundary conditions determined by `boundary` and `fillvalue`.

    Parameters
    ----------
    in1, in2 : array_like
        Two-dimensional input arrays to be correlated.
    mode : str {'full', 'valid', 'same'}, optional
        Size of the output: the full discrete linear cross-correlation
        (default), only the elements that do not rely on zero-padding
        ('valid'), or an output the same size as `in1` centered with
        respect to the 'full' output ('same').
    boundary : str {'fill', 'wrap', 'symm'}, optional
        How to handle boundaries: pad input arrays with `fillvalue`
        (default), circular boundary conditions ('wrap'), or symmetrical
        boundary conditions ('symm').
    fillvalue : scalar, optional
        Value to fill pad input arrays with. Default is 0.

    Returns
    -------
    correlate2d : ndarray
        A 2-dimensional array containing a subset of the discrete linear
        cross-correlation of `in1` with `in2`.
    """
    in1 = asarray(in1)
    in2 = asarray(in2)
    if mode == 'valid':
        _check_valid_mode_shapes(in1.shape, in2.shape)
    mode_flag = _valfrommode(mode)
    boundary_flag = _bvalfromboundary(boundary)
    with warnings.catch_warnings():
        # FIXME: some cast generates a warning here
        warnings.simplefilter('ignore', np.ComplexWarning)
        # Third argument 0 selects correlation (vs. convolution).
        out = sigtools._convolve2d(in1, in2, 0, mode_flag, boundary_flag,
                                   fillvalue)
    return out
def medfilt2d(input, kernel_size=3):
    """
    Median filter a 2-dimensional array.

    Apply a median filter to the `input` array using a local window whose
    size is given by `kernel_size` (must be odd).

    Parameters
    ----------
    input : array_like
        A 2-dimensional input array.
    kernel_size : array_like, optional
        A scalar or a list of length 2, giving the size of the median
        filter window in each dimension. Elements of `kernel_size` should
        be odd. A scalar is used as the size in both dimensions. Default
        is a kernel of size (3, 3).

    Returns
    -------
    out : ndarray
        An array the same size as input containing the median filtered
        result.
    """
    image = asarray(input)
    # Treat None like medfilt does: the default 3x3 window.
    if kernel_size is None:
        kernel_size = [3] * 2
    kernel_size = asarray(kernel_size)
    if kernel_size.shape == ():
        kernel_size = np.repeat(kernel_size.item(), 2)
    if any(extent % 2 != 1 for extent in kernel_size):
        raise ValueError("Each element of kernel_size should be odd.")
    return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
    """
    Filter data along one-dimension with an IIR or FIR filter.
    Filter a data sequence, `x`, using a digital filter.  This works for many
    fundamental data types (including Object type).  The filter is a direct
    form II transposed implementation of the standard difference equation
    (see Notes).
    Parameters
    ----------
    b : array_like
        The numerator coefficient vector in a 1-D sequence.
    a : array_like
        The denominator coefficient vector in a 1-D sequence.  If ``a[0]``
        is not 1, then both `a` and `b` are normalized by ``a[0]``.
    x : array_like
        An N-dimensional input array.
    axis : int, optional
        The axis of the input data array along which to apply the
        linear filter. The filter is applied to each subarray along
        this axis.  Default is -1.
    zi : array_like, optional
        Initial conditions for the filter delays.  It is a vector
        (or array of vectors for an N-dimensional input) of length
        ``max(len(a),len(b))-1``.  If `zi` is None or is not given then
        initial rest is assumed.  See `lfiltic` for more information.
    Returns
    -------
    y : array
        The output of the digital filter.
    zf : array, optional
        If `zi` is None, this is not returned, otherwise, `zf` holds the
        final filter delay values.
    Notes
    -----
    The filter function is implemented as a direct II transposed structure.
    This means that the filter implements::
       a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb]
                               - a[1]*y[n-1] - ... - a[na]*y[n-na]
    using the following difference equations::
         y[m] = b[0]*x[m] + z[0,m-1]
         z[0,m] = b[1]*x[m] + z[1,m-1] - a[1]*y[m]
         ...
         z[n-3,m] = b[n-2]*x[m] + z[n-2,m-1] - a[n-2]*y[m]
         z[n-2,m] = b[n-1]*x[m] - a[n-1]*y[m]
    where m is the output sample number and n=max(len(a),len(b)) is the
    model order.
    The rational transfer function describing this filter in the
    z-transform domain is::
                             -1              -nb
                 b[0] + b[1]z  + ... + b[nb] z
         Y(z) = ---------------------------------- X(z)
                             -1              -na
                 a[0] + a[1]z  + ... + a[na] z
    """
    a = np.atleast_1d(a)
    if len(a) == 1:
        # FIR fast path (single denominator coefficient): the filter is a
        # pure convolution done in NumPy, avoiding the compiled routine.
        # This path only supports types fdgFDGO to mirror _linear_filter below.
        # Any of b, a, x, or zi can set the dtype, but there is no default
        # casting of other types; instead a NotImplementedError is raised.
        b = np.asarray(b)
        a = np.asarray(a)
        if b.ndim != 1 and a.ndim != 1:
            raise ValueError('object of too small depth for desired array')
        x = np.asarray(x)
        inputs = [b, a, x]
        if zi is not None:
            # _linear_filter does not broadcast zi, but does do expansion of singleton dims.
            zi = np.asarray(zi)
            if zi.ndim != x.ndim:
                raise ValueError('object of too small depth for desired array')
            expected_shape = list(x.shape)
            expected_shape[axis] = b.shape[0] - 1
            expected_shape = tuple(expected_shape)
            # check the trivial case where zi is the right shape first
            if zi.shape != expected_shape:
                strides = zi.ndim * [None]
                if axis < 0:
                    axis += zi.ndim
                # Build a strided view that expands singleton dims of zi
                # (stride 0) to the expected shape without copying.
                for k in range(zi.ndim):
                    if k == axis and zi.shape[k] == expected_shape[k]:
                        strides[k] = zi.strides[k]
                    elif k != axis and zi.shape[k] == expected_shape[k]:
                        strides[k] = zi.strides[k]
                    elif k != axis and zi.shape[k] == 1:
                        strides[k] = 0
                    else:
                        raise ValueError('Unexpected shape for zi: expected '
                                         '%s, found %s.' %
                                         (expected_shape, zi.shape))
                zi = np.lib.stride_tricks.as_strided(zi, expected_shape, strides)
            inputs.append(zi)
        dtype = np.result_type(*inputs)
        if dtype.char not in 'fdgFDGO':
            raise NotImplementedError("input type '%s' not supported" % dtype)
        b = np.array(b, dtype=dtype)
        a = np.array(a, dtype=dtype, copy=False)
        # Normalize by a[0] so the filter reduces to a plain convolution.
        b /= a[0]
        x = np.array(x, dtype=dtype, copy=False)
        out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
        ind = out_full.ndim * [slice(None)]
        if zi is not None:
            # Initial conditions add into the leading output samples.
            ind[axis] = slice(zi.shape[axis])
            out_full[ind] += zi
        ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
        out = out_full[ind]
        if zi is None:
            return out
        else:
            # Final delay values are the tail of the full convolution.
            ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
            zf = out_full[ind]
            return out, zf
    else:
        # General IIR case: delegate to the compiled direct-form-II filter.
        if zi is None:
            return sigtools._linear_filter(b, a, x, axis)
        else:
            return sigtools._linear_filter(b, a, x, axis, zi)
def lfiltic(b, a, y, x=None):
    """
    Construct initial conditions for lfilter.

    Given a linear filter (b, a) and initial conditions on the output `y`
    and the input `x`, return the initial conditions on the state vector
    zi which is used by `lfilter` to generate the output given the input.

    Parameters
    ----------
    b : array_like
        Linear filter term.
    a : array_like
        Linear filter term.
    y : array_like
        Initial conditions.
        If ``N=len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
        If `y` is too short, it is padded with zeros.
    x : array_like, optional
        Initial conditions.
        If ``M=len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
        If `x` is not given, its initial conditions are assumed zero.
        If `x` is too short, it is padded with zeros.

    Returns
    -------
    zi : ndarray
        The state vector ``zi``.
        ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, where ``K = max(M,N)``.

    See Also
    --------
    lfilter
    """
    N = np.size(a) - 1
    M = np.size(b) - 1
    K = max(M, N)
    y = np.asarray(y)
    if y.dtype.kind in 'bui':
        # Integer/boolean history: promote so the state is floating point.
        y = y.astype(np.float64)
    zi = np.zeros(K, y.dtype)
    if x is None:
        x = np.zeros(M, y.dtype)
    else:
        x = np.asarray(x)
        if np.size(x) < M:
            x = np.r_[x, np.zeros(M - np.size(x))]
    if np.size(y) < N:
        y = np.r_[y, np.zeros(N - np.size(y))]
    # Run the direct-form-II-transposed state recursion backwards from the
    # given history: numerator taps feed the state, denominator taps drain it.
    for m in range(M):
        zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)
    for m in range(N):
        zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)
    return zi
def deconvolve(signal, divisor):
    """Deconvolves ``divisor`` out of ``signal``.

    Returns the quotient and remainder such that
    ``signal = convolve(divisor, quotient) + remainder``

    Parameters
    ----------
    signal : array_like
        Signal data, typically a recorded signal
    divisor : array_like
        Divisor data, typically an impulse response or filter that was
        applied to the original signal

    Returns
    -------
    quotient : ndarray
        Quotient, typically the recovered original signal
    remainder : ndarray
        Remainder

    Examples
    --------
    Deconvolve a signal that's been filtered:

    >>> from scipy import signal
    >>> original = [0, 1, 0, 0, 1, 1, 0, 0]
    >>> impulse_response = [2, 1]
    >>> recorded = signal.convolve(impulse_response, original)
    >>> recorded
    array([0, 2, 1, 0, 2, 3, 1, 0, 0])
    >>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
    >>> recovered
    array([ 0.,  1.,  0.,  0.,  1.,  1.,  0.,  0.])

    See also
    --------
    numpy.polydiv : performs polynomial division (same operation, but
                    also accepts poly1d objects)
    """
    num = atleast_1d(signal)
    den = atleast_1d(divisor)
    N = len(num)
    D = len(den)
    if D > N:
        # Divisor is longer than the signal: quotient is empty and the
        # whole signal is the remainder.
        # NOTE(review): the empty quotient is a plain list (not ndarray),
        # kept for backward compatibility with existing callers.
        quot = []
        rem = num
    else:
        # Filtering a unit impulse through num/den performs the polynomial
        # long division; the impulse response gives the quotient.
        # (Renamed from `input`, which shadowed the builtin.)
        impulse = zeros(N - D + 1, float)
        impulse[0] = 1.0
        quot = lfilter(num, den, impulse)
        rem = num - convolve(den, quot, mode='full')
    return quot, rem
def hilbert(x, N=None, axis=-1):
    """
    Compute the analytic signal, using the Hilbert transform.

    The transformation is done along the last axis by default.

    Parameters
    ----------
    x : array_like
        Signal data.  Must be real.
    N : int, optional
        Number of Fourier components.  Default: ``x.shape[axis]``
    axis : int, optional
        Axis along which to do the transformation.  Default: -1.

    Returns
    -------
    xa : ndarray
        Analytic signal of `x`, of each 1-D array along `axis`

    Raises
    ------
    ValueError
        If `x` is complex or `N` is not positive.

    Notes
    -----
    The analytic signal ``x_a(t)`` of signal ``x(t)`` is:

    .. math:: x_a = F^{-1}(F(x) 2U) = x + i y

    where `F` is the Fourier transform, `U` the unit step function,
    and `y` the Hilbert transform of `x`. [1]_

    In other words, the negative half of the frequency spectrum is zeroed
    out, turning the real-valued signal into a complex signal.  The Hilbert
    transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
    original signal from ``np.real(hilbert(x))``.

    References
    ----------
    .. [1] Wikipedia, "Analytic signal".
           http://en.wikipedia.org/wiki/Analytic_signal
    .. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
    .. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal
           Processing, Third Edition, 2009. Chapter 12.
           ISBN 13: 978-1292-02572-8
    """
    x = asarray(x)
    if iscomplexobj(x):
        raise ValueError("x must be real.")
    if N is None:
        N = x.shape[axis]
    if N <= 0:
        raise ValueError("N must be positive.")

    Xf = fft(x, N, axis=axis)
    # Build the spectral multiplier h: 1 at DC (and Nyquist for even N),
    # 2 on positive frequencies, 0 on negative frequencies.
    h = zeros(N)
    if N % 2 == 0:
        h[0] = h[N // 2] = 1
        h[1:N // 2] = 2
    else:
        h[0] = 1
        h[1:(N + 1) // 2] = 2

    if x.ndim > 1:
        # Broadcast h along `axis` only.  Index with a tuple: indexing an
        # ndarray with a list of slices/newaxis is deprecated in NumPy and
        # raises in recent versions.
        ind = [newaxis] * x.ndim
        ind[axis] = slice(None)
        h = h[tuple(ind)]
    x = ifft(Xf * h, axis=axis)
    return x
def hilbert2(x, N=None):
    """
    Compute the '2-D' analytic signal of `x`

    Parameters
    ----------
    x : array_like
        2-D signal data.
    N : int or tuple of two ints, optional
        Number of Fourier components. Default is ``x.shape``

    Returns
    -------
    xa : ndarray
        Analytic signal of `x` taken along axes (0,1).

    Raises
    ------
    ValueError
        If `x` is complex, more than 2-D, or `N` is not positive.

    References
    ----------
    .. [1] Wikipedia, "Analytic signal",
        http://en.wikipedia.org/wiki/Analytic_signal
    """
    x = atleast_2d(x)
    if len(x.shape) > 2:
        raise ValueError("x must be 2-D.")
    if iscomplexobj(x):
        raise ValueError("x must be real.")
    if N is None:
        N = x.shape
    elif isinstance(N, int):
        if N <= 0:
            raise ValueError("N must be positive.")
        N = (N, N)
    elif len(N) != 2 or np.any(np.asarray(N) <= 0):
        raise ValueError("When given as a tuple, N must hold exactly "
                         "two positive integers")

    Xf = fft2(x, N, axes=(0, 1))
    # Build the per-axis spectral multipliers (1 at DC/Nyquist, 2 on
    # positive frequencies, 0 on negative ones).  A plain loop replaces the
    # previous eval()/exec() construction of h1/h2.
    hs = []
    for n_pts in N:
        h = zeros(n_pts, 'd')
        if n_pts % 2 == 0:
            h[0] = h[n_pts // 2] = 1
            h[1:n_pts // 2] = 2
        else:
            h[0] = 1
            h[1:(n_pts + 1) // 2] = 2
        hs.append(h)

    # Outer product gives the 2-D multiplier.
    h = hs[0][:, newaxis] * hs[1][newaxis, :]
    k = len(x.shape)
    while k > 2:
        h = h[:, newaxis]
        k -= 1
    x = ifft2(Xf * h, axes=(0, 1))
    return x
def cmplx_sort(p):
    """Sort roots based on magnitude.

    Parameters
    ----------
    p : array_like
        The roots to sort, as a 1-D array.

    Returns
    -------
    p_sorted : ndarray
        Sorted roots.
    indx : ndarray
        Array of indices needed to sort the input `p`.
    """
    p = asarray(p)
    # Complex roots are ordered by magnitude; real roots by value.
    sort_key = abs(p) if iscomplexobj(p) else p
    indx = argsort(sort_key)
    return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
    """
    Determine unique roots and their multiplicities from a list of roots.

    Parameters
    ----------
    p : array_like
        The list of roots.
    tol : float, optional
        The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min, 'avg'}, optional
        How to determine the returned root if multiple roots are within
        `tol` of each other.

          - 'max': pick the maximum of those roots.
          - 'min': pick the minimum of those roots.
          - 'avg': take the average of those roots.

    Returns
    -------
    pout : ndarray
        The list of unique roots, sorted from low to high.
    mult : ndarray
        The multiplicity of each root.

    Notes
    -----
    This utility function is not specific to roots but can be used for any
    sequence of values for which uniqueness and multiplicity has to be
    determined. For a more general routine, see `numpy.unique`.

    Examples
    --------
    >>> from scipy import signal
    >>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
    >>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')

    Check which roots have multiplicity larger than 1:

    >>> uniq[mult > 1]
    array([ 1.305])
    """
    reducers = {'max': np.max, 'maximum': np.max,
                'min': np.min, 'minimum': np.min,
                'avg': np.mean, 'mean': np.mean}
    if rtype not in reducers:
        raise ValueError("`rtype` must be one of "
                         "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
    comproot = reducers[rtype]

    p = asarray(p) * 1.0
    tol = abs(tol)
    p, indx = cmplx_sort(p)

    pout = []
    mult = []
    cluster = []
    # Sentinel guaranteed to be farther than `tol` from the first root,
    # so the first iteration always starts a new cluster.
    current = p[0] + 5 * tol
    for root in p:
        if abs(root - current) < tol:
            # Same cluster: merge into the previously emitted root.
            cluster.append(root)
            current = comproot(cluster)
            pout[-1] = current
            mult[-1] += 1
        else:
            # Start a new cluster for this root.
            cluster = [root]
            current = root
            pout.append(root)
            mult.append(1)
    return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
    """
    Compute b(s) and a(s) from partial fraction expansion.

    If ``M = len(b)`` and ``N = len(a)``::

                b(s)     b[0] x**(M-1) + b[1] x**(M-2) + ... + b[M-1]
        H(s) = ------ = ----------------------------------------------
                a(s)     a[0] x**(N-1) + a[1] x**(N-2) + ... + a[N-1]

                 r[0]       r[1]             r[-1]
             = -------- + -------- + ... + --------- + k(s)
               (s-p[0])   (s-p[1])         (s-p[-1])

    If there are any repeated roots (closer than tol), then the partial
    fraction expansion has terms like::

          r[i]      r[i+1]              r[i+n-1]
        -------- + ----------- + ... + -----------
        (s-p[i])   (s-p[i])**2         (s-p[i])**n

    Parameters
    ----------
    r : ndarray
        Residues.
    p : ndarray
        Poles.
    k : ndarray
        Coefficients of the direct polynomial term.
    tol : float, optional
        The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min, 'avg'}, optional
        How to determine the returned root if multiple roots are within
        `tol` of each other.

          - 'max': pick the maximum of those roots.
          - 'min': pick the minimum of those roots.
          - 'avg': take the average of those roots.

    Returns
    -------
    b : ndarray
        Numerator polynomial coefficients.
    a : ndarray
        Denominator polynomial coefficients.

    See Also
    --------
    residue, unique_roots
    """
    # Save the direct term before `k` is available for reuse; the loop
    # indices below are named i/j/m instead of reusing the parameter name
    # `k` and the ambiguous `l` of the original implementation.
    extra = k
    p, indx = cmplx_sort(p)
    r = take(r, indx, 0)
    pout, mult = unique_roots(p, tol=tol, rtype=rtype)

    # Expand the unique poles back to a full list with multiplicities.
    p = []
    for i in range(len(pout)):
        p.extend([pout[i]] * mult[i])
    a = atleast_1d(poly(p))

    # Start the numerator from the direct term (or zero).
    if len(extra) > 0:
        b = polymul(extra, a)
    else:
        b = [0]

    indx = 0
    for i in range(len(pout)):
        # Denominator with this pole removed.
        temp = []
        for j in range(len(pout)):
            if j != i:
                temp.extend([pout[j]] * mult[j])
        for m in range(mult[i]):
            # For the m-th order term, keep (mult - m - 1) copies of the pole.
            t2 = temp[:]
            t2.extend([pout[i]] * (mult[i] - m - 1))
            b = polyadd(b, r[indx] * atleast_1d(poly(t2)))
            indx += 1
    b = real_if_close(b)
    # Strip negligible leading coefficients, keeping at least one.
    while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
        b = b[1:]
    return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
    """
    Compute partial-fraction expansion of b(s) / a(s).

    If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction
    expansion H(s) is defined as::

            b(s)     b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
    H(s) = ------ = ----------------------------------------------
            a(s)     a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]

             r[0]       r[1]             r[-1]
         = -------- + -------- + ... + --------- + k(s)
           (s-p[0])   (s-p[1])         (s-p[-1])

    If there are any repeated roots (closer together than `tol`), then H(s)
    has terms like::

          r[i]      r[i+1]              r[i+n-1]
        -------- + ----------- + ... + -----------
        (s-p[i])   (s-p[i])**2         (s-p[i])**n

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    tol : float, optional
        The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
        How to determine the returned root if multiple roots are within
        `tol` of each other.  See `unique_roots`.

    Returns
    -------
    r : ndarray
        Residues.
    p : ndarray
        Poles.
    k : ndarray
        Coefficients of the direct polynomial term.

    See Also
    --------
    invres, numpy.poly, unique_roots
    """
    b, a = map(asarray, (b, a))
    # Leading denominator coefficient; residues are rescaled by it at the end.
    rscale = a[0]
    # Direct polynomial term: quotient of the polynomial long division.
    k, b = polydiv(b, a)
    p = roots(a)
    r = p * 0.0
    pout, mult = unique_roots(p, tol=tol, rtype=rtype)
    # Expand the unique poles back to a full list honoring multiplicities.
    p = []
    for n in range(len(pout)):
        p.extend([pout[n]] * mult[n])
    p = asarray(p)
    # Compute the residue from the general formula
    indx = 0
    for n in range(len(pout)):
        bn = b.copy()
        # pn: denominator roots with the current pole removed.
        pn = []
        for l in range(len(pout)):
            if l != n:
                pn.extend([pout[l]] * mult[l])
        an = atleast_1d(poly(pn))
        # bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
        # multiplicity of pole at po[n]
        sig = mult[n]
        for m in range(sig, 0, -1):
            if sig > m:
                # compute next derivative of bn(s) / an(s)
                # (quotient rule; note an is squared each pass, so the
                # iteration order of this loop must not be changed)
                term1 = polymul(polyder(bn, 1), an)
                term2 = polymul(bn, polyder(an, 1))
                bn = polysub(term1, term2)
                an = polymul(an, an)
            # m-th residue of the order-`sig` pole: the (sig-m)-th
            # derivative of bn/an at the pole, divided by (sig-m)!.
            r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n])
                               / factorial(sig - m))
        indx += sig
    return r / rscale, p, k
def residuez(b, a, tol=1e-3, rtype='avg'):
    """
    Compute partial-fraction expansion of b(z) / a(z).

    If ``M = len(b)`` and ``N = len(a)``::

                b(z)     b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
        H(z) = ------ = ----------------------------------------------
                a(z)     a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)

                 r[0]                   r[-1]
         = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
           (1-p[0]z**(-1))         (1-p[-1]z**(-1))

    If there are any repeated roots (closer than tol), then the partial
    fraction expansion has terms like::

             r[i]              r[i+1]                    r[i+n-1]
        -------------- + ------------------ + ... + ------------------
        (1-p[i]z**(-1))  (1-p[i]z**(-1))**2         (1-p[i]z**(-1))**n

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    tol : float, optional
        The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
        How to determine the returned root if multiple roots are within
        `tol` of each other.  See `unique_roots`.

    Returns
    -------
    r : ndarray
        Residues.
    p : ndarray
        Poles.
    k : ndarray
        Coefficients of the direct polynomial term.

    See also
    --------
    invresz, unique_roots
    """
    b, a = map(asarray, (b, a))
    gain = a[0]
    # Work with reversed coefficient vectors so that polynomials in z**(-1)
    # become ordinary polynomials in z**(-1) treated as the variable.
    brev, arev = b[::-1], a[::-1]
    krev, brev = polydiv(brev, arev)
    # NOTE(review): `krev == []` compares an ndarray with a list; with
    # numpy.polydiv the quotient appears to always be a (possibly
    # single-zero) array, so this looks like it is effectively always
    # False -- confirm before relying on the empty-`k` branch.
    if krev == []:
        k = []
    else:
        k = krev[::-1]
    b = brev[::-1]
    p = roots(a)
    r = p * 0.0
    pout, mult = unique_roots(p, tol=tol, rtype=rtype)
    # Expand the unique poles back to a full list honoring multiplicities.
    p = []
    for n in range(len(pout)):
        p.extend([pout[n]] * mult[n])
    p = asarray(p)
    # Compute the residue from the general formula (for discrete-time)
    # the polynomial is in z**(-1) and the multiplication is by terms
    # like this (1-p[i] z**(-1))**mult[i].  After differentiation,
    # we must divide by (-p[i])**(m-k) as well as (m-k)!
    indx = 0
    for n in range(len(pout)):
        bn = brev.copy()
        # pn: denominator roots with the current pole removed.
        pn = []
        for l in range(len(pout)):
            if l != n:
                pn.extend([pout[l]] * mult[l])
        an = atleast_1d(poly(pn))[::-1]
        # bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
        # multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
        sig = mult[n]
        for m in range(sig, 0, -1):
            if sig > m:
                # compute next derivative of bn(s) / an(s)
                # (quotient rule; an is squared each pass, so the loop
                # order must not be changed)
                term1 = polymul(polyder(bn, 1), an)
                term2 = polymul(bn, polyder(an, 1))
                bn = polysub(term1, term2)
                an = polymul(an, an)
            # Evaluate at 1/p[n] (polynomials are in z**(-1)), then apply
            # the (sig-m)! and (-p[n])**(sig-m) correction factors.
            r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
                               polyval(an, 1.0 / pout[n]) /
                               factorial(sig - m) / (-pout[n]) ** (sig - m))
        indx += sig
    return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
    """
    Compute b(z) and a(z) from partial fraction expansion.

    If ``M = len(b)`` and ``N = len(a)``::

                b(z)     b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
        H(z) = ------ = ----------------------------------------------
                a(z)     a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)

                     r[0]                   r[-1]
             = --------------- + ... + ---------------- + k[0] + k[1]z**(-1)...
               (1-p[0]z**(-1))         (1-p[-1]z**(-1))

    If there are any repeated roots (closer than tol), then the partial
    fraction expansion has terms like::

             r[i]              r[i+1]                    r[i+n-1]
        -------------- + ------------------ + ... + ------------------
        (1-p[i]z**(-1))  (1-p[i]z**(-1))**2         (1-p[i]z**(-1))**n

    Parameters
    ----------
    r : ndarray
        Residues.
    p : ndarray
        Poles.
    k : ndarray
        Coefficients of the direct polynomial term.
    tol : float, optional
        The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
        How to determine the returned root if multiple roots are within
        `tol` of each other.  See `unique_roots`.

    Returns
    -------
    b : ndarray
        Numerator polynomial coefficients.
    a : ndarray
        Denominator polynomial coefficients.

    See Also
    --------
    residuez, unique_roots, invres
    """
    # Save the direct term; the loop indices below are named i/j/m instead
    # of reusing the parameter name `k` and the ambiguous `l` as the
    # original implementation did.
    extra = asarray(k)
    p, indx = cmplx_sort(p)
    r = take(r, indx, 0)
    pout, mult = unique_roots(p, tol=tol, rtype=rtype)

    # Expand the unique poles back to a full list with multiplicities.
    p = []
    for i in range(len(pout)):
        p.extend([pout[i]] * mult[i])
    a = atleast_1d(poly(p))

    # Start the numerator from the direct term (or zero).
    if len(extra) > 0:
        b = polymul(extra, a)
    else:
        b = [0]

    indx = 0
    # Accumulate in reversed order because the polynomials are in z**(-1).
    brev = asarray(b)[::-1]
    for i in range(len(pout)):
        temp = []
        # Construct polynomial which does not include any of this root
        for j in range(len(pout)):
            if j != i:
                temp.extend([pout[j]] * mult[j])
        for m in range(mult[i]):
            # For the m-th order term, keep (mult - m - 1) copies of the pole.
            t2 = temp[:]
            t2.extend([pout[i]] * (mult[i] - m - 1))
            brev = polyadd(brev, (r[indx] * atleast_1d(poly(t2)))[::-1])
            indx += 1
    b = real_if_close(brev[::-1])
    return b, a
def resample(x, num, t=None, axis=0, window=None):
    """
    Resample `x` to `num` samples using Fourier method along the given axis.

    The resampled signal starts at the same value as `x` but is sampled
    with a spacing of ``len(x) / num * (spacing of x)``.  Because a
    Fourier method is used, the signal is assumed to be periodic.

    Parameters
    ----------
    x : array_like
        The data to be resampled.
    num : int
        The number of samples in the resampled signal.
    t : array_like, optional
        If `t` is given, it is assumed to be the sample positions
        associated with the signal data in `x`.
    axis : int, optional
        The axis of `x` that is resampled.  Default is 0.
    window : array_like, callable, string, float, or tuple, optional
        Specifies the window applied to the signal in the Fourier
        domain.  See below for details.

    Returns
    -------
    resampled_x or (resampled_x, resampled_t)
        Either the resampled array, or, if `t` was given, a tuple
        containing the resampled array and the corresponding resampled
        positions.

    Notes
    -----
    The argument `window` controls a Fourier-domain window that tapers
    the Fourier spectrum before zero-padding to alleviate ringing in
    the resampled values for sampled signals you didn't intend to be
    interpreted as band-limited.

    If `window` is a function, then it is called with a vector of inputs
    indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).

    If `window` is an array of the same length as `x.shape[axis]` it is
    assumed to be the window to be applied directly in the Fourier
    domain (with dc and low-frequency first).

    For any other type of `window`, the function `scipy.signal.get_window`
    is called to generate the window.

    The first sample of the returned vector is the same as the first
    sample of the input vector.  The spacing between samples is changed
    from ``dx`` to ``dx * len(x) / num``.

    If `t` is not None, then it represents the old sample positions,
    and the new sample positions will be returned as well as the new
    samples.

    As noted, `resample` uses FFT transformations, which can be very
    slow if the number of input or output samples is large and prime;
    see `scipy.fftpack.fft`.

    Examples
    --------
    Note that the end of the resampled data rises to meet the first
    sample of the next cycle:

    >>> from scipy import signal
    >>> x = np.linspace(0, 10, 20, endpoint=False)
    >>> y = np.cos(-x**2/6.0)
    >>> f = signal.resample(y, 100)
    >>> xnew = np.linspace(0, 10, 100, endpoint=False)

    >>> import matplotlib.pyplot as plt
    >>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
    >>> plt.legend(['data', 'resampled'], loc='best')
    >>> plt.show()
    """
    x = asarray(x)
    X = fft(x, axis=axis)
    Nx = x.shape[axis]
    if window is not None:
        if callable(window):
            W = window(fftfreq(Nx))
        elif isinstance(window, ndarray):
            if window.shape != (Nx,):
                raise ValueError('window must have the same length as data')
            W = window
        else:
            W = ifftshift(get_window(window, Nx))
        newshape = [1] * x.ndim
        newshape[axis] = len(W)
        # Use reshape (returns a view) instead of assigning to W.shape,
        # which would mutate a caller-supplied window array in place.
        X = X * W.reshape(newshape)
    sl = [slice(None)] * x.ndim
    newshape = list(x.shape)
    newshape[axis] = num
    N = int(np.minimum(num, Nx))
    Y = zeros(newshape, 'D')
    # Copy the low (positive) and high (negative) frequency halves; index
    # with tuples, as list-of-slices indexing is deprecated in NumPy.
    sl[axis] = slice(0, (N + 1) // 2)
    Y[tuple(sl)] = X[tuple(sl)]
    sl[axis] = slice(-(N - 1) // 2, None)
    Y[tuple(sl)] = X[tuple(sl)]
    y = ifft(Y, axis=axis) * (float(num) / float(Nx))

    if x.dtype.char not in ['F', 'D']:
        # Real input: drop the (numerically negligible) imaginary part.
        y = y.real

    if t is None:
        return y
    else:
        new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
        return y, new_t
def vectorstrength(events, period):
    '''
    Determine the vector strength of the events corresponding to the given
    period.

    The vector strength is a measure of phase synchrony, how well the
    timing of the events is synchronized to a single period of a periodic
    signal.  If multiple periods are used, the vector strength of each is
    calculated ("resonating vector strength").

    Parameters
    ----------
    events : 1D array_like
        An array of time points containing the timing of the events.
    period : float or array_like
        The period of the signal that the events should synchronize to.
        The period is in the same units as `events`.  It can also be an
        array of periods, in which case the outputs are arrays of the same
        length.

    Returns
    -------
    strength : float or 1D array
        The strength of the synchronization.  1.0 is perfect
        synchronization and 0.0 is no synchronization.  If `period` is an
        array, this is also an array with one element per period.
    phase : float or array
        The phase that the events are most strongly synchronized to in
        radians.  If `period` is an array, this is also an array with one
        element per period.

    References
    ----------
    van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
        strength: Auditory system, electric fish, and noise.
        Chaos 21, 047508 (2011); doi: 10.1063/1.3670512
    van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
        biological and mathematical perspectives.  Biol Cybern.
        2013 Aug;107(4):385-96. doi: 10.1007/s00422-013-0561-7.
    van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what
        happens when we vary the "probing" frequency while keeping the
        spike times fixed.  Biol Cybern. 2013 Aug;107(4):491-94.
        doi: 10.1007/s00422-013-0560-8
    '''
    events = asarray(events)
    period = asarray(period)
    if events.ndim > 1:
        raise ValueError('events cannot have dimensions more than 1')
    if period.ndim > 1:
        raise ValueError('period cannot have dimensions more than 1')

    # Remember whether `period` came in as a scalar so the outputs can be
    # returned as scalars as well.
    scalar_period = not period.ndim

    events = atleast_2d(events)
    period = atleast_2d(period)
    if (period <= 0).any():
        raise ValueError('periods must be positive')

    # Map each event time to a unit phasor on the circle for every period.
    phasors = exp(dot(2j * pi / period.T, events))

    # Vector strength is the magnitude of the mean phasor; the preferred
    # phase is its angle.
    resultant = mean(phasors, axis=1)
    strength = abs(resultant)
    phase = angle(resultant)

    if scalar_period:
        strength = strength[0]
        phase = phase[0]
    return strength, phase
def detrend(data, axis=-1, type='linear', bp=0):
    """
    Remove linear trend along axis from data.

    Parameters
    ----------
    data : array_like
        The input data.
    axis : int, optional
        The axis along which to detrend the data. By default this is the
        last axis (-1).
    type : {'linear', 'constant'}, optional
        The type of detrending. If ``type == 'linear'`` (default),
        the result of a linear least-squares fit to `data` is subtracted
        from `data`.
        If ``type == 'constant'``, only the mean of `data` is subtracted.
    bp : array_like of ints, optional
        A sequence of break points. If given, an individual linear fit is
        performed for each part of `data` between two break points.
        Break points are specified as indices into `data`.

    Returns
    -------
    ret : ndarray
        The detrended input data.

    Raises
    ------
    ValueError
        If `type` is not recognized or a break point exceeds the data
        length along `axis`.

    Examples
    --------
    >>> from scipy import signal
    >>> randgen = np.random.RandomState(9)
    >>> npoints = 1000
    >>> noise = randgen.randn(npoints)
    >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
    >>> (signal.detrend(x) - noise).max() < 0.01
    True
    """
    if type not in ['linear', 'l', 'constant', 'c']:
        raise ValueError("Trend type must be 'linear' or 'constant'.")
    data = asarray(data)
    dtype = data.dtype.char
    # Work in floating point; anything else falls back to double.
    if dtype not in 'dfDF':
        dtype = 'd'
    if type in ['constant', 'c']:
        # Constant detrend: subtract the mean along `axis`.
        ret = data - expand_dims(mean(data, axis), axis)
        return ret
    else:
        dshape = data.shape
        N = dshape[axis]
        # Normalize breakpoints: always include both ends, sorted, unique.
        bp = sort(unique(r_[0, bp, N]))
        if np.any(bp > N):
            raise ValueError("Breakpoints must be less than length "
                             "of data along given axis.")
        Nreg = len(bp) - 1
        # Restructure data so that axis is along first dimension and
        # all other dimensions are collapsed into second dimension
        rnk = len(dshape)
        if axis < 0:
            axis = axis + rnk
        newdims = r_[axis, 0:axis, axis + 1:rnk]
        newdata = reshape(transpose(data, tuple(newdims)),
                          (N, prod(dshape, axis=0) // N))
        newdata = newdata.copy()  # make sure we have a copy
        if newdata.dtype.char not in 'dfDF':
            newdata = newdata.astype(dtype)
        # Find leastsq fit and remove it for each piece
        for m in range(Nreg):
            Npts = bp[m + 1] - bp[m]
            # Design matrix: [normalized ramp, constant] columns.
            A = ones((Npts, 2), dtype)
            A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
            sl = slice(bp[m], bp[m + 1])
            coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
            newdata[sl] = newdata[sl] - dot(A, coef)
        # Put data back in original shape.
        tdshape = take(dshape, newdims, 0)
        ret = reshape(newdata, tuple(tdshape))
        vals = list(range(1, rnk))
        olddims = vals[:axis] + [0] + vals[axis:]
        ret = transpose(ret, tuple(olddims))
        return ret
def lfilter_zi(b, a):
    """
    Compute an initial state `zi` for the lfilter function that corresponds
    to the steady state of the step response.

    A typical use of this function is to set the initial state so that the
    output of the filter starts at the same value as the first element of
    the signal to be filtered.

    Parameters
    ----------
    b, a : array_like (1-D)
        The IIR filter coefficients. See `lfilter` for more
        information.

    Returns
    -------
    zi : 1-D ndarray
        The initial state for the filter.

    Raises
    ------
    ValueError
        If `b` or `a` is not 1-D, or if all `a` coefficients are zero.

    Notes
    -----
    A linear filter with order m has a state space representation (A, B, C, D),
    for which the output y of the filter can be expressed as::

        z(n+1) = A*z(n) + B*x(n)
        y(n)   = C*z(n) + D*x(n)

    where z(n) is a vector of length m, A has shape (m, m), B has shape
    (m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
    a scalar).  lfilter_zi solves::

        zi = A*zi + B

    In other words, it finds the initial condition for which the response
    to an input of all ones is a constant.

    Given the filter coefficients `a` and `b`, the state space matrices
    for the transposed direct form II implementation of the linear filter,
    which is the implementation used by scipy.signal.lfilter, are::

        A = scipy.linalg.companion(a).T
        B = b[1:] - a[1:]*b[0]

    assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
    divided by a[0].

    Examples
    --------
    The following code creates a lowpass Butterworth filter. Then it
    applies that filter to an array whose values are all 1.0; the
    output is also all 1.0, as expected for a lowpass filter.  If the
    `zi` argument of `lfilter` had not been given, the output would have
    shown the transient signal.

    >>> from numpy import array, ones
    >>> from scipy.signal import lfilter, lfilter_zi, butter
    >>> b, a = butter(5, 0.25)
    >>> zi = lfilter_zi(b, a)
    >>> y, zo = lfilter(b, a, ones(10), zi=zi)
    >>> y
    array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
    """
    # FIXME: Can this function be replaced with an appropriate
    # use of lfiltic?  For example, when b,a = butter(N,Wn),
    #    lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
    #
    # We could use scipy.signal.normalize, but it uses warnings in
    # cases where a ValueError is more appropriate, and it allows
    # b to be 2D.
    b = np.atleast_1d(b)
    if b.ndim != 1:
        raise ValueError("Numerator b must be 1-D.")
    a = np.atleast_1d(a)
    if a.ndim != 1:
        raise ValueError("Denominator a must be 1-D.")

    while len(a) > 1 and a[0] == 0.0:
        a = a[1:]
    # The loop above always leaves at least one element, so the previous
    # `a.size < 1` check could never fire.  What can remain is a single
    # zero coefficient, i.e. an all-zero denominator, which is undefined.
    if a[0] == 0.0:
        raise ValueError("There must be at least one nonzero `a` coefficient.")

    if a[0] != 1.0:
        # Normalize the coefficients so a[0] == 1.
        b = b / a[0]
        a = a / a[0]

    n = max(len(a), len(b))

    # Pad a or b with zeros so they are the same length.
    if len(a) < n:
        a = np.r_[a, np.zeros(n - len(a))]
    elif len(b) < n:
        b = np.r_[b, np.zeros(n - len(b))]

    IminusA = np.eye(n - 1) - linalg.companion(a).T
    B = b[1:] - a[1:] * b[0]
    # Solve zi = A*zi + B
    zi = np.linalg.solve(IminusA, B)

    # For future reference: we could also use the following
    # explicit formulas to solve the linear system:
    #
    # zi = np.zeros(n - 1)
    # zi[0] = B.sum() / IminusA[:,0].sum()
    # asum = 1.0
    # csum = 0.0
    # for k in range(1,n-1):
    #     asum += a[k]
    #     csum += b[k] - a[k]*b[0]
    #     zi[k] = asum*zi[0] - csum

    return zi
def sosfilt_zi(sos):
    """
    Compute an initial state `zi` for the sosfilt function that corresponds
    to the steady state of the step response.

    A typical use of this function is to set the initial state so that the
    output of the filter starts at the same value as the first element of
    the signal to be filtered.

    Parameters
    ----------
    sos : array_like
        Array of second-order filter coefficients, must have shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.

    Returns
    -------
    zi : ndarray
        Initial conditions suitable for use with ``sosfilt``, shape
        ``(n_sections, 2)``.

    See Also
    --------
    sosfilt, zpk2sos

    Notes
    -----
    .. versionadded:: 0.16.0

    Examples
    --------
    Filter a rectangular pulse that begins at time 0, with and without
    the use of the `zi` argument of `scipy.signal.sosfilt`.

    >>> from scipy import signal
    >>> sos = signal.butter(9, 0.125, output='sos')
    >>> zi = signal.sosfilt_zi(sos)
    >>> x = (np.arange(250) < 100).astype(int)
    >>> f1 = signal.sosfilt(sos, x)
    >>> f2, zo = signal.sosfilt(sos, x, zi=zi)
    """
    sos = np.asarray(sos)
    if sos.ndim != 2 or sos.shape[1] != 6:
        raise ValueError('sos must be shape (n_sections, 6)')

    n_sections = sos.shape[0]
    zi = np.empty((n_sections, 2))
    scale = 1.0
    for idx, section in enumerate(sos):
        b = section[:3]
        a = section[3:]
        zi[idx] = scale * lfilter_zi(b, a)
        # Each section's steady-state step-response level feeds the next
        # one, so accumulate the DC gain H(1) = b.sum() / a.sum().
        scale *= b.sum() / a.sum()

    return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
.. [1] F. Gustaffson. Determining the initial states in forward-backward
filtering. Transactions on Signal Processing, 46(4):988-992, 1996.
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
if axis != -1 or axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
if axis != -1 or axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
             irlen=None):
    """
    A forward-backward filter.

    This function applies a linear filter twice, once forward and once
    backwards.  The combined filter has linear phase.

    The function provides options for handling the edges of the signal.

    When `method` is "pad", the function pads the data along the given axis
    in one of three ways: odd, even or constant.  The odd and even extensions
    have the corresponding symmetry about the end point of the data.  The
    constant extension extends the data with the values at the end points.  On
    both the forward and backward passes, the initial condition of the
    filter is found by using `lfilter_zi` and scaling it by the end point of
    the extended data.

    When `method` is "gust", Gustafsson's method [1]_ is used.  Initial
    conditions are chosen for the forward and backward passes so that the
    forward-backward filter gives the same result as the backward-forward
    filter.

    Parameters
    ----------
    b : (N,) array_like
        The numerator coefficient vector of the filter.
    a : (N,) array_like
        The denominator coefficient vector of the filter.  If ``a[0]``
        is not 1, then both `a` and `b` are normalized by ``a[0]``.
    x : array_like
        The array of data to be filtered.
    axis : int, optional
        The axis of `x` to which the filter is applied.
        Default is -1.
    padtype : str or None, optional
        Must be 'odd', 'even', 'constant', or None.  This determines the
        type of extension to use for the padded signal to which the filter
        is applied.  If `padtype` is None, no padding is used.  The default
        is 'odd'.
    padlen : int or None, optional
        The number of elements by which to extend `x` at both ends of
        `axis` before applying the filter.  This value must be less than
        ``x.shape[axis] - 1``.  ``padlen=0`` implies no padding.
        The default value is ``3 * max(len(a), len(b))``.
    method : str, optional
        Determines the method for handling the edges of the signal, either
        "pad" or "gust".  When `method` is "pad", the signal is padded; the
        type of padding is determined by `padtype` and `padlen`, and `irlen`
        is ignored.  When `method` is "gust", Gustafsson's method is used,
        and `padtype` and `padlen` are ignored.
    irlen : int or None, optional
        When `method` is "gust", `irlen` specifies the length of the
        impulse response of the filter.  If `irlen` is None, no part
        of the impulse response is ignored.  For a long signal, specifying
        `irlen` can significantly improve the performance of the filter.

    Returns
    -------
    y : ndarray
        The filtered output, an array of type numpy.float64 with the same
        shape as `x`.

    See Also
    --------
    lfilter_zi, lfilter

    Notes
    -----
    The option to use Gustafsson's method was added in scipy version 0.16.0.

    References
    ----------
    .. [1] F. Gustafsson, "Determining the initial states in forward-backward
           filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992,
           1996.

    Examples
    --------
    Create a lowpass Butterworth filter with a cutoff of 0.125 times the
    Nyquist rate and apply it to a signal that is the sum of a 5 Hz and a
    250 Hz sine wave.  The result is approximately the low-frequency
    component, with no phase shift.

    >>> from scipy import signal
    >>> t = np.linspace(0, 1.0, 2001)
    >>> xlow = np.sin(2 * np.pi * 5 * t)
    >>> x = xlow + np.sin(2 * np.pi * 250 * t)
    >>> b, a = signal.butter(8, 0.125)
    >>> y = signal.filtfilt(b, a, x, padlen=150)
    >>> np.abs(y - xlow).max() < 1e-5
    True

    Gustafsson's method is selected with ``method="gust"``; for long signals
    its performance can be improved by passing an estimate of the filter's
    impulse response length via `irlen`:

    >>> b, a = signal.ellip(4, 0.01, 120, 0.125)
    >>> y = signal.filtfilt(b, a, np.random.randn(500), method="gust",
    ...                     irlen=137)
    """
    b = np.atleast_1d(b)
    a = np.atleast_1d(a)
    x = np.asarray(x)

    if method not in ["pad", "gust"]:
        raise ValueError("method must be 'pad' or 'gust'.")

    if method == "gust":
        # Gustafsson's method ignores padtype/padlen; the optimal initial
        # conditions z1, z2 are not needed by the caller.
        y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
        return y

    # `method` is "pad"...
    ntaps = max(len(a), len(b))

    if padtype not in ['even', 'odd', 'constant', None]:
        raise ValueError(("Unknown value '%s' given to padtype. padtype "
                          "must be 'even', 'odd', 'constant', or None.") %
                         padtype)

    if padtype is None:
        padlen = 0

    if padlen is None:
        # Original padding; preserved for backwards compatibility.
        edge = ntaps * 3
    else:
        edge = padlen

    # x's 'axis' dimension must be bigger than edge.
    if x.shape[axis] <= edge:
        # BUG FIX: the check above is strict (`<=`), so the length must be
        # *greater than* padlen; the old message incorrectly said "at least".
        raise ValueError("The length of the input vector x must be greater "
                         "than padlen, which is %d." % edge)

    if padtype is not None and edge > 0:
        # Make an extension of length `edge` at each
        # end of the input array.
        if padtype == 'even':
            ext = even_ext(x, edge, axis=axis)
        elif padtype == 'odd':
            ext = odd_ext(x, edge, axis=axis)
        else:
            ext = const_ext(x, edge, axis=axis)
    else:
        ext = x

    # Get the steady state of the filter's step response.
    zi = lfilter_zi(b, a)

    # Reshape zi and create x0 so that zi*x0 broadcasts
    # to the correct value for the 'zi' keyword argument
    # to lfilter.
    zi_shape = [1] * x.ndim
    zi_shape[axis] = zi.size
    zi = np.reshape(zi, zi_shape)
    x0 = axis_slice(ext, stop=1, axis=axis)

    # Forward filter.
    (y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)

    # Backward filter.
    # Create y0 so zi*y0 broadcasts appropriately.
    y0 = axis_slice(y, start=-1, axis=axis)
    (y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)

    # Reverse y.
    y = axis_reverse(y, axis=axis)

    if edge > 0:
        # Slice the actual signal from the extended signal.
        y = axis_slice(y, start=edge, stop=-edge, axis=axis)

    return y
def sosfilt(sos, x, axis=-1, zi=None):
    """
    Filter data along one dimension using cascaded second-order sections.

    Filter a data sequence, `x`, with a digital IIR filter defined by `sos`.
    Each of the ``n_sections`` rows of `sos` holds the three numerator
    coefficients followed by the three denominator coefficients of one
    second-order section; the sections are applied in turn via `lfilter`.

    Parameters
    ----------
    sos : array_like
        Second-order filter coefficients, shape ``(n_sections, 6)``.
    x : array_like
        An N-dimensional input array.
    axis : int, optional
        The axis of `x` along which the filter is applied.  Default is -1.
    zi : array_like, optional
        Initial conditions for the cascaded filter delays, with shape
        ``(n_sections, ..., 2, ...)`` where ``(..., 2, ...)`` is the shape
        of `x` with ``x.shape[axis]`` replaced by 2.  If omitted, initial
        rest (all zeros) is assumed.  Note that these are *not* the values
        produced by `lfiltic` or `lfilter_zi`.

    Returns
    -------
    y : ndarray
        The output of the digital filter.
    zf : ndarray, optional
        Final filter delay values; only returned when `zi` was given.

    See Also
    --------
    zpk2sos, sos2zpk, sosfilt_zi

    Notes
    -----
    A cascade of second-order (direct-form II transposed) stages minimizes
    the numerical precision errors that affect high-order single-stage
    transfer-function filters.

    .. versionadded:: 0.16.0
    """
    x = np.asarray(x)
    sos = atleast_2d(sos)
    if sos.ndim != 2:
        raise ValueError('sos array must be 2D')
    num_sections, width = sos.shape
    if width != 6:
        raise ValueError('sos array must be shape (n_sections, 6)')

    has_zi = zi is not None
    zf = None
    if has_zi:
        zi = np.asarray(zi)
        # zi must look like x, but with the filtered axis reduced to the two
        # delay slots of a biquad, stacked once per section.
        expected = list(x.shape)
        expected[axis] = 2
        expected = tuple([num_sections] + expected)
        if zi.shape != expected:
            raise ValueError('Invalid zi shape. With axis=%r, an input with '
                             'shape %r, and an sos array with %d sections, zi '
                             'must have shape %r.' %
                             (axis, x.shape, num_sections, expected))
        zf = zeros_like(zi)

    # Thread the signal through each biquad in turn: the output of one
    # section becomes the input of the next.
    for idx, section in enumerate(sos):
        if has_zi:
            x, zf[idx] = lfilter(section[:3], section[3:], x, axis,
                                 zi=zi[idx])
        else:
            x = lfilter(section[:3], section[3:], x, axis)

    return (x, zf) if has_zi else x
from scipy.signal.filter_design import cheby1
from scipy.signal.fir_filter_design import firwin
def decimate(x, q, n=None, ftype='iir', axis=-1):
    """
    Downsample the signal by using a filter.

    By default, an order 8 Chebyshev type I filter is used.  A 30 point FIR
    filter with hamming window is used if `ftype` is 'fir'.

    Parameters
    ----------
    x : ndarray
        The signal to be downsampled, as an N-dimensional array.
    q : int
        The downsampling factor.
    n : int, optional
        The order of the filter (1 less than the length for 'fir').
    ftype : str {'iir', 'fir'}, optional
        The type of the lowpass filter.
    axis : int, optional
        The axis along which to decimate.

    Returns
    -------
    y : ndarray
        The down-sampled signal.

    Raises
    ------
    TypeError
        If `q` is not an integer.

    See also
    --------
    resample
    """
    if not isinstance(q, int):
        raise TypeError("q must be an integer")

    if n is None:
        if ftype == 'fir':
            n = 30
        else:
            n = 8

    if ftype == 'fir':
        # Anti-aliasing FIR filter of length n + 1 with cutoff at the new
        # Nyquist rate (1/q of the original).
        b = firwin(n + 1, 1. / q, window='hamming')
        a = 1.
    else:
        # Chebyshev type I with 0.05 dB ripple, cutoff at 0.8 of the new
        # Nyquist rate.
        b, a = cheby1(n, 0.05, 0.8 / q)

    y = lfilter(b, a, x, axis=axis)

    # Keep every q-th sample along `axis`.
    # BUG FIX: the index must be a tuple -- indexing an ndarray with a
    # *list* of slices is deprecated/removed multidimensional indexing
    # in NumPy.
    sl = [slice(None)] * y.ndim
    sl[axis] = slice(None, None, q)
    return y[tuple(sl)]
| {
"content_hash": "13d5f4ed078af68397eef0911cb64791",
"timestamp": "",
"source": "github",
"line_count": 2670,
"max_line_length": 92,
"avg_line_length": 32.77378277153558,
"alnum_prop": 0.5587616849130346,
"repo_name": "trankmichael/scipy",
"id": "d8605f606386fbc89385d41093c5bbebc5da4909",
"size": "87548",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scipy/signal/signaltools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4250392"
},
{
"name": "C++",
"bytes": "3627410"
},
{
"name": "FORTRAN",
"bytes": "5570905"
},
{
"name": "HTML",
"bytes": "124328"
},
{
"name": "Makefile",
"bytes": "4562"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "9418436"
},
{
"name": "Shell",
"bytes": "3172"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
'''
Created on May 30, 2013
@author: vieglais
TODO:
Command line:
d1cache [options] [operation]
operation:
update
purge
summary
serve
'''
import os
import logging
import datetime
import time
import yaml
import shutil
import threading
import Queue
from collections import deque
from sqlalchemy import create_engine, func, or_
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.pool import SingletonThreadPool
import d1_common.const
import d1_common.types
import d1_common.types.generated.dataoneTypes_1_1 as dataoneTypes
from d1_common import date_time
from d1_client import d1baseclient
from d1_client import cnclient
from d1_client import objectlistiterator
from d1_local_cache.util import mjd
from d1_local_cache.ocache import models
# Default directory (relative to the current working directory) under which
# cached content and the cache database are stored.
DEFAULT_CACHE_PATH = "dataone_content"
# File name of the SQLite cache database, created inside the cache directory.
DEFAULT_CACHE_DATABASE = "cache.sqdb"
# Upper bound on worker threads; also sizes the SQLAlchemy connection pool.
MAX_WORKER_THREADS = 6
class ObjectCache():
    '''File-system backed cache of DataONE objects.

    Content files are stored under ``cachePath/content`` and indexed by a
    SQLite database (``cachePath/<dbname>``) accessed through SQLAlchemy.
    System metadata and object content are downloaded from a DataONE
    environment rooted at ``baseUrl`` using pools of worker threads.
    '''

    def __init__(self,
                 cachePath=DEFAULT_CACHE_PATH,
                 dbname=DEFAULT_CACHE_DATABASE,
                 baseUrl=None,
                 loadData=False,
                 instrument=None,
                 certificate=None):
        # `instrument`, when provided, is a metrics sink exposing a
        # .gauge(name, value) method; `certificate` is a client certificate
        # path handed to the DataONE clients.
        self._log = logging.getLogger("ObjectCache")
        self.instrument = instrument
        self.cachePath = cachePath
        self._dbname = dbname
        self._loadData = loadData
        self.sessionmaker = None
        self.engine = None
        # Key/value settings persisted in the database (see loadState).
        self.config = {}
        # In-memory list of known PIDs (see populatePidList).
        self._pidlist = []
        self._maxthreads = MAX_WORKER_THREADS
        self._certificate = certificate
        self.setUp()
        # An explicitly supplied baseUrl overrides any persisted value.
        if not baseUrl is None:
            self.config["baseUrl"] = baseUrl

    def _cacheDataBaseName(self):
        '''Return the SQLAlchemy SQLite URL for the cache database.
        '''
        fullpath = os.path.abspath(os.path.join(self.cachePath, self._dbname))
        return "sqlite:///%s" % fullpath

    def setUp(self):
        '''Create the cache folder structure and database if needed, bind
        the SQLAlchemy engine and session factory, and load persisted
        configuration.
        '''
        #Create folder structure and database
        fullpath = os.path.abspath(self.cachePath)
        if not os.path.exists(fullpath):
            os.makedirs(fullpath)
        contentpath = os.path.join(fullpath, "content")
        if not os.path.exists(contentpath):
            os.makedirs(contentpath)
        #populate with object formats if necessary
        # Pool is sized to the worker-thread count so each thread can hold
        # its own SQLite connection.
        self.engine = create_engine(self._cacheDataBaseName(),
                                    poolclass=SingletonThreadPool,
                                    pool_size=self._maxthreads)
        self.sessionmaker = scoped_session(sessionmaker(bind=self.engine))
        models.Base.metadata.bind = self.engine
        models.Base.metadata.create_all()
        self.loadState()

    def loadState(self):
        '''Copy persisted settings from the database into self.config.
        '''
        conf = models.PersistedDictionary(self.sessionmaker())
        for k in conf.keys():
            self.config[k] = conf[k]

    def storeState(self):
        '''Persist the contents of self.config back to the database.
        '''
        conf = models.PersistedDictionary(self.sessionmaker())
        for k in self.config.keys():
            conf[k] = self.config[k]

    @property
    def newestEntry(self):
        '''Largest CacheEntry.tstamp value in the database (or None).
        '''
        session = self.sessionmaker()
        tstamp = session.query(func.max(models.CacheEntry.tstamp)).one()[0]
        session.close()
        return tstamp

    @property
    def lastModified(self):
        '''Largest CacheEntry.modified value in the database (or None).
        '''
        session = self.sessionmaker()
        tstamp = session.query(func.max(models.CacheEntry.modified)).one()[0]
        session.close()
        return tstamp

    def populatePidList(self):
        '''Loads all pids from the cache database into a list
        '''
        session = self.sessionmaker()
        self._pidlist = []
        for pid, in session.query(models.CacheEntry.pid):
            self._pidlist.append(pid)

    @property
    def pidcount(self):
        '''Number of cache entries (identifiers) in the database.
        '''
        session = self.sessionmaker()
        npids = session.query(models.CacheEntry).count()
        session.close()
        return npids

    def purgeContent(self):
        '''Remove all entries from the cache entry, short-UID, and object
        format tables.

        NOTE(review): files already written under content/ are not removed
        here -- confirm whether that is intentional.
        '''
        self._log.warn("Purging all content from cache.")
        session = self.sessionmaker()
        session.query(models.CacheEntry).delete()
        session.commit()
        session.query(models.ShortUid).delete()
        session.commit()
        session.query(models.D1ObjectFormat).delete()
        session.commit()

    @property
    def baseUrl(self):
        '''Base URL of the DataONE environment.

        Falls back to the production root URL and caches that default in
        self.config on first access.
        '''
        res = None
        try:
            res = self.config['baseUrl']
        except KeyError:
            res = d1_common.const.URL_DATAONE_ROOT
            self.config['baseUrl'] = res
        return res

    @baseUrl.setter
    def baseUrl(self, v):
        self.config['baseUrl'] = v

    @property
    def lastLoaded(self):
        '''MJD of the last load operation, or 0.0 if never loaded.
        '''
        try:
            return self.config['lastLoaded']
        except KeyError:
            return 0.0

    @lastLoaded.setter
    def lastLoaded(self, v):
        # Accept either a datetime (converted to MJD) or a float MJD.
        if isinstance(v, datetime.datetime):
            v = mjd.dateTime2MJD(v)
        self.config['lastLoaded'] = v

    def getObjectPath(self, suid, isSystemMetadata=True):
        '''Return the file path for the object with short UID `suid`,
        creating the containing folder if necessary.

        Files are sharded into sub-folders named by the first character of
        the short UID; system metadata and content use distinct suffixes.
        '''
        subf = suid[0:1]
        fname = "%s_content.xml" % suid
        if isSystemMetadata:
            fname = "%s_sysm.xml" % suid
        path = os.path.join(self.cachePath, "content", subf)
        if not os.path.exists(os.path.abspath(path)):
            os.makedirs(os.path.abspath(path))
        return os.path.join(path, fname)

    def getSystemMetadata(self, suid):
        '''Load and deserialize the cached system metadata for `suid`.

        Empty accessPolicy / preferredMemberNode / blockedMemberNode
        elements are stripped before deserialization.
        NOTE(review): presumably these empty elements break
        CreateFromDocument's schema validation -- confirm.
        '''
        fpath = self.getObjectPath(suid, isSystemMetadata=True)
        xml = file(fpath, 'rb').read()
        xml = xml.replace(u"<accessPolicy/>", u"")
        xml = xml.replace(u"<preferredMemberNode/>", u"")
        xml = xml.replace(u"<blockedMemberNode/>", u"")
        xml = xml.replace(u"<preferredMemberNode></preferredMemberNode>", u"")
        xml = xml.replace(u"<blockedMemberNode></blockedMemberNode>", u"")
        return dataoneTypes.CreateFromDocument(xml)

    def populateObjectFormats(self):
        '''Load the object format list from the Coordinating Node into the
        database.
        '''
        session = self.sessionmaker()
        client = cnclient.CoordinatingNodeClient(base_url=self.baseUrl)
        models.loadObjectFormats(session, client)
        session.close()

    def loadObjectList(self, objectList):
        '''Add a cache entry for every object in `objectList` that is not
        already known.

        Returns the number of entries added.
        '''
        self._log.info("Prefetching identifiers...")
        self.populatePidList()
        session = self.sessionmaker()
        n = 0
        self._log.info("Paging through object list...")
        for o in objectList:
            pid = o.identifier.value()
            #if not models.PIDexists(session, pid):
            if not pid in self._pidlist:
                self._log.debug("Adding PID: %s" % pid)
                tmod = mjd.dateTime2MJD(o.dateSysMetadataModified)
                res = models.addObjectCacheEntry(session,
                                                 pid,
                                                 o.formatId,
                                                 o.size,
                                                 tmod)
                if res is None:
                    self._log.error("Could not add PID: %s" % pid)
                else:
                    self._log.info("Added PID: %s" % pid)
                    n += 1
            # Report progress to the metrics sink, if one was provided.
            if self.instrument is not None:
                self.instrument.gauge("PIDs", n)
        session.close()
        return n

    def countByType(self, otype="METADATA", status=None, cstatus=None):
        '''Count cache entries of format type `otype`, optionally filtered
        by system metadata HTTP status (`status`) or content HTTP status
        (`cstatus`).  `status` takes precedence when both are given.
        '''
        session = self.sessionmaker()
        try:
            if not status is None:
                res = session.query(models.CacheEntry).join(models.D1ObjectFormat)\
                    .filter(models.D1ObjectFormat.formatType == otype)\
                    .filter(models.CacheEntry.sysmstatus == status)
            elif cstatus is not None:
                res = session.query(models.CacheEntry).join(models.D1ObjectFormat)\
                    .filter(models.D1ObjectFormat.formatType == otype)\
                    .filter(models.CacheEntry.contentstatus == cstatus)
            else:
                res = session.query(models.CacheEntry).join(models.D1ObjectFormat)\
                    .filter(models.D1ObjectFormat.formatType == otype)
            self._log.debug(str(res))
            return res.count()
        except Exception as e:
            self._log.error(e)
        finally:
            session.close()

    def countByTypeDateUploaded(self, otype='METADATA', mjd_uploaded=None):
        '''Returns a count of objects matching the provided object type and
        optionally older than or equal to date_uploaded.

        If specified, mjd_uploaded should be a floating point MJD value.
        '''
        session = self.sessionmaker()
        try:
            if mjd_uploaded is None:
                res = session.query(models.CacheEntry).join(models.D1ObjectFormat) \
                    .filter(models.D1ObjectFormat.formatType == otype)
            else:
                res = session.query(models.CacheEntry).join(models.D1ObjectFormat) \
                    .filter(models.D1ObjectFormat.formatType == otype) \
                    .filter(models.CacheEntry.uploaded <= mjd_uploaded)
            return res.count()
        except Exception as e:
            self._log.error(e)
        finally:
            session.close()

    def __str__(self):
        '''Return a string representation of self

        The result is a YAML document summarizing entry counts by format
        type and by system metadata / content retrieval status.
        '''
        res = {}
        res["baseURL"] = self.baseUrl
        res["count"] = self.pidcount
        res["lasttimestamp"] = self.newestEntry
        res["newestobject"] = self.lastModified
        res['numzerostatus'] = 0
        counts = {}
        for otype in ["DATA", "METADATA", "RESOURCE"]:
            counts[otype] = self.countByType(otype=otype, status=None)
        res['counts'] = counts
        # Entries whose system metadata has not been retrieved (status 0).
        counts = {}
        for otype in ["DATA", "METADATA", "RESOURCE"]:
            counts[otype] = self.countByType(otype=otype, status=0)
        res['zcounts'] = counts
        # Entries whose system metadata was retrieved OK (HTTP 200).
        counts = {}
        for otype in ["DATA", "METADATA", "RESOURCE"]:
            counts[otype] = self.countByType(otype=otype, status=200)
        res['okcounts'] = counts
        # Entries whose content was retrieved OK (HTTP 200).
        counts = {}
        for otype in ["DATA", "METADATA", "RESOURCE"]:
            counts[otype] = self.countByType(otype=otype, cstatus=200)
        res['cokcounts'] = counts
        return yaml.dump(res)

    def loadSystemMetadata(self, withstatus=0):
        '''Download system metadata for every cache entry whose sysmstatus
        equals `withstatus` (0 = not yet retrieved), using a pool of
        worker threads.
        '''
        #Queue to hold the tasks that need to be processed
        Q = Queue.Queue()
        # Ring buffer of completion timestamps, used to estimate throughput.
        CQ = deque([], 100)

        def worker():
            '''Pulls PIDs off the queue and downloads the associated system metadata,
            updates the cache database.
            '''
            _log = logging.getLogger("loadSysmeta.worker.%s" % str(threading.current_thread().ident))
            client = d1baseclient.DataONEBaseClient(self.baseUrl,
                                                    cert_path=self._certificate)
            tsession = self.sessionmaker()
            moreWork = True
            while moreWork:
                idx, pid = Q.get()
                _log.info("Loading system metadata for %s" % pid)
                try:
                    wo = tsession.query(models.CacheEntry).get(pid)
                    sysmeta = client.getSystemMetadataResponse(pid)
                    #sysmeta.read()
                    #sysmeta.close()
                    spath = self.getObjectPath(wo.suid.uid, isSystemMetadata=True)
                    wo.sysmeta = spath
                    fdest = open(os.path.abspath(spath), "wb")
                    shutil.copyfileobj(sysmeta, fdest)
                    fdest.close()
                    # Record the HTTP status of the retrieval on the entry.
                    wo.sysmstatus = sysmeta.status
                    tsession.commit()
                    CQ.append(time.time())
                except d1_common.types.exceptions.DataONEException as e:
                    _log.error(e)
                except Exception as e:
                    _log.warn("Unanticipated exception for pid: %s" % pid)
                    _log.error(e)
                finally:
                    pass
                Q.task_done()
                moreWork = not Q.empty()
                if self.instrument is not None:
                    self.instrument.gauge("QSize", Q.qsize())
                    # Once the ring buffer is full, report the recent
                    # retrieval rate (items per second).
                    if (len(CQ) == CQ.maxlen):
                        try:
                            dt = CQ.maxlen / ((CQ[CQ.maxlen-1] - CQ[0]))
                            self.instrument.gauge('sysm.sec-1', "{:.3f}".format(dt))
                        except Exception as e:
                            _log.error(e)
            tsession.close()
            _log.debug("Thread %s terminated." % str(threading.current_thread().ident))

        #20 seems about right
        nworkers = self._maxthreads - 1
        #stage the workers
        for i in range(nworkers):
            wt = threading.Thread(target=worker)
            wt.daemon = True
            wt.start()
            self._log.debug("Thread %d as %s started" % (i, str(wt.ident)))
        #Get the list of PIDs to work with
        session = self.sessionmaker()
        #work = session.query(models.CacheEntry).join(models.D1ObjectFormat)\
        #  .filter( or_( models.D1ObjectFormat.formatType=="METADATA",
        #                models.D1ObjectFormat.formatType=="RESOURCE"))\
        #  .filter(models.CacheEntry.sysmstatus==0)
        #Load system metadata for everything
        work = session.query(models.CacheEntry)\
            .filter(models.CacheEntry.sysmstatus == withstatus)
        i = 0
        for o in work:
            Q.put([i, o.pid])
            i += 1
        session.close()
        # Block until every queued PID has been processed.
        Q.join()

    def loadContent(self, nthreads=1):
        '''Download object content for METADATA and RESOURCE entries whose
        contentstatus is 0 (not yet retrieved), using worker threads.

        NOTE(review): the `nthreads` parameter is unused; the pool size is
        derived from self._maxthreads instead.
        '''
        work_queue = Queue.Queue()

        #++++++++++++++++++++++++++++++++++
        def worker():
            '''Pulls PIDs off the queue, downloads the object content, and
            updates the cache database.
            '''
            _log = logging.getLogger("loadContent.worker.%s" % str(threading.current_thread().ident))
            client = d1baseclient.DataONEBaseClient(self.baseUrl,
                                                    cert_path=self._certificate)
            tsession = self.sessionmaker()
            moreWork = True
            while moreWork:
                idx, pid = work_queue.get()
                _log.info("Loading content for %s" % pid)
                try:
                    wo = tsession.query(models.CacheEntry).get(pid)
                    content = client.getResponse(pid)
                    cpath = self.getObjectPath(wo.suid.uid, isSystemMetadata=False)
                    wo.content = cpath
                    fdest = open(os.path.abspath(cpath), "wb")
                    shutil.copyfileobj(content, fdest)
                    fdest.close()
                    # Record the HTTP status of the retrieval on the entry.
                    wo.contentstatus = content.status
                    tsession.commit()
                except d1_common.types.exceptions.DataONEException as e:
                    _log.error(e)
                except Exception as e:
                    _log.warn("Unanticipated exception for pid: %s" % pid)
                    _log.error(e)
                finally:
                    pass
                work_queue.task_done()
                moreWork = not work_queue.empty()
            tsession.close()
        #----------------------------------

        nworkers = self._maxthreads - 1
        #stage the workers
        for i in range(nworkers):
            wt = threading.Thread(target=worker)
            wt.daemon = True
            wt.start()
            self._log.debug("Thread %d as %s started" % (i, str(wt.ident)))
        session = self.sessionmaker()
        work = session.query(models.CacheEntry).join(models.D1ObjectFormat)\
            .filter(or_(models.D1ObjectFormat.formatType == "METADATA",
                        models.D1ObjectFormat.formatType == "RESOURCE"))\
            .filter(models.CacheEntry.contentstatus == 0)
        #work = session.query(models.CacheEntry).join(models.D1ObjectFormat)\
        #  .filter( models.D1ObjectFormat.formatType=="RESOURCE")\
        #  .filter(models.CacheEntry.contentstatus==0)
        i = 0
        for o in work:
            work_queue.put([i, o.pid])
            i += 1
        session.close()
        work_queue.join()

    def loadSysmetaContent(self, startTime=None, startFrom=None,
                           onNextPage=None):
        '''Top-level load operation: list identifiers from the environment
        (optionally from `startTime`, starting at list offset `startFrom`),
        add cache entries for them, then download their system metadata.

        NOTE(review): `onNextPage` is currently unused -- the pagecallback
        variant of the iterator is commented out below.
        '''
        maxtoload = -1
        pagesize = 1000
        start = startFrom
        if startFrom is None:
            start = self.pidcount - 1
        if isinstance(startTime, float):
            #Assume the provided startTime is a MJD
            startTime = date_time.to_xsd_datetime(mjd.MJD2dateTime(startTime))
        self._log.info("Starting PID load from: %d" % start)
        if start < 0:
            start = 0;
        self.lastLoaded = mjd.now()
        client = d1baseclient.DataONEBaseClient(self.baseUrl,
                                                cert_path=self._certificate)
        self._log.info("Loading identifiers...")
        # objects = objectlistiterator.ObjectListIterator(client, start=start,
        #                                                 pagesize=pagesize,
        #                                                 max=maxtoload,
        #                                                 fromDate=startTime,
        #                                                 pagecallback=onNextPage)
        objects = objectlistiterator.ObjectListIterator(client, start=start,
                                                        pagesize=pagesize,
                                                        max=maxtoload,
                                                        fromDate=startTime)
        n = self.loadObjectList(objects)
        self._log.info("Added %d identifiers" % n)
        self._log.info("Loading System Metadata...")
        self.loadSystemMetadata()
        #self._log.info( "Loading content..." )
        #self.loadContent()
        self.storeState()
        self._log.info("Done.")

    def adjustSysMetaentries(self):
        '''Iterate through all system metadata entries and:
        1. adjust the path to be relative
        2. populate a new column for the dateUploaded
        3. populate a new column for the originMemberNode
        4. populate a new column for archived
        5. populate a new column for obsoletedBy

        Only entries with uploaded == 0 (not yet adjusted) are processed;
        the session is committed every 1000 entries.
        '''
        session = self.sessionmaker()
        work = session.query(models.CacheEntry)\
            .filter(models.CacheEntry.uploaded == 0)
        counter = 0
        total = 0
        for o in work:
            self._log.info("{0:s} : {1:s}".format(o.suid.uid, o.pid))
            sysm = self.getSystemMetadata(o.suid.uid)
            o.uploaded = mjd.dateTime2MJD(sysm.dateUploaded)
            o.archived = sysm.archived
            o.origin = sysm.originMemberNode.value()
            if sysm.obsoletes is not None:
                o.obsoletes = sysm.obsoletes.value()
            if sysm.obsoletedBy is not None:
                o.obsoleted_by = sysm.obsoletedBy.value()
            counter += 1
            total += 1
            if counter > 1000:
                # Periodic commit to bound transaction size; report progress.
                session.commit()
                counter = 0
                self.instrument.gauge('sysm.fix', total)
        session.commit()
| {
"content_hash": "efa2be81fcb75beb6ca34e0381bdc7fa",
"timestamp": "",
"source": "github",
"line_count": 518,
"max_line_length": 95,
"avg_line_length": 32.66216216216216,
"alnum_prop": 0.6084283941131272,
"repo_name": "vdave/d1_local_cache",
"id": "e1ad7b3544fa20643704e417c997236f1c80a85c",
"size": "16919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/d1_local_cache/ocache/object_cache_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "39454"
}
],
"symlink_target": ""
} |
from weaver.compat import execfile
from weaver.logger import D_SCRIPT, debug, fatal
from weaver.nest import Nest
from weaver.options import Options
from weaver.util import Container
from weaver.script import Script
from weaver.script import ABSTRACTIONS
from weaver.script import DATASETS
from weaver.script import FUNCTIONS
from weaver.script import NESTS
from weaver.script import OPTIONS
from weaver.script import STACKS
import weaver.logger
import collections
import os
import sys
import time
class Wfunction(object):
    """ Weaver Script class.

    Parses command line environment and sets up run-time configuration.
    """
    def __init__(self, function=None, force=False, import_builtins=True, output_directory=None,
                 execute_dag=False, engine_wrapper=None, engine_arguments=None, args=None):
        self.function = function
        # BUG FIX: `args=[]` was a shared mutable default; use None as the
        # sentinel and give each instance its own list.
        self.arguments = args if args is not None else []
        self.force = force                    # Ignore warnings
        # BUG FIX: this was hard-coded to True, silently ignoring the
        # caller's `import_builtins` argument.
        self.import_builtins = import_builtins  # Load built-ins
        if output_directory is None:
            self.output_directory = os.curdir  # Where to create artifacts
        else:
            self.output_directory = output_directory
        self.start_time = time.time()          # Record beginning of compiling
        self.options = Options()
        self.nested_abstractions = False
        self.inline_tasks = 1
        self.execute_dag = execute_dag
        self.globals = {}
        self.engine_wrapper = engine_wrapper
        self.engine_arguments = engine_arguments
        self.include_symbols = False

        debug(D_SCRIPT, 'force = {0}'.format(self.force))
        debug(D_SCRIPT, 'import_builtins = {0}'.format(self.import_builtins))
        debug(D_SCRIPT, 'output_directory = {0}'.format(self.output_directory))
        debug(D_SCRIPT, 'start_time = {0}'.format(self.start_time))
        debug(D_SCRIPT, 'options = {0}'.format(self.options))
        debug(D_SCRIPT, 'nested_abstractions = {0}'.format(self.nested_abstractions))
        debug(D_SCRIPT, 'inline_tasks = {0}'.format(self.inline_tasks))
        debug(D_SCRIPT, 'execute_dag = {0}'.format(self.execute_dag))
        debug(D_SCRIPT, 'engine_wrapper = {0}'.format(self.engine_wrapper))
        debug(D_SCRIPT, 'engine_arguments = {0}'.format(self.engine_arguments))

    def _import(self, module, symbols):
        """ Import ``symbols`` from ``module`` into global namespace. """
        # Import module
        m = 'weaver.{0}'.format(module)
        # level=-1 (implicit relative imports allowed) is Python 2 __import__
        # semantics; this module targets Python 2.
        m = __import__(m, self.globals, self.globals, symbols, -1)
        # Import symbols from module into global namespace, which we store as
        # an attribute for later use (i.e. during compile)
        for symbol in symbols:
            self.globals[symbol] = getattr(m, symbol)
            debug(D_SCRIPT, 'Imported {0} from {1}'.format(symbol, module))

    def compile(self):
        """ Compile script in the specified working directory. """
        # Save active script instance and set this one as active
        work_dir = self.output_directory

        # Add nest path and path to script to Python module path to allow
        # for importing modules outside of $PYTHONPATH
        sys.path.insert(0, os.path.abspath(os.path.dirname(work_dir)))

        # Load built-ins if specified on command line. If built-ins are
        # not automatically loaded by the Script object, then the user must
        # load them manually in their Weaver scripts using the standard
        # Python import facilities.
        if self.import_builtins:
            self._import('abstraction', ABSTRACTIONS)
            self._import('dataset', DATASETS)
            self._import('function', FUNCTIONS)
            self._import('nest', NESTS)
            self._import('options', OPTIONS)
            self._import('stack', STACKS)

        # Execute nest
        with Nest(work_dir, wrapper=self.engine_wrapper) as nest:
            with self.options:
                try:
                    # Run the wrapped function to build the workflow, then
                    # compile the nest into a DAG file.
                    self.function(*self.arguments)
                    nest.compile()
                except Exception as e:
                    fatal(D_SCRIPT, 'Error compiling script: {0}'.format(e), print_traceback=True)
                if self.execute_dag:
                    debug(D_SCRIPT, 'Executing generated DAG {0} with {1}'.format(
                        nest.dag_path, nest.path))
                    nest.execute(self.engine_arguments, exit_on_failure=True)
| {
"content_hash": "199d7933dff94a38fa5dc6c0d5bdb0fc",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 98,
"avg_line_length": 43.26923076923077,
"alnum_prop": 0.62,
"repo_name": "FAANG/faang-methylation",
"id": "b98e9d073245154b55449ce0d788ec373e852836",
"size": "5170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workflowbs/src/jflow/function.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "43224"
},
{
"name": "JavaScript",
"bytes": "924691"
},
{
"name": "Perl",
"bytes": "22867"
},
{
"name": "Python",
"bytes": "809140"
},
{
"name": "R",
"bytes": "155062"
},
{
"name": "Shell",
"bytes": "53823"
}
],
"symlink_target": ""
} |
import os
import urlparse
from unipath import FSPath as Path
from django.core.exceptions import ImproperlyConfigured
# Project root: three path components above this settings module
# (presumably <root>/<project>/settings/<this file> -- verify layout).
PROJECT_DIR = Path(__file__).absolute().ancestor(3)
def get_env_variable(var_name):
    """Return the value of the environment variable ``var_name``.

    Raises:
        ImproperlyConfigured: if the variable is not set, so a
            misconfigured deployment fails loudly at import time.
            (The original docstring said "return an exception".)
    """
    try:
        return os.environ[var_name]
    except KeyError:
        error_msg = "Set the {} environment variable".format(var_name)
        raise ImproperlyConfigured(error_msg)
# Production defaults: DEBUG off.  Environment-specific settings modules are
# expected to import from this base module and override as needed.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Idan Gazit', 'idan@gazit.me'),
)
# No relational database configured; persistence goes through RethinkDB below.
DATABASES = {}
# Parse the RethinkDB connection URL from the environment.  Registering the
# scheme with uses_netloc lets urlparse split host/port/credentials out of
# custom 'rethinkdb://' URLs.
RETHINKDB_URL = urlparse.urlparse(get_env_variable('RETHINKDB_URL'))
urlparse.uses_netloc.append('rethinkdb')
RETHINK_CONNARGS = {}
# Map URL components onto rethinkdb connect() keyword arguments.
# NOTE(review): 'username' is mapped to 'db' — presumably the URL's user
# field carries the database name; confirm against the deployment URLs.
rethink_argmap = {'hostname': 'host',
                  'port': 'port',
                  'username': 'db',
                  'password': 'auth_key'}
for k,v in rethink_argmap.items():
    p = getattr(RETHINKDB_URL, k, None)
    if p is not None:
        RETHINK_CONNARGS[v] = p
MANAGERS = ADMINS
ALLOWED_HOSTS = []
TIME_ZONE = 'Etc/UTC'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = PROJECT_DIR.child('media')
# the following line is a total lie except in production
# MEDIA_URL = 'http://{}.s3.amazonaws.com/media/'.format(AWS_STORAGE_BUCKET_NAME)
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_ROOT = PROJECT_DIR.child('static')
# Expose each asset subdirectory under a URL prefix of the same name.
STATICFILES_DIRS = [
    (subdir, str(STATICFILES_ROOT.child(subdir))) for subdir in
    ['css', 'fonts', 'img', 'js']]
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Secret key comes from the environment so it never lands in version control.
SECRET_KEY = get_env_variable('APP_SECRET_KEY')
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'gistio.urls'
WSGI_APPLICATION = 'gistio.wsgi.application'
# BUG FIX: the original `(PROJECT_DIR.child('templates'))` had no trailing
# comma, so it was a parenthesized path expression rather than a tuple —
# Django would then iterate the path string character by character.  The
# trailing comma makes it a one-element tuple of template directories.
TEMPLATE_DIRS = (
    PROJECT_DIR.child('templates'),
)
INSTALLED_APPS = (
    # auth/contenttypes/sites/messages are deliberately disabled; only the
    # apps this site actually uses are installed.
    # 'django.contrib.auth',
    # 'django.contrib.contenttypes',
    'django.contrib.sessions',
    # 'django.contrib.sites',
    # 'django.contrib.messages',
    'django.contrib.staticfiles',
    'publicsite',
    'githubauth',
    'gists',
)
# Signed-cookie sessions: no database table required (auth is disabled above).
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# GitHub OAuth application credentials, injected via the environment.
GITHUB_CLIENT_ID = get_env_variable('GITHUB_CLIENT_ID')
GITHUB_CLIENT_SECRET = get_env_variable('GITHUB_CLIENT_SECRET')
GITHUB_AUTH_PARAMS = {'client_id': GITHUB_CLIENT_ID,
                      'client_secret': GITHUB_CLIENT_SECRET}
# How long (seconds) rendered public gists may be served from cache.
GIST_PUBLIC_CACHE_SECONDS = 60
| {
"content_hash": "73187ac5c0b4ead5b0c0c11ef4733163",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 81,
"avg_line_length": 27.492957746478872,
"alnum_prop": 0.6644467213114754,
"repo_name": "Teino1978-Corp/Teino1978-Corp-gistio",
"id": "d40c03cca26f95b457d99b48d42f203ad91caa83",
"size": "3904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gistio/settings/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "23127"
},
{
"name": "HTML",
"bytes": "9849"
},
{
"name": "JavaScript",
"bytes": "2115"
},
{
"name": "Python",
"bytes": "20635"
},
{
"name": "Ruby",
"bytes": "785"
}
],
"symlink_target": ""
} |
import sys
sys.path.append('gen-py')
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TCompactProtocol
from ping import Ping
def bench(n):
    """Open a framed, compact-protocol connection to the local Ping server
    and issue ``n`` ping() round trips.

    Args:
        n (int): number of ping requests to send.
    """
    socket = TSocket.TSocket('127.0.0.1', 9090)
    # Fixed the misspelled local name ('tranport') from the original.
    transport = TTransport.TFramedTransport(socket)
    protocol = TCompactProtocol.TCompactProtocol(transport)
    client = Ping.Client(protocol)
    transport.open()
    try:
        for _ in range(n):
            client.ping()
    finally:
        # Always release the connection, even when a ping raises.
        transport.close()
def main():
    """Entry point: ``pingclient.py N`` benchmarks N ping round trips."""
    if len(sys.argv) != 2:
        # An explicit exit beats `assert`, which vanishes under `python -O`.
        raise SystemExit('usage: {0} <num-pings>'.format(sys.argv[0]))
    n = int(sys.argv[1])
    import time
    start = time.time()
    bench(n)
    end = time.time()
    # BUG FIX: the original passed the format string and the values as
    # separate print() arguments, so '%s' was never interpolated.
    print('bench_ping n:%s diff:%s' % (n, end - start))
if __name__ == '__main__':
    main()
| {
"content_hash": "e9f4903f213e925eb75990141b00ca3a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 58,
"avg_line_length": 21.96969696969697,
"alnum_prop": 0.6579310344827586,
"repo_name": "decimalbell/muduo-thrift",
"id": "3a97169398222cba0bf93e37440ac96d133bf6d1",
"size": "725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ping/pingclient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "21479"
},
{
"name": "CMake",
"bytes": "5944"
},
{
"name": "Python",
"bytes": "6816"
},
{
"name": "Thrift",
"bytes": "157"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from wrecks.models import Wreck
from wrecks.models import WreckType
class WreckAdmin(admin.ModelAdmin):
    """Changelist configuration for Wreck in the Django admin."""
    # Columns shown in the changelist.
    list_display = ('name', 'year_sunk', 'depth_meters', 'location')
    # Sidebar filters; presumably 'source' and 'wreck_type' are fields on
    # the Wreck model — confirm against wrecks/models.py.
    list_filter = ('source', 'wreck_type')
# Wreck uses the customised admin above; WreckType gets the default one.
admin.site.register(Wreck, WreckAdmin)
admin.site.register(WreckType) | {
"content_hash": "8532cc8f002ec42912687c974a544049",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 68,
"avg_line_length": 29.181818181818183,
"alnum_prop": 0.7507788161993769,
"repo_name": "greencoder/shipwrecksproject",
"id": "4e550165632d5deb921ee42b9dfe90e59ea23d94",
"size": "321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wrecks/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "33"
},
{
"name": "Python",
"bytes": "14373"
}
],
"symlink_target": ""
} |
import logging; logger = logging.getLogger("morse." + __name__)
import morse.core.robot
import PhysicsConstraints
class HummerClass(morse.core.robot.MorseRobotClass):
    """ Class definition for the Hummer.
        Sub class of Morse_Object.

        On construction this detaches the four 'wheelN' children from the
        Blender object, builds a Bullet vehicle constraint around it and
        tunes steering, suspension and friction for every wheel.
    """

    def __init__(self, obj, parent=None):
        """ Constructor method.
            Receives the reference to the Blender object.
            Optionally it gets the name of the object's parent,
            but that information is not currently used for a robot. """
        logger.info('%s initialization' % obj.name)
        # Call the constructor of the parent class.
        # BUG FIX: the original used super(self.__class__, self), which
        # recurses infinitely as soon as this class is subclassed; the
        # 2-argument form of super() must name the class explicitly.
        super(HummerClass, self).__init__(obj, parent)

        #
        # This section runs only once to create the vehicle:
        #
        wheels = self._detach_wheels(obj)
        obj['init'] = 1
        physicsid = obj.getPhysicsId()
        # Constraint type 11 is the Bullet vehicle constraint.
        vehicle = PhysicsConstraints.createConstraint(physicsid, 0, 11)
        obj['cid'] = vehicle.getConstraintId()
        self.vehicle = PhysicsConstraints.getVehicleConstraint(obj['cid'])

        self._attach_wheels(wheels)
        self._tune_wheels(obj)
        logger.info('Component initialized')

    @staticmethod
    def _detach_wheels(obj):
        """ Locate the children named 'wheel1'..'wheel4', unparent them so
        the vehicle constraint can drive them, and return them in order.

        Raises a clear error when a wheel child is missing (the original
        code failed later with an opaque NameError). """
        wheels = [None] * 4
        for child in obj.children:
            for index in range(4):
                if 'wheel%d' % (index + 1) in child.name:
                    wheels[index] = child
                    child.removeParent()
        missing = [index + 1 for index, wheel in enumerate(wheels) if wheel is None]
        if missing:
            raise RuntimeError("Hummer robot '%s' is missing wheel children: %s"
                               % (obj.name, missing))
        return wheels

    def _attach_wheels(self, wheels):
        """ Add the four wheels to the vehicle constraint: the front pair
        responds to the steering value, the rear pair does not. """
        # wheelAttachDirLocal: direction the suspension is pointing.
        wheel_attach_dir = [0, 0, -1]
        # wheelAxleLocal: rotational angle where the wheel is mounted.
        wheel_axle = [-1, 0, 0]
        # Length of the suspension when it's fully extended.
        suspension_rest_length = .3
        # Radius of the physics wheel (turn on Game:Show Physics
        # Visualization to see a purple line representing it).
        wheel_radius = .5

        logger.debug(dir(wheels[0]))

        # Attachment point (x, y, z) from the vehicle center, plus whether
        # the wheel steers, in wheel order 1..4 (same values as before:
        # front pair at y=1.6 steered, rear pair at y=-2.3 fixed).
        placements = [
            (1.3, 1.6, -.5, 1),    # front, steered
            (-1.3, 1.6, -.5, 1),   # front, steered (opposite side)
            (1.3, -2.3, -.5, 0),   # rear, fixed
            (-1.3, -2.3, -.5, 0),  # rear, fixed (opposite side)
        ]
        for wheel, (wx, wy, wz, has_steering) in zip(wheels, placements):
            self.vehicle.addWheel(wheel, [wx, wy, wz], wheel_attach_dir,
                                  wheel_axle, suspension_rest_length,
                                  wheel_radius, has_steering)

    def _tune_wheels(self, obj):
        """ Apply identical handling parameters to all four wheels (the
        original repeated every setter four times by hand). """
        # Roll influence: 0 = little to no rolling over while turning;
        # .1 and higher rolls over more easily.
        influence = 0.05
        # Suspension stiffness: 0 = no spring back; higher = faster return.
        stiffness = 15
        # Damping: 0 = bounce like a super ball; greater than 0 = less bounce.
        damping = 10
        # Compression resistance: 0 = full suspension travel; 10 = almost none.
        compression = 2
        # Tyre friction: acceleration from standstill and steering authority;
        # read from the Blender object's 'friction' game property.
        friction = obj['friction']
        for wheel_index in range(4):
            self.vehicle.setRollInfluence(influence, wheel_index)
            self.vehicle.setSuspensionStiffness(stiffness, wheel_index)
            self.vehicle.setSuspensionDamping(damping, wheel_index)
            self.vehicle.setSuspensionCompression(compression, wheel_index)
            self.vehicle.setTyreFriction(friction, wheel_index)

    def default_action(self):
        """ Main function of this component.

        The vehicle constraint drives everything each frame, so there is
        nothing to update here yet (speed, friction, braking and suspension
        updates would go here). """
        pass
| {
"content_hash": "42bd9e77c92dbbc509ebd1053e204da1",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 137,
"avg_line_length": 36.883597883597886,
"alnum_prop": 0.6349160809066131,
"repo_name": "Arkapravo/morse-0.6",
"id": "5cd81e69fba112f0ee6d55c4b5995c8ae5663061",
"size": "6971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/morse/robots/hummer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "46148"
},
{
"name": "C++",
"bytes": "30878"
},
{
"name": "Perl",
"bytes": "1705"
},
{
"name": "Python",
"bytes": "1117700"
},
{
"name": "Shell",
"bytes": "684"
}
],
"symlink_target": ""
} |
from moksha.api.widgets.live import LiveWidget
class HelloWorldWidget(LiveWidget):
    """Demo moksha LiveWidget: publishes text typed into the form to the
    'helloworld' topic and prepends every received message to a list."""
    # Message topic this widget both subscribes and publishes to.
    topic = "helloworld"
    template = """
      <b>Hello World Widget</b>
      <form onsubmit="return send_msg()">
        <input name="text" id="text"/>
      </form>
      <ul id="data"/>
      <script>
        function send_msg() {
            moksha.send_message('helloworld', {'msg': $('#text').val()});
            $('#text').val('');
            return false;
        }
      </script>
    """
    # JavaScript executed for each incoming message; `json` is the decoded
    # payload sent by send_msg() above.
    onmessage = """
      $('<li/>').text(json.msg).prependTo('#data');
    """
| {
"content_hash": "2a40f83a6bd482276d2e0ee8400136f4",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 26.434782608695652,
"alnum_prop": 0.46875,
"repo_name": "ralphbean/moksha",
"id": "d77e9749fda80d08bfb075d22bab1ca2c583b5b2",
"size": "608",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "moksha/apps/helloworld/demo/widgets/live.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "1249457"
},
{
"name": "Python",
"bytes": "731300"
},
{
"name": "Shell",
"bytes": "1776"
}
],
"symlink_target": ""
} |
from msrest.paging import Paged
class OperationPaged(Paged):
    """
    A paging container for iterating over a list of :class:`Operation <azure.mgmt.eventgrid.models.Operation>` objects
    """
    # msrest deserialization map: 'nextLink' carries the continuation URL,
    # 'value' holds the current page of Operation instances.
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[Operation]'}
    }

    def __init__(self, *args, **kwargs):
        super(OperationPaged, self).__init__(*args, **kwargs)
| {
"content_hash": "c3d46c81e6007cdaf905a0c4e6b8775a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 117,
"avg_line_length": 28.3125,
"alnum_prop": 0.5982339955849889,
"repo_name": "samedder/azure-cli",
"id": "2c253a55d5ac3ecf89e1c10ddc640b82326a105a",
"size": "1142",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/command_modules/azure-cli-eventgrid/azure/cli/command_modules/eventgrid/sdk/models/operation_paged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11279"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "JavaScript",
"bytes": "380"
},
{
"name": "Python",
"bytes": "5627973"
},
{
"name": "Shell",
"bytes": "25031"
}
],
"symlink_target": ""
} |
"""
This module provides utility classes for string operations.
"""
import re
from fractions import Fraction
# ASCII digit -> Unicode subscript codepoint (used for formula amounts).
SUBSCRIPT_UNICODE = {
    "0": "₀",
    "1": "₁",
    "2": "₂",
    "3": "₃",
    "4": "₄",
    "5": "₅",
    "6": "₆",
    "7": "₇",
    "8": "₈",
    "9": "₉",
}
# ASCII digit or charge sign -> Unicode superscript codepoint (used for
# oxidation states / charges, e.g. O2- -> O²⁻).
SUPERSCRIPT_UNICODE = {
    "0": "⁰",
    "1": "¹",
    "2": "²",
    "3": "³",
    "4": "⁴",
    "5": "⁵",
    "6": "⁶",
    "7": "⁷",
    "8": "⁸",
    "9": "⁹",
    "+": "⁺",
    "-": "⁻",
}
# TODO: make standalone functions in this module use the same implementation as Stringify
# Note: previous deprecations of standalone functions in this module were removed due to
# a community need.
class Stringify:
    """
    Mix-in class for string formatting, e.g. superscripting numbers and symbols or superscripting.
    """

    # Mode consumed by to_latex_string: "SUBSCRIPT" (default) or "SUPERSCRIPT".
    STRING_MODE = "SUBSCRIPT"

    def to_pretty_string(self) -> str:
        """
        :return: A pretty string representation. By default, the __str__ output is used, but this method can be
        overridden if a different representation from default is desired.
        """
        return self.__str__()

    def to_latex_string(self) -> str:
        """
        Generates a LaTeX formatted string. The mode is set by the class variable STRING_MODE, which defaults to
        "SUBSCRIPT". E.g., Fe2O3 is transformed to Fe$_{2}$O$_{3}$. Setting STRING_MODE to "SUPERSCRIPT" creates
        superscript, e.g., Fe2+ becomes Fe^{2+}. The initial string is obtained from the class's __str__ method.

        :return: String for display as in LaTeX with proper superscripts and subscripts.
        """
        str_ = self.to_pretty_string()
        # First we process strings that already have _ and ^ by escaping the relevant parts.
        str_ = re.sub(r"_(\d+)", r"$_{\1}$", str_)
        str_ = re.sub(r"\^([\d\+\-]+)", r"$^{\1}$", str_)
        # Then wrap any remaining number runs that follow a symbol or a
        # closing/opening parenthesis in the mode's script group.
        if self.STRING_MODE == "SUBSCRIPT":
            return re.sub(r"([A-Za-z\(\)])([\d\+\-\.]+)", r"\1$_{\2}$", str_)
        if self.STRING_MODE == "SUPERSCRIPT":
            return re.sub(r"([A-Za-z\(\)])([\d\+\-\.]+)", r"\1$^{\2}$", str_)
        return str_

    def to_html_string(self) -> str:
        """
        Generates a HTML formatted string. This uses the output from to_latex_string to generate a HTML output.
        :return: HTML formatted string.
        """
        # Convert the LaTeX sub/superscript/overline groups into HTML tags.
        str_ = re.sub(r"\$_\{([^}]+)\}\$", r"<sub>\1</sub>", self.to_latex_string())
        str_ = re.sub(r"\$\^\{([^}]+)\}\$", r"<sup>\1</sup>", str_)
        return re.sub(r"\$\\overline\{([^}]+)\}\$", r'<span style="text-decoration:overline">\1</span>', str_)

    def to_unicode_string(self):
        """
        :return: Unicode string with proper sub and superscripts. Note that this works only with systems where the sub
        and superscripts are pure integers.
        """
        str_ = self.to_latex_string()
        # Replace each $_{digits}$ group with Unicode subscript codepoints.
        for m in re.finditer(r"\$_\{(\d+)\}\$", str_):
            s1 = m.group()
            s2 = [SUBSCRIPT_UNICODE[s] for s in m.group(1)]
            str_ = str_.replace(s1, "".join(s2))
        # Replace each $^{digits/signs}$ group with superscript codepoints.
        for m in re.finditer(r"\$\^\{([\d\+\-]+)\}\$", str_):
            s1 = m.group()
            s2 = [SUPERSCRIPT_UNICODE[s] for s in m.group(1)]
            str_ = str_.replace(s1, "".join(s2))
        return str_
def str_delimited(results, header=None, delimiter="\t"):
    """
    Given a tuple of tuples, generate a delimited string form.

    >>> results = [["a","b","c"],["d","e","f"],[1,2,3]]
    >>> print(str_delimited(results,delimiter=","))
    a,b,c
    d,e,f
    1,2,3

    Args:
        results: 2d sequence of arbitrary types.  (The original docstring
            misnamed this parameter "result".)
        header: optional header row, prepended on its own line.
        delimiter: string inserted between fields (default: tab).

    Returns:
        Aligned string output in a table-like format.
    """
    returnstr = ""
    if header is not None:
        returnstr += delimiter.join(header) + "\n"
    # Stringify each cell, join cells with the delimiter, rows with newlines
    # (generator expressions — no intermediate lists needed).
    return returnstr + "\n".join(delimiter.join(str(m) for m in result) for result in results)
def formula_double_format(afloat, ignore_ones=True, tol=1e-8):
    """
    Format an amount for pretty formulas: unity amounts disappear and
    near-integer amounts lose their decimal point, so that
    Li1.0 Fe1.0 P1.0 O4.0 renders as LiFePO4.

    Args:
        afloat (float): a float
        ignore_ones (bool): if true, floats of 1 are ignored.
        tol (float): Tolerance to round to nearest int. i.e. 2.0000000001 -> 2

    Returns:
        A string representation of the float for formulas.
    """
    if ignore_ones and afloat == 1:
        return ""
    nearest_int = int(afloat)
    # Snap to the integer representation when within tolerance.
    if abs(afloat - nearest_int) < tol:
        return str(nearest_int)
    return str(round(afloat, 8))
def latexify(formula):
    """
    Generates a LaTeX formatted formula. E.g., Fe2O3 is transformed to
    Fe$_{2}$O$_{3}$.

    Note that Composition now has a to_latex_string() method that may
    be used instead.

    Args:
        formula (str): Input formula.

    Returns:
        Formula suitable for display as in LaTeX with proper subscripts.
    """
    # A number run following an element symbol or parenthesis is an amount;
    # wrap it in a LaTeX subscript group.
    amount_after_symbol = r"([A-Za-z\(\)])([\d\.]+)"
    return re.sub(amount_after_symbol, r"\1$_{\2}$", formula)
def htmlify(formula):
    """
    Generates a HTML formatted formula, e.g. Fe2O3 is transformed to
    Fe<sub>2</sub>O<sub>3</sub>.

    Note that Composition now has a to_html_string() method that may
    be used instead.

    :param formula:
    :return:
    """
    # Same amount-detection pattern as latexify, but emitting HTML tags.
    amount_after_symbol = r"([A-Za-z\(\)])([\d\.]+)"
    return re.sub(amount_after_symbol, r"\1<sub>\2</sub>", formula)
def unicodeify(formula):
    """
    Generates a formula with unicode subscripts, e.g. Fe2O3 is transformed
    to Fe₂O₃. Does not support formulae with decimal points.

    Note that Composition now has a to_unicode_string() method that may
    be used instead.

    :param formula:
    :return:
    """
    if "." in formula:
        raise ValueError("No unicode character exists for subscript period.")
    # Every key in SUBSCRIPT_UNICODE is a single digit, so one translate()
    # pass performs the same substitutions as replacing each digit in turn.
    return formula.translate(str.maketrans(SUBSCRIPT_UNICODE))
def latexify_spacegroup(spacegroup_symbol):
    r"""
    Generates a latex formatted spacegroup. E.g., P2_1/c is converted to
    P2$_{1}$/c and P-1 is converted to P$\\overline{1}$.

    Note that SymmetryGroup now has a to_latex_string() method that may
    be called instead.

    Args:
        spacegroup_symbol (str): A spacegroup symbol

    Returns:
        A latex formatted spacegroup with proper subscripts and overlines.
    """
    # Underscore-number becomes a subscript group; minus-digit becomes the
    # crystallographic "bar" (overline).
    with_subscripts = re.sub(r"_(\d+)", r"$_{\1}$", spacegroup_symbol)
    return re.sub(r"-(\d)", r"$\\overline{\1}$", with_subscripts)
def unicodeify_spacegroup(spacegroup_symbol):
    r"""
    Generates a unicode formatted spacegroup. E.g., P2$_{1}$/c is converted to
    P2₁/c and P$\\overline{1}$ is converted to P̅1.

    Note that SymmetryGroup now has a to_unicode_string() method that
    may be called instead.

    Args:
        spacegroup_symbol (str): A spacegroup symbol as LaTeX

    Returns:
        A unicode spacegroup with proper subscripts and overlines.
    """
    if not spacegroup_symbol:
        return ""

    # Normalize to the LaTeX form first, then translate its markup.
    symbol = latexify_spacegroup(spacegroup_symbol)

    # Turn both "$_{n}$" groups and bare "_n" into unicode subscripts.
    for digit, subscript_char in SUBSCRIPT_UNICODE.items():
        symbol = symbol.replace("$_{" + str(digit) + "}$", subscript_char)
        symbol = symbol.replace("_" + str(digit), subscript_char)

    # u"\u0304" (macron) is also an option
    combining_overline = "\u0305"
    # Strip the LaTeX overline markup; the closing brace becomes the
    # combining overline, which follows the character it decorates.
    for latex_token, replacement in (("$\\overline{", ""),
                                     ("$", ""),
                                     ("{", ""),
                                     ("}", combining_overline)):
        symbol = symbol.replace(latex_token, replacement)
    return symbol
def unicodeify_species(specie_string):
    r"""
    Generates a unicode formatted species string, with appropriate
    superscripts for oxidation states.

    Note that Species now has a to_unicode_string() method that
    may be used instead.

    Args:
        specie_string (str): Species string, e.g. O2-

    Returns:
        Species string, e.g. O²⁻
    """
    if not specie_string:
        return ""
    # Every key in SUPERSCRIPT_UNICODE is a single character (digits, '+',
    # '-'), so translate() performs the same substitutions as the original
    # per-character replace() loop.
    return specie_string.translate(str.maketrans(SUPERSCRIPT_UNICODE))
def stream_has_colours(stream):
    """
    True if stream supports colours. Python cookbook, #475186
    """
    # Colour output only makes sense on an interactive terminal.
    if not (hasattr(stream, "isatty") and stream.isatty()):
        return False
    try:
        import curses

        curses.setupterm()
        # A terminal advertising more than two colours counts as colourful.
        return curses.tigetnum("colors") > 2
    except Exception:
        # Any curses failure (no terminfo, dumb terminal, ...) => no colours.
        return False
def transformation_to_string(matrix, translation_vec=(0, 0, 0), components=("x", "y", "z"), c="", delim=","):
    """
    Convenience method. Given matrix returns string, e.g. x+2y+1/4

    :param matrix: 3x3 matrix of (rational) coefficients.
    :param translation_vec: length-3 translation appended to each row.
    :param components: either ('x', 'y', 'z') or ('a', 'b', 'c')
    :param c: optional additional character to print (used for magmoms)
    :param delim: delimiter
    :return: xyz string
    """
    parts = []
    for i in range(3):
        s = ""
        m = matrix[i]
        t = translation_vec[i]
        for j, dim in enumerate(components):
            if m[j] != 0:
                # Exact rational form of the coefficient, e.g. 0.5 -> 1/2.
                f = Fraction(m[j]).limit_denominator()
                # A '+' only between terms, never leading.
                if s != "" and f >= 0:
                    s += "+"
                # Suppress the "1" in 1*x; keep just the sign for -1*x.
                if abs(f.numerator) != 1:
                    s += str(f.numerator)
                elif f < 0:
                    s += "-"
                s += c + dim
                if f.denominator != 1:
                    s += "/" + str(f.denominator)
        if t != 0:
            # Translation term, signed only when it follows something.
            s += ("+" if (t > 0 and s != "") else "") + str(Fraction(t).limit_denominator())
        if s == "":
            s += "0"
        parts.append(s)
    return delim.join(parts)
def disordered_formula(disordered_struct, symbols=("x", "y", "z"), fmt="plain"):
    """
    Returns a formula of a form like AxB1-x (x=0.5)
    for disordered structures. Will only return a
    formula for disordered structures with one
    kind of disordered site at present.

    Args:
        disordered_struct: a disordered structure
        symbols: a tuple of characters to use for
            subscripts, by default this is ('x', 'y', 'z')
            but if you have more than three disordered
            species more symbols will need to be added
        fmt (str): 'plain', 'HTML' or 'LaTeX'

    Returns (str): a disordered formula string
    """
    # this is in string utils and not in
    # Composition because we need to have access
    # to site occupancies to calculate this, so
    # have to pass the full structure as an argument
    # (alternatively this could be made a method on
    # Structure)
    from pymatgen.core.composition import Composition
    from pymatgen.core.periodic_table import get_el_sp

    if disordered_struct.is_ordered:
        raise ValueError("Structure is not disordered, " "so disordered formula not defined.")
    # Distinct compositions of the partially-occupied sites; only one
    # distinct kind of disordered site is supported.
    disordered_site_compositions = {site.species for site in disordered_struct if not site.is_ordered}
    if len(disordered_site_compositions) > 1:
        # this probably won't happen too often
        raise ValueError(
            "Ambiguous how to define disordered " "formula when more than one type of disordered " "site is present."
        )
    disordered_site_composition = disordered_site_compositions.pop()
    disordered_species = {str(sp) for sp, occu in disordered_site_composition.items()}
    if len(disordered_species) > len(symbols):
        # this probably won't happen too often either
        raise ValueError("Not enough symbols to describe disordered composition: " "{}".format(symbols))
    # The last disordered species is expressed as the remainder "1-x-y-...",
    # so one fewer symbol is needed than there are disordered species.
    symbols = list(symbols)[0 : len(disordered_species) - 1]
    comp = disordered_struct.composition.get_el_amt_dict().items()
    # sort by electronegativity, as per composition
    comp = sorted(comp, key=lambda x: get_el_sp(x[0]).X)
    disordered_comp = []
    variable_map = {}
    total_disordered_occu = sum([occu for sp, occu in comp if str(sp) in disordered_species])
    # composition to get common factor
    factor_comp = disordered_struct.composition.as_dict()
    factor_comp["X"] = total_disordered_occu
    for sp in disordered_species:
        del factor_comp[str(sp)]
    factor_comp = Composition.from_dict(factor_comp)
    factor = factor_comp.get_reduced_formula_and_factor()[1]
    total_disordered_occu /= factor
    # Remainder amount for the final disordered species, e.g. "1-x-y".
    remainder = "{}-{}".format(
        formula_double_format(total_disordered_occu, ignore_ones=False),
        "-".join(symbols),
    )
    for sp, occu in comp:
        sp = str(sp)
        if sp not in disordered_species:
            disordered_comp.append((sp, formula_double_format(occu / factor)))
        else:
            # Each disordered species consumes one symbol; the last one
            # (symbols exhausted) gets the remainder expression instead.
            if len(symbols) > 0:
                symbol = symbols.pop(0)
                disordered_comp.append((sp, symbol))
                variable_map[symbol] = occu / total_disordered_occu / factor
            else:
                disordered_comp.append((sp, remainder))
    if fmt == "LaTeX":
        sub_start = "_{"
        sub_end = "}"
    elif fmt == "HTML":
        sub_start = "<sub>"
        sub_end = "</sub>"
    elif fmt != "plain":
        raise ValueError("Unsupported output format, " "choose from: LaTeX, HTML, plain")
    # (Local list shadows the function name; kept as-is.)
    disordered_formula = []
    for sp, occu in disordered_comp:
        disordered_formula.append(sp)
        if occu:  # can be empty string if 1
            if fmt != "plain":
                disordered_formula.append(sub_start)
            disordered_formula.append(occu)
            if fmt != "plain":
                disordered_formula.append(sub_end)
        disordered_formula.append(" ")
    # Append the "x=0.5 " style variable assignments, then drop the final
    # trailing space.
    disordered_formula += ["{}={} ".format(k, formula_double_format(v)) for k, v in variable_map.items()]
    return "".join(map(str, disordered_formula))[0:-1]
| {
"content_hash": "3219267eaf4151152137dea6690124d6",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 118,
"avg_line_length": 32.36,
"alnum_prop": 0.5914345960881262,
"repo_name": "gmatteo/pymatgen",
"id": "07ba7c0509e0bc010ebad834c6cc9c05d4330c31",
"size": "13913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/util/string.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "277"
},
{
"name": "Python",
"bytes": "7840569"
},
{
"name": "Shell",
"bytes": "711"
}
],
"symlink_target": ""
} |
import os
import pytest
import tarfile
from bs4 import BeautifulSoup
from copy import deepcopy
from six import BytesIO
from dlkit.abstract_osid.osid import errors
from dlkit.json_.osid.objects import OsidObject
from dlkit.records.repository.edx.utilities import *
from ... import utilities
# Directory containing this test module.
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
# Parent directory; used below to reach the repository's shared test fixtures.
ABS_PATH = os.path.abspath(os.path.join(PROJECT_PATH, os.pardir))
class TestUtilityMethods(object):
    """Tests for the module-level helpers in dlkit's edx repository utilities."""
    def test_clean_str_removes_non_words(self):
        # Every non-word character (spaces, punctuation) becomes '_'.
        assert clean_str('123 Foo !@#,-_') == '123_Foo_______'
    def test_get_byte_stream_size_returns_obj_size(self):
        test_stream = BytesIO('foo'.encode('utf-8'))
        assert get_byte_stream_size(test_stream) == 3
    def test_get_current_time_in_secs_is_accurate(self):
        # Only sanity checks here: an exact value would be time-dependent.
        assert isinstance(get_current_time_in_secs(), int)
        assert get_current_time_in_secs() > 0
    def test_remove_redundant_drafts_works(self):
        assert remove_redundant_drafts('/drafts/drafts') == '/drafts'
        assert remove_redundant_drafts('/drafts') == '/drafts'
    def test_remove_trailing_slash_works(self):
        assert remove_trailing_slash('/foo/') == '/foo'
        assert remove_trailing_slash('/foo') == '/foo'
    def test_slugify_works_without_django(self):
        assert slugify('6.001X, 123-') == '6001x-123-'
@pytest.fixture(scope="class")
def edx_utilities_mixin_class_fixture(request):
    """Attach an EdXUtilitiesMixin (backed by a fake OsidObject) and a test
    image file to the test class; the file is closed at class teardown."""
    request.cls.mixin = EdXUtilitiesMixin()
    obj_map = deepcopy(utilities.TEST_OBJECT_MAP)
    # The display name feeds the mixin's slugified `url` property.
    obj_map['displayName'] = {
        'text': 'Fake Display Name, 123',
        'languageTypeId': '639-2%3AENG%40ISO',
        'formatTypeId': 'TextFormats%3APLAIN%40okapia.net',
        'scriptTypeId': '15924%3ALATN%40ISO'
    }
    request.cls.mixin.my_osid_object = OsidObject(object_name='TEST_OBJECT',
                                                  osid_object_map=obj_map)
    request.cls.test_file = open(os.path.join(ABS_PATH,
                                              '..',
                                              '..',
                                              '..',
                                              'tests',
                                              'fixtures',
                                              'assets',
                                              'draggable.green.dot.png'), 'rb')
    def class_tear_down():
        request.cls.test_file.close()
    request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def edx_utilities_mixin_test_fixture(request):
    """Give each test a fresh in-memory tarfile pre-seeded with one
    '/vertical/a-simple-path.xml' entry."""
    stream = BytesIO()
    request.cls.tarfile = tarfile.open(fileobj=stream, mode='w')
    test_file = tarfile.TarInfo(name='/vertical/a-simple-path.xml')
    test_data = BytesIO('foo'.encode('utf-8'))
    request.cls.tarfile.addfile(test_file, test_data)
@pytest.mark.usefixtures('edx_utilities_mixin_class_fixture', 'edx_utilities_mixin_test_fixture')
class TestEdXUtilitiesMixin(object):
    """Tests for EdXUtilitiesMixin's tar-export helpers (unique naming,
    URL slug, and writing soups/files/directories into a tarfile)."""
    def test_get_unique_name_works(self):
        # 'a-simple-path' already exists in the seeded tarfile, so a
        # '-1' suffix is expected.
        assert self.mixin.get_unique_name(self.tarfile, 'a-simple-path', 'vertical', '/') == 'a-simple-path-1'
    def test_can_get_url_property(self):
        # Slug of the fixture's display name 'Fake Display Name, 123'.
        assert self.mixin.url == 'fake-display-name-123'
    def test_can_write_pretty_soup_to_tarfile(self):
        soup = BeautifulSoup('<vertical display_name="My hat" />', 'xml')
        assert '/vertical/my-hat.xml' not in self.tarfile.getnames()
        self.mixin.write_to_tarfile(self.tarfile, '/vertical/my-hat.xml', soup=soup)
        assert '/vertical/my-hat.xml' in self.tarfile.getnames()
    def test_can_write_unpretty_soup_to_tarfile(self):
        soup = BeautifulSoup('<vertical display_name="My hat" />', 'xml')
        assert '/vertical/my-hat.xml' not in self.tarfile.getnames()
        self.mixin.write_to_tarfile(self.tarfile, '/vertical/my-hat.xml', soup=soup, prettify=False)
        assert '/vertical/my-hat.xml' in self.tarfile.getnames()
    def test_can_write_file_to_tarfile(self):
        file_list = self.tarfile.getnames()
        assert 'green.dot.png' not in file_list
        self.mixin.write_to_tarfile(self.tarfile, '/green.dot.png', fileobj=self.test_file)
        file_list = self.tarfile.getnames()
        assert 'green.dot.png' in file_list
    def test_cannot_write_both_soup_and_file_to_tarfile(self):
        # Supplying both payload kinds is rejected with IllegalState.
        with pytest.raises(errors.IllegalState):
            self.mixin.write_to_tarfile(self.tarfile, '/foo.xml', soup='123', fileobj='123')
    def test_can_write_empty_directory_to_tarfile(self):
        file_list = self.tarfile.getnames()
        assert '/foo-directory' not in file_list
        self.mixin.write_to_tarfile(self.tarfile, '/foo-directory')
        file_list = self.tarfile.getnames()
        assert '/foo-directory' in file_list
| {
"content_hash": "755b38404d4660275ca6c56a18cc0540",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 110,
"avg_line_length": 40.572649572649574,
"alnum_prop": 0.6191278702338319,
"repo_name": "mitsei/dlkit",
"id": "b51be66476e2688ac2a84016f90e2a572c760c2e",
"size": "4747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/records/repository/edx/test_utilities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25170465"
},
{
"name": "TeX",
"bytes": "1088"
}
],
"symlink_target": ""
} |
import types
import importlib
# When True, function values stored in a Param are displayed as
# "module.name"; when False, just "name".
FUNC_FULL_DISPLAY=False
class Param(object):
    """Attribute bag built from keyword arguments.

    The constructor adopts ``kwargs`` as the instance ``__dict__``, so every
    keyword becomes an attribute.  Function-valued entries are rendered by
    name (module-qualified when FUNC_FULL_DISPLAY is True).

    The original duplicated the value-formatting logic verbatim in both
    print_str() and __str__(); it now lives in shared private helpers.
    """
    def __init__(self, **kwargs):
        self.__dict__ = kwargs

    @staticmethod
    def _display_value(value):
        # Functions display as their (optionally qualified) name; everything
        # else is shown as-is via str() in _format_items.
        if isinstance(value, types.FunctionType):
            if FUNC_FULL_DISPLAY:
                return value.__module__ + '.' + value.__name__
            return value.__name__
        return value

    def _format_items(self, names):
        # Comma-joined 'name=value' pairs for the given attribute names.
        return ','.join('%s=%s' % (n, str(self._display_value(self.__dict__[n])))
                        for n in names)

    def print_str(self):
        # NOTE(review): assumes a 'print_names' kwarg (list of attribute
        # names to display) was supplied at construction time — confirm.
        return self._format_items(self.print_names)

    def __str__(self):
        return self._format_items(self.__dict__)

    def __repr__(self):
        return 'Param(%s)' % self

    def __getitem__(self, key):
        return self.__dict__[key]

    def __iter__(self):
        return iter(self.__dict__)

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def copy(self):
        """Return a shallow copy: the dict is new, the values are shared."""
        obj = Param()
        obj.__dict__ = self.__dict__.copy()
        return obj
# NOTE: every class that references the network object should be a Component.
class Component(object):
  """Base class for anything holding a reference to a network object.

  Stores the network in ``self.net`` and the remaining keyword arguments
  in ``self.param`` (a Param), which is all that gets pickled.
  """
  def __init__(self, netobj, **kwargs):
    assert netobj is not None
    self.net = netobj
    self.param = Param(**kwargs)

  def to_pickle(self):
    """Return a picklable dict describing this component (type + params)."""
    return {
      'module': type(self).__module__,
      'typename': type(self).__name__,
      'param': self.param,  # all the data that needs to be saved
    }

  @staticmethod
  def restore(o, net):
    """Re-create a component from a ``to_pickle()`` dict and a network.

    Raises:
      RuntimeError: if the recorded class cannot be found in its module.
    """
    module = importlib.import_module(o['module'])
    # BUG FIX: getattr() without a default raises AttributeError, so the
    # original `if classObj is None` check was dead code. Supplying a
    # default makes the intended RuntimeError actually fire.
    classObj = getattr(module, o['typename'], None)
    if classObj is None:
      raise RuntimeError("cannot import %s from %s" % (o['typename'], o['module']))
    return classObj(net, **(o['param'].__dict__))
| {
"content_hash": "d05bd350c799c76b0a2fb6c357133a94",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 80,
"avg_line_length": 25.438356164383563,
"alnum_prop": 0.5740441572428648,
"repo_name": "crackhopper/TFS-toolbox",
"id": "d71fa5af2423ff25eb8c7d6ab683b74bca7239ee",
"size": "1857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tfs/core/elem/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1935"
},
{
"name": "Jupyter Notebook",
"bytes": "1820326"
},
{
"name": "Python",
"bytes": "99327"
},
{
"name": "Shell",
"bytes": "178"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from home import views
# URL routing for the "home" app: the site root ('') maps to IndexView.
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10; a plain list of url()/path() entries is the modern form —
# confirm the project's Django version before modernizing.
urlpatterns = patterns(
    '',
    url(r'^$', views.IndexView.as_view(), name='index')
)
| {
"content_hash": "35eb8fee9bbd3575c5d1426319c48cd2",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 55,
"avg_line_length": 20.75,
"alnum_prop": 0.6385542168674698,
"repo_name": "ritashugisha/ASUEvents",
"id": "a9736b85ff625b35c4579dc0663f4db9bfaedd5d",
"size": "166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ASUEvents/home/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "298"
},
{
"name": "ApacheConf",
"bytes": "466"
},
{
"name": "CSS",
"bytes": "3040553"
},
{
"name": "CoffeeScript",
"bytes": "162"
},
{
"name": "HTML",
"bytes": "4294830"
},
{
"name": "JavaScript",
"bytes": "5635235"
},
{
"name": "PHP",
"bytes": "36247"
},
{
"name": "Python",
"bytes": "112483"
},
{
"name": "Shell",
"bytes": "444"
},
{
"name": "TeX",
"bytes": "8541"
}
],
"symlink_target": ""
} |
# Test registry consumed by rdkit.TestRunner: (executable, arguments, env) tuples.
tests=[
  ("testExecs/testTransforms.exe","",{}),
  ("testExecs/testGrid.exe","",{}),
  ]
# Longer-running tests (none for this module).
longTests=[]
if __name__=='__main__':
  # Run this file's test list through the shared TestRunner harness and
  # use the number of failed tests as the process exit status.
  import sys
  from rdkit import TestRunner
  failed,tests = TestRunner.RunScript('test_list.py',0,1)
  sys.exit(len(failed))
| {
"content_hash": "4e3f4a85a6c6db531582b770a83ca66a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 57,
"avg_line_length": 18.285714285714285,
"alnum_prop": 0.6328125,
"repo_name": "adalke/rdkit",
"id": "d6487ed0b23dc13b33fb077a125b4821f137bfbc",
"size": "257",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Code/Geometry/test_list.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "385"
},
{
"name": "C",
"bytes": "226290"
},
{
"name": "C#",
"bytes": "6745"
},
{
"name": "C++",
"bytes": "7847294"
},
{
"name": "CMake",
"bytes": "611343"
},
{
"name": "CSS",
"bytes": "3231"
},
{
"name": "FORTRAN",
"bytes": "7661"
},
{
"name": "HTML",
"bytes": "63047"
},
{
"name": "Java",
"bytes": "291444"
},
{
"name": "JavaScript",
"bytes": "11595"
},
{
"name": "LLVM",
"bytes": "29594"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "15435"
},
{
"name": "Objective-C",
"bytes": "298"
},
{
"name": "Python",
"bytes": "3138951"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "Shell",
"bytes": "12651"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "49429"
}
],
"symlink_target": ""
} |
import os
import logging
import copy
import time
from datetime import timedelta
from ignite.engine import Events, Engine, EventEnum
from ignite.handlers import Timer
from ignite.contrib.handlers import ProgressBar
from ignite.metrics import RunningAverage
from torch.utils.tensorboard import SummaryWriter
import torch
from torch import nn
import numpy as np
class ValidationEvents(EventEnum):
    """
    Custom ignite events fired around the validation pass that runs at the
    end of each training epoch.
    """
    VALIDATION_STARTED = 'validation_started'
    VALIDATION_COMPLETED = 'validation_completed'
class BackwardsEvents(EventEnum):
    """
    Custom ignite event for the backwards (gradient) pass. The original
    docstring was a copy-paste of the validation one; this event is meant
    to be fired by training closures after backpropagation completes.
    """
    BACKWARDS_COMPLETED = 'backwards_completed'
def cache_dataset(dataset):
    """
    Iterates once over ``dataset`` so that any
    nussl.datasets.transforms.Cache transform attached to it gets populated.
    If there is no caching, or ``dataset.cache_populated = True``, the pass
    simply iterates and does nothing. A ``torch.util.data.DataLoader``
    wrapped around a ``nussl.datasets.BaseDataset`` is also accepted.

    Args:
        dataset (nussl.datasets.BaseDataset): Must be a subclass of
            `nussl.datasets.BaseDataset`.
    """
    def _noop(engine, data):
        pass

    warm_up = Engine(_noop)
    ProgressBar().attach(warm_up)
    warm_up.run(dataset)
    dataset.cache_populated = True
def create_train_and_validation_engines(train_func, val_func=None, device='cpu'):
    """
    Helper function for creating an ignite Engine object with helpful defaults.
    This sets up an Engine that has four handlers attached to it:

    - prepare_batch: before a batch is passed to train_func or val_func, this
      function runs, moving every item in the batch (which is a dictionary) to
      the appropriate device ('cpu' or 'cuda').
    - book_keeping: sets up some dictionaries that are used for bookkeeping so one
      can easily track the epoch and iteration losses for both training and
      validation.
    - add_to_iter_history: records the iteration, epoch, and past iteration losses
      into the dictionaries set up by book_keeping.
    - clear_iter_history: resets the current iteration history of losses after moving
      the current iteration history into past iteration history.

    Args:
        train_func (func): Function that provides the closure for training for
            a single batch.
        val_func (func, optional): Function that provides the closure for
            validating a single batch. Defaults to None.
        device (str, optional): Device to move tensors to. Defaults to 'cpu'.

    Returns:
        tuple: ``(trainer, validator)`` ignite Engines; ``validator`` is
        None when no ``val_func`` is given.
    """
    # Set up engines for training and validation
    trainer = Engine(train_func)
    trainer.register_events(*ValidationEvents)
    trainer.register_events(*BackwardsEvents)
    validator = None if val_func is None else Engine(val_func)
    # Before a batch starts, the items should be float and moved to the
    # correct device, for both training and validation. Checks to make
    # sure "cuda" is available if user requested cuda; silently falls
    # back to 'cpu' otherwise.
    device = device if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)
    def prepare_batch(engine):
        # Cast every tensor entry of the batch dict to float on the device.
        batch = engine.state.batch
        for key in batch:
            if torch.is_tensor(batch[key]):
                batch[key] = batch[key].float().to(device)
        engine.state.batch = batch
    # Set up stuff for bookkeeping as training progresses.
    def book_keeping(engine):
        engine.state.epoch_history = {}
        engine.state.iter_history = {}
        engine.state.past_iter_history = {}
    def add_to_iter_history(engine):
        # engine.state.output is the dict returned by train_func/val_func.
        for key in engine.state.output:
            if key not in engine.state.iter_history:
                engine.state.iter_history[key] = []
            if key not in engine.state.past_iter_history:
                engine.state.past_iter_history[key] = []
            engine.state.iter_history[key].append(
                engine.state.output[key]
            )
            # NOTE(review): this appends the iter_history *list object*
            # rather than the scalar output value, so past_iter_history[key]
            # accumulates repeated references to the same mutable list —
            # confirm whether engine.state.output[key] was intended here.
            engine.state.past_iter_history[key].append(
                engine.state.iter_history[key]
            )
    def clear_iter_history(engine):
        engine.state.iter_history = {}
    trainer.add_event_handler(
        Events.ITERATION_STARTED, prepare_batch)
    trainer.add_event_handler(
        Events.STARTED, book_keeping)
    trainer.add_event_handler(
        Events.ITERATION_COMPLETED, add_to_iter_history)
    trainer.add_event_handler(
        Events.EPOCH_STARTED, clear_iter_history)
    if validator is not None:
        validator.add_event_handler(
            Events.ITERATION_STARTED, prepare_batch)
        validator.add_event_handler(
            Events.STARTED, book_keeping)
        validator.add_event_handler(
            Events.ITERATION_COMPLETED, add_to_iter_history)
        validator.add_event_handler(
            Events.EPOCH_STARTED, clear_iter_history)
    return trainer, validator
def add_validate_and_checkpoint(output_folder, model, optimizer, train_data, trainer,
                                val_data=None, validator=None, save_by_epoch=None):
    """
    This adds the following handler to the trainer:

    - validate_and_checkpoint: this runs the validator on the validation dataset
      (``val_data``) using a defined validation process function ``val_func``.
      These are optional. If these are not provided, then no validator is run
      and the model is simply checkpointed. The model is always saved to
      ``{output_folder}/checkpoints/latest.model.pth``. If the model is also the
      one with the lowest validation loss, then it is *also* saved to
      ``{output_folder}/checkpoints/best.model.pth``. This is attached to
      ``Events.EPOCH_COMPLETED`` on the trainer. After completion, it fires a
      ``ValidationEvents.VALIDATION_COMPLETED`` event.

    Args:
        output_folder (str): Root folder; checkpoints are written under
            ``{output_folder}/checkpoints/``.
        model (torch.nn.Module): Model that is being trained (typically a SeparationModel).
        optimizer (torch.optim.Optimizer): Optimizer being used to train.
        train_data (BaseDataset): dataset that is being used to train the model. This is to
            save additional metadata information alongside the model checkpoint such as the
            STFTParams, dataset folder, length, list of transforms, etc.
        trainer (ignite.Engine): Engine for trainer
        val_data (torch.utils.data.Dataset, optional): The validation data.
            Defaults to None.
        validator (ignite.Engine, optional): Engine for validation.
            Defaults to None.
        save_by_epoch (int, optional): Save the model by epoch number. If this is set to
            N, then every Nth model will be saved in the format epoch{N}.model.pth.
    """
    # When the trainer finishes an epoch, it should validate and save
    # the model.
    @trainer.on(Events.EPOCH_COMPLETED)
    def validate_and_checkpoint(trainer):
        trainer.fire_event(ValidationEvents.VALIDATION_STARTED)
        # With no validation loss to compare, every epoch counts as "best".
        is_best = True
        if validator is not None:
            validator.run(val_data)
            # Fold the validator's per-iteration losses into the trainer's
            # epoch history under "validation/<key>".
            for key in validator.state.iter_history:
                _key = f"validation/{key}"
                if _key not in trainer.state.epoch_history:
                    trainer.state.epoch_history[_key] = []
                trainer.state.epoch_history[_key].append(np.mean(
                    validator.state.iter_history[key]
                ))
        if 'validation/loss' in trainer.state.epoch_history:
            cur = trainer.state.epoch_history['validation/loss'][-1]
            is_best = cur == min(trainer.state.epoch_history['validation/loss'])
        # Record the epoch means of the training losses as well.
        for key in trainer.state.iter_history:
            _key = f"train/{key}"
            if _key not in trainer.state.epoch_history:
                trainer.state.epoch_history[_key] = []
            trainer.state.epoch_history[_key].append(np.mean(
                trainer.state.iter_history[key]
            ))
        output_paths = [os.path.join(
            output_folder, 'checkpoints', 'latest.model.pth')]
        if is_best:
            output_paths.append(os.path.join(
                output_folder, 'checkpoints', 'best.model.pth'
            ))
        # Unwrap DataParallel so the underlying model's save() is used.
        if isinstance(model, nn.DataParallel):
            _model = model.module
        else:
            _model = model
        for _path in output_paths:
            os.makedirs(os.path.join(
                output_folder, 'checkpoints'), exist_ok=True)
            _model.save(_path, train_data=train_data, val_data=val_data,
                        trainer=trainer)
            # The optimizer state is checkpointed alongside each model file.
            torch.save(optimizer.state_dict(),
                       _path.replace('model.pth', 'optimizer.pth'))
        if save_by_epoch is not None:
            if trainer.state.epoch % save_by_epoch == 0:
                _path = output_paths[0].replace('latest', f'epoch{trainer.state.epoch}')
                _model.save(_path, train_data=train_data, val_data=val_data,
                            trainer=trainer)
        trainer.state.saved_model_path = output_paths[-1]
        trainer.state.output_folder = output_folder
        trainer.fire_event(ValidationEvents.VALIDATION_COMPLETED)
def add_stdout_handler(trainer, validator=None):
    """
    This adds the following handler to the trainer engine, and also sets up
    Timers:

    - log_epoch_to_stdout: This logs the results of a model after it has trained
      for a single epoch on both the training and validation set. The output typically
      looks like this:

      .. code-block:: none

        EPOCH SUMMARY
        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        - Epoch number: 0010 / 0010
        - Training loss: 0.583591
        - Validation loss: 0.137209
        - Epoch took: 00:00:03
        - Time since start: 00:00:32
        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        Saving to test.
        Output @ tests/local/trainer

    Args:
        trainer (ignite.Engine): Engine for trainer
        validator (ignite.Engine, optional): Engine for validation.
            Defaults to None. Accepted for API symmetry; the handler reads
            everything it needs from the trainer's state.
    """
    # Set up timers for overall time taken and each epoch
    overall_timer = Timer(average=False)
    overall_timer.attach(trainer,
                         start=Events.STARTED, pause=Events.COMPLETED)
    epoch_timer = Timer(average=False)
    epoch_timer.attach(
        trainer, start=Events.EPOCH_STARTED,
        pause=ValidationEvents.VALIDATION_COMPLETED
    )
    @trainer.on(ValidationEvents.VALIDATION_COMPLETED)
    def log_epoch_to_stdout(trainer):
        epoch_time = timedelta(seconds=epoch_timer.value())
        overall_time = timedelta(seconds=overall_timer.value())
        epoch_number = trainer.state.epoch
        total_epochs = trainer.state.max_epochs
        try:
            validation_loss = (
                f"{trainer.state.epoch_history['validation/loss'][-1]:04f}")
        except (KeyError, IndexError):
            # BUG FIX: this was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt. Only the expected failures are
            # caught now: no validation history recorded (KeyError) or an
            # empty history list (IndexError).
            validation_loss = 'N/A'
        train_loss = trainer.state.epoch_history['train/loss'][-1]
        saved_model_path = trainer.state.saved_model_path
        logging_str = (
            f"\n\n"
            f"EPOCH SUMMARY \n"
            f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \n"
            f"- Epoch number: {epoch_number:04d} / {total_epochs:04d} \n"
            f"- Training loss: {train_loss:04f} \n"
            f"- Validation loss: {validation_loss} \n"
            f"- Epoch took: {epoch_time} \n"
            f"- Time since start: {overall_time} \n"
            f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \n"
            f"Saving to {saved_model_path}. \n"
            f"Output @ {trainer.state.output_folder} \n"
        )
        logging.info(logging_str)
def add_progress_bar_handler(*engines):
    """
    Adds a progress bar to each engine. Keeps track of a running
    average of the loss as well.

    Usage::

    .. code-block:: python

        tr_engine, val_engine = ...
        add_progress_bar_handler(tr_engine, val_engine)
    """
    for eng in engines:
        # Track a running average of the per-iteration loss and display it.
        avg = RunningAverage(output_transform=lambda out: out['loss'])
        avg.attach(eng, 'avg_loss')
        ProgressBar().attach(eng, ['avg_loss'])
def add_tensorboard_handler(tensorboard_folder, engine, every_iteration=False):
    """
    Every key in engine.state.epoch_history[-1] is logged to TensorBoard.

    Args:
        tensorboard_folder (str): Where the tensorboard logs should go.
        engine (ignite.Engine): The engine whose history is logged; logging
            fires on ValidationEvents.VALIDATION_COMPLETED.
        every_iteration (bool, optional): Whether to also log the values at every
            iteration.
    """
    writer = SummaryWriter(tensorboard_folder)
    @engine.on(ValidationEvents.VALIDATION_COMPLETED)
    def log_to_tensorboard(engine):
        # Most recent epoch value for every tracked key.
        for key in engine.state.epoch_history:
            writer.add_scalar(
                key, engine.state.epoch_history[key][-1], engine.state.epoch)
    if every_iteration:
        @engine.on(Events.ITERATION_COMPLETED)
        def log_iteration_to_tensorboard(engine):
            # Most recent iteration value for every tracked key.
            for key in engine.state.iter_history:
                writer.add_scalar(
                    key, engine.state.iter_history[key][-1], engine.state.iteration)
| {
"content_hash": "26c130c93a12d121c6bf0e80d019da1a",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 91,
"avg_line_length": 37.42816901408451,
"alnum_prop": 0.6270790998720553,
"repo_name": "interactiveaudiolab/nussl",
"id": "cb90e005e4a0591510accb0ac3868959326f791a",
"size": "13287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nussl/ml/train/trainer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "MATLAB",
"bytes": "11692"
},
{
"name": "Python",
"bytes": "591205"
},
{
"name": "Shell",
"bytes": "26"
}
],
"symlink_target": ""
} |
import os
import re
import sys
import copy
import zlib
import time
import shlex
import shutil
import fnmatch
import tarfile
import argparse
import platform
import datetime
import tempfile
import posixpath
import subprocess
from build.common import *
from build.config import *
from build.build import *
def die (msg):
	# Print the message and abort the whole packaging run with nonzero status.
	print msg
	sys.exit(-1)
def removeLeadingPath (path, basePath):
	"""Strip basePath (plus one separator) from the front of path."""
	# Both inputs must be normalized already
	assert os.path.normpath(path) == path
	assert os.path.normpath(basePath) == basePath
	prefixLen = len(basePath) + 1
	return path[prefixLen:]
def findFile (candidates):
	"""Return the first candidate path that exists on disk, or None."""
	existing = (f for f in candidates if os.path.exists(f))
	return next(existing, None)
def getFileList (basePath):
	"""Recursively collect paths of all files under basePath, relative to it."""
	basePath = os.path.normpath(basePath)
	prefixLen = len(basePath) + 1
	relPaths = []
	for root, dirs, files in os.walk(basePath):
		for name in files:
			fullPath = os.path.normpath(os.path.join(root, name))
			# fullPath always begins with basePath + separator; strip them.
			relPaths.append(fullPath[prefixLen:])
	return relPaths
def toDatetime (dateTuple):
	"""Convert a (year, month, day) tuple into a datetime at midnight."""
	year, month, day = dateTuple
	return datetime.datetime(year, month, day)
class PackageBuildInfo:
	"""Paths and release metadata shared by all package build steps."""
	def __init__ (self, releaseConfig, srcBasePath, dstBasePath, tmpBasePath):
		self.releaseConfig = releaseConfig
		self.srcBasePath = srcBasePath
		self.dstBasePath = dstBasePath
		self.tmpBasePath = tmpBasePath

	def getReleaseConfig (self):
		return self.releaseConfig

	def getReleaseVersion (self):
		return self.releaseConfig.getVersion()

	def getReleaseId (self):
		# Release id is crc32(releaseConfig + release), masked to 32 bits.
		digest = zlib.crc32(self.releaseConfig.getName() + self.releaseConfig.getVersion())
		return digest & 0xffffffff

	def getSrcBasePath (self):
		return self.srcBasePath

	def getTmpBasePath (self):
		return self.tmpBasePath
class DstFile (object):
	"""Base class for a single file produced into the destination tree."""
	def __init__ (self, dstFile):
		self.dstFile = dstFile

	def makeDir (self):
		"""Create the parent directory of dstFile if it does not exist yet."""
		parent = os.path.dirname(self.dstFile)
		if not os.path.exists(parent):
			os.makedirs(parent)

	def make (self, packageBuildInfo):
		# Subclasses implement the actual production of the file.
		assert False # Should not be called
class CopyFile (DstFile):
	"""Copies one source file verbatim into the destination tree."""
	def __init__ (self, srcFile, dstFile):
		super(CopyFile, self).__init__(dstFile)
		self.srcFile = srcFile

	def make (self, packageBuildInfo):
		self.makeDir()
		dst = self.dstFile
		# Refuse to overwrite: each destination file must be produced once.
		if os.path.exists(dst):
			die("%s already exists" % dst)
		shutil.copyfile(self.srcFile, dst)
class GenReleaseInfoTarget (DstFile):
	pass
class GenCMake (DstFile):
	# Generates a CMake file from a template, overriding set(VAR "...") values.
	def __init__ (self, srcFile, dstFile, replaceVars):
		super(GenCMake, self).__init__(dstFile)
		self.srcFile = srcFile
		self.replaceVars = replaceVars
	def make (self, packageBuildInfo):
		self.makeDir()
		print " GenCMake: %s" % removeLeadingPath(self.dstFile, packageBuildInfo.dstBasePath)
		src = readFile(self.srcFile)
		for var, value in self.replaceVars:
			# Rewrite set(VAR "<old>") -> set(VAR "<value>") in the template text.
			src = re.sub('set\(%s\s+"[^"]*"' % re.escape(var),
						 'set(%s "%s"' % (var, value), src)
		writeFile(self.dstFile, src)
def createFileTargets (srcBasePath, dstBasePath, files, filters):
	"""For each (isMatch, createFileObj) pair in filters, build file objects
	for the files matching the predicate. Each file is claimed by at most
	one filter, in filter order."""
	claimed = set() # Files already taken by an earlier filter
	targets = []
	for isMatch, createFileObj in filters:
		# Collect the not-yet-claimed files this filter matches.
		matching = [f for f in files if f not in claimed and isMatch(f)]
		# Build file objects and mark the files as claimed.
		for f in matching:
			claimed.add(f)
			targets.append(createFileObj(os.path.join(srcBasePath, f),
										 os.path.join(dstBasePath, f)))
	return targets
# Generates multiple file targets based on filters
class FileTargetGroup:
	"""Applies file filters to a directory tree and makes every match."""
	def __init__ (self, srcBasePath, dstBasePath, filters, srcBasePathFunc=PackageBuildInfo.getSrcBasePath):
		self.srcBasePath = srcBasePath
		self.dstBasePath = dstBasePath
		self.filters = filters
		# Selects which base dir (source tree or tmp build dir) to scan.
		self.getSrcBasePath = srcBasePathFunc

	def make (self, packageBuildInfo):
		srcRoot = os.path.normpath(os.path.join(self.getSrcBasePath(packageBuildInfo), self.srcBasePath))
		dstRoot = os.path.normpath(os.path.join(packageBuildInfo.dstBasePath, self.dstBasePath))
		# Build one target per matching file, then make them all.
		for target in createFileTargets(srcRoot, dstRoot, getFileList(srcRoot), self.filters):
			target.make(packageBuildInfo)
# Single file target
class SingleFileTarget:
	"""Wraps one src -> dst file mapping into a target object."""
	def __init__ (self, srcFile, dstFile, makeTarget):
		self.srcFile = srcFile
		self.dstFile = dstFile
		# Factory producing the actual file object given full src/dst paths.
		self.makeTarget = makeTarget

	def make (self, packageBuildInfo):
		src = os.path.normpath(os.path.join(packageBuildInfo.srcBasePath, self.srcFile))
		dst = os.path.normpath(os.path.join(packageBuildInfo.dstBasePath, self.dstFile))
		self.makeTarget(src, dst).make(packageBuildInfo)
class BuildTarget:
	# CMake-builds the packaged sources into the temporary build area.
	def __init__ (self, baseConfig, generator, targets = None):
		self.baseConfig = baseConfig
		self.generator = generator
		self.targets = targets
	def make (self, packageBuildInfo):
		print " Building %s" % self.baseConfig.getBuildDir()
		# Create config with full build dir path
		config = BuildConfig(os.path.join(packageBuildInfo.getTmpBasePath(), self.baseConfig.getBuildDir()),
							 self.baseConfig.getBuildType(),
							 self.baseConfig.getArgs(),
							 srcPath = os.path.join(packageBuildInfo.dstBasePath, "src"))
		# The build dir must be fresh for a reproducible release build.
		assert not os.path.exists(config.getBuildDir())
		build(config, self.generator, self.targets)
class BuildAndroidTarget:
	# Builds the dEQP Android APK via the packaged android build script and
	# copies the produced debug APK into the destination tree.
	def __init__ (self, dstFile):
		self.dstFile = dstFile
	def make (self, packageBuildInfo):
		print " Building Android binary"
		buildRoot = os.path.join(packageBuildInfo.tmpBasePath, "android-build")
		# Build dir must not exist yet; the build script populates it.
		assert not os.path.exists(buildRoot)
		os.makedirs(buildRoot)
		# Execute build script
		scriptPath = os.path.normpath(os.path.join(packageBuildInfo.dstBasePath, "src", "android", "scripts", "build.py"))
		execute([
			"python",
			"-B", # no .py[co]
			scriptPath,
			"--build-root=%s" % buildRoot,
		])
		srcFile = os.path.normpath(os.path.join(buildRoot, "package", "bin", "dEQP-debug.apk"))
		dstFile = os.path.normpath(os.path.join(packageBuildInfo.dstBasePath, self.dstFile))
		CopyFile(srcFile, dstFile).make(packageBuildInfo)
class FetchExternalSourcesTarget:
	"""Runs src/external/fetch_sources.py inside the packaged source tree."""
	def __init__ (self):
		pass

	def make (self, packageBuildInfo):
		fetchScript = os.path.normpath(os.path.join(packageBuildInfo.dstBasePath, "src", "external", "fetch_sources.py"))
		command = [
			"python",
			"-B", # no .py[co]
			fetchScript,
		]
		execute(command)
class RemoveSourcesTarget:
	"""Deletes the src/ tree from the destination (for binary-only packages)."""
	def __init__ (self):
		pass

	def make (self, packageBuildInfo):
		srcDir = os.path.join(packageBuildInfo.dstBasePath, "src")
		shutil.rmtree(srcDir, ignore_errors=False)
class Module:
	"""A named group of build targets that are made together."""
	def __init__ (self, name, targets):
		self.name = name
		self.targets = targets

	def make (self, packageBuildInfo):
		# Delegate to every contained target, in declaration order.
		for t in self.targets:
			t.make(packageBuildInfo)
class ReleaseConfig:
	"""Describes one release package: name, version, modules, source policy."""
	def __init__ (self, name, version, modules, sources = True):
		self.name = name
		self.version = version
		self.modules = modules
		self.sources = sources

	def getName (self):
		return self.name

	def getVersion (self):
		return self.version

	def getModules (self):
		return self.modules

	def packageWithSources (self):
		# When False, the src/ tree is stripped from the final archive.
		return self.sources
def matchIncludeExclude (includePatterns, excludePatterns, filename):
	"""True when some path component of filename fnmatch-matches an include
	pattern and none matches an exclude pattern. Excludes win over includes."""
	components = os.path.normpath(filename).split(os.sep)
	def anyComponentMatches (patterns):
		return any(fnmatch.fnmatch(c, p) for p in patterns for c in components)
	if anyComponentMatches(excludePatterns):
		return False
	return anyComponentMatches(includePatterns)
def copyFileFilter (includePatterns, excludePatterns=[]):
	"""Build a (predicate, factory) filter pair that plain-copies matches."""
	predicate = lambda f: matchIncludeExclude(includePatterns, excludePatterns, f)
	factory = lambda s, d: CopyFile(s, d)
	return (predicate, factory)
# Convenience factories over FileTargetGroup / SingleFileTarget.
def makeFileCopyGroup (srcDir, dstDir, includePatterns, excludePatterns=[]):
	return FileTargetGroup(srcDir, dstDir, [copyFileFilter(includePatterns, excludePatterns)])
# Like makeFileCopyGroup, but source paths resolve against the temporary
# build directory instead of the source tree.
def makeTmpFileCopyGroup (srcDir, dstDir, includePatterns, excludePatterns=[]):
	return FileTargetGroup(srcDir, dstDir, [copyFileFilter(includePatterns, excludePatterns)], PackageBuildInfo.getTmpBasePath)
# Copy a single named file from the source tree into the package.
def makeFileCopy (srcFile, dstFile):
	return SingleFileTarget(srcFile, dstFile, lambda s, d: CopyFile(s, d))
def getReleaseFileName (configName, releaseName):
	"""Build the dated package base name: dEQP-<release>-YYYY-MM-DD-<config>."""
	now = datetime.date.today()
	fields = (releaseName, now.year, now.month, now.day, configName)
	return "dEQP-%s-%04d-%02d-%02d-%s" % fields
def getTempDir ():
	"""Return (creating it if needed) the shared dEQP release temp directory."""
	path = os.path.join(tempfile.gettempdir(), "dEQP-Releases")
	# NOTE(review): exists-then-makedirs races if two builds start at once —
	# confirm whether concurrent invocations are a supported scenario.
	if not os.path.exists(path):
		os.makedirs(path)
	return path
def makeRelease (releaseConfig):
	# Builds one release package end-to-end: runs every module's targets into
	# a scratch destination dir, optionally strips sources, then archives the
	# tree as <name>.tar.bz2 and cleans up the scratch dirs.
	releaseName = getReleaseFileName(releaseConfig.getName(), releaseConfig.getVersion())
	tmpPath = getTempDir()
	srcBasePath = DEQP_DIR
	dstBasePath = os.path.join(tmpPath, releaseName)
	tmpBasePath = os.path.join(tmpPath, releaseName + "-tmp")
	packageBuildInfo = PackageBuildInfo(releaseConfig, srcBasePath, dstBasePath, tmpBasePath)
	dstArchiveName = releaseName + ".tar.bz2"
	print "Creating release %s to %s" % (releaseName, tmpPath)
	# Remove old temporary dirs
	for path in [dstBasePath, tmpBasePath]:
		if os.path.exists(path):
			shutil.rmtree(path, ignore_errors=False)
	# Make all modules
	for module in releaseConfig.getModules():
		print " Processing module %s" % module.name
		module.make(packageBuildInfo)
	# Remove sources?
	if not releaseConfig.packageWithSources():
		shutil.rmtree(os.path.join(dstBasePath, "src"), ignore_errors=False)
	# Create archive
	print "Creating %s" % dstArchiveName
	archive = tarfile.open(dstArchiveName, 'w:bz2')
	archive.add(dstBasePath, arcname=releaseName)
	archive.close()
	# Remove tmp dirs
	for path in [dstBasePath, tmpBasePath]:
		if os.path.exists(path):
			shutil.rmtree(path, ignore_errors=False)
	print "Done!"
# Module declarations
# File patterns picked up when copying module sources / prebuilt targets.
SRC_FILE_PATTERNS = ["*.h", "*.hpp", "*.c", "*.cpp", "*.m", "*.mm", "*.inl", "*.java", "*.aidl", "CMakeLists.txt", "LICENSE.txt", "*.cmake"]
TARGET_PATTERNS = ["*.cmake", "*.h", "*.lib", "*.dll", "*.so", "*.txt"]
# Core framework, build files and tools; included by every release config.
BASE = Module("Base", [
	makeFileCopy ("LICENSE", "src/LICENSE"),
	makeFileCopy ("CMakeLists.txt", "src/CMakeLists.txt"),
	makeFileCopyGroup ("targets", "src/targets", TARGET_PATTERNS),
	makeFileCopyGroup ("execserver", "src/execserver", SRC_FILE_PATTERNS),
	makeFileCopyGroup ("executor", "src/executor", SRC_FILE_PATTERNS),
	makeFileCopy ("modules/CMakeLists.txt", "src/modules/CMakeLists.txt"),
	makeFileCopyGroup ("external", "src/external", ["CMakeLists.txt", "*.py"]),
	# Stylesheet for displaying test logs on browser
	makeFileCopyGroup ("doc/testlog-stylesheet", "doc/testlog-stylesheet", ["*"]),
	# Non-optional parts of framework
	makeFileCopy ("framework/CMakeLists.txt", "src/framework/CMakeLists.txt"),
	makeFileCopyGroup ("framework/delibs", "src/framework/delibs", SRC_FILE_PATTERNS),
	makeFileCopyGroup ("framework/common", "src/framework/common", SRC_FILE_PATTERNS),
	makeFileCopyGroup ("framework/qphelper", "src/framework/qphelper", SRC_FILE_PATTERNS),
	makeFileCopyGroup ("framework/platform", "src/framework/platform", SRC_FILE_PATTERNS),
	makeFileCopyGroup ("framework/opengl", "src/framework/opengl", SRC_FILE_PATTERNS, ["simplereference"]),
	makeFileCopyGroup ("framework/egl", "src/framework/egl", SRC_FILE_PATTERNS),
	# android sources
	makeFileCopyGroup ("android/package/src", "src/android/package/src", SRC_FILE_PATTERNS),
	makeFileCopy ("android/package/AndroidManifest.xml", "src/android/package/AndroidManifest.xml"),
	makeFileCopyGroup ("android/package/res", "src/android/package/res", ["*.png", "*.xml"]),
	makeFileCopyGroup ("android/scripts", "src/android/scripts", [
		"common.py",
		"build.py",
		"resources.py",
		"install.py",
		"launch.py",
		"debug.py"
		]),
	# Release info
	GenReleaseInfoFileTarget("src/framework/qphelper/qpReleaseInfo.inl")
])
# Optional documentation and per-API test module declarations.
DOCUMENTATION = Module("Documentation", [
	makeFileCopyGroup ("doc/pdf", "doc", ["*.pdf"]),
	makeFileCopyGroup ("doc", "doc", ["porting_layer_changes_*.txt"]),
])
GLSHARED = Module("Shared GL Tests", [
	# Optional framework components
	makeFileCopyGroup ("framework/randomshaders", "src/framework/randomshaders", SRC_FILE_PATTERNS),
	makeFileCopyGroup ("framework/opengl/simplereference", "src/framework/opengl/simplereference", SRC_FILE_PATTERNS),
	makeFileCopyGroup ("framework/referencerenderer", "src/framework/referencerenderer", SRC_FILE_PATTERNS),
	makeFileCopyGroup ("modules/glshared", "src/modules/glshared", SRC_FILE_PATTERNS),
])
GLES2 = Module("GLES2", [
	makeFileCopyGroup ("modules/gles2", "src/modules/gles2", SRC_FILE_PATTERNS),
	makeFileCopyGroup ("data/gles2", "src/data/gles2", ["*.*"]),
	makeFileCopyGroup ("doc/testspecs/GLES2", "doc/testspecs/GLES2", ["*.txt"])
])
GLES3 = Module("GLES3", [
	makeFileCopyGroup ("modules/gles3", "src/modules/gles3", SRC_FILE_PATTERNS),
	makeFileCopyGroup ("data/gles3", "src/data/gles3", ["*.*"]),
	makeFileCopyGroup ("doc/testspecs/GLES3", "doc/testspecs/GLES3", ["*.txt"])
])
GLES31 = Module("GLES31", [
	makeFileCopyGroup ("modules/gles31", "src/modules/gles31", SRC_FILE_PATTERNS),
	makeFileCopyGroup ("data/gles31", "src/data/gles31", ["*.*"]),
	makeFileCopyGroup ("doc/testspecs/GLES31", "doc/testspecs/GLES31", ["*.txt"])
])
EGL = Module("EGL", [
	makeFileCopyGroup ("modules/egl", "src/modules/egl", SRC_FILE_PATTERNS)
])
INTERNAL = Module("Internal", [
	makeFileCopyGroup ("modules/internal", "src/modules/internal", SRC_FILE_PATTERNS),
	makeFileCopyGroup ("data/internal", "src/data/internal", ["*.*"]),
])
EXTERNAL_SRCS = Module("External sources", [
	FetchExternalSourcesTarget()
])
ANDROID_BINARIES = Module("Android Binaries", [
	BuildAndroidTarget ("bin/android/dEQP.apk"),
	makeFileCopyGroup ("targets/android", "bin/android", ["*.bat", "*.sh"]),
])
# CMake configurations used for the prebuilt binaries; libpng sources are
# expected in a checkout next to DEQP_DIR.
COMMON_BUILD_ARGS = ['-DPNG_SRC_PATH=%s' % os.path.realpath(os.path.join(DEQP_DIR, '..', 'libpng'))]
NULL_X32_CONFIG = BuildConfig('null-x32', 'Release', ['-DDEQP_TARGET=null', '-DCMAKE_C_FLAGS=-m32', '-DCMAKE_CXX_FLAGS=-m32'] + COMMON_BUILD_ARGS)
NULL_X64_CONFIG = BuildConfig('null-x64', 'Release', ['-DDEQP_TARGET=null', '-DCMAKE_C_FLAGS=-m64', '-DCMAKE_CXX_FLAGS=-m64'] + COMMON_BUILD_ARGS)
GLX_X32_CONFIG = BuildConfig('glx-x32', 'Release', ['-DDEQP_TARGET=x11_glx', '-DCMAKE_C_FLAGS=-m32', '-DCMAKE_CXX_FLAGS=-m32'] + COMMON_BUILD_ARGS)
GLX_X64_CONFIG = BuildConfig('glx-x64', 'Release', ['-DDEQP_TARGET=x11_glx', '-DCMAKE_C_FLAGS=-m64', '-DCMAKE_CXX_FLAGS=-m64'] + COMMON_BUILD_ARGS)
# Intermediate build products excluded when copying built binaries.
EXCLUDE_BUILD_FILES = ["CMakeFiles", "*.a", "*.cmake"]
LINUX_X32_COMMON_BINARIES = Module("Linux x32 Common Binaries", [
	BuildTarget (NULL_X32_CONFIG, ANY_UNIX_GENERATOR),
	makeTmpFileCopyGroup(NULL_X32_CONFIG.getBuildDir() + "/execserver", "bin/linux32", ["*"], EXCLUDE_BUILD_FILES),
	makeTmpFileCopyGroup(NULL_X32_CONFIG.getBuildDir() + "/executor", "bin/linux32", ["*"], EXCLUDE_BUILD_FILES),
])
LINUX_X64_COMMON_BINARIES = Module("Linux x64 Common Binaries", [
	BuildTarget (NULL_X64_CONFIG, ANY_UNIX_GENERATOR),
	makeTmpFileCopyGroup(NULL_X64_CONFIG.getBuildDir() + "/execserver", "bin/linux64", ["*"], EXCLUDE_BUILD_FILES),
	makeTmpFileCopyGroup(NULL_X64_CONFIG.getBuildDir() + "/executor", "bin/linux64", ["*"], EXCLUDE_BUILD_FILES),
])
# Special module to remove src dir, for example after binary build
REMOVE_SOURCES = Module("Remove sources from package", [
	RemoveSourcesTarget()
])
# Release configuration
ALL_MODULES = [
	BASE,
	DOCUMENTATION,
	GLSHARED,
	GLES2,
	GLES3,
	GLES31,
	EGL,
	INTERNAL,
	EXTERNAL_SRCS,
]
ALL_BINARIES = [
	LINUX_X64_COMMON_BINARIES,
	ANDROID_BINARIES,
]
# Map of --config choices to module lists: sources only, sources+binaries,
# or binaries only (the src tree is removed after building).
RELEASE_CONFIGS = {
	"src": ALL_MODULES,
	"src-bin": ALL_MODULES + ALL_BINARIES,
	"bin": ALL_MODULES + ALL_BINARIES + [REMOVE_SOURCES],
}
def parseArgs ():
    """Parse the command line; config, name and version are all mandatory."""
    parser = argparse.ArgumentParser(description = "Build release package")
    parser.add_argument("-c", "--config",
                        dest="config",
                        choices=RELEASE_CONFIGS.keys(),
                        required=True,
                        help="Release configuration")
    parser.add_argument("-n", "--name",
                        dest="name",
                        required=True,
                        help="Package-specific name")
    parser.add_argument("-v", "--version",
                        dest="version",
                        required=True,
                        help="Version code")
    return parser.parse_args()
if __name__ == "__main__":
    # Build the requested release package from the selected configuration.
    options = parseArgs()
    makeRelease(ReleaseConfig(options.name, options.version, RELEASE_CONFIGS[options.config]))
| {
"content_hash": "81da52f782dde46ec53b8b0d3395aa62",
"timestamp": "",
"source": "github",
"line_count": 512,
"max_line_length": 148,
"avg_line_length": 33.458984375,
"alnum_prop": 0.7032864397875197,
"repo_name": "geekboxzone/mmallow_external_deqp",
"id": "cf3f48923539b109cd5c6d3ff41176aaac2c6a68",
"size": "17989",
"binary": false,
"copies": "2",
"ref": "refs/heads/geekbox",
"path": "scripts/make_release.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "320"
},
{
"name": "C",
"bytes": "472688"
},
{
"name": "C++",
"bytes": "20638791"
},
{
"name": "CMake",
"bytes": "169072"
},
{
"name": "HTML",
"bytes": "55736"
},
{
"name": "Java",
"bytes": "25615"
},
{
"name": "Makefile",
"bytes": "34568"
},
{
"name": "Objective-C",
"bytes": "20396"
},
{
"name": "Objective-C++",
"bytes": "17364"
},
{
"name": "Python",
"bytes": "520672"
},
{
"name": "Shell",
"bytes": "166"
}
],
"symlink_target": ""
} |
from unittest import mock
from oslo_policy import policy as base_policy
from oslo_utils import uuidutils
from neutron import policy
from neutron.tests.unit.conf.policies import test_base as base
class SecurityGroupAPITestCase(base.PolicyBaseTestCase):
    """Shared fixtures for the security group policy tests."""

    def setUp(self):
        super().setUp()
        # One target owned by the test project, one owned by another project.
        self.target = {'project_id': self.project_id}
        self.alt_target = {'project_id': self.alt_project_id}
class SystemAdminSecurityGroupTests(SecurityGroupAPITestCase):
    """System-scoped admin tokens must fail scope validation everywhere."""

    def setUp(self):
        super().setUp()
        self.context = self.system_admin_ctx

    def _check_invalid_scope(self, action):
        # Security group APIs are project scoped, so a system-scoped token
        # is rejected for both the own-project and other-project targets.
        for target in (self.target, self.alt_target):
            self.assertRaises(
                base_policy.InvalidScope,
                policy.enforce,
                self.context, action, target)

    def test_create_security_group(self):
        self._check_invalid_scope('create_security_group')

    def test_get_security_group(self):
        self._check_invalid_scope('get_security_group')

    def test_update_security_group(self):
        self._check_invalid_scope('update_security_group')

    def test_delete_security_group(self):
        self._check_invalid_scope('delete_security_group')
class SystemMemberSecurityGroupTests(SystemAdminSecurityGroupTests):
    """Same expectations as the system admin case, with a member token."""

    def setUp(self):
        super().setUp()
        self.context = self.system_member_ctx
class SystemReaderSecurityGroupTests(SystemMemberSecurityGroupTests):
    """Same expectations as the system member case, with a reader token."""

    def setUp(self):
        super().setUp()
        self.context = self.system_reader_ctx
class AdminSecurityGroupTests(SecurityGroupAPITestCase):
    """Project admin may act only on its own project's security groups."""

    def setUp(self):
        super().setUp()
        self.context = self.project_admin_ctx

    def _check_own_project_only(self, action):
        # Allowed for the token's own project, denied for another project.
        self.assertTrue(policy.enforce(self.context, action, self.target))
        self.assertRaises(
            base_policy.PolicyNotAuthorized,
            policy.enforce,
            self.context, action, self.alt_target)

    def test_create_security_group(self):
        self._check_own_project_only('create_security_group')

    def test_get_security_group(self):
        self._check_own_project_only('get_security_group')

    def test_update_security_group(self):
        self._check_own_project_only('update_security_group')

    def test_delete_security_group(self):
        self._check_own_project_only('delete_security_group')
class ProjectMemberSecurityGroupTests(AdminSecurityGroupTests):
    """Same expectations as the project admin case, with a member token."""

    def setUp(self):
        super().setUp()
        self.context = self.project_member_ctx
class ProjectReaderSecurityGroupTests(ProjectMemberSecurityGroupTests):
    """Read-only role: reads are inherited as allowed, writes always denied."""

    def setUp(self):
        super().setUp()
        self.context = self.project_reader_ctx

    def _check_denied(self, action):
        # Write operations are denied regardless of which project owns
        # the target.
        for target in (self.target, self.alt_target):
            self.assertRaises(
                base_policy.PolicyNotAuthorized,
                policy.enforce,
                self.context, action, target)

    def test_create_security_group(self):
        self._check_denied('create_security_group')

    def test_update_security_group(self):
        self._check_denied('update_security_group')

    def test_delete_security_group(self):
        self._check_denied('delete_security_group')
class SecurityGroupRuleAPITestCase(base.PolicyBaseTestCase):
    """Shared fixtures for the security group rule policy tests."""

    def setUp(self):
        super().setUp()
        sg_id = uuidutils.generate_uuid()
        self.sg = {
            'id': sg_id,
            'project_id': self.project_id}
        self.target = {
            'project_id': self.project_id,
            'security_group_id': sg_id,
            'ext_parent_security_group_id': sg_id}
        self.alt_target = {
            'project_id': self.alt_project_id,
            'security_group_id': sg_id,
            'ext_parent_security_group_id': sg_id}

        # Rule policies look up the parent security group through the core
        # plugin; stub it out to always return our fake group.
        self.plugin_mock = mock.Mock()
        self.plugin_mock.get_security_group.return_value = self.sg
        mock.patch(
            'neutron_lib.plugins.directory.get_plugin',
            return_value=self.plugin_mock).start()
class SystemAdminSecurityGroupRuleTests(SecurityGroupRuleAPITestCase):
    """System-scoped admin tokens must fail scope validation everywhere."""

    def setUp(self):
        super().setUp()
        self.context = self.system_admin_ctx

    def _check_invalid_scope(self, action):
        # Rule APIs are project scoped, so a system-scoped token is
        # rejected for both projects' targets.
        for target in (self.target, self.alt_target):
            self.assertRaises(
                base_policy.InvalidScope,
                policy.enforce,
                self.context, action, target)

    def test_create_security_group_rule(self):
        self._check_invalid_scope('create_security_group_rule')

    def test_get_security_group_rule(self):
        self._check_invalid_scope('get_security_group_rule')

    def test_delete_security_group_rule(self):
        self._check_invalid_scope('delete_security_group_rule')
class SystemMemberSecurityGroupRuleTests(SystemAdminSecurityGroupRuleTests):
    """Same expectations as the system admin case, with a member token."""

    def setUp(self):
        super().setUp()
        self.context = self.system_member_ctx
class SystemReaderSecurityGroupRuleTests(SystemMemberSecurityGroupRuleTests):
    """Same expectations as the system member case, with a reader token."""

    def setUp(self):
        super().setUp()
        self.context = self.system_reader_ctx
class AdminSecurityGroupRuleTests(SecurityGroupRuleAPITestCase):
    """Project admin may act only on rules in its own project."""

    def setUp(self):
        super().setUp()
        self.context = self.project_admin_ctx

    def _check_own_project_only(self, action):
        # Allowed for the token's own project, denied for another project.
        self.assertTrue(policy.enforce(self.context, action, self.target))
        self.assertRaises(
            base_policy.PolicyNotAuthorized,
            policy.enforce,
            self.context, action, self.alt_target)

    def test_create_security_group_rule(self):
        self._check_own_project_only('create_security_group_rule')

    def test_get_security_group_rule(self):
        self._check_own_project_only('get_security_group_rule')

        # Owner of the security group can get rule which belongs to that group,
        # even if security group rule belongs to someone else
        owner_target = {
            'project_id': 'some-other-project',
            'security_group:tenant_id': self.project_id,
            'security_group_id': self.sg['id'],
            'ext_parent_security_group_id': self.sg['id']}
        self.assertTrue(
            policy.enforce(self.context,
                           'get_security_group_rule', owner_target))

    def test_delete_security_group_rule(self):
        self._check_own_project_only('delete_security_group_rule')
class ProjectMemberSecurityGroupRuleTests(AdminSecurityGroupRuleTests):
    """Same expectations as the project admin case, with a member token."""

    def setUp(self):
        super().setUp()
        self.context = self.project_member_ctx
class ProjectReaderSecurityGroupRuleTests(ProjectMemberSecurityGroupRuleTests):
    """Read-only role: reads are inherited as allowed, writes always denied."""

    def setUp(self):
        super().setUp()
        self.context = self.project_reader_ctx

    def _check_denied(self, action):
        # Write operations are denied regardless of which project owns
        # the target.
        for target in (self.target, self.alt_target):
            self.assertRaises(
                base_policy.PolicyNotAuthorized,
                policy.enforce,
                self.context, action, target)

    def test_create_security_group_rule(self):
        self._check_denied('create_security_group_rule')

    def test_delete_security_group_rule(self):
        self._check_denied('delete_security_group_rule')
| {
"content_hash": "325510f29224682f7415b82e7c42f08c",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 79,
"avg_line_length": 35.150159744408946,
"alnum_prop": 0.639338302126886,
"repo_name": "openstack/neutron",
"id": "cdb11c653016d7ab6257083dd462330bbef460db",
"size": "11584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/conf/policies/test_security_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15932611"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import json
from datetime import timedelta as td
from unittest.mock import patch
from django.core import mail
from django.utils.timezone import now
from hc.api.models import Channel, Check, Notification
from hc.test import BaseTestCase
class NotifyWhatsAppTestCase(BaseTestCase):
    """Tests for WhatsApp notification delivery."""

    def _setup_data(self, notify_up=True, notify_down=True):
        # One "down" check, last pinged just over an hour ago.
        self.check = Check(project=self.project)
        self.check.status = "down"
        self.check.last_ping = now() - td(minutes=61)
        self.check.save()

        spec = {"value": "+1234567890", "up": notify_up, "down": notify_down}
        self.channel = Channel(project=self.project, kind="whatsapp")
        self.channel.value = json.dumps(spec)
        self.channel.save()
        self.channel.checks.add(self.check)

    @patch("hc.api.transports.curl.request")
    def test_it_works(self, mock_request):
        mock_request.return_value.status_code = 200

        self._setup_data()
        self.channel.notify(self.check)

        _, kwargs = mock_request.call_args
        form = kwargs["data"]
        self.assertEqual(form["To"], "whatsapp:+1234567890")

        n = Notification.objects.get()
        expected_suffix = f"/api/v1/notifications/{n.code}/status"
        self.assertTrue(form["StatusCallback"].endswith(expected_suffix))

        # WhatsApp messages count against the SMS quota.
        self.profile.refresh_from_db()
        self.assertEqual(self.profile.sms_sent, 1)

    @patch("hc.api.transports.curl.request")
    def test_it_obeys_up_down_flags(self, mock_request):
        self._setup_data(notify_down=False)
        self.check.last_ping = now() - td(hours=2)

        self.channel.notify(self.check)

        # Channel is configured to ignore "down" events entirely.
        self.assertEqual(Notification.objects.count(), 0)
        self.assertFalse(mock_request.called)

    @patch("hc.api.transports.curl.request")
    def test_it_enforces_limit(self, mock_request):
        # Put the profile at its monthly message quota.
        self.profile.last_sms_date = now()
        self.profile.sms_sent = 50
        self.profile.save()

        self._setup_data()
        self.channel.notify(self.check)
        self.assertFalse(mock_request.called)

        n = Notification.objects.get()
        self.assertTrue("Monthly message limit exceeded" in n.error)

        # The account owner should be alerted by email.
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertEqual(message.to[0], "alice@example.org")
        self.assertEqual(message.subject, "Monthly WhatsApp Limit Reached")

    @patch("hc.api.transports.curl.request")
    def test_it_does_not_escape_special_characters(self, mock_request):
        self._setup_data()
        self.check.name = "Foo > Bar & Co"
        mock_request.return_value.status_code = 200

        self.channel.notify(self.check)

        _, kwargs = mock_request.call_args
        self.assertIn("Foo > Bar & Co", kwargs["data"]["Body"])
| {
"content_hash": "b1b09099ded00a59632c04d310313645",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 83,
"avg_line_length": 32.577777777777776,
"alnum_prop": 0.6480218281036835,
"repo_name": "healthchecks/healthchecks",
"id": "caca72a8908eb9d59df4722f80a2801f4d046089",
"size": "2949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hc/api/tests/test_notify_whatsapp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "65959"
},
{
"name": "Dockerfile",
"bytes": "1088"
},
{
"name": "HTML",
"bytes": "716643"
},
{
"name": "JavaScript",
"bytes": "50869"
},
{
"name": "Less",
"bytes": "211300"
},
{
"name": "Python",
"bytes": "1043149"
},
{
"name": "Shell",
"bytes": "1655"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
import tensorflow.contrib.slim as slim
import tensorflow as tf
def vgg_arg_scope(weight_decay=0.0005):
  """Build the arg_scope shared by the VGG networks.

  Args:
    weight_decay: l2 regularization coefficient for conv/FC weights.

  Returns:
    An arg_scope configuring slim.conv2d and slim.fully_connected.
  """
  base_scope = arg_scope(
      [slim.conv2d, slim.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer())
  with base_scope:
    # Convolutions additionally default to SAME padding.
    with arg_scope([slim.conv2d], padding='SAME') as final_scope:
      return final_scope
def vgg_16(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_16'):
  """Oxford Net VGG 16-Layers version D.

  To use in classification mode, resize input to 224x224 (the dense head
  assumes a 7x7x512 feature map after pool5).

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: NOTE(review): unused in this implementation — the head
      is reshape + fully_connected, so there is nothing to squeeze. Confirm
      whether callers rely on it.
    scope: NOTE(review): unused — no variable scope is opened here; variables
      are named by the per-layer scopes below. Confirm intent.

  Returns:
    the last op containing the log predictions (logits).
  """
  # Weight/bias setup shared by every conv and dense layer below.
  initializer = tf.truncated_normal_initializer(0.0, 0.01)
  regularizer = slim.l2_regularizer(0.0005)
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_initializer=initializer,
                      biases_initializer=tf.zeros_initializer(),
                      weights_regularizer=regularizer):
    with slim.arg_scope([slim.conv2d], stride=1, padding='SAME'):
      # Five convolutional blocks, each followed by 2x2 max pooling.
      x = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
      x = slim.max_pool2d(x, [2, 2], scope='pool1')
      x = slim.repeat(x, 2, slim.conv2d, 128, [3, 3], scope='conv2')
      x = slim.max_pool2d(x, [2, 2], scope='pool2')
      x = slim.repeat(x, 3, slim.conv2d, 256, [3, 3], scope='conv3')
      x = slim.max_pool2d(x, [2, 2], scope='pool3')
      x = slim.repeat(x, 3, slim.conv2d, 512, [3, 3], scope='conv4')
      x = slim.max_pool2d(x, [2, 2], scope='pool4')
      x = slim.repeat(x, 3, slim.conv2d, 512, [3, 3], scope='conv5')
      x = slim.max_pool2d(x, [2, 2], scope='pool5')
      # Classifier head: flatten (7*7*512 for 224x224 input), two dense
      # layers with dropout, then the logits layer (no activation).
      x = tf.reshape(x, [-1, 7 * 7 * 512])
      x = slim.fully_connected(x, 4096, scope='fc6')
      x = slim.dropout(x, dropout_keep_prob, is_training=is_training,
                       scope='dropout6')
      x = slim.fully_connected(x, 4096, scope='fc7')
      x = slim.dropout(x, dropout_keep_prob, is_training=is_training,
                       scope='dropout7')
      return slim.fully_connected(x, num_classes, activation_fn=None,
                                  scope='fc8')
# Canonical input resolution for this architecture.
vgg_16.default_image_size = 224
| {
"content_hash": "8197ce33367af1258918f2607a6c2a8e",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 87,
"avg_line_length": 45.79120879120879,
"alnum_prop": 0.6193904487640989,
"repo_name": "Aaron-Zhao123/nn_library",
"id": "f9e63af23f7835218b4d78fe695bf308ebb8c6d6",
"size": "4167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/vgg_model_slim.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1083"
},
{
"name": "Python",
"bytes": "145265"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import collections
import functools
import logging
import warnings
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .exceptions import (
HTTPWarning,
LocationValueError,
MaxRetryError,
ProxySchemeUnknown,
ProxySchemeUnsupported,
)
from .packages import six
from .packages.six.moves.urllib.parse import urljoin
from .request import RequestMethods
from .util.url import parse_url
from .util.retry import Retry
# Public API of this module.
__all__ = ["PoolManager", "ProxyManager", "proxy_from_url"]
class InvalidProxyConfigurationWarning(HTTPWarning):
    """Raised when a user has an HTTPS proxy without enabling HTTPS proxies."""
log = logging.getLogger(__name__)
# Keyword arguments that only make sense for HTTPS pools; they are stripped
# from the context when a plain HTTP pool is built (see PoolManager._new_pool).
SSL_KEYWORDS = (
    "key_file",
    "cert_file",
    "cert_reqs",
    "ca_certs",
    "ssl_version",
    "ca_cert_dir",
    "ssl_context",
    "key_password",
)
# All known keyword arguments that could be provided to the pool manager, its
# pools, or the underlying connections. This is used to construct a pool key.
_key_fields = (
    "key_scheme",  # str
    "key_host",  # str
    "key_port",  # int
    "key_timeout",  # int or float or Timeout
    "key_retries",  # int or Retry
    "key_strict",  # bool
    "key_block",  # bool
    "key_source_address",  # str
    "key_key_file",  # str
    "key_key_password",  # str
    "key_cert_file",  # str
    "key_cert_reqs",  # str
    "key_ca_certs",  # str
    "key_ssl_version",  # str
    "key_ca_cert_dir",  # str
    "key_ssl_context",  # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
    "key_maxsize",  # int
    "key_headers",  # dict
    "key__proxy",  # parsed proxy url
    "key__proxy_headers",  # dict
    "key_socket_options",  # list of (level (int), optname (int), value (int or str)) tuples
    "key__socks_options",  # dict
    "key_assert_hostname",  # bool or string
    "key_assert_fingerprint",  # str
    "key_server_hostname",  # str
)
#: The namedtuple class used to construct keys for the connection pool.
#: All custom key schemes should include the fields in this key at a minimum.
PoolKey = collections.namedtuple("PoolKey", _key_fields)
def _default_key_normalizer(key_class, request_context):
    """
    Create a pool key out of a request context dictionary.
    According to RFC 3986, both the scheme and host are case-insensitive.
    Therefore, this function normalizes both before constructing the pool
    key for an HTTPS request. If you wish to change this behaviour, provide
    alternate callables to ``key_fn_by_scheme``.
    :param key_class:
        The class to use when constructing the key. This should be a namedtuple
        with the ``scheme`` and ``host`` keys at a minimum.
    :type key_class: namedtuple
    :param request_context:
        A dictionary-like object that contain the context for a request.
    :type request_context: dict
    :return: A namedtuple that can be used as a connection pool key.
    :rtype: PoolKey
    """
    # Work on a copy: the caller's dict must not be mutated.
    ctx = dict(request_context)
    ctx["scheme"] = ctx["scheme"].lower()
    ctx["host"] = ctx["host"].lower()
    # Dict-valued entries are unhashable; freeze them into frozensets.
    for dict_key in ("headers", "_proxy_headers", "_socks_options"):
        value = ctx.get(dict_key)
        if value is not None:
            ctx[dict_key] = frozenset(value.items())
    # socket_options may arrive as a (mutable) list; freeze into a tuple.
    opts = ctx.get("socket_options")
    if opts is not None:
        ctx["socket_options"] = tuple(opts)
    # Namedtuple fields cannot start with "_", so prefix every key with "key_".
    ctx = {"key_" + name: value for name, value in ctx.items()}
    # Fields absent from the context default to None.
    for field_name in key_class._fields:
        ctx.setdefault(field_name, None)
    return key_class(**ctx)
#: A dictionary that maps a scheme to a callable that creates a pool key.
#: This can be used to alter the way pool keys are constructed, if desired.
#: Each PoolManager makes a copy of this dictionary so they can be configured
#: globally here, or individually on the instance.
key_fn_by_scheme = {
    "http": functools.partial(_default_key_normalizer, PoolKey),
    "https": functools.partial(_default_key_normalizer, PoolKey),
}
#: Maps a URL scheme to the ConnectionPool subclass used for it.
pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool}
class PoolManager(RequestMethods):
    """
    Allows for arbitrary requests while transparently keeping track of
    necessary connection pools for you.
    :param num_pools:
        Number of connection pools to cache before discarding the least
        recently used pool.
    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    :param \\**connection_pool_kw:
        Additional parameters are used to create fresh
        :class:`urllib3.connectionpool.ConnectionPool` instances.
    Example::
        >>> manager = PoolManager(num_pools=2)
        >>> r = manager.request('GET', 'http://google.com/')
        >>> r = manager.request('GET', 'http://google.com/mail')
        >>> r = manager.request('GET', 'http://yahoo.com/')
        >>> len(manager.pools)
        2
    """
    # Set by ProxyManager; None means requests go directly to the target.
    proxy = None
    def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
        RequestMethods.__init__(self, headers)
        self.connection_pool_kw = connection_pool_kw
        # LRU container of pools; evicted pools are closed by dispose_func.
        self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close())
        # Locally set the pool classes and keys so other PoolManagers can
        # override them.
        self.pool_classes_by_scheme = pool_classes_by_scheme
        self.key_fn_by_scheme = key_fn_by_scheme.copy()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.clear()
        # Return False to re-raise any potential exceptions
        return False
    def _new_pool(self, scheme, host, port, request_context=None):
        """
        Create a new :class:`ConnectionPool` based on host, port, scheme, and
        any additional pool keyword arguments.
        If ``request_context`` is provided, it is provided as keyword arguments
        to the pool class used. This method is used to actually create the
        connection pools handed out by :meth:`connection_from_url` and
        companion methods. It is intended to be overridden for customization.
        """
        pool_cls = self.pool_classes_by_scheme[scheme]
        if request_context is None:
            request_context = self.connection_pool_kw.copy()
        # Although the context has everything necessary to create the pool,
        # this function has historically only used the scheme, host, and port
        # in the positional args. When an API change is acceptable these can
        # be removed.
        for key in ("scheme", "host", "port"):
            request_context.pop(key, None)
        if scheme == "http":
            # TLS-only keyword arguments are meaningless for plain HTTP pools.
            for kw in SSL_KEYWORDS:
                request_context.pop(kw, None)
        return pool_cls(host, port, **request_context)
    def clear(self):
        """
        Empty our store of pools and direct them all to close.
        This will not affect in-flight connections, but they will not be
        re-used after completion.
        """
        self.pools.clear()
    def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
        """
        Get a :class:`ConnectionPool` based on the host, port, and scheme.
        If ``port`` isn't given, it will be derived from the ``scheme`` using
        ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
        provided, it is merged with the instance's ``connection_pool_kw``
        variable and used to create the new connection pool, if one is
        needed.
        """
        if not host:
            raise LocationValueError("No host specified.")
        request_context = self._merge_pool_kwargs(pool_kwargs)
        request_context["scheme"] = scheme or "http"
        if not port:
            # Unknown schemes fall back to port 80.
            port = port_by_scheme.get(request_context["scheme"].lower(), 80)
        request_context["port"] = port
        request_context["host"] = host
        return self.connection_from_context(request_context)
    def connection_from_context(self, request_context):
        """
        Get a :class:`ConnectionPool` based on the request context.
        ``request_context`` must at least contain the ``scheme`` key and its
        value must be a key in ``key_fn_by_scheme`` instance variable.
        """
        scheme = request_context["scheme"].lower()
        pool_key_constructor = self.key_fn_by_scheme[scheme]
        pool_key = pool_key_constructor(request_context)
        return self.connection_from_pool_key(pool_key, request_context=request_context)
    def connection_from_pool_key(self, pool_key, request_context=None):
        """
        Get a :class:`ConnectionPool` based on the provided pool key.
        ``pool_key`` should be a namedtuple that only contains immutable
        objects. At a minimum it must have the ``scheme``, ``host``, and
        ``port`` fields.
        """
        with self.pools.lock:
            # If the scheme, host, or port doesn't match existing open
            # connections, open a new ConnectionPool.
            pool = self.pools.get(pool_key)
            if pool:
                return pool
            # Make a fresh ConnectionPool of the desired type
            scheme = request_context["scheme"]
            host = request_context["host"]
            port = request_context["port"]
            pool = self._new_pool(scheme, host, port, request_context=request_context)
            self.pools[pool_key] = pool
        return pool
    def connection_from_url(self, url, pool_kwargs=None):
        """
        Similar to :func:`urllib3.connectionpool.connection_from_url`.
        If ``pool_kwargs`` is not provided and a new pool needs to be
        constructed, ``self.connection_pool_kw`` is used to initialize
        the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
        is provided, it is used instead. Note that if a new pool does not
        need to be created for the request, the provided ``pool_kwargs`` are
        not used.
        """
        u = parse_url(url)
        return self.connection_from_host(
            u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
        )
    def _merge_pool_kwargs(self, override):
        """
        Merge a dictionary of override values for self.connection_pool_kw.
        This does not modify self.connection_pool_kw and returns a new dict.
        Any keys in the override dictionary with a value of ``None`` are
        removed from the merged dictionary.
        """
        base_pool_kwargs = self.connection_pool_kw.copy()
        if override:
            for key, value in override.items():
                if value is None:
                    # None means "remove this key from the merged kwargs".
                    try:
                        del base_pool_kwargs[key]
                    except KeyError:
                        pass
                else:
                    base_pool_kwargs[key] = value
        return base_pool_kwargs
    def _proxy_requires_url_absolute_form(self, parsed_url):
        """
        Indicates if the proxy requires the complete destination URL in the
        request.
        Normally this is only needed when not using an HTTP CONNECT tunnel.
        """
        if self.proxy is None:
            return False
        return parsed_url.scheme == "http" or self.proxy.scheme == "https"
    def urlopen(self, method, url, redirect=True, **kw):
        """
        Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
        with custom cross-host redirect logic and only sends the request-uri
        portion of the ``url``.
        The given ``url`` parameter must be absolute, such that an appropriate
        :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
        """
        u = parse_url(url)
        conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
        # Redirects are handled here (possibly across hosts), so disable
        # the pool's own redirect handling.
        kw["assert_same_host"] = False
        kw["redirect"] = False
        if "headers" not in kw:
            kw["headers"] = self.headers.copy()
        if self._proxy_requires_url_absolute_form(u):
            response = conn.urlopen(method, url, **kw)
        else:
            response = conn.urlopen(method, u.request_uri, **kw)
        redirect_location = redirect and response.get_redirect_location()
        if not redirect_location:
            return response
        # Support relative URLs for redirecting.
        redirect_location = urljoin(url, redirect_location)
        # RFC 7231, Section 6.4.4
        if response.status == 303:
            method = "GET"
        retries = kw.get("retries")
        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect)
        # Strip headers marked as unsafe to forward to the redirected location.
        # Check remove_headers_on_redirect to avoid a potential network call within
        # conn.is_same_host() which may use socket.gethostbyname() in the future.
        if retries.remove_headers_on_redirect and not conn.is_same_host(
            redirect_location
        ):
            headers = list(six.iterkeys(kw["headers"]))
            for header in headers:
                if header.lower() in retries.remove_headers_on_redirect:
                    kw["headers"].pop(header, None)
        try:
            retries = retries.increment(method, url, response=response, _pool=conn)
        except MaxRetryError:
            if retries.raise_on_redirect:
                response.drain_conn()
                raise
            return response
        kw["retries"] = retries
        kw["redirect"] = redirect
        log.info("Redirecting %s -> %s", url, redirect_location)
        # Drain the current response so its connection can be reused,
        # then follow the redirect recursively.
        response.drain_conn()
        return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
    """
    Behaves just like :class:`PoolManager`, but sends all requests through
    the defined proxy, using the CONNECT method for HTTPS URLs.
    :param proxy_url:
        The URL of the proxy to be used.
    :param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. In case
        of HTTP they are being sent with each request, while in the
        HTTPS/CONNECT case they are sent only once. Could be used for proxy
        authentication.
    :param _allow_https_proxy_to_see_traffic:
        Allows forwarding of HTTPS requests to HTTPS proxies. The proxy will
        have visibility of all the traffic sent. ONLY USE IF YOU KNOW WHAT
        YOU'RE DOING. This flag might be removed at any time in any future
        update.
    Example:
        >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
        >>> r1 = proxy.request('GET', 'http://google.com/')
        >>> r2 = proxy.request('GET', 'http://httpbin.org/')
        >>> len(proxy.pools)
        1
        >>> r3 = proxy.request('GET', 'https://httpbin.org/')
        >>> r4 = proxy.request('GET', 'https://twitter.com/')
        >>> len(proxy.pools)
        3
    """
    def __init__(
        self,
        proxy_url,
        num_pools=10,
        headers=None,
        proxy_headers=None,
        _allow_https_proxy_to_see_traffic=False,
        **connection_pool_kw
    ):
        # Accept a ConnectionPool instance in place of a URL string.
        if isinstance(proxy_url, HTTPConnectionPool):
            proxy_url = "%s://%s:%i" % (
                proxy_url.scheme,
                proxy_url.host,
                proxy_url.port,
            )
        proxy = parse_url(proxy_url)
        if proxy.scheme not in ("http", "https"):
            raise ProxySchemeUnknown(proxy.scheme)
        if not proxy.port:
            # Fall back to the scheme's default port (80 for unknown schemes).
            port = port_by_scheme.get(proxy.scheme, 80)
            proxy = proxy._replace(port=port)
        self.proxy = proxy
        self.proxy_headers = proxy_headers or {}
        # Propagated into every pool this manager creates.
        connection_pool_kw["_proxy"] = self.proxy
        connection_pool_kw["_proxy_headers"] = self.proxy_headers
        self.allow_insecure_proxy = _allow_https_proxy_to_see_traffic
        super(ProxyManager, self).__init__(num_pools, headers, **connection_pool_kw)
    def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
        # HTTPS destinations are pooled per destination host (CONNECT
        # tunnel); plain HTTP is pooled per proxy instead.
        if scheme == "https":
            return super(ProxyManager, self).connection_from_host(
                host, port, scheme, pool_kwargs=pool_kwargs
            )
        return super(ProxyManager, self).connection_from_host(
            self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs
        )
    def _set_proxy_headers(self, url, headers=None):
        """
        Sets headers needed by proxies: specifically, the Accept and Host
        headers. Only sets headers not provided by the user.
        """
        headers_ = {"Accept": "*/*"}
        netloc = parse_url(url).netloc
        if netloc:
            headers_["Host"] = netloc
        if headers:
            headers_.update(headers)
        return headers_
    def _validate_proxy_scheme_url_selection(self, url_scheme):
        # HTTPS destinations via an HTTPS proxy are refused unless the
        # caller explicitly opted in at construction time.
        if (
            url_scheme == "https"
            and self.proxy.scheme == "https"
            and not self.allow_insecure_proxy
        ):
            warnings.warn(
                "Your proxy configuration specified an HTTPS scheme for the proxy. "
                "Are you sure you want to use HTTPS to contact the proxy? "
                "This most likely indicates an error in your configuration."
                "If you are sure you want use HTTPS to contact the proxy, enable "
                "the _allow_https_proxy_to_see_traffic.",
                InvalidProxyConfigurationWarning,
            )
            raise ProxySchemeUnsupported(
                "Contacting HTTPS destinations through HTTPS proxies is not supported."
            )
    def urlopen(self, method, url, redirect=True, **kw):
        "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
        u = parse_url(url)
        self._validate_proxy_scheme_url_selection(u.scheme)
        if u.scheme == "http" or self.proxy.scheme == "https":
            # For connections using HTTP CONNECT, httplib sets the necessary
            # headers on the CONNECT to the proxy. For HTTP or when talking
            # HTTPS to the proxy, we'll definitely need to set 'Host' at the
            # very least.
            headers = kw.get("headers", self.headers)
            kw["headers"] = self._set_proxy_headers(url, headers)
        return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
def proxy_from_url(url, **kw):
    # Convenience wrapper: build a ProxyManager for the given proxy URL,
    # forwarding any extra keyword arguments (num_pools, headers, ...)
    # straight to the ProxyManager constructor.
    return ProxyManager(proxy_url=url, **kw)
| {
"content_hash": "9d20ac3e35d6b2bacf5e0108d8edfb9d",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 92,
"avg_line_length": 36.073584905660375,
"alnum_prop": 0.6205868507767143,
"repo_name": "gregbdunn/aws-ec2rescue-linux",
"id": "a0e5b974b900da80ee975bbdcabde9e11b36b8d2",
"size": "19119",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "lib/urllib3/poolmanager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "701"
},
{
"name": "Makefile",
"bytes": "5044"
},
{
"name": "Python",
"bytes": "4595518"
},
{
"name": "Shell",
"bytes": "5229"
}
],
"symlink_target": ""
} |
"""
This module contains a Salesforce Hook which allows you to connect to your Salesforce instance,
retrieve data from it, and write that data to a file for other uses.
.. note:: this hook also relies on the simple_salesforce package:
https://github.com/simple-salesforce/simple-salesforce
"""
import logging
import time
from typing import Any, Dict, Iterable, List, Optional
import pandas as pd
from requests import Session
from simple_salesforce import Salesforce, api
from airflow.compat.functools import cached_property
from airflow.hooks.base import BaseHook
log = logging.getLogger(__name__)
class SalesforceHook(BaseHook):
    """
    Creates new connection to Salesforce and allows you to pull data out of SFDC and save it to a file.

    You can then use that file with other Airflow operators to move the data into another data source.

    :param conn_id: The name of the connection that has the parameters needed to connect to Salesforce.
        The connection should be of type `Salesforce`.
    :param session_id: The access token for a given HTTP request session.
    :param session: A custom HTTP request session. This enables the use of requests Session features not
        otherwise exposed by `simple_salesforce`.

    .. note::
        A connection to Salesforce can be created via several authentication options:

        * Password: Provide Username, Password, and Security Token
        * Direct Session: Provide a `session_id` and either Instance or Instance URL
        * OAuth 2.0 JWT: Provide a Consumer Key and either a Private Key or Private Key File Path
        * IP Filtering: Provide Username, Password, and an Organization ID

        If in sandbox, enter a Domain value of 'test'.
    """

    conn_name_attr = "salesforce_conn_id"
    default_conn_name = "salesforce_default"
    conn_type = "salesforce"
    hook_name = "Salesforce"

    def __init__(
        self,
        salesforce_conn_id: str = default_conn_name,
        session_id: Optional[str] = None,
        session: Optional[Session] = None,
    ) -> None:
        super().__init__()
        self.conn_id = salesforce_conn_id
        self.session_id = session_id
        self.session = session

    @staticmethod
    def get_connection_form_widgets() -> Dict[str, Any]:
        """Returns connection widgets to add to connection form"""
        # Imported lazily: these web-UI dependencies are only available (and
        # only needed) when the Airflow webserver renders the form.
        from flask_appbuilder.fieldwidgets import BS3PasswordFieldWidget, BS3TextFieldWidget
        from flask_babel import lazy_gettext
        from wtforms import PasswordField, StringField

        return {
            "extra__salesforce__security_token": PasswordField(
                lazy_gettext("Security Token"), widget=BS3PasswordFieldWidget()
            ),
            "extra__salesforce__domain": StringField(lazy_gettext("Domain"), widget=BS3TextFieldWidget()),
            "extra__salesforce__consumer_key": StringField(
                lazy_gettext("Consumer Key"), widget=BS3TextFieldWidget()
            ),
            "extra__salesforce__private_key_file_path": PasswordField(
                lazy_gettext("Private Key File Path"), widget=BS3PasswordFieldWidget()
            ),
            "extra__salesforce__private_key": PasswordField(
                lazy_gettext("Private Key"), widget=BS3PasswordFieldWidget()
            ),
            "extra__salesforce__organization_id": StringField(
                lazy_gettext("Organization ID"), widget=BS3TextFieldWidget()
            ),
            "extra__salesforce__instance": StringField(lazy_gettext("Instance"), widget=BS3TextFieldWidget()),
            "extra__salesforce__instance_url": StringField(
                lazy_gettext("Instance URL"), widget=BS3TextFieldWidget()
            ),
            "extra__salesforce__proxies": StringField(lazy_gettext("Proxies"), widget=BS3TextFieldWidget()),
            "extra__salesforce__version": StringField(
                lazy_gettext("API Version"), widget=BS3TextFieldWidget()
            ),
            "extra__salesforce__client_id": StringField(
                lazy_gettext("Client ID"), widget=BS3TextFieldWidget()
            ),
        }

    @staticmethod
    def get_ui_field_behaviour() -> Dict[str, Any]:
        """Returns custom field behaviour"""
        return {
            "hidden_fields": ["schema", "port", "extra", "host"],
            "relabeling": {
                "login": "Username",
            },
        }

    @cached_property
    def conn(self) -> api.Salesforce:
        """Returns a Salesforce instance. (cached)"""
        connection = self.get_connection(self.conn_id)
        extras = connection.extra_dejson
        # all extras below (besides the version one) are explicitly defaulted to None
        # because simple-salesforce has a built-in authentication-choosing method that
        # relies on which arguments are None and without "or None" setting this connection
        # in the UI will result in the blank extras being empty strings instead of None,
        # which would break the connection if "get" was used on its own.
        conn = Salesforce(
            username=connection.login,
            password=connection.password,
            security_token=extras.get('extra__salesforce__security_token') or None,
            domain=extras.get('extra__salesforce__domain') or None,
            session_id=self.session_id,
            instance=extras.get('extra__salesforce__instance') or None,
            instance_url=extras.get('extra__salesforce__instance_url') or None,
            organizationId=extras.get('extra__salesforce__organization_id') or None,
            version=extras.get('extra__salesforce__version') or api.DEFAULT_API_VERSION,
            proxies=extras.get('extra__salesforce__proxies') or None,
            session=self.session,
            client_id=extras.get('extra__salesforce__client_id') or None,
            consumer_key=extras.get('extra__salesforce__consumer_key') or None,
            privatekey_file=extras.get('extra__salesforce__private_key_file_path') or None,
            privatekey=extras.get('extra__salesforce__private_key') or None,
        )
        return conn

    def get_conn(self) -> api.Salesforce:
        """Returns a Salesforce instance. (cached)"""
        return self.conn

    def make_query(
        self, query: str, include_deleted: bool = False, query_params: Optional[dict] = None
    ) -> dict:
        """
        Make a query to Salesforce.

        :param query: The query to make to Salesforce.
        :param include_deleted: True if the query should include deleted records.
        :param query_params: Additional optional arguments
        :return: The query result.
        :rtype: dict
        """
        conn = self.get_conn()

        self.log.info("Querying for all objects")
        query_params = query_params or {}
        query_results = conn.query_all(query, include_deleted=include_deleted, **query_params)

        self.log.info(
            "Received results: Total size: %s; Done: %s", query_results['totalSize'], query_results['done']
        )
        return query_results

    def describe_object(self, obj: str) -> dict:
        """
        Get the description of an object from Salesforce.

        This description is the object's schema and
        some extra metadata that Salesforce stores for each object.

        :param obj: The name of the Salesforce object that we are getting a description of.
        :return: the description of the Salesforce object.
        :rtype: dict
        """
        conn = self.get_conn()
        # getattr() is the idiomatic spelling of the original
        # conn.__getattr__(obj) and goes through normal attribute lookup.
        return getattr(conn, obj).describe()

    def get_available_fields(self, obj: str) -> List[str]:
        """
        Get a list of all available fields for an object.

        :param obj: The name of the Salesforce object that we are getting a description of.
        :return: the names of the fields.
        :rtype: list(str)
        """
        obj_description = self.describe_object(obj)
        return [field['name'] for field in obj_description['fields']]

    def get_object_from_salesforce(self, obj: str, fields: Iterable[str]) -> dict:
        """
        Get all instances of the `object` from Salesforce.

        For each model, only get the fields specified in fields.

        All we really do underneath the hood is run:
            SELECT <fields> FROM <obj>;

        :param obj: The object name to get from Salesforce.
        :param fields: The fields to get from the object.
        :return: all instances of the object from Salesforce.
        :rtype: dict
        """
        query = f"SELECT {','.join(fields)} FROM {obj}"
        # Long queries are elided in the log to keep it readable.
        self.log.info(
            "Making query to Salesforce: %s",
            query if len(query) < 30 else " ... ".join([query[:15], query[-15:]]),
        )
        return self.make_query(query)

    @classmethod
    def _to_timestamp(cls, column: pd.Series) -> pd.Series:
        """
        Convert a column of a dataframe to UNIX timestamps if applicable

        :param column: A Series object representing a column of a dataframe.
        :return: a new series that maintains the same index as the original
        :rtype: pandas.Series
        """
        # try and convert the column to datetimes
        # the column MUST have a four digit year somewhere in the string
        # there should be a better way to do this,
        # but just letting pandas try and convert every column without a format
        # caused it to convert floats as well
        # For example, a column of integers
        # between 0 and 10 are turned into timestamps
        # if the column cannot be converted,
        # just return the original column untouched
        try:
            column = pd.to_datetime(column)
        except ValueError:
            log.error("Could not convert field to timestamps: %s", column.name)
            return column

        # now convert the newly created datetimes into timestamps
        # we have to be careful here
        # because NaT cannot be converted to a timestamp
        # so we have to return NaN
        converted = []
        for value in column:
            try:
                converted.append(value.timestamp())
            except (ValueError, AttributeError):
                # BUGFIX: pd.np.NaN relied on the `pandas.np` alias, which was
                # deprecated in pandas 0.25 and removed in pandas 2.0.
                converted.append(float("nan"))

        return pd.Series(converted, index=column.index)

    def write_object_to_file(
        self,
        query_results: List[dict],
        filename: str,
        fmt: str = "csv",
        coerce_to_timestamp: bool = False,
        record_time_added: bool = False,
    ) -> pd.DataFrame:
        """
        Write query results to file.

        Acceptable formats are:
            - csv:
                comma-separated-values file. This is the default format.
            - json:
                JSON array. Each element in the array is a different row.
            - ndjson:
                JSON array but each element is new-line delimited instead of comma delimited like in `json`

        This requires a significant amount of cleanup.
        Pandas doesn't handle output to CSV and json in a uniform way.
        This is especially painful for datetime types.
        Pandas wants to write them as strings in CSV, but as millisecond Unix timestamps.

        By default, this function will try and leave all values as they are represented in Salesforce.
        You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).
        This is can be greatly beneficial as it will make all of your datetime fields look the same,
        and makes it easier to work with in other database environments

        :param query_results: the results from a SQL query
        :param filename: the name of the file where the data should be dumped to
        :param fmt: the format you want the output in. Default:  'csv'
        :param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.
            False if you want them to be left in the same format as they were in Salesforce.
            Leaving the value as False will result in datetimes being strings. Default: False
        :param record_time_added: True if you want to add a Unix timestamp field
            to the resulting data that marks when the data was fetched from Salesforce. Default: False
        :return: the dataframe that gets written to the file.
        :rtype: pandas.Dataframe
        :raises ValueError: if ``fmt`` is not one of 'csv', 'json', 'ndjson'.
        """
        fmt = fmt.lower()
        if fmt not in ['csv', 'json', 'ndjson']:
            raise ValueError(f"Format value is not recognized: {fmt}")

        df = self.object_to_df(
            query_results=query_results,
            coerce_to_timestamp=coerce_to_timestamp,
            record_time_added=record_time_added,
        )

        # write the CSV or JSON file depending on the option
        # NOTE:
        #   datetimes here are an issue.
        #   There is no good way to manage the difference
        #   for to_json, the options are an epoch or a ISO string
        #   but for to_csv, it will be a string output by datetime
        #   For JSON we decided to output the epoch timestamp in seconds
        #   (as is fairly standard for JavaScript)
        #   And for csv, we do a string
        if fmt == "csv":
            # there are also a ton of newline objects that mess up our ability to write to csv
            # we remove these newlines so that the output is a valid CSV format
            self.log.info("Cleaning data and writing to CSV")
            possible_strings = df.columns[df.dtypes == "object"]
            df[possible_strings] = (
                df[possible_strings]
                .astype(str)
                .apply(lambda x: x.str.replace("\r\n", "").str.replace("\n", ""))
            )
            # write the dataframe
            df.to_csv(filename, index=False)
        elif fmt == "json":
            # BUGFIX: `orient` was passed positionally; it is keyword-only in
            # modern pandas (positional use deprecated, then removed).
            df.to_json(filename, orient="records", date_unit="s")
        elif fmt == "ndjson":
            df.to_json(filename, orient="records", lines=True, date_unit="s")

        return df

    def object_to_df(
        self, query_results: List[dict], coerce_to_timestamp: bool = False, record_time_added: bool = False
    ) -> pd.DataFrame:
        """
        Export query results to dataframe.

        By default, this function will try and leave all values as they are represented in Salesforce.
        You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).
        This is can be greatly beneficial as it will make all of your datetime fields look the same,
        and makes it easier to work with in other database environments

        :param query_results: the results from a SQL query
        :param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.
            False if you want them to be left in the same format as they were in Salesforce.
            Leaving the value as False will result in datetimes being strings. Default: False
        :param record_time_added: True if you want to add a Unix timestamp field
            to the resulting data that marks when the data was fetched from Salesforce. Default: False
        :return: the dataframe.
        :rtype: pandas.Dataframe
        """
        # this line right here will convert all integers to floats
        # if there are any None/np.nan values in the column
        # that's because None/np.nan cannot exist in an integer column
        # we should write all of our timestamps as FLOATS in our final schema
        df = pd.DataFrame.from_records(query_results, exclude=["attributes"])

        # Salesforce returns mixed-case field names; normalize to lowercase.
        df.columns = [column.lower() for column in df.columns]

        # convert columns with datetime strings to datetimes
        # not all strings will be datetimes, so we ignore any errors that occur
        # we get the object's definition at this point and only consider
        # features that are DATE or DATETIME
        if coerce_to_timestamp and df.shape[0] > 0:
            # get the object name out of the query results
            # it's stored in the "attributes" dictionary
            # for each returned record
            object_name = query_results[0]['attributes']['type']
            self.log.info("Coercing timestamps for: %s", object_name)

            schema = self.describe_object(object_name)

            # possible columns that can be converted to timestamps
            # are the ones that are either date or datetime types
            # strings are too general and we risk unintentional conversion
            possible_timestamp_cols = [
                field['name'].lower()
                for field in schema['fields']
                if field['type'] in ["date", "datetime"] and field['name'].lower() in df.columns
            ]
            df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(self._to_timestamp)

        if record_time_added:
            fetched_time = time.time()
            df["time_fetched_from_salesforce"] = fetched_time

        return df
| {
"content_hash": "a0cab73062f3554763418403ed0f50ce",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 110,
"avg_line_length": 44.255208333333336,
"alnum_prop": 0.6299870542544428,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "97c2697b452cc586504d271f2865b36b4ead33a4",
"size": "17783",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/providers/salesforce/hooks/salesforce.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
__all__ = ['BaseResolver', 'Resolver']
from error import *
from nodes import *
import re
class ResolverError(YAMLError):
    # Raised for invalid resolver configuration: bad path elements,
    # node checkers, or node kinds passed to add_path_resolver().
    pass
class BaseResolver(object):
    """
    Decide which tag a node should get, either from the registered
    implicit resolvers (regexp-based, for plain scalars) or from the
    registered path resolvers (based on the node's position in the tree).
    """

    # Fallback tags used when no implicit or path resolver matches.
    DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
    DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
    DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'

    # Class-level registries shared via inheritance; subclasses receive
    # their own copy on first registration (copy-on-write, see below).
    yaml_implicit_resolvers = {}
    yaml_path_resolvers = {}

    def __init__(self):
        # Per-instance stacks mirroring the current depth in the node tree,
        # maintained by descend_resolver()/ascend_resolver().
        self.resolver_exact_paths = []
        self.resolver_prefix_paths = []

    def add_implicit_resolver(cls, tag, regexp, first):
        """Register *regexp* as an implicit resolver for *tag*.

        *first* lists the characters a matching plain scalar may start
        with; ``None`` registers a catch-all entry."""
        # Copy-on-write: give this class its own registry so registering on
        # a subclass does not mutate the parent class's table.
        if not 'yaml_implicit_resolvers' in cls.__dict__:
            cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
        if first is None:
            first = [None]
        for ch in first:
            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
    # Pre-decorator idiom: wrap explicitly instead of using @classmethod.
    add_implicit_resolver = classmethod(add_implicit_resolver)

    def add_path_resolver(cls, tag, path, kind=None):
        # Note: `add_path_resolver` is experimental.  The API could be changed.
        # `new_path` is a pattern that is matched against the path from the
        # root to the node that is being considered.  `node_path` elements are
        # tuples `(node_check, index_check)`.  `node_check` is a node class:
        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`.  `None`
        # matches any kind of a node.  `index_check` could be `None`, a boolean
        # value, a string value, or a number.  `None` and `False` match against
        # any _value_ of sequence and mapping nodes.  `True` matches against
        # any _key_ of a mapping node.  A string `index_check` matches against
        # a mapping value that corresponds to a scalar key which content is
        # equal to the `index_check` value.  An integer `index_check` matches
        # against a sequence value with the index equal to `index_check`.
        # Copy-on-write, same rationale as in add_implicit_resolver.
        if not 'yaml_path_resolvers' in cls.__dict__:
            cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
        new_path = []
        for element in path:
            # Normalize each path element into a (node_check, index_check)
            # pair, accepting bare values, 1-tuples and 2-tuples.
            if isinstance(element, (list, tuple)):
                if len(element) == 2:
                    node_check, index_check = element
                elif len(element) == 1:
                    node_check = element[0]
                    index_check = True
                else:
                    raise ResolverError("Invalid path element: %s" % element)
            else:
                node_check = None
                index_check = element
            # Allow plain Python types as shorthand for the node classes.
            if node_check is str:
                node_check = ScalarNode
            elif node_check is list:
                node_check = SequenceNode
            elif node_check is dict:
                node_check = MappingNode
            elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
                    and not isinstance(node_check, basestring) \
                    and node_check is not None:
                raise ResolverError("Invalid node checker: %s" % node_check)
            if not isinstance(index_check, (basestring, int)) \
                    and index_check is not None:
                raise ResolverError("Invalid index checker: %s" % index_check)
            new_path.append((node_check, index_check))
        # Same shorthand applies to the final node kind.
        if kind is str:
            kind = ScalarNode
        elif kind is list:
            kind = SequenceNode
        elif kind is dict:
            kind = MappingNode
        elif kind not in [ScalarNode, SequenceNode, MappingNode] \
                and kind is not None:
            raise ResolverError("Invalid node kind: %s" % kind)
        cls.yaml_path_resolvers[tuple(new_path), kind] = tag
    add_path_resolver = classmethod(add_path_resolver)

    def descend_resolver(self, current_node, current_index):
        """Push the set of path resolvers still matching at this depth."""
        if not self.yaml_path_resolvers:
            return
        exact_paths = {}
        prefix_paths = []
        if current_node:
            # Narrow the candidates that matched at the previous depth.
            depth = len(self.resolver_prefix_paths)
            for path, kind in self.resolver_prefix_paths[-1]:
                if self.check_resolver_prefix(depth, path, kind,
                        current_node, current_index):
                    if len(path) > depth:
                        prefix_paths.append((path, kind))
                    else:
                        exact_paths[kind] = self.yaml_path_resolvers[path, kind]
        else:
            # Root of the document: seed candidates from the full registry.
            for path, kind in self.yaml_path_resolvers:
                if not path:
                    exact_paths[kind] = self.yaml_path_resolvers[path, kind]
                else:
                    prefix_paths.append((path, kind))
        self.resolver_exact_paths.append(exact_paths)
        self.resolver_prefix_paths.append(prefix_paths)

    def ascend_resolver(self):
        """Pop the state pushed by the matching descend_resolver() call."""
        if not self.yaml_path_resolvers:
            return
        self.resolver_exact_paths.pop()
        self.resolver_prefix_paths.pop()

    def check_resolver_prefix(self, depth, path, kind,
            current_node, current_index):
        """Return True if path[depth-1] matches the current node/index."""
        node_check, index_check = path[depth-1]
        # A string node_check matches the node's tag; a class matches type.
        if isinstance(node_check, basestring):
            if current_node.tag != node_check:
                return
        elif node_check is not None:
            if not isinstance(current_node, node_check):
                return
        # True matches mapping keys only; False/None match values only.
        if index_check is True and current_index is not None:
            return
        if (index_check is False or index_check is None) \
                and current_index is None:
            return
        # A string index_check matches a scalar key's content; an integer
        # matches a sequence position (bool is excluded: True/False already
        # carry key/value semantics above).
        if isinstance(index_check, basestring):
            if not (isinstance(current_index, ScalarNode)
                    and index_check == current_index.value):
                return
        elif isinstance(index_check, int) and not isinstance(index_check, bool):
            if index_check != current_index:
                return
        return True

    def resolve(self, kind, value, implicit):
        """Return the tag for a node of the given *kind* and *value*."""
        # Plain scalars first try the regexp-based implicit resolvers,
        # bucketed by the scalar's first character for speed.
        if kind is ScalarNode and implicit[0]:
            if value == u'':
                resolvers = self.yaml_implicit_resolvers.get(u'', [])
            else:
                resolvers = self.yaml_implicit_resolvers.get(value[0], [])
            resolvers += self.yaml_implicit_resolvers.get(None, [])
            for tag, regexp in resolvers:
                if regexp.match(value):
                    return tag
            implicit = implicit[1]
        # Then consult any path resolvers matching the current position.
        if self.yaml_path_resolvers:
            exact_paths = self.resolver_exact_paths[-1]
            if kind in exact_paths:
                return exact_paths[kind]
            if None in exact_paths:
                return exact_paths[None]
        # Finally fall back to the default tag for the node kind.
        if kind is ScalarNode:
            return self.DEFAULT_SCALAR_TAG
        elif kind is SequenceNode:
            return self.DEFAULT_SEQUENCE_TAG
        elif kind is MappingNode:
            return self.DEFAULT_MAPPING_TAG
class Resolver(BaseResolver):
    # Concrete resolver; module-level registrations preload it with the
    # standard YAML 1.1 implicit scalar resolvers (bool, float, int, merge,
    # null, timestamp, value).
    pass
# Register the standard YAML 1.1 implicit resolvers on Resolver. Each call
# pairs a tag with the regexp that recognizes it and the characters a
# matching plain scalar may start with (used to bucket lookups).

# Booleans: yes/no, true/false, on/off in three capitalizations.
Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:bool',
        re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
                    |true|True|TRUE|false|False|FALSE
                    |on|On|ON|off|Off|OFF)$''', re.X),
        list(u'yYnNtTfFoO'))

# Floats, including sexagesimal (base-60, colon-separated), inf and nan.
Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:float',
        re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
                    |\.[0-9_]+(?:[eE][-+][0-9]+)?
                    |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
                    |[-+]?\.(?:inf|Inf|INF)
                    |\.(?:nan|NaN|NAN))$''', re.X),
        list(u'-+0123456789.'))

# Integers: binary, octal, decimal, hexadecimal and sexagesimal forms,
# with optional underscores as digit separators.
Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:int',
        re.compile(ur'''^(?:[-+]?0b[0-1_]+
                    |[-+]?0[0-7_]+
                    |[-+]?(?:0|[1-9][0-9_]*)
                    |[-+]?0x[0-9a-fA-F_]+
                    |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
        list(u'-+0123456789'))

# The '<<' merge key for mappings.
Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:merge',
        re.compile(ur'^(?:<<)$'),
        [u'<'])

# Null: '~', the word null, or an empty scalar.
Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:null',
        re.compile(ur'''^(?: ~
                    |null|Null|NULL
                    | )$''', re.X),
        [u'~', u'n', u'N', u''])

# ISO-8601-style timestamps, date-only or with time and optional offset.
Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:timestamp',
        re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
                    |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
                     (?:[Tt]|[ \t]+)[0-9][0-9]?
                     :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
                     (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
        list(u'0123456789'))

# The '=' default-value key.
Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:value',
        re.compile(ur'^(?:=)$'),
        [u'='])

# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:yaml',
        re.compile(ur'^(?:!|&|\*)$'),
        list(u'!&*'))
| {
"content_hash": "336360f2e9c512fdc435bf9e574d24bc",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 82,
"avg_line_length": 40.228699551569505,
"alnum_prop": 0.5292609519563036,
"repo_name": "KaranToor/MA450",
"id": "1d6ee2e8fafc10a93e420fb09a64ddd08159e701",
"size": "8994",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/platform/bq/third_party/yaml/resolver.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
} |
"""
PyValitron
~~~~~~
Python Inputs Validation Library
:copyright: (c) 2016 by Clivern (hello@clivern.com).
:license: MIT, see LICENSE for more details.
"""
from .validator import Validator
from .sanitizer import Sanitizer
from .utils import Utils
from .exceptions import PyValitronError
class Form(object):
    """
    Runs configured validation and sanitization rules over a set of inputs.

    Inputs are supplied as a dict of
    ``{name: {'value': ..., 'validate': {...}, 'sanitize': {...}}}`` entries;
    per-input results and error messages are collected on the instance.
    """

    def __init__(self, inputs=None):
        """Init Form Module"""
        # BUGFIX: state previously lived in class-level dicts/lists which were
        # mutated through `self` (e.g. self._errors[...] = ..., and
        # add_validator appending to a class-level list), so errors and custom
        # validators/sanitizers leaked between Form instances. All state is now
        # per-instance. The mutable default argument ({}) was also replaced
        # with a None sentinel.
        self._inputs = {} if inputs is None else inputs
        self._errors = {}
        self._sinputs = {}
        self._vstatus = False
        self._sstatus = False
        self._validators = []
        self._sanitizers = []
        self._validator = Validator()
        self._sanitizer = Sanitizer()
        self._utils = Utils()

    def add_inputs(self, inputs=None):
        """Set inputs"""
        # None sentinel instead of a shared mutable default.
        self._inputs = {} if inputs is None else inputs

    def get_inputs(self):
        """Get Original Inputs Values"""
        return self._inputs

    def get_errors(self):
        """Get All Errors"""
        return self._errors

    def get_vstatus(self):
        """Get Overall Validation Status"""
        return self._vstatus

    def get_sstatus(self):
        """Get Overall Sanitization Status"""
        return self._sstatus

    def process(self, direction=('sanitize', 'validate')):
        """Process both validation and sanitization, in the order given
        by the first element of *direction*."""
        if direction[0] == 'sanitize':
            if 'sanitize' in direction:
                self._sanitize()
            if 'validate' in direction:
                self._validate()
        else:
            if 'validate' in direction:
                self._validate()
            if 'sanitize' in direction:
                self._sanitize()

    def add_validator(self, val_instance):
        """Add custom validator"""
        self._validators.append(val_instance)

    def add_sanitizer(self, san_instance):
        """Add custom sanitizer"""
        self._sanitizers.append(san_instance)

    def _validate(self):
        """Validate, Set Errors and Return Overall Status"""
        # Validate current inputs value
        status = True
        for current_input, validation_rule in self._inputs.items():
            # Push input value to validator
            self._validator.set_input(self._inputs[current_input]['value'])
            if 'validate' in validation_rule:
                self._errors[current_input] = []
                for rule_name, rule_args in validation_rule['validate'].items():
                    # Switch to whichever validator defines this rule.
                    self._update_validator(rule_name)
                    # Check if param exist and pass them to the method
                    if 'param' in rule_args.keys() and len(rule_args['param']) > 0:
                        current_status = getattr(self._validator, rule_name)(*rule_args['param'])
                    else:
                        current_status = getattr(self._validator, rule_name)()
                    self._inputs[current_input]['status'] = current_status
                    status &= current_status
                    if not current_status and 'error' in rule_args.keys():
                        self._errors[current_input].append(rule_args['error'])
        # Set and return Overall status
        self._vstatus = status
        return status

    def _sanitize(self):
        """Sanitize Inputs and Store Them"""
        # Sanitize current input value
        status = True
        for current_input, sanitization_rule in self._inputs.items():
            # Push input value to sanitizer
            self._sanitizer.set_input(self._inputs[current_input]['value'])
            self._sanitizer.set_sinput(None)
            if 'sanitize' in sanitization_rule:
                for rule_name, rule_args in sanitization_rule['sanitize'].items():
                    # Switch to whichever sanitizer defines this rule.
                    self._update_sanitizer(rule_name)
                    # Check if param provided and pass them to the method
                    if 'param' in rule_args.keys() and len(rule_args['param']) > 0:
                        sanitized_value = getattr(self._sanitizer, rule_name)(*rule_args['param'])
                    else:
                        sanitized_value = getattr(self._sanitizer, rule_name)()
                    self._inputs[current_input]['svalue'] = sanitized_value
                # is_exact records whether sanitization left the value unchanged.
                self._inputs[current_input]['is_exact'] = True if self._inputs[current_input]['value'] == self._sanitizer.get_sinput() else False
                status &= self._inputs[current_input]['is_exact']
        # Set and return Overall status
        self._sstatus = status
        return status

    def _update_validator(self, rule_name):
        """Update current validator"""
        if hasattr(self._validator, rule_name):
            return True
        # Fall back to any user-registered validator exposing the rule.
        for validator in self._validators:
            if hasattr(validator, rule_name):
                self._validator = validator
                return True
        raise PyValitronError('Non existent validation rule %s' % rule_name)

    def _update_sanitizer(self, rule_name):
        """Update current sanitizer"""
        # Chain sanitizers: the next rule consumes the previous rule's output
        # (sinput) when one exists, otherwise the original input.
        if hasattr(self._sanitizer, rule_name):
            if self._sanitizer.get_sinput() is None:
                self._sanitizer.set_input(self._sanitizer.get_input())
                self._sanitizer.set_sinput(None)
            else:
                self._sanitizer.set_input(self._sanitizer.get_sinput())
            return True
        # Fall back to any user-registered sanitizer exposing the rule.
        for sanitizer in self._sanitizers:
            if hasattr(sanitizer, rule_name):
                if self._sanitizer.get_sinput() is None:
                    sanitizer.set_input(self._sanitizer.get_input())
                    sanitizer.set_sinput(None)
                else:
                    sanitizer.set_input(self._sanitizer.get_sinput())
                self._sanitizer = sanitizer
                return True
        raise PyValitronError('Non existent sanitization rule %s' % rule_name)
| {
"content_hash": "38dc8ebc01d41c9808aec91249727b68",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 149,
"avg_line_length": 37.210191082802545,
"alnum_prop": 0.562307428962684,
"repo_name": "Clivern/PyValitron",
"id": "1220bea091af8f4559d8b51b76d7fd1fbc91c087",
"size": "5866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvalitron/form.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27245"
}
],
"symlink_target": ""
} |
class TreeNode(object):
    """A binary-tree node holding a value and two child links."""

    def __init__(self, x):
        self.val = x
        # Children start out absent; callers attach them explicitly.
        self.left = self.right = None
class Codec:
    def serialize(self, root):
        """
        Encodes a tree to a single string.

        :type root: TreeNode
        :rtype: str
        """
        # Iterative preorder walk; a '#' token records every absent child
        # so the exact tree shape can be rebuilt unambiguously.
        tokens = []
        stack = [root]
        while stack:
            node = stack.pop()
            if node is None:
                tokens.append('#')
            else:
                tokens.append(str(node.val))
                # Push right first so the left subtree is emitted first.
                stack.append(node.right)
                stack.append(node.left)
        return ' '.join(tokens)

    def deserialize(self, data):
        """
        Decodes your encoded data to tree.

        :type data: str
        :rtype: TreeNode
        """
        tokens = data.split()
        cursor = [0]  # mutable position shared with the nested builder

        def build():
            token = tokens[cursor[0]]
            cursor[0] += 1
            if token == '#':
                return None
            node = TreeNode(int(token))
            node.left = build()
            node.right = build()
            return node

        return build()
# Build a small sample tree:
#       1
#      / \
#     2   3
#        /
#       12
mytree = TreeNode(1)
mytree.left = TreeNode(2)
mytree.right = TreeNode(3)
mytree.right.left = TreeNode(12) # serializes to "1 2 # # 3 12 # # #"
# Your Codec object will be instantiated and called as such:
codec = Codec()
# Round-trip the tree (result discarded), then print its serialized form.
codec.deserialize(codec.serialize(mytree))
print(codec.serialize(mytree))
| {
"content_hash": "d4f629152612cea062bb43616ca4bfd5",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 60,
"avg_line_length": 24.53846153846154,
"alnum_prop": 0.4992163009404389,
"repo_name": "rx2130/Leetcode",
"id": "3fd33492e1df680754307d2098a7f37997bf00a3",
"size": "1313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/297 Serialize and Deserialize Binary Tree.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "277012"
}
],
"symlink_target": ""
} |
import chstrings
import config
import unittest
import mock
class CHStringsTest(unittest.TestCase):
    """Tests for chstrings localized-string loading and fallback merging."""

    @classmethod
    def add_smoke_test(cls, cfg):
        """Attach a per-language smoke test (test_<lang>_smoke_test).

        The generated test only checks that loading strings for *cfg*
        returns a non-empty result.
        """
        def test(self):
            # We just want to see if this will blow up. Use the fallback
            # lang_tag across all tests.
            lang_tag = cfg.lang_code
            if cfg.accept_language:
                lang_tag = cfg.accept_language[-1]
            self.assertNotEqual({},
                chstrings.get_localized_strings(cfg, lang_tag))
        name = 'test_' + cfg.lang_code + '_smoke_test'
        setattr(cls, name, test)

    def test_fallback_lang_tag(self):
        """An incomplete strings file must be merged with the fallback one."""
        gcfg = config.get_global_config()
        cfg = config.get_localized_config(gcfg.fallback_lang_tag, api = False)
        fallback_strings = chstrings.get_localized_strings(
            cfg, gcfg.fallback_lang_tag)
        original = chstrings._load_strings_for_lang_tag(gcfg.fallback_lang_tag)
        with mock.patch('chstrings._load_strings_for_lang_tag') as m:
            # Simulate an incomplete strings file.
            def _load_strings_side_effect(lang_tag):
                if lang_tag == 'fake':
                    return {'tooltitle': 'Test Citation Hunt'}
                elif lang_tag == gcfg.fallback_lang_tag:
                    return original
                raise ValueError
            m.side_effect = _load_strings_side_effect
            # The incomplete strings must have been merged with the fallback
            # ones.
            strings = chstrings.get_localized_strings(cfg, 'fake')
            self.assertEqual('Test Citation Hunt', strings['tooltitle'])
            self.assertEqual(fallback_strings['instructions_goal'],
                strings['instructions_goal'])

    def test_missing_lang_tag_has_no_fallback(self):
        # We must only apply the fallback strings if there is an incomplete
        # strings file (test_fallback_lang_tag exercises that behavior).
        # If the file does not exist at all, we must return {} rather than just
        # the fallback strings.
        # BUG FIX: this method previously referenced a bare `cfg`, which only
        # resolved through the module-level name leaked by the __main__ loop
        # below; under a test runner such as pytest it raised NameError.
        # Build the config locally instead.
        gcfg = config.get_global_config()
        cfg = config.get_localized_config(gcfg.fallback_lang_tag, api = False)
        self.assertEqual(
            chstrings.get_localized_strings(cfg, 'fake'), {})
if __name__ == '__main__':
    # Register one smoke test per supported language, then run the suite.
    for lc in config.LANG_CODES_TO_LANG_NAMES:
        cfg = config.get_localized_config(lc, api = False)
        CHStringsTest.add_smoke_test(cfg)
    unittest.main()
| {
"content_hash": "745f7d2a498c125d53fa087c0dfa10b8",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 79,
"avg_line_length": 39.88135593220339,
"alnum_prop": 0.6060348491287718,
"repo_name": "eggpi/citationhunt",
"id": "1fcd8aae6acab413894b68bcd1a3bbcd9121c311",
"size": "2353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chstrings/chstrings_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13227"
},
{
"name": "HTML",
"bytes": "16959"
},
{
"name": "JavaScript",
"bytes": "112497"
},
{
"name": "Python",
"bytes": "193620"
},
{
"name": "Shell",
"bytes": "861"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.translation import ugettext_lazy as _, get_language
from django.contrib.auth.models import User, Group
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from parler.models import TranslatableModel, TranslatedFields
from model_utils.models import TimeStampedModel
from aldryn_translation_tools.models import TranslationHelperMixin
from allink_core.allink_mailchimp.config import MailChimpConfig
from allink_core.allink_mailchimp.helpers import list_members_delete, list_members_put, get_status_if_new
from allink_core.allink_base.models import AllinkTranslatedAutoSlugifyMixin
# Module-level MailChimp configuration shared by the member sync code below.
config = MailChimpConfig()
@python_2_unicode_compatible
class Members(TranslationHelperMixin, AllinkTranslatedAutoSlugifyMixin, TranslatableModel, TimeStampedModel):
    """A club member linked 1:1 to a Django auth user.

    Saving keeps the related User record in sync; MailChimp list membership
    is managed via put_to_mailchimp_list() / delete_from_mailchimp_list().
    """

    # The auto-generated slug is derived from the translated full name.
    slug_source_field_name = 'full_name'
    translations = TranslatedFields(
        slug=models.SlugField(
            _(u'Slug'),
            max_length=255,
            default='',
            blank=True,
            help_text=_(u'Leave blank to auto-generate a unique slug.')
        )
    )
    member_nr = models.CharField(
        _(u'Member Number'),
        max_length=30,
        unique=True,
        blank=False
    )
    email = models.EmailField(
        _(u'Email'),
    )
    first_name = models.CharField(
        _(u'Firstname'),
        max_length=30,
        blank=True,
        null=True
    )
    last_name = models.CharField(
        _(u'Lastname'),
        max_length=30,
        blank=True
    )
    language = models.CharField(
        _(u'Language'),
        max_length=3,
        default=settings.LANGUAGE_CODE
    )
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

    class Meta:
        app_label = 'members'
        verbose_name = _('Members')
        verbose_name_plural = _('Members')

    def __str__(self):
        return u'{}: {} {}'.format(self.member_nr, self.user.first_name, self.user.last_name)

    @property
    def full_name(self):
        """Return '<first_name> <last_name>' (used as the slug source)."""
        return u'{} {}'.format(self.first_name, self.last_name)

    @classmethod
    def get_verbose_name(cls):
        """Return the verbose name, overridable through AllinkConfig."""
        from allink_core.allink_config.models import AllinkConfig
        try:
            field_name = cls._meta.model_name + '_verbose'
            return getattr(AllinkConfig.get_solo(), field_name)
        except AttributeError:
            return cls._meta.verbose_name

    @classmethod
    def get_verbose_name_plural(cls):
        """Return the plural verbose name, overridable through AllinkConfig."""
        from allink_core.allink_config.models import AllinkConfig
        try:
            field_name = cls._meta.model_name + '_verbose_plural'
            return getattr(AllinkConfig.get_solo(), field_name)
        except AttributeError:
            return cls._meta.verbose_name_plural

    def save(self, **kwargs):
        """Create or update the linked auth User before saving the member."""
        if not self.pk:
            user, created = User.objects.get_or_create(
                username=self.member_nr,
                email=self.email,
                # password=settings.MEMBERS_INITIAL_PWD,
                first_name=self.first_name,
                last_name=self.last_name
            )
            group = Group.objects.get_or_create(name='Mitglieder')
            user.groups.add(group[0])
            self.language = get_language()
            self.user = user
            # self.put_to_mailchimp_list()
        else:
            # Keep the linked User record in sync with the edited fields.
            self.user.username = self.member_nr
            self.user.email = self.email
            self.user.last_name = self.last_name
            self.user.first_name = self.first_name
            self.user.save()
        super(Members, self).save(**kwargs)

    def delete(self, *args, **kwargs):
        """Delete the member together with its User and MailChimp entry."""
        if self.user:
            self.user.delete()
        self.delete_from_mailchimp_list()
        super(Members, self).delete(*args, **kwargs)

    def get_absolute_url(self):
        from django.core.urlresolvers import reverse
        app = '{}:detail'.format(self._meta.model_name)
        return reverse(app, kwargs={'slug': self.slug})

    def log(self, log, description):
        """Record a MembersLog entry for this member."""
        MembersLog.objects.create(members=self, log=log, description=description)

    def put_to_mailchimp_list(self, member_hash_email=None):
        """Subscribe or update this member on the configured MailChimp list."""
        data = {
            'email_address': self.email,
            'status': 'subscribed',
            'status_if_new': get_status_if_new(),
            'language': self.language,
            'merge_fields': {
                'FNAME': self.first_name,
                'LNAME': self.last_name
            }
        }
        if config.merge_vars:
            # BUG FIX: dicts have no append(); the previous
            # ``data = data.append(config.merge_vars)`` raised AttributeError
            # (and would have rebound data to None). update() merges the
            # configured merge vars in place.
            data.update(config.merge_vars)
        list_members_put(data=data, member_hash_email=member_hash_email)

    def delete_from_mailchimp_list(self):
        """Remove this member from the configured MailChimp list."""
        # delete member
        data = {
            'email_address': self.email,
        }
        if config.merge_vars:
            # BUG FIX: see put_to_mailchimp_list() above -- update, not append.
            data.update(config.merge_vars)
        list_members_delete(data)
@python_2_unicode_compatible
class MembersLog(models.Model):
    """Timestamped log entry attached to a member (see Members.log())."""

    # Short machine-oriented log tag; shown as the string representation.
    log = models.CharField(
        _(u'Log'),
        max_length=255
    )
    # Free-form human-readable detail for the log entry.
    description = models.CharField(
        _(u'Description'),
        max_length=255
    )
    members = models.ForeignKey(Members)
    created = models.DateTimeField(
        auto_now_add=True
    )

    class Meta:
        # Newest entries first.
        ordering = ('-created',)

    def __str__(self):
        return self.log
| {
"content_hash": "0910f2456b31417e444f364bf07333cb",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 109,
"avg_line_length": 30.305084745762713,
"alnum_prop": 0.6038404175988069,
"repo_name": "allink/allink-apps",
"id": "052968f9af71e16fbcbb5b986f5709b231f4caed",
"size": "5388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "members/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "994"
},
{
"name": "HTML",
"bytes": "47533"
},
{
"name": "Python",
"bytes": "183917"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from wagtail.wagtailcore.models import Page
from wagtail.wagtailsearch.models import Query
import requests
try:
# Wagtail >= 1.1
from wagtail.contrib.wagtailsearchpromotions.models import SearchPromotion
except ImportError:
# Wagtail < 1.1
from wagtail.wagtailsearch.models import EditorsPick as SearchPromotion
def send_simple_message():
    """Send a fixed test email through the Mailgun sandbox domain.

    Returns the ``requests.Response`` from the Mailgun messages endpoint.
    """
    # SECURITY: the API key used to be hard-coded below. Prefer the
    # MAILGUN_API_KEY environment variable; the old sandbox key remains as a
    # backward-compatible fallback -- rotate it and drop the fallback.
    import os
    api_key = os.environ.get(
        "MAILGUN_API_KEY", "key-267ace68e12daf2953d28972a5e7831c")
    return requests.post(
        "https://api.mailgun.net/v3/sandboxeeb88f43cd0248f0983239c425f90ab6.mailgun.org/messages",
        auth=("api", api_key),
        data={"from": "Mailgun Sandbox <postmaster@sandboxeeb88f43cd0248f0983239c425f90ab6.mailgun.org>",
              "to": "child <childvoiceindiaorg@gmail.com>",
              "subject": "Hello child",
              "text": "Congratulations child, you just sent an email with Mailgun! You are truly awesome! You can see a record of this email in your logs: https://mailgun.com/cp/log . You can send up to 300 emails/day from this sandbox server. Next, you should add your own domain so you can send 10,000 emails/month for free."})
def search(request):
    """Render search results for the ``query`` GET parameter.

    Records a hit for the query, collects its editor picks, and fires the
    Mailgun sandbox message on every request.
    """
    send_simple_message()

    search_query = request.GET.get('query', None)
    if search_query:
        search_results = Page.objects.live().search(search_query)
        query = Query.get(search_query)
        query.add_hit()  # record the hit for search analytics
        search_picks = query.editors_picks.all()
    else:
        search_results = Page.objects.none()
        search_picks = SearchPromotion.objects.none()

    # Paginate to 10 results per page, clamping bad page numbers.
    requested_page = request.GET.get('page', 1)
    paginator = Paginator(search_results, 10)
    try:
        search_results = paginator.page(requested_page)
    except PageNotAnInteger:
        search_results = paginator.page(1)
    except EmptyPage:
        search_results = paginator.page(paginator.num_pages)

    context = {
        'search_query': search_query,
        'search_results': search_results,
        'search_picks': search_picks,
    }
    return render(request, 'demo/search_results.html', context)
| {
"content_hash": "20031b22d737ad507086ced9b97bc619",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 333,
"avg_line_length": 37.91228070175438,
"alnum_prop": 0.6881073577047663,
"repo_name": "igauravsehrawat/child-voice",
"id": "a72cb35def1a3623a9639f511dd1318ebfaab4a9",
"size": "2161",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "demo/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "31678"
},
{
"name": "HTML",
"bytes": "52814"
},
{
"name": "Java",
"bytes": "773"
},
{
"name": "JavaScript",
"bytes": "5997"
},
{
"name": "Python",
"bytes": "88536"
},
{
"name": "Shell",
"bytes": "1346"
}
],
"symlink_target": ""
} |
import pytest
from pook import MockEngine, Engine
from pook.interceptors import BaseInterceptor
class Interceptor(BaseInterceptor):
    """Minimal interceptor that only records whether it is enabled."""

    def activate(self):
        # Flag inspected by the engine status tests below.
        self.active = True

    def disable(self):
        self.active = False
@pytest.fixture
def engine():
    """Provide a fresh MockEngine wrapping a new Engine for each test."""
    core = Engine()
    return MockEngine(core)
def test_mock_engine_instance(engine):
    """A new mock engine wraps an Engine and ships default interceptors."""
    assert isinstance(engine.interceptors, list)
    assert len(engine.interceptors) >= 2
    assert isinstance(engine.engine, Engine)
def test_mock_engine_flush(engine):
    """flush_interceptors() must drop every registered interceptor."""
    assert len(engine.interceptors) >= 2
    engine.flush_interceptors()
    assert engine.interceptors == []
def test_mock_engine_interceptors(engine):
    """Interceptors can be registered by class and removed by name."""
    engine.flush_interceptors()
    engine.add_interceptor(Interceptor)
    assert len(engine.interceptors) == 1
    assert isinstance(engine.interceptors[0], Interceptor)
    engine.remove_interceptor('Interceptor')
    assert engine.interceptors == []
def test_mock_engine_status(engine):
    """activate()/disable() must propagate to every registered interceptor."""
    engine.flush_interceptors()
    engine.add_interceptor(Interceptor)
    assert len(engine.interceptors) == 1
    interceptor = engine.interceptors[0]

    engine.activate()
    assert interceptor.active

    engine.disable()
    assert not interceptor.active
| {
"content_hash": "06fe99ab3dd0e6f73eccdd5b87fe77a0",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 58,
"avg_line_length": 24.215686274509803,
"alnum_prop": 0.725506072874494,
"repo_name": "h2non/pook",
"id": "19943649caa2fa426ae09405dfbed67b2639cdd8",
"size": "1260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/mock_engine_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1528"
},
{
"name": "Python",
"bytes": "132129"
}
],
"symlink_target": ""
} |
from datetime import date
from dateutil.relativedelta import relativedelta as rd
from holidays.constants import JAN, MAR, MAY, JUN, NOV, DEC, SAT, SUN
from holidays.holiday_base import HolidayBase
from holidays.utils import islamic_to_gre
# Appended to a holiday's name when its observance moves to the next Monday.
OBSERVED_SUFFIX = " (Observed)"
class Azerbaijan(HolidayBase):
    """Public holidays of Azerbaijan.

    When ``observed`` is enabled, holidays falling on a weekend are also
    recorded on the following Monday with OBSERVED_SUFFIX appended.
    """

    # https://en.wikipedia.org/wiki/Public_holidays_in_Azerbaijan
    country = "AZ"

    def __init__(self, **kwargs):
        HolidayBase.__init__(self, **kwargs)

    def _add_observed(self, holiday: date) -> None:
        # Shift a weekend holiday's observance to the next Monday, unless
        # that Monday already carries another holiday entry.
        if self.observed and holiday.weekday() in (SAT, SUN):
            next_monday = holiday + rd(days=7 - holiday.weekday())
            if not self.get(next_monday, None):
                self[next_monday] = self[holiday] + OBSERVED_SUFFIX

    def _populate(self, year):
        """Fill ``self`` with all Azerbaijani holidays for *year*."""
        # 1st of Jan
        self[date(year, JAN, 1)] = "New Year's Day"
        self[date(year, JAN, 2)] = "New Year's Day"
        self._add_observed(date(year, JAN, 2))
        # Black January
        self[date(year, JAN, 20)] = "Black January"
        self._add_observed(date(year, JAN, 20))
        # International Women's Day
        self[date(year, MAR, 8)] = "International Women's Day"
        self._add_observed(date(year, MAR, 8))
        # Novruz: five consecutive days, March 20-24.
        # NOTE(review): only the final day's observance is shifted --
        # confirm this matches the statute.
        for i in range(5):
            self[date(year, MAR, 20 + i)] = "Novruz"
        self._add_observed(date(year, MAR, 24))
        # Victory Day
        self[date(year, MAY, 9)] = "Victory Day over Fascism"
        self._add_observed(date(year, MAY, 9))
        # Republic Day
        self[date(year, MAY, 28)] = "Republic Day"
        self._add_observed(date(year, MAY, 28))
        # National Salvation Day
        self[date(year, JUN, 15)] = "National Salvation Day"
        self._add_observed(date(year, JUN, 15))
        # Azerbaijan Armed Forces Day
        self[date(year, JUN, 26)] = "Azerbaijan Armed Forces Day"
        self._add_observed(date(year, JUN, 26))
        # Victory Day (introduced after the 2020 war)
        if year > 2020:
            self[date(year, NOV, 8)] = "Victory Day"
            self._add_observed(date(year, NOV, 8))
        # Flag Day
        self[date(year, NOV, 9)] = "Flag Day"
        self._add_observed(date(year, NOV, 9))
        # International Solidarity Day of Azerbaijanis
        self[
            date(year, DEC, 31)
        ] = "International Solidarity Day of Azerbaijanis"
        self._add_observed(date(year, DEC, 31))
        # Ramadan
        # Date of observance is announced yearly, This is an estimate.
        hol_date = islamic_to_gre(year, 10, 1)[0]
        self[hol_date] = "Ramadan"
        self[hol_date + rd(days=1)] = "Ramadan"
        self._add_observed(hol_date + rd(days=1))
        # Festival of the Sacrifice
        # Date of observance is announced yearly, This is an estimate.
        hol_date = islamic_to_gre(year, 12, 10)[0]
        self[hol_date] = "Festival of the Sacrifice"
        self[hol_date + rd(days=1)] = "Festival of the Sacrifice"
        self._add_observed(hol_date + rd(days=1))
class AZ(Azerbaijan):
    """ISO 3166-1 alpha-2 alias for Azerbaijan."""
    pass
class AZE(Azerbaijan):
    """ISO 3166-1 alpha-3 alias for Azerbaijan."""
    pass
| {
"content_hash": "4e50978fe100802408d0740c7790e9d3",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 70,
"avg_line_length": 32.135416666666664,
"alnum_prop": 0.5918962722852512,
"repo_name": "ryanss/holidays.py",
"id": "0dba4bbc83d43023b199fae3bb94fab3e67df966",
"size": "3587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holidays/countries/azerbaijan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "214061"
}
],
"symlink_target": ""
} |
import uuid
import time
import base64
import odict
import hashlib
import hmac
import json
from db_interface import (
verify_timestamp, verify_nonce, accepted_hash_method,
get_key_and_id_for_qr_login
)
from constants import API_SECRET
# common functions used by the QRAccess SDK
def auth(customer_id, secret_key, http_method, resource, params=None, hash_method="sha1"):
    """
    create and return a set of custom headers to go in a cqr-authorized API request,
    including the corresponding CQR 1.0 Authorization: header

    params may be a dict of request parameters (signed in sorted key order)
    or a raw string body (signed as-is). hash_method must be one of md5,
    sha1, sha256 or sha512; anything else raises ValueError (previously an
    unknown method produced an AttributeError on an empty placeholder).
    """
    # avoid the shared mutable default-argument pitfall
    if params is None:
        params = {}
    # supported HMAC digest constructors; fail fast on a typo
    digests = {
        "md5": hashlib.md5,
        "sha1": hashlib.sha1,
        "sha256": hashlib.sha256,
        "sha512": hashlib.sha512,
    }
    if hash_method not in digests:
        raise ValueError("unsupported hash method: %s" % hash_method)
    # initialize the custom authorization headers
    auth_headers = odict.odict()
    auth_headers['X-Client-Id'] = customer_id
    auth_headers['X-Timestamp'] = str(int(time.time()))
    auth_headers['X-Nonce'] = uuid.uuid4().hex
    auth_headers['X-Hash-Method'] = hash_method
    # prepare signature - request line, headers, then sorted parameters
    signature = http_method.upper() + " " + resource.lower() + "\n"
    for k, v in auth_headers.items():
        signature = signature + k + ":" + v + "\n"
    if isinstance(params, dict):
        for k in sorted(params):
            signature = signature + k + "=" + str(params[k]) + "\n"
        if len(params):
            signature = signature[:-1]
    elif isinstance(params, str):
        signature = signature + params
    # hash with the client secret key and base64-encode the digest
    h = hmac.new(secret_key, signature, digests[hash_method])
    auth_headers['Authorization'] = "CQR 1.0 " + base64.b64encode(h.digest())
    # return the ordered headers, Authorization last
    return auth_headers
def verify(http_method, uri, headers, params, authorization):
    """
    we verify a set of parameters against a given authorization string; return a boolean

    The signature is rebuilt from the request line, the X-* auth headers and
    the request params, HMAC-ed with the shared API secret, and compared to
    the received Authorization value. Unknown hash methods, stale timestamps
    and replayed nonces all verify as False.
    """
    if authorization is None:
        return False
    auth_parts = authorization.split()
    if (len(auth_parts) != 3) or (auth_parts[0] != "CQR") or (auth_parts[1] != "1.0"):
        return False
    received_crypto_sig = auth_parts[2]
    # recompose signature with method, url...
    signature = http_method.upper() + " " + uri.lower() + "\n"
    # ... auth headers...
    auth_headers = ['X-Client-Id', 'X-Timestamp', 'X-Nonce', 'X-Hash-Method']
    for header in auth_headers:
        value = headers.get(header)
        if value is None:
            return False
        signature = signature + header + ":" + value + "\n"
    # ... and request params (if any)
    if isinstance(params, dict) and len(params) > 0:
        for k in sorted(params):
            signature = signature + k + "=" + str(params[k]) + "\n"
        signature = signature[:-1]
    elif isinstance(params, str):
        signature = signature + params
    # verify timestamp drift
    if not verify_timestamp(int(headers['X-Timestamp'])):
        return False
    # verify nonce uniqueness
    if not verify_nonce(headers['X-Client-Id'], headers['X-Nonce']):
        return False
    # build the expected signature; a request advertising an unsupported
    # hash method cannot verify. ROBUSTNESS FIX: this used to crash with an
    # AttributeError on the empty-string placeholder for unknown methods --
    # a crash an attacker could trigger with a bogus X-Hash-Method header.
    digests = {
        "md5": hashlib.md5,
        "sha1": hashlib.sha1,
        "sha256": hashlib.sha256,
        "sha512": hashlib.sha512,
    }
    digest = digests.get(headers['X-Hash-Method'])
    if digest is None:
        return False
    h = hmac.new(API_SECRET, signature, digest)
    return base64.b64encode(h.digest()) == received_crypto_sig
def authorize(auth_data):
    """
    Check a QR-login authorization request relayed by the API server.

    auth_data carries: cw_user (user id), timestamp (epoch seconds), nonce,
    hash_method, and authorization (the signature hash). We rebuild the
    signature from the remaining fields and compare it to the hash computed
    with the user's secret key from our database. Returns the user's id on
    success, None otherwise. Side effect: the 'authorization' key is removed
    from auth_data.
    """
    # pull and validate the individual request fields
    user = auth_data.get('cw_user', "")
    authorization = auth_data.get('authorization', "")
    timestamp = auth_data.get('timestamp', "")
    nonce = auth_data.get('nonce', "")
    h_meth = accepted_hash_method(auth_data.get('hash_method', ""))
    valid = (
        len(user) > 0 and
        len(authorization) > 0 and
        timestamp.isdigit() and
        verify_timestamp(int(timestamp)) and
        verify_nonce(user, nonce) and
        len(h_meth) > 0
    )
    if not valid:
        return None
    # the signature itself does not participate in the calculation; '.' is
    # the URL-safe stand-in for '+' in the transmitted base64
    h_sig_received = authorization.replace(".", "+")
    del auth_data['authorization']
    # canonical signature: remaining fields, sorted by key, one per line
    lines = [k + "=" + str(auth_data[k]) for k in sorted(auth_data)]
    signature = "\n".join(lines)
    # look up the user's secret key and confirm the hash method matches
    uid, uk, umeth = get_key_and_id_for_qr_login(user)
    if uid is None or umeth != h_meth:
        return None
    h = ""
    if h_meth == "md5":
        h = hmac.new(uk, signature, hashlib.md5)
    elif h_meth == "sha1":
        h = hmac.new(uk, signature, hashlib.sha1)
    elif h_meth == "sha256":
        h = hmac.new(uk, signature, hashlib.sha256)
    elif h_meth == "sha512":
        h = hmac.new(uk, signature, hashlib.sha512)
    return uid if base64.b64encode(h.digest()) == h_sig_received else None
| {
"content_hash": "717f8e3d0e200294bdc12747ed0ba402",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 106,
"avg_line_length": 37.583333333333336,
"alnum_prop": 0.6215248166467678,
"repo_name": "drivefast/pycipherwallet",
"id": "478b10abb64fe033601fd7f1a8c414aa94a10776",
"size": "5863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cipherwallet/cqr_auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2903"
},
{
"name": "JavaScript",
"bytes": "3118"
},
{
"name": "Python",
"bytes": "52329"
}
],
"symlink_target": ""
} |
import contextlib
import os
import tempfile
from cinder import db
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import test
from cinder.tests.image import fake as fake_image
from cinder.tests.test_volume import DriverTestCase
from cinder.volume.driver import RBDDriver
# Module-level logger for this test module.
LOG = logging.getLogger(__name__)
class FakeImageService:
    """Stub image service whose download() is a no-op."""
    def download(self, context, image_id, path):
        # Intentionally does nothing; tests only need the call to succeed.
        pass
class RBDTestCase(test.TestCase):
    """Unit tests for RBDDriver image-location parsing and clone checks."""

    def setUp(self):
        super(RBDTestCase, self).setUp()
        def fake_execute(*args):
            # No-op so the driver never shells out to rbd/ceph binaries.
            pass
        self.driver = RBDDriver(execute=fake_execute)

    def test_good_locations(self):
        """Well-formed rbd:// URLs (four path segments) must parse cleanly."""
        locations = [
            'rbd://fsid/pool/image/snap',
            'rbd://%2F/%2F/%2F/%2F',
        ]
        # NOTE(review): map() is eager on Python 2 only; under Python 3 this
        # would be lazy and never call _parse_location -- confirm the
        # intended interpreter.
        map(self.driver._parse_location, locations)

    def test_bad_locations(self):
        """Malformed locations raise ImageUnacceptable and are uncloneable."""
        locations = [
            'rbd://image',
            'http://path/to/somewhere/else',
            'rbd://image/extra',
            'rbd://image/',
            'rbd://fsid/pool/image/',
            'rbd://fsid/pool/image/snap/',
            'rbd://///',
        ]
        for loc in locations:
            self.assertRaises(exception.ImageUnacceptable,
                              self.driver._parse_location,
                              loc)
            self.assertFalse(self.driver._is_cloneable(loc))

    def test_cloneable(self):
        """A location whose fsid matches ours is cloneable."""
        self.stubs.Set(self.driver, '_get_fsid', lambda: 'abc')
        location = 'rbd://abc/pool/image/snap'
        self.assertTrue(self.driver._is_cloneable(location))

    def test_uncloneable_different_fsid(self):
        """A location from a different cluster (other fsid) is not cloneable."""
        self.stubs.Set(self.driver, '_get_fsid', lambda: 'abc')
        location = 'rbd://def/pool/image/snap'
        self.assertFalse(self.driver._is_cloneable(location))

    def test_uncloneable_unreadable(self):
        """If probing the image raises, the location is not cloneable."""
        def fake_exc(*args):
            raise exception.ProcessExecutionError()
        self.stubs.Set(self.driver, '_get_fsid', lambda: 'abc')
        self.stubs.Set(self.driver, '_execute', fake_exc)
        location = 'rbd://abc/pool/image/snap'
        self.assertFalse(self.driver._is_cloneable(location))

    def _copy_image(self):
        # Helper: run copy_image_to_volume with tempfile and os.path stubbed
        # so no real files are touched.
        @contextlib.contextmanager
        def fake_temp_file(dir):
            class FakeTmp:
                def __init__(self, name):
                    self.name = name
            yield FakeTmp('test')
        self.stubs.Set(tempfile, 'NamedTemporaryFile', fake_temp_file)
        self.stubs.Set(os.path, 'exists', lambda x: True)
        self.driver.copy_image_to_volume(None, {'name': 'test'},
                                         FakeImageService(), None)

    def test_copy_image_no_volume_tmp(self):
        """Image copy works without a configured volume_tmp_dir."""
        self.flags(volume_tmp_dir=None)
        self._copy_image()

    def test_copy_image_volume_tmp(self):
        """Image copy works with a configured volume_tmp_dir."""
        self.flags(volume_tmp_dir='/var/run/cinder/tmp')
        self._copy_image()
class FakeRBDDriver(RBDDriver):
    """RBD driver test double with clone/resize stubbed out."""
    def _clone(self):
        # No-op: cloning internals are irrelevant to these tests.
        pass
    def _resize(self):
        # No-op: resizing internals are irrelevant to these tests.
        pass
class ManagedRBDTestCase(DriverTestCase):
    """Volume-manager-level tests for cloning volumes from images via RBD."""

    driver_name = "cinder.tests.test_rbd.FakeRBDDriver"

    def setUp(self):
        super(ManagedRBDTestCase, self).setUp()
        fake_image.stub_out_image_service(self.stubs)

    def _clone_volume_from_image(self, expected_status,
                                 clone_works=True):
        """Try to clone a volume from an image, and check the status
        afterwards"""
        def fake_clone_image(volume, image_location):
            pass
        def fake_clone_error(volume, image_location):
            raise exception.CinderException()
        self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True)
        if clone_works:
            self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_image)
        else:
            self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_error)
        image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        volume_id = 1
        # creating volume testdata
        db.volume_create(self.context, {'id': volume_id,
                                        'updated_at': timeutils.utcnow(),
                                        'display_description': 'Test Desc',
                                        'size': 20,
                                        'status': 'creating',
                                        'instance_uuid': None,
                                        'host': 'dummy'})
        try:
            if clone_works:
                # Expected to succeed: volume creation completes normally.
                self.volume.create_volume(self.context,
                                          volume_id,
                                          image_id=image_id)
            else:
                # Expected to fail: the stubbed clone raises CinderException.
                self.assertRaises(exception.CinderException,
                                  self.volume.create_volume,
                                  self.context,
                                  volume_id,
                                  image_id=image_id)
            volume = db.volume_get(self.context, volume_id)
            self.assertEqual(volume['status'], expected_status)
        finally:
            # cleanup
            db.volume_destroy(self.context, volume_id)

    def test_clone_image_status_available(self):
        """Verify that a successful clone leaves the volume 'available'."""
        self._clone_volume_from_image('available', True)

    def test_clone_image_status_error(self):
        """Verify that a failed clone leaves the volume in the 'error' state."""
        self._clone_volume_from_image('error', False)

    def test_clone_success(self):
        """clone_image succeeds when the location is cloneable."""
        self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True)
        self.stubs.Set(self.volume.driver, 'clone_image', lambda a, b: True)
        image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        self.assertTrue(self.volume.driver.clone_image({}, image_id))

    def test_clone_bad_image_id(self):
        """clone_image returns False when there is no image location."""
        self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True)
        self.assertFalse(self.volume.driver.clone_image({}, None))

    def test_clone_uncloneable(self):
        """clone_image returns False when the location is not cloneable."""
        self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: False)
        self.assertFalse(self.volume.driver.clone_image({}, 'dne'))
| {
"content_hash": "4e7899aba36c9df662d74638522b8dbb",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 79,
"avg_line_length": 35.774566473988436,
"alnum_prop": 0.5737598965907255,
"repo_name": "NewpTone/stacklab-cinder",
"id": "4ce9cfa973766322d3a9b1dbcc03f240974ec2e1",
"size": "6861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/tests/test_rbd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1941585"
},
{
"name": "Shell",
"bytes": "7441"
}
],
"symlink_target": ""
} |
'''
Welcome to another Python 3 basics video. In this video we will carry on with
functions. In the last video you were shown a very basic function, without
any parameters.
In this video, let's include a parameter, and give this function more...
functionality.
'''
# changed name to simple math, to better describe our intentions
'''
Now we've specified 2 parameters for our function, calling them num1
and num2, for number 1 and number 2.
Now, we carry on writing our function, where we can specify what we
desire to do with num1 and num2.
in our case, we want to do simple addition.
'''
def simple_addition(num1, num2):
    """Print the first operand, then the sum of the two operands."""
    # Work out the result of adding the two parameters together.
    total = num1 + num2
    print('num1 is', num1)
    # Finally, show the computed sum to the user.
    print(total)
'''
so now we run this, and when we want to do some simple_addition...
'''
simple_addition(5,3)
# here we will do 5 + 3, for an answer of 8
'''
There is no limit to the amount of variables you can have. The only thing
you will want to look out for at this point is the order of the variables,
as well as the quantity.
You can protect yourself from order by doing the following in your calling:
'''
simple_addition(num1=3,num2=5)
# or more clearly #
simple_addition(num2=3,num1=5)
# in this case, if you are clear in your specification, it does not matter
# the order. Most people, however, do not write out the variables like that,
# they just maintain the order.
#finally, it is important to use the proper quantity of variables.
# will not work, too many vars
# NOTE(review): this call raises TypeError at runtime, so execution stops
# here and the next example never runs -- presumably intentional for the
# video, but confirm.
simple_addition(3,5,6)
# will not work, too few vars
simple_addition(3)
'''
That's it for this video. In the next video I will be covering default
variable assignments.
'''
| {
"content_hash": "4970dbb8bf283a483235a3a832cab9cd",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 78,
"avg_line_length": 23.82894736842105,
"alnum_prop": 0.7261181667586969,
"repo_name": "PythonProgramming/Python-3-basics-series",
"id": "abf5d5fd72825544c01c38c82ae3d4ba8137aa21",
"size": "1811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "11. Function Parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50861"
}
],
"symlink_target": ""
} |
from textwrap import dedent, wrap
try:
import ConfigParser as configparser
except ImportError:
import configparser
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import smtplib
import markdown
class Email(object):
    """Build text+HTML multipart emails from markdown bodies."""

    def title(self, title):
        """Return markdown lines: [Title Cased text, dash underline, '']."""
        return [title.title(), '-' * len(title), '']

    def preferred(self, lds_preferred):
        """Collapse a comma-separated name into a space-separated string."""
        parts = [_.strip() for _ in lds_preferred.split(',')]
        return ' '.join(parts)

    def para(self, append_to, text):
        """Append *text* to *append_to* as wrapped lines plus a blank line."""
        append_to.extend(wrap(dedent(text).lstrip()))
        append_to.append('')

    def make(self, body, **kwargs):
        """Make HTML email with text version.

        *body* is markdown source; keyword arguments become message headers
        (e.g. Subject=..., From=..., To=...). Returns a MIMEMultipart with a
        text/plain part followed by the rendered text/html part.
        """
        msg = MIMEMultipart('alternative')
        for k, v in kwargs.items():
            msg[k] = v
        # BUG FIX: the MIME subtype must be 'plain' (=> text/plain); the old
        # value 'text' produced a bogus text/text content type.
        msg.attach(MIMEText(body, 'plain'))
        # Pass extensions by keyword: the positional extensions argument was
        # removed in Markdown 3.0.
        body = markdown.markdown(body,
                                 extensions=['markdown.extensions.tables',
                                             'markdown.extensions.smarty'])
        html = dedent("""
            <html><head>
            <style type='text/css'>
            h2 { font-size: 150%% }
            table { border-collapse: collapse }
            th {text-align: center}
            td {padding-left:1em }
            tbody tr:nth-child(odd) { background: #eee; }
            tbody tr:hover { background: yellow; }
            </style></head><body>
            %s
            </body></html>""").strip() % body
        msg.attach(MIMEText(html, 'html'))
        return msg
class SMTPStdout(object):
    """Dummy SMTP stand-in that dumps messages to stdout instead of sending."""

    # Lets callers detect they are talking to the dummy backend.
    is_dummy = True

    def sendmail(self, from_addr, to_addrs, msg):
        # Show only the header section (everything before the first blank
        # line), then the recipient list.
        separator = "-" * 60
        print(separator)
        headers = msg.split('\n\n')[0]
        print(headers)
        print(to_addrs)

    def quit(self):
        return True
def get_smtp(config_file, section='SMTP'):
    """Get SMTP information from a config file.
    SMTP section in the config file looks like:
    [SMTP]
    DOMAIN = smtp.gmail.com:587
    USERNAME = my-username
    PASSWORD = my-password
    TLS = True
    Use your domain, username, and password. This is showing the values
    for gmail. Without a domain, it will just print to stdout.
    Args:
        config_file (str): filename
        section (str): section in config_file, default "SMTP"
    Returns:
        smtplib.SMTP object
    """
    config = configparser.ConfigParser()
    config.read(config_file)
    domain = config.get(section, 'DOMAIN')
    if not domain:
        # No DOMAIN configured: fall back to printing messages to stdout.
        return SMTPStdout()
    # DOMAIN may carry an optional ':port' suffix.
    parts = domain.split(':')
    kwargs = {'port': int(parts[1])} if len(parts) > 1 else {}
    server = smtplib.SMTP(parts[0], **kwargs)
    use_tls = (config.has_option(section, 'TLS')
               and config.getboolean(section, 'TLS'))
    if use_tls:
        server.starttls()
    server.login(config.get(section, 'USERNAME'),
                 config.get(section, 'PASSWORD'))
    return server
| {
"content_hash": "431c93960a5b756a4bdac0b3b6153cd6",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 79,
"avg_line_length": 28.32,
"alnum_prop": 0.5840395480225988,
"repo_name": "jidn/lds-temple-recommend-notice",
"id": "4dd85a09a34b6e8199caad14d66f46d715828e67",
"size": "2847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "email_markdown.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "5698"
},
{
"name": "Python",
"bytes": "14586"
}
],
"symlink_target": ""
} |
# NOTE(review): DEBUG is enabled — fine for this example project, but it
# must be False in any real deployment.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Development database: a local SQLite file, so the example runs without
# an external database service.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'securestdb.db', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this SECRET_KEY is committed to the repository; for
# anything beyond a local example it must be regenerated and kept out of
# version control.
SECRET_KEY = 'j+)xve3tfyu-t!@d6&k&fbj!f5hq%k)!-a@7h182j1ecl**hym'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'securest.modules.djangosecurest.utils.Middleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'securesttest.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'securesttest.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'securest.modules.djangosecurest',
'securesttest',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Settings consumed by securest.modules.djangosecurest (see MIDDLEWARE_CLASSES
# and INSTALLED_APPS above).
# NOTE(review): the RSA private key assigned a few lines below is committed
# in plain text. Acceptable for this example project only — never commit a
# real deployment key.
SECUREST_SERVER_CERTIFICATE_ID = 'cert123'
SECUREST_PRIVATE_KEY = '-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQC7hb9QECBgetfCKxwwcVaAXjDowKq9D5EtHVTAJMTxZVgpcl+a\nY5cZv0YTvWdbFtboqKBzDsBk2BqLe37OkMkQz8uvlO5Istmz4xZh3tLqoum098ml\nGtSRxnTpovtogspTP5nV9ld7Js9OiuC8SEIJNXHRAz/9meTBblMEuCRTAQIDAQAB\nAoGBAKtJjSn9wQU/0EsdA7/AFypkXzqLmqndSVezPhHQxFBXqQMa5PbqVz/+dm1b\nicgrgZy9Quo2JEk5GG4mHPtkRzNRaPvmktIfj0auO2HhZh3r0flD6depgZ99rQuX\n4CLMn8FKvO2HyXfexgfCSpiwEuC/5+KJSKX5MfVHk6ZvXFEdAkEAycANOPHp5LNm\nRNuYpnNlKHbJtApu9eke2sT05E6U3rnkKHEaFuMSNg/ybi1/0hxYAa5yrGTo/VAX\nAw0yPdYq9wJBAO3yS4c9JHgGXaPDWEWklZrhvgq3SgG6Td5N2FHZgh2zXK/W7/t+\ny2iS0qHMWyRcwrPIse8qE/MQM4K1OAjOO8cCQHYuMBCpdO9pP4EaecMXWaxoWSND\n8VfPpSlVsnaCqaYGkmY2SqgZf7N3h/WOZY+41ry1aaseuXSZk8FP25xpdmUCQQCz\n+2Yt8Y24S/hges42P3v6H27EZEfSbr2fvXo2zzOgi588UnRL42iwvP4d7Jm0M4YC\nv3f30/grDsInyDOOVZDZAkBHx6/WjcMFwBYhkSb7jcRXyTseD33l2r8fZxZE/kga\nTxZbW28cL54pdsdh/JkGFrBf8hwH33xqFUBekz39vl9c\n-----END RSA PRIVATE KEY-----'
SECUREST_PROTECT_LIST = [
'/test/',
'/register/'
] | {
"content_hash": "2753244ec89984c8eb0c8aaaadd23f46",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 925,
"avg_line_length": 39.9496855345912,
"alnum_prop": 0.7169395465994962,
"repo_name": "dash1291/rest-security",
"id": "6418011de45224399d0b535e44d29db2706eb640",
"size": "6397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/securesttest/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29400"
}
],
"symlink_target": ""
} |
"""Utilities to read values from a bytearray."""
import struct
def _ParseULeb128(data, offset):
"""Returns a tuple of (uleb128 value, number of bytes occupied).
From DWARF3 spec: http://dwarfstd.org/doc/Dwarf3.pdf
Args:
data: bytearray containing unsigned LEB128.
offset: Location of the unsigned LEB128.
"""
value = 0
shift = 0
cur_offset = offset
while True:
byte = data[cur_offset]
cur_offset += 1
value |= (byte & 0b01111111) << shift
if (byte & 0b10000000) == 0:
break
shift += 7
return value, cur_offset - offset
def _ParseSLeb128(data, offset):
  """Returns a tuple of (sleb128 value, number of bytes occupied).
  Args:
    data: bytearray containing signed LEB128.
    offset: Location of the signed LEB128.
  """
  magnitude, num_bytes = _ParseULeb128(data, offset)
  # The sign bit is the highest bit actually encoded (capped at bit 31,
  # matching the original implementation).
  sign_bit = 1 << min(31, num_bytes * 7 - 1)
  if magnitude & sign_bit:
    magnitude -= 2 * sign_bit
  return magnitude, num_bytes
class Mutf8DecodeError(Exception):
  """Raised when a dex MUTF-8 string fails to decode."""
  def __init__(self, message, length, offset):
    # Append decode context (expected length, data offset) to the message.
    detail = ' (decoded string length: {}, string data offset: {:#x})'
    super().__init__(message + detail.format(length, offset))
class StreamReader:
  """Reads values from a bytearray using a seekable cursor.
  Integers are little endian.
  """
  def __init__(self, data):
    # data: bytearray (or other indexable byte sequence); never copied.
    self._data = data
    # Absolute offset of the next value to be read.
    self._pos = 0
  def Seek(self, offset):
    # Moves the cursor to an absolute offset.
    self._pos = offset
  def Tell(self):
    # Returns the current absolute cursor offset.
    return self._pos
  def NextStruct(self, fmt):
    # Unpacks struct format |fmt| at the cursor and advances past it.
    ret = struct.unpack_from(fmt, self._data, self._pos)
    self._pos += struct.calcsize(fmt)
    return ret
  def NextBytes(self, n):
    # Returns up to |n| raw bytes, clamped at end-of-data.
    old_pos = self._pos
    self._pos = min(len(self._data), old_pos + n)
    return self._data[old_pos:self._pos]
  def NextUByte(self):
    # Reads one unsigned byte.
    self._pos += 1
    return self._data[self._pos - 1]
  def NextUShort(self):
    # Reads a little-endian unsigned 16-bit integer.
    self._pos += 2
    return struct.unpack_from('<H', self._data, self._pos - 2)[0]
  def NextUInt(self):
    # Reads a little-endian unsigned 32-bit integer.
    self._pos += 4
    return struct.unpack_from('<I', self._data, self._pos - 4)[0]
  def NextULeb128(self):
    # Reads a variable-length unsigned LEB128 integer.
    value, inc = _ParseULeb128(self._data, self._pos)
    self._pos += inc
    return value
  def NextSLeb128(self):
    # Reads a variable-length signed LEB128 integer.
    value, inc = _ParseSLeb128(self._data, self._pos)
    self._pos += inc
    return value
  def NextMUtf8(self, string_length):
    """Returns the MUTF-8 string starting at the current cursor position.
    See https://source.android.com/devices/tech/dalvik/dex-format#mutf-8
    Ported from the Android Java implementation:
    https://android.googlesource.com/platform/dalvik/+/fe107fb6e3f308ac5174ebdc5a794ee880c741d9/dx/src/com/android/dex/Mutf8.java#34
    Args:
      string_length: The length of the decoded string.
    Raises:
      Mutf8DecodeError: on malformed or prematurely terminated data.
    """
    offset = self._pos  # Remembered only for error reporting.
    ret = ''
    for _ in range(string_length):
      a = self.NextUByte()
      if a == 0:
        raise Mutf8DecodeError('Early string termination encountered',
                               string_length, offset)
      if (a & 0x80) == 0x00:
        # 1-byte sequence: 7-bit ASCII.
        code = a
      elif (a & 0xe0) == 0xc0:
        # 2-byte sequence: 110xxxxx 10xxxxxx.
        b = self.NextUByte()
        if (b & 0xc0) != 0x80:
          raise Mutf8DecodeError('Error in byte 2', string_length, offset)
        code = ((a & 0x1f) << 6) | (b & 0x3f)
      elif (a & 0xf0) == 0xe0:
        # 3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx.
        b = self.NextUByte()
        c = self.NextUByte()
        if (b & 0xc0) != 0x80 or (c & 0xc0) != 0x80:
          raise Mutf8DecodeError('Error in byte 3 or 4', string_length, offset)
        code = ((a & 0x0f) << 12) | ((b & 0x3f) << 6) | (c & 0x3f)
      else:
        raise Mutf8DecodeError('Bad byte', string_length, offset)
      ret += chr(code)
    # MUTF-8 strings carry a NUL terminator after the decoded characters.
    if self.NextUByte() != 0x00:
      raise Mutf8DecodeError('Expected string termination', string_length,
                             offset)
    return ret
  def NextString(self):
    # Reads a ULEB128 length prefix followed by the MUTF-8 payload.
    string_length = self.NextULeb128()
    return self.NextMUtf8(string_length)
  def NextList(self, count, factory):
    # Builds a list by invoking factory(self) |count| times.
    return [factory(self) for _ in range(count)]
  def AlignUpTo(self, align_unit):
    # Advances the cursor to the next multiple of |align_unit| (no-op if
    # already aligned).
    off_by = self._pos % align_unit
    if off_by:
      self.Seek(self._pos + align_unit - off_by)
| {
"content_hash": "8f8f968c36f667daf4e4cf631316bd85",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 132,
"avg_line_length": 27.32450331125828,
"alnum_prop": 0.614396509936985,
"repo_name": "chromium/chromium",
"id": "ce7f7faa4b571228cb5ddd38e6f2eba09609b631",
"size": "4266",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "tools/binary_size/libsupersize/stream_reader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Test the numpy pickler as a replacement of the standard pickler."""
import copy
import os
import random
import re
import io
import sys
import warnings
import gzip
import zlib
import bz2
import pickle
import socket
from contextlib import closing
import mmap
from pathlib import Path
try:
import lzma
except ImportError:
lzma = None
import pytest
from joblib.test.common import np, with_numpy, with_lz4, without_lz4
from joblib.test.common import with_memory_profiler, memory_used
from joblib.testing import parametrize, raises, warns
# numpy_pickle is not a drop-in replacement of pickle, as it takes
# filenames instead of open files as arguments.
from joblib import numpy_pickle, register_compressor
from joblib.test import data
from joblib.numpy_pickle_utils import _IO_BUFFER_SIZE
from joblib.numpy_pickle_utils import _detect_compressor
from joblib.numpy_pickle_utils import _is_numpy_array_byte_order_mismatch
from joblib.numpy_pickle_utils import _ensure_native_byte_order
from joblib.compressor import (_COMPRESSORS, _LZ4_PREFIX, CompressorWrapper,
LZ4_NOT_INSTALLED_ERROR, BinaryZlibFile)
###############################################################################
# Define a list of standard types.
# Borrowed from dill, initial author: Micheal McKerns:
# http://dev.danse.us/trac/pathos/browser/dill/dill_test2.py
# Fixture values covering the standard Python types; each is dumped and
# reloaded by test_standard_types below.
typelist = []
# testing types
_none = None
typelist.append(_none)
_type = type
typelist.append(_type)
_bool = bool(1)
typelist.append(_bool)
_int = int(1)
typelist.append(_int)
_float = float(1)
typelist.append(_float)
_complex = complex(1)
typelist.append(_complex)
_string = str(1)
typelist.append(_string)
_tuple = ()
typelist.append(_tuple)
_list = []
typelist.append(_list)
_dict = {}
typelist.append(_dict)
_builtin = len
typelist.append(_builtin)
def _function(x):
    # Generator-function fixture.
    yield x
class _class:
    def _method(self):
        pass
class _newclass(object):
    def _method(self):
        pass
typelist.append(_function)
typelist.append(_class)
typelist.append(_newclass) # <type 'type'>
_instance = _class()
typelist.append(_instance)
_object = _newclass()
typelist.append(_object) # <type 'class'>
###############################################################################
# Tests
@parametrize('compress', [0, 1])
@parametrize('member', typelist)
def test_standard_types(tmpdir, compress, member):
    # Test pickling and saving with standard types.
    filename = tmpdir.join('test.pkl').strpath
    numpy_pickle.dump(member, filename, compress=compress)
    _member = numpy_pickle.load(filename)
    # We compare the pickled instance to the reloaded one only if it
    # can be compared to a copied one
    if member == copy.deepcopy(member):
        assert member == _member
def test_value_error():
    # Test inverting the input arguments to dump
    # (a dict is passed where the filename is expected).
    with raises(ValueError):
        numpy_pickle.dump('foo', dict())
@parametrize('wrong_compress', [-1, 10, dict()])
def test_compress_level_error(wrong_compress):
    # Verify that passing an invalid compress argument raises an error.
    exception_msg = ('Non valid compress level given: '
                     '"{0}"'.format(wrong_compress))
    with raises(ValueError) as excinfo:
        numpy_pickle.dump('dummy', 'foo', compress=wrong_compress)
    excinfo.match(exception_msg)
@with_numpy
@parametrize('compress', [False, True, 0, 3, 'zlib'])
def test_numpy_persistence(tmpdir, compress):
filename = tmpdir.join('test.pkl').strpath
rnd = np.random.RandomState(0)
a = rnd.random_sample((10, 2))
# We use 'a.T' to have a non C-contiguous array.
for index, obj in enumerate(((a,), (a.T,), (a, a), [a, a, a])):
filenames = numpy_pickle.dump(obj, filename, compress=compress)
# All is cached in one file
assert len(filenames) == 1
# Check that only one file was created
assert filenames[0] == filename
# Check that this file does exist
assert os.path.exists(filenames[0])
# Unpickle the object
obj_ = numpy_pickle.load(filename)
# Check that the items are indeed arrays
for item in obj_:
assert isinstance(item, np.ndarray)
# And finally, check that all the values are equal.
np.testing.assert_array_equal(np.array(obj), np.array(obj_))
# Now test with an array subclass
obj = np.memmap(filename + 'mmap', mode='w+', shape=4, dtype=np.float64)
filenames = numpy_pickle.dump(obj, filename, compress=compress)
# All is cached in one file
assert len(filenames) == 1
obj_ = numpy_pickle.load(filename)
if (type(obj) is not np.memmap and
hasattr(obj, '__array_prepare__')):
# We don't reconstruct memmaps
assert isinstance(obj_, type(obj))
np.testing.assert_array_equal(obj_, obj)
# Test with an object containing multiple numpy arrays
obj = ComplexTestObject()
filenames = numpy_pickle.dump(obj, filename, compress=compress)
# All is cached in one file
assert len(filenames) == 1
obj_loaded = numpy_pickle.load(filename)
assert isinstance(obj_loaded, type(obj))
np.testing.assert_array_equal(obj_loaded.array_float, obj.array_float)
np.testing.assert_array_equal(obj_loaded.array_int, obj.array_int)
np.testing.assert_array_equal(obj_loaded.array_obj, obj.array_obj)
@with_numpy
def test_numpy_persistence_bufferred_array_compression(tmpdir):
    # An array larger than the I/O buffer must round-trip unchanged through
    # the chunked (buffered) compression path.
    big_array = np.ones((_IO_BUFFER_SIZE + 100), dtype=np.uint8)
    filename = tmpdir.join('test.pkl').strpath
    numpy_pickle.dump(big_array, filename, compress=True)
    arr_reloaded = numpy_pickle.load(filename)
    np.testing.assert_array_equal(big_array, arr_reloaded)
@with_numpy
def test_memmap_persistence(tmpdir):
rnd = np.random.RandomState(0)
a = rnd.random_sample(10)
filename = tmpdir.join('test1.pkl').strpath
numpy_pickle.dump(a, filename)
b = numpy_pickle.load(filename, mmap_mode='r')
assert isinstance(b, np.memmap)
# Test with an object containing multiple numpy arrays
filename = tmpdir.join('test2.pkl').strpath
obj = ComplexTestObject()
numpy_pickle.dump(obj, filename)
obj_loaded = numpy_pickle.load(filename, mmap_mode='r')
assert isinstance(obj_loaded, type(obj))
assert isinstance(obj_loaded.array_float, np.memmap)
assert not obj_loaded.array_float.flags.writeable
assert isinstance(obj_loaded.array_int, np.memmap)
assert not obj_loaded.array_int.flags.writeable
# Memory map not allowed for numpy object arrays
assert not isinstance(obj_loaded.array_obj, np.memmap)
np.testing.assert_array_equal(obj_loaded.array_float,
obj.array_float)
np.testing.assert_array_equal(obj_loaded.array_int,
obj.array_int)
np.testing.assert_array_equal(obj_loaded.array_obj,
obj.array_obj)
# Test we can write in memmapped arrays
obj_loaded = numpy_pickle.load(filename, mmap_mode='r+')
assert obj_loaded.array_float.flags.writeable
obj_loaded.array_float[0:10] = 10.0
assert obj_loaded.array_int.flags.writeable
obj_loaded.array_int[0:10] = 10
obj_reloaded = numpy_pickle.load(filename, mmap_mode='r')
np.testing.assert_array_equal(obj_reloaded.array_float,
obj_loaded.array_float)
np.testing.assert_array_equal(obj_reloaded.array_int,
obj_loaded.array_int)
# Test w+ mode is caught and the mode has switched to r+
numpy_pickle.load(filename, mmap_mode='w+')
assert obj_loaded.array_int.flags.writeable
assert obj_loaded.array_int.mode == 'r+'
assert obj_loaded.array_float.flags.writeable
assert obj_loaded.array_float.mode == 'r+'
@with_numpy
def test_memmap_persistence_mixed_dtypes(tmpdir):
# loading datastructures that have sub-arrays with dtype=object
# should not prevent memmapping on fixed size dtype sub-arrays.
rnd = np.random.RandomState(0)
a = rnd.random_sample(10)
b = np.array([1, 'b'], dtype=object)
construct = (a, b)
filename = tmpdir.join('test.pkl').strpath
numpy_pickle.dump(construct, filename)
a_clone, b_clone = numpy_pickle.load(filename, mmap_mode='r')
# the floating point array has been memory mapped
assert isinstance(a_clone, np.memmap)
# the object-dtype array has been loaded in memory
assert not isinstance(b_clone, np.memmap)
@with_numpy
def test_masked_array_persistence(tmpdir):
# The special-case picker fails, because saving masked_array
# not implemented, but it just delegates to the standard pickler.
rnd = np.random.RandomState(0)
a = rnd.random_sample(10)
a = np.ma.masked_greater(a, 0.5)
filename = tmpdir.join('test.pkl').strpath
numpy_pickle.dump(a, filename)
b = numpy_pickle.load(filename, mmap_mode='r')
assert isinstance(b, np.ma.masked_array)
@with_numpy
def test_compress_mmap_mode_warning(tmpdir):
# Test the warning in case of compress + mmap_mode
rnd = np.random.RandomState(0)
a = rnd.random_sample(10)
this_filename = tmpdir.join('test.pkl').strpath
numpy_pickle.dump(a, this_filename, compress=1)
with warns(UserWarning) as warninfo:
numpy_pickle.load(this_filename, mmap_mode='r+')
warninfo = [w.message for w in warninfo]
assert len(warninfo) == 1
assert (
str(warninfo[0]) ==
'mmap_mode "r+" is not compatible with compressed '
f'file {this_filename}. "r+" flag will be ignored.'
)
@with_numpy
@parametrize('cache_size', [None, 0, 10])
def test_cache_size_warning(tmpdir, cache_size):
# Check deprecation warning raised when cache size is not None
filename = tmpdir.join('test.pkl').strpath
rnd = np.random.RandomState(0)
a = rnd.random_sample((10, 2))
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as warninfo:
numpy_pickle.dump(a, filename, cache_size=cache_size)
expected_nb_warnings = 1 if cache_size is not None else 0
assert len(warninfo) == expected_nb_warnings
for w in warninfo:
assert w.category == DeprecationWarning
assert (str(w.message) ==
"Please do not set 'cache_size' in joblib.dump, this "
"parameter has no effect and will be removed. You "
"used 'cache_size={0}'".format(cache_size))
@with_numpy
@with_memory_profiler
@parametrize('compress', [True, False])
def test_memory_usage(tmpdir, compress):
# Verify memory stays within expected bounds.
filename = tmpdir.join('test.pkl').strpath
small_array = np.ones((10, 10))
big_array = np.ones(shape=100 * int(1e6), dtype=np.uint8)
for obj in (small_array, big_array):
size = obj.nbytes / 1e6
obj_filename = filename + str(np.random.randint(0, 1000))
mem_used = memory_used(numpy_pickle.dump,
obj, obj_filename, compress=compress)
# The memory used to dump the object shouldn't exceed the buffer
# size used to write array chunks (16MB).
write_buf_size = _IO_BUFFER_SIZE + 16 * 1024 ** 2 / 1e6
assert mem_used <= write_buf_size
mem_used = memory_used(numpy_pickle.load, obj_filename)
# memory used should be less than array size + buffer size used to
# read the array chunk by chunk.
read_buf_size = 32 + _IO_BUFFER_SIZE # MiB
assert mem_used < size + read_buf_size
@with_numpy
def test_compressed_pickle_dump_and_load(tmpdir):
    # Round-trip a mix of dtypes and byte orders through a compressed
    # pickle (.gz extension selects compression automatically).
    expected_list = [np.arange(5, dtype=np.dtype('<i8')),
                     np.arange(5, dtype=np.dtype('>i8')),
                     np.arange(5, dtype=np.dtype('<f8')),
                     np.arange(5, dtype=np.dtype('>f8')),
                     np.array([1, 'abc', {'a': 1, 'b': 2}], dtype='O'),
                     np.arange(256, dtype=np.uint8).tobytes(),
                     u"C'est l'\xe9t\xe9 !"]
    fname = tmpdir.join('temp.pkl.gz').strpath
    dumped_filenames = numpy_pickle.dump(expected_list, fname, compress=1)
    assert len(dumped_filenames) == 1
    result_list = numpy_pickle.load(fname)
    for result, expected in zip(result_list, expected_list):
        if isinstance(expected, np.ndarray):
            # Normalize the expectation to native byte order to match
            # what load() returns.
            expected = _ensure_native_byte_order(expected)
            assert result.dtype == expected.dtype
            np.testing.assert_equal(result, expected)
        else:
            assert result == expected
def _check_pickle(filename, expected_list, mmap_mode=None):
    """Helper function to test joblib pickle content.
    Note: currently only pickles containing an iterable are supported
    by this function.
    """
    # Test-data filenames encode the writer's Python version (e.g. 'py27');
    # extract the major digit to decide whether the pickle is readable.
    version_match = re.match(r'.+py(\d)(\d).+', filename)
    py_version_used_for_writing = int(version_match.group(1))
    py_version_to_default_pickle_protocol = {2: 2, 3: 3}
    pickle_reading_protocol = py_version_to_default_pickle_protocol.get(3, 4)
    pickle_writing_protocol = py_version_to_default_pickle_protocol.get(
        py_version_used_for_writing, 4)
    if pickle_reading_protocol >= pickle_writing_protocol:
        try:
            with warnings.catch_warnings(record=True) as warninfo:
                warnings.simplefilter('always')
                warnings.filterwarnings(
                    'ignore', module='numpy',
                    message='The compiler package is deprecated')
                result_list = numpy_pickle.load(filename, mmap_mode=mmap_mode)
                filename_base = os.path.basename(filename)
                # Pickles written by joblib < 0.10 emit one "please
                # regenerate" DeprecationWarning on load.
                expected_nb_deprecation_warnings = 1 if (
                    "_0.9" in filename_base or "_0.8.4" in filename_base) else 0
                # Memory-mapping 0.1x-era pickles emits UserWarnings.
                expected_nb_user_warnings = 3 if (
                    re.search("_0.1.+.pkl$", filename_base) and
                    mmap_mode is not None) else 0
                expected_nb_warnings = \
                    expected_nb_deprecation_warnings + expected_nb_user_warnings
                assert len(warninfo) == expected_nb_warnings
                deprecation_warnings = [
                    w for w in warninfo if issubclass(
                        w.category, DeprecationWarning)]
                user_warnings = [
                    w for w in warninfo if issubclass(
                        w.category, UserWarning)]
                for w in deprecation_warnings:
                    assert (str(w.message) ==
                            "The file '{0}' has been generated with a joblib "
                            "version less than 0.10. Please regenerate this "
                            "pickle file.".format(filename))
                for w in user_warnings:
                    escaped_filename = re.escape(filename)
                    assert re.search(
                        f"memmapped.+{escaped_filename}.+segmentation fault",
                        str(w.message))
                for result, expected in zip(result_list, expected_list):
                    if isinstance(expected, np.ndarray):
                        expected = _ensure_native_byte_order(expected)
                        assert result.dtype == expected.dtype
                        np.testing.assert_equal(result, expected)
                    else:
                        assert result == expected
        except Exception as exc:
            # When trying to read with python 3 a pickle generated
            # with python 2 we expect a user-friendly error
            if py_version_used_for_writing == 2:
                assert isinstance(exc, ValueError)
                message = ('You may be trying to read with '
                           'python 3 a joblib pickle generated with python 2.')
                assert message in str(exc)
            elif filename.endswith('.lz4') and with_lz4.args[0]:
                assert isinstance(exc, ValueError)
                assert LZ4_NOT_INSTALLED_ERROR in str(exc)
            else:
                raise
    else:
        # Pickle protocol used for writing is too high. We expect a
        # "unsupported pickle protocol" error message
        try:
            numpy_pickle.load(filename)
            raise AssertionError('Numpy pickle loading should '
                                 'have raised a ValueError exception')
        except ValueError as e:
            message = 'unsupported pickle protocol: {0}'.format(
                pickle_writing_protocol)
            assert message in str(e.args)
@with_numpy
def test_joblib_pickle_across_python_versions():
# We need to be specific about dtypes in particular endianness
# because the pickles can be generated on one architecture and
# the tests run on another one. See
# https://github.com/joblib/joblib/issues/279.
expected_list = [np.arange(5, dtype=np.dtype('<i8')),
np.arange(5, dtype=np.dtype('<f8')),
np.array([1, 'abc', {'a': 1, 'b': 2}], dtype='O'),
np.arange(256, dtype=np.uint8).tobytes(),
# np.matrix is a subclass of np.ndarray, here we want
# to verify this type of object is correctly unpickled
# among versions.
np.matrix([0, 1, 2], dtype=np.dtype('<i8')),
u"C'est l'\xe9t\xe9 !"]
# Testing all the compressed and non compressed
# pickles in joblib/test/data. These pickles were generated by
# the joblib/test/data/create_numpy_pickle.py script for the
# relevant python, joblib and numpy versions.
test_data_dir = os.path.dirname(os.path.abspath(data.__file__))
pickle_extensions = ('.pkl', '.gz', '.gzip', '.bz2', 'lz4')
if lzma is not None:
pickle_extensions += ('.xz', '.lzma')
pickle_filenames = [os.path.join(test_data_dir, fn)
for fn in os.listdir(test_data_dir)
if any(fn.endswith(ext) for ext in pickle_extensions)]
for fname in pickle_filenames:
_check_pickle(fname, expected_list)
@with_numpy
def test_joblib_pickle_across_python_versions_with_mmap():
expected_list = [np.arange(5, dtype=np.dtype('<i8')),
np.arange(5, dtype=np.dtype('<f8')),
np.array([1, 'abc', {'a': 1, 'b': 2}], dtype='O'),
np.arange(256, dtype=np.uint8).tobytes(),
# np.matrix is a subclass of np.ndarray, here we want
# to verify this type of object is correctly unpickled
# among versions.
np.matrix([0, 1, 2], dtype=np.dtype('<i8')),
u"C'est l'\xe9t\xe9 !"]
test_data_dir = os.path.dirname(os.path.abspath(data.__file__))
pickle_filenames = [
os.path.join(test_data_dir, fn)
for fn in os.listdir(test_data_dir) if fn.endswith('.pkl')]
for fname in pickle_filenames:
_check_pickle(fname, expected_list, mmap_mode='r')
@with_numpy
def test_numpy_array_byte_order_mismatch_detection():
    """Check byteorder-mismatch detection and native-order conversion.

    Arrays whose dtype byte order differs from ``sys.byteorder`` must be
    flagged by ``_is_numpy_array_byte_order_mismatch``, and
    ``_ensure_native_byte_order`` must return arrays in native ('=') order.
    """
    def _assert_native_order(converted):
        # Structured dtypes expose byteorder per field; plain dtypes directly.
        if converted.dtype.fields:
            for f in converted.dtype.fields.values():
                # BUG FIX: these comparisons were bare expressions whose
                # results were discarded, so they asserted nothing.
                assert f[0].byteorder == '='
        else:
            assert converted.dtype.byteorder == "="

    # List of numpy arrays with big endian byteorder.
    be_arrays = [np.array([(1, 2.0), (3, 4.0)],
                          dtype=[('', '>i8'), ('', '>f8')]),
                 np.arange(3, dtype=np.dtype('>i8')),
                 np.arange(3, dtype=np.dtype('>f8'))]
    # Verify the byteorder mismatch is correctly detected.
    for array in be_arrays:
        if sys.byteorder == 'big':
            assert not _is_numpy_array_byte_order_mismatch(array)
        else:
            assert _is_numpy_array_byte_order_mismatch(array)
        _assert_native_order(_ensure_native_byte_order(array))

    # List of numpy arrays with little endian byteorder.
    le_arrays = [np.array([(1, 2.0), (3, 4.0)],
                          dtype=[('', '<i8'), ('', '<f8')]),
                 np.arange(3, dtype=np.dtype('<i8')),
                 np.arange(3, dtype=np.dtype('<f8'))]
    # Verify the byteorder mismatch is correctly detected.
    for array in le_arrays:
        if sys.byteorder == 'little':
            assert not _is_numpy_array_byte_order_mismatch(array)
        else:
            assert _is_numpy_array_byte_order_mismatch(array)
        _assert_native_order(_ensure_native_byte_order(array))
@parametrize('compress_tuple', [('zlib', 3), ('gzip', 3)])
def test_compress_tuple_argument(tmpdir, compress_tuple):
# Verify the tuple is correctly taken into account.
filename = tmpdir.join('test.pkl').strpath
numpy_pickle.dump("dummy", filename,
compress=compress_tuple)
# Verify the file contains the right magic number
with open(filename, 'rb') as f:
assert _detect_compressor(f) == compress_tuple[0]
@parametrize('compress_tuple,message',
[(('zlib', 3, 'extra'), # wrong compress tuple
'Compress argument tuple should contain exactly 2 elements'),
(('wrong', 3), # wrong compress method
'Non valid compression method given: "{}"'.format('wrong')),
(('zlib', 'wrong'), # wrong compress level
'Non valid compress level given: "{}"'.format('wrong'))])
def test_compress_tuple_argument_exception(tmpdir, compress_tuple, message):
filename = tmpdir.join('test.pkl').strpath
# Verify setting a wrong compress tuple raises a ValueError.
with raises(ValueError) as excinfo:
numpy_pickle.dump('dummy', filename, compress=compress_tuple)
excinfo.match(message)
@parametrize('compress_string', ['zlib', 'gzip'])
def test_compress_string_argument(tmpdir, compress_string):
# Verify the string is correctly taken into account.
filename = tmpdir.join('test.pkl').strpath
numpy_pickle.dump("dummy", filename,
compress=compress_string)
# Verify the file contains the right magic number
with open(filename, 'rb') as f:
assert _detect_compressor(f) == compress_string
@with_numpy
@parametrize('compress', [1, 3, 6])
@parametrize('cmethod', _COMPRESSORS)
def test_joblib_compression_formats(tmpdir, compress, cmethod):
    # Dump/load a variety of object types with every registered compressor
    # at several compression levels.
    filename = tmpdir.join('test.pkl').strpath
    objects = (np.ones(shape=(100, 100), dtype='f8'),
               range(10),
               {'a': 1, 2: 'b'}, [], (), {}, 0, 1.0)
    if cmethod in ("lzma", "xz") and lzma is None:
        # BUG FIX: skip message previously read "lzma is support not
        # available" (garbled word order).
        pytest.skip("lzma support is not available")
    elif cmethod == 'lz4' and with_lz4.args[0]:
        # Skip the test if lz4 is not installed. We here use the with_lz4
        # skipif fixture whose argument is True when lz4 is not installed
        pytest.skip("lz4 is not installed.")
    dump_filename = filename + "." + cmethod
    for obj in objects:
        numpy_pickle.dump(obj, dump_filename, compress=(cmethod, compress))
        # Verify the file contains the right magic number
        with open(dump_filename, 'rb') as f:
            assert _detect_compressor(f) == cmethod
        # Verify the reloaded object is correct
        obj_reloaded = numpy_pickle.load(dump_filename)
        assert isinstance(obj_reloaded, type(obj))
        if isinstance(obj, np.ndarray):
            np.testing.assert_array_equal(obj_reloaded, obj)
        else:
            assert obj_reloaded == obj
def _gzip_file_decompress(source_filename, target_filename):
"""Decompress a gzip file."""
with closing(gzip.GzipFile(source_filename, "rb")) as fo:
buf = fo.read()
with open(target_filename, "wb") as fo:
fo.write(buf)
def _zlib_file_decompress(source_filename, target_filename):
    """Decompress a zlib file."""
    # Inflate the raw zlib stream, then persist the plain bytes.
    with open(source_filename, 'rb') as compressed:
        payload = zlib.decompress(compressed.read())
    with open(target_filename, 'wb') as target:
        target.write(payload)
@parametrize('extension,decompress',
             [('.z', _zlib_file_decompress),
              ('.gz', _gzip_file_decompress)])
def test_load_externally_decompressed_files(tmpdir, extension, decompress):
    # Files produced by BinaryZlibFile must be valid zlib/gzip streams that
    # external tools can decompress.
    obj = "a string to persist"
    raw_path = tmpdir.join('test.pkl').strpath
    compressed_path = raw_path + extension
    # The extension selects the compression method automatically.
    numpy_pickle.dump(obj, compressed_path)
    # Undo the compression with the matching stdlib codec ...
    decompress(compressed_path, raw_path)
    # ... and check the resulting plain pickle still loads correctly.
    assert numpy_pickle.load(raw_path) == obj
@parametrize('extension,cmethod',
             # valid compressor extensions
             [('.z', 'zlib'),
              ('.gz', 'gzip'),
              ('.bz2', 'bz2'),
              ('.lzma', 'lzma'),
              ('.xz', 'xz'),
              # invalid compressor extensions
              ('.pkl', 'not-compressed'),
              ('', 'not-compressed')])
def test_compression_using_file_extension(tmpdir, extension, cmethod):
    if cmethod in ("lzma", "xz") and lzma is None:
        pytest.skip("lzma is missing")
    # The filename extension alone must pick the compression method.
    dump_fname = tmpdir.join('test.pkl').strpath + extension
    obj = "object to dump"
    numpy_pickle.dump(obj, dump_fname)
    # Check the magic number written at the start of the file.
    with open(dump_fname, 'rb') as stream:
        assert _detect_compressor(stream) == cmethod
    # The round-trip must preserve both type and value.
    reloaded = numpy_pickle.load(dump_fname)
    assert isinstance(reloaded, type(obj))
    assert reloaded == obj
@with_numpy
def test_file_handle_persistence(tmpdir):
    # Dumping to and loading from open file handles must work for all the
    # supported compressed file objects as well as for raw handles.
    objs = [np.random.random((10, 10)), "some data"]
    fobjs = [bz2.BZ2File, gzip.GzipFile]
    if lzma is not None:
        fobjs.append(lzma.LZMAFile)
    filename = tmpdir.join('test.pkl').strpath

    for obj in objs:
        for fobj in fobjs:
            with fobj(filename, 'wb') as handle:
                numpy_pickle.dump(obj, handle)
            # Reading through the same compressed file type avoids a second
            # internal decompression pass.
            with fobj(filename, 'rb') as handle:
                first = numpy_pickle.load(handle)
            # A raw handle forces joblib to pick the decompressor itself.
            with open(filename, 'rb') as handle:
                second = numpy_pickle.load(handle)

            if isinstance(obj, np.ndarray):
                np.testing.assert_array_equal(first, obj)
                np.testing.assert_array_equal(second, obj)
            else:
                assert first == obj
                assert second == obj
@with_numpy
def test_in_memory_persistence():
    # Round-trip through an in-memory binary buffer.
    for obj in (np.random.random((10, 10)), "some data"):
        buffer_ = io.BytesIO()
        numpy_pickle.dump(obj, buffer_)
        reloaded = numpy_pickle.load(buffer_)
        if isinstance(obj, np.ndarray):
            np.testing.assert_array_equal(reloaded, obj)
        else:
            assert reloaded == obj
@with_numpy
def test_file_handle_persistence_mmap(tmpdir):
    # mmap_mode must be honoured when loading from a raw file handle.
    data = np.random.random((10, 10))
    path = tmpdir.join('test.pkl').strpath
    with open(path, 'wb') as handle:
        numpy_pickle.dump(data, handle)
    with open(path, 'rb') as handle:
        reloaded = numpy_pickle.load(handle, mmap_mode='r+')
        np.testing.assert_array_equal(reloaded, data)
@with_numpy
def test_file_handle_persistence_compressed_mmap(tmpdir):
    # Memory mapping cannot work through a compressed file object: exactly
    # one warning must be emitted and the mmap option ignored.
    data = np.random.random((10, 10))
    path = tmpdir.join('test.pkl').strpath
    with open(path, 'wb') as handle:
        numpy_pickle.dump(data, handle, compress=('gzip', 3))
    with closing(gzip.GzipFile(path, 'rb')) as handle:
        with warns(UserWarning) as warninfo:
            numpy_pickle.load(handle, mmap_mode='r+')
        assert len(warninfo) == 1
        expected = ('"%(fileobj)r" is not a raw file, mmap_mode '
                    '"%(mmap_mode)s" flag will be ignored.'
                    % {'fileobj': handle, 'mmap_mode': 'r+'})
        assert str(warninfo[0].message) == expected
@with_numpy
def test_file_handle_persistence_in_memory_mmap():
    # mmap_mode is meaningless for an in-memory buffer: joblib must warn
    # once and fall back to a regular load.
    data = np.random.random((10, 10))
    buffer_ = io.BytesIO()
    numpy_pickle.dump(data, buffer_)
    with warns(UserWarning) as warninfo:
        numpy_pickle.load(buffer_, mmap_mode='r+')
    assert len(warninfo) == 1
    expected = ('In memory persistence is not compatible with mmap_mode '
                '"%(mmap_mode)s" flag passed. mmap_mode option will be '
                'ignored.' % {'mmap_mode': 'r+'})
    assert str(warninfo[0].message) == expected
@parametrize('data', [b'a little data as bytes.',
                      # More bytes
                      10000 * "{}".format(
                          random.randint(0, 1000) * 1000).encode('latin-1')],
             ids=["a little data as bytes.", "a large data as bytes."])
@parametrize('compress_level', [1, 3, 9])
def test_binary_zlibfile(tmpdir, data, compress_level):
    """Exercise BinaryZlibFile wrapped around file objects, filenames,
    and used without a context manager."""
    filename = tmpdir.join('test.pkl').strpath
    # Regular cases: wrap an already-open binary file object for writing.
    with open(filename, 'wb') as f:
        with BinaryZlibFile(f, 'wb',
                            compresslevel=compress_level) as fz:
            assert fz.writable()
            fz.write(data)
            assert fz.fileno() == f.fileno()
            # A write-mode object must refuse read and seek operations.
            with raises(io.UnsupportedOperation):
                fz._check_can_read()
            with raises(io.UnsupportedOperation):
                fz._check_can_seek()
        # Leaving the inner context closes the compressed wrapper only.
        assert fz.closed
        with raises(ValueError):
            fz._check_not_closed()
    with open(filename, 'rb') as f:
        # Wrap the same file object for reading back.
        with BinaryZlibFile(f) as fz:
            assert fz.readable()
            assert fz.seekable()
            assert fz.fileno() == f.fileno()
            assert fz.read() == data
            # A read-mode object must refuse writes but allow seeking.
            with raises(io.UnsupportedOperation):
                fz._check_can_write()
            assert fz.seekable()
            fz.seek(0)
            assert fz.tell() == 0
        assert fz.closed
    # Test with a filename as input
    with BinaryZlibFile(filename, 'wb',
                        compresslevel=compress_level) as fz:
        assert fz.writable()
        fz.write(data)
    with BinaryZlibFile(filename, 'rb') as fz:
        assert fz.read() == data
        assert fz.seekable()
    # Test without context manager
    fz = BinaryZlibFile(filename, 'wb', compresslevel=compress_level)
    assert fz.writable()
    fz.write(data)
    fz.close()
    fz = BinaryZlibFile(filename, 'rb')
    assert fz.read() == data
    fz.close()
@parametrize('bad_value', [-1, 10, 15, 'a', (), {}])
def test_binary_zlibfile_bad_compression_levels(tmpdir, bad_value):
    # Out-of-range or non-integer compression levels must be rejected.
    path = tmpdir.join('test.pkl').strpath
    expected = re.escape("'compresslevel' must be an integer between 1 and 9. "
                         "You provided 'compresslevel={}'".format(bad_value))
    with raises(ValueError) as excinfo:
        BinaryZlibFile(path, 'wb', compresslevel=bad_value)
    excinfo.match(expected)
@parametrize('bad_mode', ['a', 'x', 'r', 'w', 1, 2])
def test_binary_zlibfile_invalid_modes(tmpdir, bad_mode):
    # Only the binary 'rb'/'wb' modes are supported.
    path = tmpdir.join('test.pkl').strpath
    with raises(ValueError) as excinfo:
        BinaryZlibFile(path, bad_mode)
    excinfo.match("Invalid mode")
@parametrize('bad_file', [1, (), {}])
def test_binary_zlibfile_invalid_filename_type(bad_file):
    # Anything that is neither a path nor a file object must be rejected.
    with raises(TypeError) as excinfo:
        BinaryZlibFile(bad_file, 'rb')
    excinfo.match("filename must be a str or bytes object, or a file")
###############################################################################
# Test dumping array subclasses
if np is not None:

    class SubArray(np.ndarray):
        # Minimal ndarray subclass with a custom ``__reduce__`` so that
        # pickling round-trips can preserve the subclass type.
        def __reduce__(self):
            return _load_sub_array, (np.asarray(self), )

    def _load_sub_array(arr):
        # Pickle reconstructor: rebuild a SubArray from a plain ndarray.
        d = SubArray(arr.shape)
        d[:] = arr
        return d

    class ComplexTestObject:
        """A complex object containing numpy arrays as attributes."""
        def __init__(self):
            self.array_float = np.arange(100, dtype='float64')
            self.array_int = np.ones(100, dtype='int32')
            self.array_obj = np.array(['a', 10, 20.0], dtype='object')
@with_numpy
def test_numpy_subclass(tmpdir):
    # Persisting an ndarray subclass must restore both type and contents.
    path = tmpdir.join('test.pkl').strpath
    original = SubArray((10,))
    numpy_pickle.dump(original, path)
    restored = numpy_pickle.load(path)
    assert isinstance(restored, SubArray)
    np.testing.assert_array_equal(restored, original)
def test_pathlib(tmpdir):
    # pathlib.Path objects must be accepted for both dumping and loading.
    as_string = tmpdir.join('test.pkl').strpath
    payload = 123
    # Dump via Path, load via str ...
    numpy_pickle.dump(payload, Path(as_string))
    assert numpy_pickle.load(as_string) == payload
    # ... and the other way around.
    numpy_pickle.dump(payload, as_string)
    assert numpy_pickle.load(Path(as_string)) == payload
@with_numpy
def test_non_contiguous_array_pickling(tmpdir):
    path = tmpdir.join('test.pkl').strpath
    # Array that triggers a contiguousness issue with nditer,
    # see https://github.com/joblib/joblib/pull/352 and see
    # https://github.com/joblib/joblib/pull/353
    tricky = np.asfortranarray([[1, 2], [3, 4]])[1:]
    # Non contiguous array which works fine with nditer.
    benign = np.ones((10, 50, 20), order='F')[:, :1, :]
    for array in (tricky, benign):
        assert not array.flags.c_contiguous
        assert not array.flags.f_contiguous
        numpy_pickle.dump(array, path)
        np.testing.assert_array_equal(numpy_pickle.load(path), array)
@with_numpy
def test_pickle_highest_protocol(tmpdir):
    # Persistence must remain valid when pickle.HIGHEST_PROTOCOL is used.
    # see https://github.com/joblib/joblib/issues/362
    path = tmpdir.join('test.pkl').strpath
    original = np.zeros(10)
    numpy_pickle.dump(original, path, protocol=pickle.HIGHEST_PROTOCOL)
    np.testing.assert_array_equal(numpy_pickle.load(path), original)
@with_numpy
def test_pickle_in_socket():
    """Check that joblib can dump to / load from socket file objects."""
    test_array = np.arange(10)
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # FIX: bind to an ephemeral port (0) instead of the hard-coded 12345
    # so that concurrent or repeated test runs cannot collide on the
    # address ("Address already in use").
    listener.bind(("localhost", 0))
    listener.listen(1)
    client = socket.create_connection(listener.getsockname()[:2])
    server, client_addr = listener.accept()

    # Dump straight into the server-side socket file, load on the client.
    with server.makefile("wb") as sf:
        numpy_pickle.dump(test_array, sf)
    with client.makefile("rb") as cf:
        array_reloaded = numpy_pickle.load(cf)
    np.testing.assert_array_equal(array_reloaded, test_array)

    # Check that a byte-aligned numpy array written in a file can be send over
    # a socket and then read on the other side
    bytes_to_send = io.BytesIO()
    numpy_pickle.dump(test_array, bytes_to_send)
    server.send(bytes_to_send.getvalue())
    with client.makefile("rb") as cf:
        array_reloaded = numpy_pickle.load(cf)
    np.testing.assert_array_equal(array_reloaded, test_array)

    # Release the sockets so repeated runs do not leak file descriptors.
    client.close()
    server.close()
    listener.close()
@with_numpy
def test_load_memmap_with_big_offset(tmpdir):
    # Offsets larger than mmap.ALLOCATIONGRANULARITY must be propagated to
    # the numpy memmap, see
    # https://github.com/joblib/joblib/issues/451 and
    # https://github.com/numpy/numpy/pull/8443 for more details.
    fname = tmpdir.join('test.mmap').strpath
    size = mmap.ALLOCATIONGRANULARITY
    arrays = [np.zeros(size, dtype='uint8'), np.ones(size, dtype='uint8')]
    numpy_pickle.dump(arrays, fname)
    memmaps = numpy_pickle.load(fname, mmap_mode='r')
    second = memmaps[1]
    assert isinstance(second, np.memmap)
    assert second.offset > size
    np.testing.assert_array_equal(arrays, memmaps)
def test_register_compressor(tmpdir):
    # Registering a custom compressor must expose it in _COMPRESSORS.
    name = 'test-name'
    prefix = 'test-prefix'

    class BinaryCompressorTestFile(io.BufferedIOBase):
        pass

    class BinaryCompressorTestWrapper(CompressorWrapper):
        def __init__(self):
            CompressorWrapper.__init__(self, obj=BinaryCompressorTestFile,
                                       prefix=prefix)

    register_compressor(name, BinaryCompressorTestWrapper())
    registered = _COMPRESSORS[name]
    assert registered.fileobj_factory == BinaryCompressorTestFile
    assert registered.prefix == prefix
    # Drop the dummy compressor so later tests see a clean registry.
    _COMPRESSORS.pop(name)
@parametrize('invalid_name', [1, (), {}])
def test_register_compressor_invalid_name(invalid_name):
    # Compressor names must be strings.
    with raises(ValueError) as excinfo:
        register_compressor(invalid_name, None)
    excinfo.match("Compressor name should be a string")
def test_register_compressor_invalid_fileobj():
    # The wrapped object must implement the file object interface.

    class InvalidFileObject():
        pass

    class InvalidFileObjectWrapper(CompressorWrapper):
        def __init__(self):
            CompressorWrapper.__init__(self, obj=InvalidFileObject,
                                       prefix=b'prefix')

    with raises(ValueError) as excinfo:
        register_compressor('invalid', InvalidFileObjectWrapper())
    excinfo.match("Compressor 'fileobj_factory' attribute should implement "
                  "the file object interface")
class AnotherZlibCompressorWrapper(CompressorWrapper):
    # Helper wrapper around BinaryZlibFile used by the re-registration test.
    def __init__(self):
        CompressorWrapper.__init__(self, obj=BinaryZlibFile, prefix=b'prefix')
class StandardLibGzipCompressorWrapper(CompressorWrapper):
    # Helper wrapper around gzip.GzipFile used by the re-registration test.
    def __init__(self):
        CompressorWrapper.__init__(self, obj=gzip.GzipFile, prefix=b'prefix')
def test_register_compressor_already_registered():
    # Re-registering an existing name fails unless force=True is passed.
    name = 'test-name'
    register_compressor(name, AnotherZlibCompressorWrapper())

    with raises(ValueError) as excinfo:
        register_compressor(name, StandardLibGzipCompressorWrapper())
    excinfo.match("Compressor '{}' already registered.".format(name))

    # With force=True the previous registration is overwritten.
    register_compressor(name, StandardLibGzipCompressorWrapper(), force=True)
    assert name in _COMPRESSORS
    assert _COMPRESSORS[name].fileobj_factory == gzip.GzipFile

    # Clean the registry so later tests are unaffected.
    _COMPRESSORS.pop(name)
@with_lz4
def test_lz4_compression(tmpdir):
    """Check that lz4 compression works when the lz4 package is installed."""
    import lz4.frame
    compressor = 'lz4'
    assert compressor in _COMPRESSORS
    assert _COMPRESSORS[compressor].fileobj_factory == lz4.frame.LZ4FrameFile

    fname = tmpdir.join('test.pkl').strpath
    data = 'test data'

    # Explicit compress='lz4' argument.
    numpy_pickle.dump(data, fname, compress=compressor)
    with open(fname, 'rb') as f:
        assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX
    assert numpy_pickle.load(fname) == data

    # Compression inferred from the '.lz4' file extension.
    # FIX: the original test re-opened and re-loaded ``fname`` (the file
    # written above) instead of the newly dumped ``fname + '.lz4'``, so the
    # extension-based code path was never actually verified.
    numpy_pickle.dump(data, fname + '.lz4')
    with open(fname + '.lz4', 'rb') as f:
        assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX
    assert numpy_pickle.load(fname + '.lz4') == data
@without_lz4
def test_lz4_compression_without_lz4(tmpdir):
    # Both the explicit argument and the '.lz4' extension must raise a
    # ValueError when the lz4 dependency is missing.
    fname = tmpdir.join('test.nolz4').strpath
    with raises(ValueError) as excinfo:
        numpy_pickle.dump('test data', fname, compress='lz4')
    excinfo.match(LZ4_NOT_INSTALLED_ERROR)
    with raises(ValueError) as excinfo:
        numpy_pickle.dump('test data', fname + '.lz4')
    excinfo.match(LZ4_NOT_INSTALLED_ERROR)
# Pickle protocols exercised by the alignment tests below; the highest
# protocol is added only when it differs from the default one.
protocols = [pickle.DEFAULT_PROTOCOL]
if pickle.HIGHEST_PROTOCOL != pickle.DEFAULT_PROTOCOL:
    protocols.append(pickle.HIGHEST_PROTOCOL)
def _assert_aligned_memmap(memmap, expected):
    """Check that *memmap* is a np.memmap equal to *expected* and that its
    data pointer is aligned to NUMPY_ARRAY_ALIGNMENT_BYTES."""
    assert isinstance(memmap, np.memmap)
    np.testing.assert_array_equal(expected, memmap)
    assert (
        memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0)
    assert memmap.flags.aligned


@with_numpy
@parametrize('protocol', protocols)
def test_memmap_alignment_padding(tmpdir, protocol):
    """Arrays memory-mapped back by numpy.load must be correctly aligned.

    The three identical assertion groups of the original test are factored
    into the ``_assert_aligned_memmap`` helper above.
    """
    # Single array.
    fname = tmpdir.join('test.mmap').strpath
    a = np.random.randn(2)
    numpy_pickle.dump(a, fname, protocol=protocol)
    _assert_aligned_memmap(numpy_pickle.load(fname, mmap_mode='r'), a)

    # List of arrays.
    array_list = [
        np.random.randn(2), np.random.randn(2),
        np.random.randn(2), np.random.randn(2)
    ]
    # On Windows OSError 22 if reusing the same path for memmap ...
    fname = tmpdir.join('test1.mmap').strpath
    numpy_pickle.dump(array_list, fname, protocol=protocol)
    l_reloaded = numpy_pickle.load(fname, mmap_mode='r')
    for idx, memmap in enumerate(l_reloaded):
        _assert_aligned_memmap(memmap, array_list[idx])

    # Dict of arrays with odd (prime) sizes to exercise padding.
    array_dict = {
        'a0': np.arange(2, dtype=np.uint8),
        'a1': np.arange(3, dtype=np.uint8),
        'a2': np.arange(5, dtype=np.uint8),
        'a3': np.arange(7, dtype=np.uint8),
        'a4': np.arange(11, dtype=np.uint8),
        'a5': np.arange(13, dtype=np.uint8),
        'a6': np.arange(17, dtype=np.uint8),
        'a7': np.arange(19, dtype=np.uint8),
        'a8': np.arange(23, dtype=np.uint8),
    }
    # On Windows OSError 22 if reusing the same path for memmap ...
    fname = tmpdir.join('test2.mmap').strpath
    numpy_pickle.dump(array_dict, fname, protocol=protocol)
    d_reloaded = numpy_pickle.load(fname, mmap_mode='r')
    for key, memmap in d_reloaded.items():
        _assert_aligned_memmap(memmap, array_dict[key])
| {
"content_hash": "36c7a9688d3b42c58091510d5eac7fa4",
"timestamp": "",
"source": "github",
"line_count": 1158,
"max_line_length": 79,
"avg_line_length": 36.57772020725388,
"alnum_prop": 0.6261302736265553,
"repo_name": "joblib/joblib",
"id": "c9d1d5bdb33ac1b17109ae7902c682be79fc160d",
"size": "42357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "joblib/test/test_numpy_pickle.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "390"
},
{
"name": "Makefile",
"bytes": "363"
},
{
"name": "Python",
"bytes": "622066"
},
{
"name": "Shell",
"bytes": "9122"
}
],
"symlink_target": ""
} |
"""
Tools for working with objects
This module re-implements several STL threading objects in a lightweight
fashion, portable across python versions.
"""
from threading import Lock, ThreadError
from collections import deque
import time
import ast
import operator
try:
from thread import error as thread_error
except ImportError:
from _thread import error as thread_error
from .compat import stringabc, inf, intern_str, unicode_str, long_int, range
from .exceptions import CPy2PyException
#: sentinel for unset variables
UNSET = intern_str("<unset_sentinel>")
class ThreadGuard(object):
    """
    Threadsafe wrapper for primitives

    This class wraps all magic methods (e.g. ``a + b``, ``a[b]``) to
    make them atomic.

    :note: Derived values inherit the original type. For example,
        ``foo = ThreadGuard(1.0) * 2`` will be of type
        :py:class:`float`, i.e. the result of ``1.0 * 2``.

    :note: When invoked as a context manager, the underlying lock
        will be held until the context is exited.

    :warning: It is not possible to call :py:func:`bytes` on a
        guarded object without explicitly unwrapping it.
    """
    def __init__(self, start=0.0, lock_type=Lock):
        # Strings are parsed as python literals when possible (so
        # ThreadGuard("1.5") guards the float 1.5); strings that are not
        # valid literals are guarded as plain strings.
        if isinstance(start, stringabc):
            try:
                start = ast.literal_eval(start)
            except ValueError:
                pass
        # the guarded value; every access below goes through self._lock
        self.__wrapped__ = start
        self._lock = lock_type()
    # Developer note:
    # operations are implemented using operator.__add__(self._value, other)
    # instead of self._value.__add__(other) as the later does *not* imply
    # calling other.__radd__(self._value) on failure.
    # --- binary operations: lock, then delegate to the operator module ---
    def __add__(self, other):
        with self._lock:
            return operator.__add__(self.__wrapped__, other)
    def __sub__(self, other):
        with self._lock:
            return operator.__sub__(self.__wrapped__, other)
    def __mul__(self, other):
        with self._lock:
            return operator.__mul__(self.__wrapped__, other)
    # __div__ is py2 only
    if hasattr(operator, '__div__'):
        def __div__(self, other):  # nopep8
            with self._lock:
                return operator.__div__(self.__wrapped__, other)
    def __truediv__(self, other):
        with self._lock:
            return operator.__truediv__(self.__wrapped__, other)
    def __floordiv__(self, other):
        with self._lock:
            return operator.__floordiv__(self.__wrapped__, other)
    def __mod__(self, other):
        with self._lock:
            return operator.__mod__(self.__wrapped__, other)
    def __divmod__(self, other):
        with self._lock:
            return divmod(self.__wrapped__, other)
    def __pow__(self, power, modulo=None):
        with self._lock:
            return pow(self.__wrapped__, power, modulo)
    def __lshift__(self, other):
        with self._lock:
            return operator.__lshift__(self.__wrapped__, other)
    def __rshift__(self, other):
        with self._lock:
            return operator.__rshift__(self.__wrapped__, other)
    def __and__(self, other):
        with self._lock:
            return operator.__and__(self.__wrapped__, other)
    def __xor__(self, other):
        with self._lock:
            return operator.__xor__(self.__wrapped__, other)
    def __or__(self, other):
        with self._lock:
            return operator.__or__(self.__wrapped__, other)
    # --- reflected binary operations (guarded value on the right side) ---
    def __radd__(self, other):
        with self._lock:
            return operator.__add__(other, self.__wrapped__)
    def __rsub__(self, other):
        with self._lock:
            return operator.__sub__(other, self.__wrapped__)
    def __rmul__(self, other):
        with self._lock:
            return operator.__mul__(other, self.__wrapped__)
    if hasattr(operator, '__div__'):
        def __rdiv__(self, other):  # nopep8
            with self._lock:
                return operator.__div__(other, self.__wrapped__)
    def __rtruediv__(self, other):
        with self._lock:
            return operator.__truediv__(other, self.__wrapped__)
    def __rfloordiv__(self, other):
        with self._lock:
            return operator.__floordiv__(other, self.__wrapped__)
    def __rmod__(self, other):
        with self._lock:
            return operator.__mod__(other, self.__wrapped__)
    def __rdivmod__(self, other):
        with self._lock:
            return divmod(other, self.__wrapped__)
    def __rpow__(self, other):
        with self._lock:
            return operator.__pow__(other, self.__wrapped__)
    def __rlshift__(self, other):
        with self._lock:
            return operator.__lshift__(other, self.__wrapped__)
    def __rrshift__(self, other):
        with self._lock:
            return operator.__rshift__(other, self.__wrapped__)
    def __rand__(self, other):
        with self._lock:
            return operator.__and__(other, self.__wrapped__)
    def __rxor__(self, other):
        with self._lock:
            return operator.__xor__(other, self.__wrapped__)
    def __ror__(self, other):
        with self._lock:
            return operator.__or__(other, self.__wrapped__)
    # inplace operations
    # These mutate the guarded value atomically and return self so that the
    # guard stays in place after ``guarded += x`` etc.
    def __iadd__(self, other):
        with self._lock:
            self.__wrapped__ += other
            return self
    def __isub__(self, other):
        with self._lock:
            self.__wrapped__ -= other
            return self
    def __imul__(self, other):
        with self._lock:
            self.__wrapped__ *= other
            return self
    if hasattr(operator, '__idiv__'):
        def __idiv__(self, other):  # nopep8
            with self._lock:
                self.__wrapped__ = operator.__idiv__(self.__wrapped__, other)
                return self
    def __itruediv__(self, other):
        with self._lock:
            self.__wrapped__ = operator.__itruediv__(self.__wrapped__, other)
            return self
    def __ifloordiv__(self, other):
        with self._lock:
            self.__wrapped__ //= other
            return self
    def __imod__(self, other):
        with self._lock:
            self.__wrapped__ %= other
            return self
    def __ipow__(self, power, modulo=None):
        with self._lock:
            self.__wrapped__ = pow(self.__wrapped__, power, modulo)
            return self
    def __ilshift__(self, other):
        with self._lock:
            self.__wrapped__ <<= other
            return self
    def __irshift__(self, other):
        with self._lock:
            self.__wrapped__ >>= other
            return self
    def __iand__(self, other):
        with self._lock:
            self.__wrapped__ &= other
            return self
    def __ixor__(self, other):
        with self._lock:
            self.__wrapped__ ^= other
            return self
    def __ior__(self, other):
        with self._lock:
            self.__wrapped__ |= other
            return self
    # --- unary operations and conversions ---
    def __neg__(self):
        with self._lock:
            return -self.__wrapped__
    def __pos__(self):
        with self._lock:
            return +self.__wrapped__
    def __abs__(self):
        with self._lock:
            return abs(self.__wrapped__)
    def __invert__(self):
        with self._lock:
            return ~self.__wrapped__
    def __complex__(self):
        with self._lock:
            return complex(self.__wrapped__)
    def __int__(self):
        with self._lock:
            return int(self.__wrapped__)
    def __float__(self):
        with self._lock:
            return float(self.__wrapped__)
    def __long__(self):
        with self._lock:
            return long_int(self.__wrapped__)
    # NOTE(review): does not accept the optional ``ndigits`` argument that
    # the builtin round() may pass -- confirm callers never use it.
    def __round__(self):
        with self._lock:
            return round(self.__wrapped__)
    # NOTE(review): returning NotImplemented from __index__ is unusual;
    # Python expects an int or a TypeError here -- confirm intended.
    def __index__(self):
        try:
            with self._lock:
                return self.__wrapped__.__index__()
        except AttributeError:
            return NotImplemented
    # --- context manager: hold the guard's lock for the whole context ---
    def __enter__(self):
        self._lock.acquire()
        try:
            _enter = self.__wrapped__.__enter__
        except AttributeError:
            # NOTE(review): the wrapped object has no __enter__, so the lock
            # is released and None returned; a matching __exit__ call will
            # still try to release the (now unlocked) lock -- confirm this
            # path is never exercised as a context manager.
            self._lock.release()
        else:
            return _enter()
    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            _exit = self.__wrapped__.__exit__
            return _exit(exc_type, exc_val, exc_tb)
        finally:
            self._lock.release()
    # --- string representations and comparisons ---
    def __str__(self):
        with self._lock:
            return str(self.__wrapped__)
    def __unicode__(self):
        with self._lock:
            return unicode_str(self.__wrapped__)
    def __repr__(self):
        with self._lock:
            return '%s<%r>' % (self.__class__.__name__, self.__wrapped__)
    def __lt__(self, other):
        with self._lock:
            return self.__wrapped__ < other
    def __gt__(self, other):
        with self._lock:
            return self.__wrapped__ > other
    def __le__(self, other):
        with self._lock:
            return self.__wrapped__ <= other
    def __ge__(self, other):
        with self._lock:
            return self.__wrapped__ >= other
    def __eq__(self, other):
        with self._lock:
            return self.__wrapped__ == other
    def __ne__(self, other):
        with self._lock:
            return self.__wrapped__ != other
    def __hash__(self):
        with self._lock:
            return hash(self.__wrapped__)
    def __nonzero__(self):
        with self._lock:
            return bool(self.__wrapped__)
    # py3 spelling of __nonzero__
    __bool__ = __nonzero__
    def __call__(self, *args, **kwargs):
        with self._lock:
            return self.__wrapped__(*args, **kwargs)
# Raised by FifoQueue.get when the queue is empty and either block=False
# or the timeout expired.
class ItemError(CPy2PyException):
    """No Items in the queue"""
class FifoQueue(object):
    """
    Lightweight FIFO Queue

    This is essentially a thread-safe :py:class:`~collections.deque`.

    The additional tuning parameters :py:attr:`~sleep_min`, :py:attr:`~sleep_max`,
    :py:attr:`~sleep_fail_penalty` and :py:attr:`~sleep_order_penalty` only apply
    if python does not support the ``timeout`` argument to
    :py:meth:`threading.Lock.acquire`.

    .. note:: FIX: :py:meth:`put` appends to the *right* of the deque, so
        :py:meth:`get` must remove from the *left* (``popleft``). The
        previous implementation used ``pop`` (rightmost element), which
        made the queue behave as LIFO despite its documented FIFO contract.
    """
    #: minimum interval between polling for new elements
    sleep_min = 0.00005
    #: maximum interval between polling for new elements
    sleep_max = 0.001
    #: factor applied to interval after unsuccessful poll
    sleep_fail_penalty = 1.5
    #: factor applied to interval for every preceeding request
    sleep_order_penalty = 2
    def __init__(self):
        self._queue_content = deque()
        self._queue_mutex = Lock()
        # locked mutexes of `get` calls currently waiting for content
        self._waiters = []
    def __len__(self):
        return len(self._queue_content)
    def __nonzero__(self):
        return bool(self._queue_content)
    def qsize(self):
        """Size of the queue, alias for ``len(this)``"""
        return len(self)
    def put(self, item):
        """Put a single item in the queue"""
        with self._queue_mutex:
            self._queue_content.append(item)
            # wake exactly one waiting `get` call; mutexes that have been
            # released already (stale waiters) are skipped
            for w_idx in range(len(self._waiters)):
                try:
                    self._waiters[w_idx].release()
                except (ThreadError, RuntimeError, thread_error):
                    continue
                else:
                    break
    def get(self, block=True, timeout=inf):
        """
        Retrieve a single item from the queue

        :param block: whether to wait for an item if the queue is currently empty
        :type block: bool
        :param timeout: how long to wait for an item
        :type timeout: int or float
        :return: an item from the queue
        :raises: :py:exc:`~.ItemError` if no item could be retrieved
        """
        with self._queue_mutex:
            try:
                # always try if anything is ready
                return self._queue_content.popleft()
            except IndexError:
                if not block:
                    raise ItemError
                # register ourselves as waiting for content
                wait_mutex = Lock()
                wait_mutex.acquire()  # lock mutex so we can wait for its release
                self._waiters.append(wait_mutex)
        try:
            # no finite timeout: wait indefinitely for `put` to wake us
            if not timeout or timeout <= 0 or timeout == inf:
                with wait_mutex:
                    with self._queue_mutex:
                        return self._queue_content.popleft()
            else:
                # in Py3, we can explicitly block for a specific time
                try:
                    if wait_mutex.acquire(True, timeout):
                        with self._queue_mutex:
                            return self._queue_content.popleft()
                    else:
                        raise ItemError
                except TypeError:
                    # Replicate the diminishing wait behaviour of threading.Condition
                    _w_min, _w_max, _w_fail, _w_order = (
                        self.sleep_min, self.sleep_max, self.sleep_fail_penalty, self.sleep_order_penalty)
                    _w_start, _w_now, _w_idx, _w_cnt = time.time(), _w_min / _w_fail, self._waiters.index(wait_mutex), 1
                    while True:
                        with self._queue_mutex:
                            if wait_mutex.acquire(False):
                                try:
                                    return self._queue_content.popleft()
                                except IndexError:  # someone else beat us to it, continue waiting
                                    pass
                        _w_now = min(
                            _w_now * _w_fail * (_w_order ** _w_idx),  # diminishing wake
                            _w_start + timeout - time.time(),  # timeout
                            _w_max  # minimum responsiveness
                        )
                        if _w_now < 0:
                            raise ItemError
                        time.sleep(_w_now)
                        _w_cnt += 1
        finally:
            # always clean up
            self._waiters.remove(wait_mutex)
| {
"content_hash": "33ecd649e4004fe72e67d1429ff9234c",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 120,
"avg_line_length": 30.2987012987013,
"alnum_prop": 0.5172881840262895,
"repo_name": "maxfischer2781/cpy2py",
"id": "7c6f073f1033f66ac27a9575ff12bb24f3584598",
"size": "14625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cpy2py/utility/thread_tools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7860"
},
{
"name": "Python",
"bytes": "230870"
}
],
"symlink_target": ""
} |
"""
Helper methods to deal with images.
This is essentially a copy from nova.virt.images.py
Some slight modifications, but at some point
we should look at maybe pushing this up to Oslo
"""
import contextlib
import os
import tempfile
from oslo.config import cfg
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import fileutils
from cinder.openstack.common import imageutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import timeutils
from cinder.openstack.common import units
from cinder import utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
image_helper_opt = [cfg.StrOpt('image_conversion_dir',
default='$state_path/conversion',
help='Directory used for temporary storage '
'during image conversion'), ]
CONF = cfg.CONF
CONF.register_opts(image_helper_opt)
def qemu_img_info(path):
    """Return an object containing the parsed output from qemu-img info."""
    base_cmd = ('qemu-img', 'info', path)
    if os.name == 'nt':
        # Windows has no 'env' wrapper command.
        cmd = base_cmd
    else:
        # Force the C locale so qemu-img output is always parseable.
        cmd = ('env', 'LC_ALL=C') + base_cmd
    out, _err = utils.execute(*cmd, run_as_root=True)
    return imageutils.QemuImgInfo(out)
def convert_image(source, dest, out_format, bps_limit=None):
    """Convert image to other format.

    :param source: path of the image to convert
    :param dest: path for the converted image
    :param out_format: output format passed to ``qemu-img convert -O``
    :param bps_limit: optional bytes-per-second throttle, enforced by
        prefixing the command with a blkio cgroup wrapper
    """
    cmd = ('qemu-img', 'convert',
           '-O', out_format, source, dest)
    # Check whether O_DIRECT is supported and set '-t none' if it is
    # This is needed to ensure that all data hit the device before
    # it gets unmapped remotely from the host for some backends
    # Reference Bug: #1363016
    # NOTE(jdg): In the case of file devices qemu does the
    # flush properly and more efficiently than would be done
    # setting O_DIRECT, so check for that and skip the
    # setting for non BLK devs
    if (utils.is_blk_device(dest) and
        volume_utils.check_for_odirect_support(source,
                                               dest,
                                               'oflag=direct')):
        cmd = ('qemu-img', 'convert',
               '-t', 'none',
               '-O', out_format, source, dest)
    start_time = timeutils.utcnow()
    cgcmd = volume_utils.setup_blkio_cgroup(source, dest, bps_limit)
    if cgcmd:
        # run the conversion inside the throttled cgroup
        cmd = tuple(cgcmd) + cmd
    utils.execute(*cmd, run_as_root=True)
    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    # NOTE(review): under Python 2 this is integer (floor) division, so the
    # logged size/rate are truncated to whole MB -- confirm intended.
    fsz_mb = os.stat(source).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, "
           "duration %(duration).2f sec, destination %(dest)s")
    LOG.debug(msg % {"src": source,
                     "sz": fsz_mb,
                     "duration": duration,
                     "dest": dest})
    msg = _("Converted %(sz).2f MB image at %(mbps).2f MB/s")
    LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
def resize_image(source, size, run_as_root=False):
    """Changes the virtual size of the image."""
    # 'size' is expressed in gigabytes, hence the 'G' suffix.
    utils.execute('qemu-img', 'resize', source, '%sG' % size,
                  run_as_root=run_as_root)
def fetch(context, image_service, image_id, path, _user_id, _project_id):
    """Download image ``image_id`` from the image service into ``path``.

    ``_user_id`` and ``_project_id`` are currently unused (see TODO below).
    The partially written file is removed if the download raises.
    """
    # TODO(vish): Improve context handling and add owner and auth data
    # when it is added to glance. Right now there is no
    # auth checking in glance, so we assume that access was
    # checked before we got here.
    start_time = timeutils.utcnow()
    with fileutils.remove_path_on_error(path):
        with open(path, "wb") as image_file:
            image_service.download(context, image_id, image_file)
    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    # image_file is closed here, but its .name attribute is still valid
    fsz_mb = os.stat(image_file.name).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, "
           "duration %(duration).2f sec")
    LOG.debug(msg % {"dest": image_file.name,
                     "sz": fsz_mb,
                     "duration": duration})
    msg = _("Image download %(sz).2f MB at %(mbps).2f MB/s")
    LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
def fetch_verify_image(context, image_service, image_id, dest,
                       user_id=None, project_id=None, size=None):
    """Fetch an image and verify it is usable as a self-contained image.

    Raises ImageUnacceptable when the format cannot be determined, when the
    image has a backing file, or when its virtual size exceeds ``size``
    (in GB). The downloaded file is removed when verification fails.
    """
    fetch(context, image_service, image_id, dest,
          None, None)
    with fileutils.remove_path_on_error(dest):
        data = qemu_img_info(dest)
        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_id)
        # a backing file means the image is not self-contained
        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
                        {'fmt': fmt, 'backing_file': backing_file}))
        # NOTE(xqueralt): If the image virtual size doesn't fit in the
        # requested volume there is no point on resizing it because it will
        # generate an unusable image.
        if size is not None and data.virtual_size > size:
            params = {'image_size': data.virtual_size, 'volume_size': size}
            reason = _("Size is %(image_size)dGB and doesn't fit in a "
                       "volume of size %(volume_size)dGB.") % params
            raise exception.ImageUnacceptable(image_id=image_id, reason=reason)
def fetch_to_vhd(context, image_service,
                 image_id, dest, blocksize,
                 user_id=None, project_id=None):
    """Fetch an image and write it to dest in VHD ('vpc') format."""
    fetch_to_volume_format(context, image_service, image_id, dest,
                           volume_format='vpc', blocksize=blocksize,
                           user_id=user_id, project_id=project_id)
def fetch_to_raw(context, image_service,
                 image_id, dest, blocksize,
                 user_id=None, project_id=None, size=None):
    """Fetch an image and write it to dest in raw format."""
    fetch_to_volume_format(context, image_service, image_id, dest,
                           volume_format='raw', blocksize=blocksize,
                           user_id=user_id, project_id=project_id, size=size)
def fetch_to_volume_format(context, image_service,
                           image_id, dest, volume_format, blocksize,
                           user_id=None, project_id=None, size=None):
    """Fetch an image into a temp file and convert it to volume_format at dest.

    Falls back to a straight copy when qemu-img is not installed, which is
    only acceptable for RAW images.  Raises exception.ImageUnacceptable when
    the image cannot be parsed, has a backing file, does not fit in ``size``
    GB, or the conversion result is not in the requested format.
    """
    # Make sure the configured conversion scratch directory exists.
    if (CONF.image_conversion_dir and not
            os.path.exists(CONF.image_conversion_dir)):
        os.makedirs(CONF.image_conversion_dir)
    qemu_img = True
    image_meta = image_service.show(context, image_id)
    # NOTE(avishay): I'm not crazy about creating temp files which may be
    # large and cause disk full errors which would confuse users.
    # Unfortunately it seems that you can't pipe to 'qemu-img convert' because
    # it seeks. Maybe we can think of something for a future version.
    with temporary_file() as tmp:
        # We may be on a system that doesn't have qemu-img installed.  That
        # is ok if we are working with a RAW image.  This logic checks to see
        # if qemu-img is installed.  If not we make sure the image is RAW and
        # throw an exception if not.  Otherwise we stop before needing
        # qemu-img.  Systems with qemu-img will always progress through the
        # whole function.
        try:
            # Use the empty tmp file to make sure qemu_img_info works.
            qemu_img_info(tmp)
        except processutils.ProcessExecutionError:
            qemu_img = False
            if image_meta:
                if image_meta['disk_format'] != 'raw':
                    raise exception.ImageUnacceptable(
                        reason=_("qemu-img is not installed and image is of "
                                 "type %s. Only RAW images can be used if "
                                 "qemu-img is not installed.") %
                        image_meta['disk_format'],
                        image_id=image_id)
            else:
                raise exception.ImageUnacceptable(
                    reason=_("qemu-img is not installed and the disk "
                             "format is not specified. Only RAW images "
                             "can be used if qemu-img is not installed."),
                    image_id=image_id)
        fetch(context, image_service, image_id, tmp, user_id, project_id)
        # XenServer images arrive as a tgz of VHDs; collapse them first.
        if is_xenserver_image(context, image_service, image_id):
            replace_xenserver_image_with_coalesced_vhd(tmp)
        if not qemu_img:
            # qemu-img is not installed but we do have a RAW image. As a
            # result we only need to copy the image to the destination and then
            # return.
            LOG.debug('Copying image from %(tmp)s to volume %(dest)s - '
                      'size: %(size)s' % {'tmp': tmp, 'dest': dest,
                                          'size': image_meta['size']})
            volume_utils.copy_volume(tmp, dest, image_meta['size'], blocksize)
            return
        data = qemu_img_info(tmp)
        virt_size = data.virtual_size / units.Gi
        # NOTE(xqueralt): If the image virtual size doesn't fit in the
        # requested volume there is no point on resizing it because it will
        # generate an unusable image.
        if size is not None and virt_size > size:
            params = {'image_size': virt_size, 'volume_size': size}
            reason = _("Size is %(image_size)dGB and doesn't fit in a "
                       "volume of size %(volume_size)dGB.") % params
            raise exception.ImageUnacceptable(image_id=image_id, reason=reason)
        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_id)
        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("fmt=%(fmt)s backed by:%(backing_file)s")
                % {'fmt': fmt, 'backing_file': backing_file, })
        # NOTE(jdg): I'm using qemu-img convert to write
        # to the volume regardless if it *needs* conversion or not
        # TODO(avishay): We can speed this up by checking if the image is raw
        # and if so, writing directly to the device. However, we need to keep
        # check via 'qemu-img info' that what we copied was in fact a raw
        # image and not a different format with a backing file, which may be
        # malicious.
        LOG.debug("%s was %s, converting to %s " % (image_id, fmt,
                                                    volume_format))
        convert_image(tmp, dest, volume_format,
                      bps_limit=CONF.volume_copy_bps_limit)
        # Re-inspect the destination to catch a silently wrong conversion.
        data = qemu_img_info(dest)
        if data.file_format != volume_format:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("Converted to %(vol_format)s, but format is "
                         "now %(file_format)s") % {'vol_format': volume_format,
                                                   'file_format': data.
                                                   file_format})
def upload_volume(context, image_service, image_meta, volume_path,
                  volume_format='raw'):
    """Upload a volume's data to the image service as image_meta['id'].

    If the image's disk_format already matches volume_format the volume is
    streamed as-is; otherwise it is converted into a temp file first and the
    result is verified before uploading.
    """
    image_id = image_meta['id']
    if (image_meta['disk_format'] == volume_format):
        LOG.debug("%s was %s, no need to convert to %s" %
                  (image_id, volume_format, image_meta['disk_format']))
        # On Windows, or when the file is already readable, stream directly;
        # otherwise temporarily chown the device so it can be opened.
        if os.name == 'nt' or os.access(volume_path, os.R_OK):
            with fileutils.file_open(volume_path, 'rb') as image_file:
                image_service.update(context, image_id, {}, image_file)
        else:
            with utils.temporary_chown(volume_path):
                with fileutils.file_open(volume_path) as image_file:
                    image_service.update(context, image_id, {}, image_file)
        return
    # Conversion path: make sure the scratch directory exists, convert the
    # volume into a temp file, then upload that file.
    if (CONF.image_conversion_dir and not
            os.path.exists(CONF.image_conversion_dir)):
        os.makedirs(CONF.image_conversion_dir)
    fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir)
    os.close(fd)
    with fileutils.remove_path_on_error(tmp):
        LOG.debug("%s was %s, converting to %s" %
                  (image_id, volume_format, image_meta['disk_format']))
        convert_image(volume_path, tmp, image_meta['disk_format'],
                      bps_limit=CONF.volume_copy_bps_limit)
        # Verify the conversion actually produced the requested format.
        data = qemu_img_info(tmp)
        if data.file_format != image_meta['disk_format']:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("Converted to %(f1)s, but format is now %(f2)s") %
                {'f1': image_meta['disk_format'], 'f2': data.file_format})
        with fileutils.file_open(tmp, 'rb') as image_file:
            image_service.update(context, image_id, {}, image_file)
        fileutils.delete_if_exists(tmp)
def is_xenserver_image(context, image_service, image_id):
    """Look up an image's metadata and report whether it is XenServer-format."""
    meta = image_service.show(context, image_id)
    return is_xenserver_format(meta)
def is_xenserver_format(image_meta):
    """True when the metadata describes a XenServer image: VHD disk in OVF."""
    disk = image_meta['disk_format']
    container = image_meta['container_format']
    return disk == 'vhd' and container == 'ovf'
def file_exist(fpath):
    """Report whether a filesystem entry exists at fpath."""
    return os.path.exists(fpath)
def set_vhd_parent(vhd_path, parentpath):
    """Re-point the parent of the VHD at vhd_path to parentpath via vhd-util."""
    utils.execute('vhd-util', 'modify', '-n', vhd_path, '-p', parentpath)
def extract_targz(archive_name, target):
    """Unpack a gzipped tarball archive_name into the target directory."""
    utils.execute('tar', '-xzf', archive_name, '-C', target)
def fix_vhd_chain(vhd_chain):
    """Point every VHD in the chain at the next entry as its parent."""
    for position in range(len(vhd_chain) - 1):
        set_vhd_parent(vhd_chain[position], vhd_chain[position + 1])
def get_vhd_size(vhd_path):
    """Return the virtual size of a VHD as an int ('vhd-util query -v')."""
    out, err = utils.execute('vhd-util', 'query', '-n', vhd_path, '-v')
    return int(out)
def resize_vhd(vhd_path, size, journal):
    """Resize a VHD to size, using journal as the vhd-util journal file."""
    utils.execute(
        'vhd-util', 'resize', '-n', vhd_path, '-s', '%d' % size, '-j', journal)
def coalesce_vhd(vhd_path):
    """Merge the VHD at vhd_path into its parent via 'vhd-util coalesce'."""
    utils.execute(
        'vhd-util', 'coalesce', '-n', vhd_path)
def create_temporary_file(*args, **kwargs):
    """Create a closed temp file under CONF.image_conversion_dir; return path.

    Extra args/kwargs are forwarded to tempfile.mkstemp (suffix, prefix, ...).
    """
    fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir, *args, **kwargs)
    # Close the low-level descriptor immediately; callers use the path only.
    os.close(fd)
    return tmp
def rename_file(src, dst):
    """Rename (move) the file at src to dst."""
    os.rename(src, dst)
@contextlib.contextmanager
def temporary_file(*args, **kwargs):
    """Context manager yielding a temporary file path, removed on exit.

    Extra args/kwargs are forwarded to create_temporary_file (and in turn to
    tempfile.mkstemp).  The file is deleted on both normal and error exit.
    """
    tmp = None
    try:
        tmp = create_temporary_file(*args, **kwargs)
        yield tmp
    finally:
        # Guard against create_temporary_file() raising before `tmp` is bound;
        # previously that caused a NameError here which masked the real error.
        if tmp is not None:
            fileutils.delete_if_exists(tmp)
def temporary_dir():
    """Return a context manager for a temp dir under CONF.image_conversion_dir."""
    return utils.tempdir(dir=CONF.image_conversion_dir)
def coalesce_chain(vhd_chain):
    """Collapse a child->parent VHD chain into its last element; return it.

    Each parent is first resized to the child's virtual size (vhd-util resize
    requires a journal file) before the child is coalesced into it.
    """
    for child, parent in zip(vhd_chain[:-1], vhd_chain[1:]):
        with temporary_dir() as directory_for_journal:
            size = get_vhd_size(child)
            journal_file = os.path.join(
                directory_for_journal, 'vhd-util-resize-journal')
            resize_vhd(parent, size, journal_file)
            coalesce_vhd(child)
    return vhd_chain[-1]
def discover_vhd_chain(directory):
    """Return consecutive '<n>.vhd' paths in directory, starting at 0.

    Numbering must be contiguous; the first missing index ends the chain.
    """
    chain = []
    index = 0
    while file_exist(os.path.join(directory, '%d.vhd' % index)):
        chain.append(os.path.join(directory, '%d.vhd' % index))
        index += 1
    return chain
def replace_xenserver_image_with_coalesced_vhd(image_file):
    """Replace a XenServer tgz image in place with its coalesced single VHD.

    The archive is unpacked, its 0.vhd..N.vhd chain is re-linked and
    coalesced, and the resulting single VHD overwrites image_file.
    """
    with temporary_dir() as tempdir:
        extract_targz(image_file, tempdir)
        chain = discover_vhd_chain(tempdir)
        fix_vhd_chain(chain)
        coalesced = coalesce_chain(chain)
        fileutils.delete_if_exists(image_file)
        rename_file(coalesced, image_file)
| {
"content_hash": "6dbf62201e7cbe4eccd872a4c3b4a3d7",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 79,
"avg_line_length": 38.5024154589372,
"alnum_prop": 0.5866373902132999,
"repo_name": "nash-x/hws",
"id": "361e36fc1a02bef50a57a2da4daaca92dd946181",
"size": "16714",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/image/image_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "PLpgSQL",
"bytes": "12782"
},
{
"name": "Python",
"bytes": "20443623"
},
{
"name": "Shell",
"bytes": "4643"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import os
import sys
from pathlib import Path
import click
from airflow_breeze.params.build_ci_params import BuildCiParams
from airflow_breeze.params.shell_params import ShellParams
from airflow_breeze.utils.ci_group import ci_group
from airflow_breeze.utils.click_utils import BreezeGroup
from airflow_breeze.utils.common_options import (
option_additional_dev_apt_command,
option_additional_dev_apt_deps,
option_additional_dev_apt_env,
option_additional_extras,
option_additional_pip_install_flags,
option_additional_python_deps,
option_airflow_constraints_mode_ci,
option_airflow_constraints_reference_build,
option_answer,
option_builder,
option_dev_apt_command,
option_dev_apt_deps,
option_docker_cache,
option_dry_run,
option_empty_image,
option_force_build,
option_github_repository,
option_github_token,
option_github_username,
option_image_name,
option_image_tag_for_building,
option_image_tag_for_pulling,
option_image_tag_for_verifying,
option_include_success_outputs,
option_install_providers_from_sources,
option_parallelism,
option_platform_multiple,
option_prepare_buildx_cache,
option_pull,
option_push,
option_python,
option_python_image,
option_python_versions,
option_run_in_parallel,
option_skip_cleanup,
option_tag_as_latest,
option_upgrade_on_failure,
option_upgrade_to_newer_dependencies,
option_verbose,
option_verify,
option_wait_for_image,
)
from airflow_breeze.utils.confirm import STANDARD_TIMEOUT, Answer, user_confirm
from airflow_breeze.utils.console import Output, get_console
from airflow_breeze.utils.docker_command_utils import (
build_cache,
make_sure_builder_configured,
perform_environment_checks,
prepare_docker_build_command,
prepare_docker_build_from_input,
warm_up_docker_builder,
)
from airflow_breeze.utils.image import run_pull_image, run_pull_in_parallel, tag_image_as_latest
from airflow_breeze.utils.mark_image_as_refreshed import mark_image_as_refreshed
from airflow_breeze.utils.md5_build_check import md5sum_check_if_build_is_needed
from airflow_breeze.utils.parallel import DockerBuildxProgressMatcher, check_async_run_results, run_with_pool
from airflow_breeze.utils.path_utils import AIRFLOW_SOURCES_ROOT, BUILD_CACHE_DIR
from airflow_breeze.utils.python_versions import get_python_version_list
from airflow_breeze.utils.registry import login_to_github_docker_registry
from airflow_breeze.utils.run_tests import verify_an_image
from airflow_breeze.utils.run_utils import (
filter_out_none,
fix_group_permissions,
instruct_build_image,
is_repo_rebased,
run_command,
)
@click.group(
    cls=BreezeGroup, name='ci-image', help="Tools that developers can use to manually manage CI images"
)
def ci_image():
    # Empty click group body: the subcommands (build/pull/verify) attach
    # themselves via @ci_image.command() below.
    pass
def check_if_image_building_is_needed(
    ci_image_params: BuildCiParams, output: Output | None, dry_run: bool, verbose: bool
) -> bool:
    """Return True when the build should proceed, False to skip it.

    When pushing or preparing buildx cache, also logs in to the GitHub
    docker registry.
    """
    forced = ci_image_params.force_build or ci_image_params.upgrade_to_newer_dependencies
    if not forced and not should_we_run_the_build(build_ci_params=ci_image_params):
        return False
    if ci_image_params.prepare_buildx_cache or ci_image_params.push:
        login_to_github_docker_registry(
            image_params=ci_image_params, dry_run=dry_run, output=output, verbose=verbose
        )
    return True
def run_build_in_parallel(
    image_params_list: list[BuildCiParams],
    python_version_list: list[str],
    include_success_outputs: bool,
    parallelism: int,
    skip_cleanup: bool,
    dry_run: bool,
    verbose: bool,
) -> None:
    """Build CI images for all requested Python versions in a process pool.

    :param image_params_list: one BuildCiParams per Python version to build
    :param python_version_list: the Python versions (used for the group title)
    :param include_success_outputs: also print outputs of successful builds
    :param parallelism: number of parallel build processes
    :param skip_cleanup: skip cleanup after the parallel run
    :param dry_run: do not execute "write" commands - just print what would happen
    :param verbose: print commands when running
    """
    # Warm up the buildx builder once before fanning out the builds.
    warm_up_docker_builder(image_params_list[0], verbose=verbose, dry_run=dry_run)
    with ci_group(f"Building for {python_version_list}"):
        all_params = [f"CI {image_params.python}" for image_params in image_params_list]
        with run_with_pool(
            parallelism=parallelism, all_params=all_params, progress_matcher=DockerBuildxProgressMatcher()
        ) as (pool, outputs):
            # Each async result builds one image; outputs[index] captures its log.
            results = [
                pool.apply_async(
                    run_build_ci_image,
                    kwds={
                        "ci_image_params": image_params,
                        "verbose": verbose,
                        "dry_run": dry_run,
                        "output": outputs[index],
                    },
                )
                for index, image_params in enumerate(image_params_list)
            ]
            check_async_run_results(
                results=results,
                success="All images built correctly",
                outputs=outputs,
                include_success_outputs=include_success_outputs,
                skip_cleanup=skip_cleanup,
            )
def start_building(params: BuildCiParams, dry_run: bool, verbose: bool):
    # Run the pre-build check (which may also log in to the registry) and make
    # sure the buildx builder is configured.
    # NOTE(review): the boolean returned by check_if_image_building_is_needed
    # is ignored here, so the builder gets configured regardless — confirm
    # this is intentional.
    check_if_image_building_is_needed(params, output=None, dry_run=dry_run, verbose=verbose)
    make_sure_builder_configured(params=params, dry_run=dry_run, verbose=verbose)
@ci_image.command(name='build')
@option_github_repository
@option_verbose
@option_dry_run
@option_answer
@option_python
@option_run_in_parallel
@option_parallelism
@option_skip_cleanup
@option_include_success_outputs
@option_python_versions
@option_upgrade_to_newer_dependencies
@option_upgrade_on_failure
@option_platform_multiple
@option_github_token
@option_github_username
@option_docker_cache
@option_image_tag_for_building
@option_prepare_buildx_cache
@option_push
@option_empty_image
@option_install_providers_from_sources
@option_additional_extras
@option_additional_dev_apt_deps
@option_additional_python_deps
@option_additional_dev_apt_command
@option_additional_dev_apt_env
@option_builder
@option_dev_apt_command
@option_dev_apt_deps
@option_force_build
@option_python_image
@option_airflow_constraints_mode_ci
@option_airflow_constraints_reference_build
@option_tag_as_latest
@option_additional_pip_install_flags
def build(
    verbose: bool,
    dry_run: bool,
    run_in_parallel: bool,
    parallelism: int,
    skip_cleanup: bool,
    include_success_outputs: bool,
    python_versions: str,
    answer: str,
    **kwargs,
):
    """Build CI image. Include building multiple images for all python versions (sequentially)."""
    def run_build(ci_image_params: BuildCiParams) -> None:
        # Build one image; abort the whole command on failure.
        return_code, info = run_build_ci_image(
            ci_image_params=ci_image_params,
            output=None,
            verbose=verbose,
            dry_run=dry_run,
        )
        if return_code != 0:
            get_console().print(f"[error]Error when building image! {info}")
            sys.exit(return_code)
    perform_environment_checks(verbose=verbose)
    # Remaining click options land in **kwargs; drop unset (None) values so
    # BuildCiParams defaults apply.
    parameters_passed = filter_out_none(**kwargs)
    # An explicit `build` command always builds, skipping the "needed?" check.
    parameters_passed['force_build'] = True
    fix_group_permissions(verbose=verbose)
    if run_in_parallel:
        python_version_list = get_python_version_list(python_versions)
        params_list: list[BuildCiParams] = []
        for python in python_version_list:
            params = BuildCiParams(**parameters_passed)
            params.python = python
            params.answer = answer
            params_list.append(params)
        # Warm up / log in once with the first params, then fan out.
        start_building(params=params_list[0], dry_run=dry_run, verbose=verbose)
        run_build_in_parallel(
            image_params_list=params_list,
            python_version_list=python_version_list,
            include_success_outputs=include_success_outputs,
            parallelism=parallelism,
            skip_cleanup=skip_cleanup,
            dry_run=dry_run,
            verbose=verbose,
        )
    else:
        params = BuildCiParams(**parameters_passed)
        start_building(params=params, dry_run=dry_run, verbose=verbose)
        run_build(ci_image_params=params)
@ci_image.command(name='pull')
@option_verbose
@option_dry_run
@option_python
@option_github_repository
@option_run_in_parallel
@option_parallelism
@option_skip_cleanup
@option_include_success_outputs
@option_python_versions
@option_github_token
@option_verify
@option_wait_for_image
@option_image_tag_for_pulling
@option_tag_as_latest
@click.argument('extra_pytest_args', nargs=-1, type=click.UNPROCESSED)
def pull(
    verbose: bool,
    dry_run: bool,
    python: str,
    github_repository: str,
    run_in_parallel: bool,
    python_versions: str,
    github_token: str,
    parallelism: int,
    skip_cleanup: bool,
    include_success_outputs: bool,
    image_tag: str,
    wait_for_image: bool,
    tag_as_latest: bool,
    verify: bool,
    extra_pytest_args: tuple,
):
    """Pull and optionally verify CI images - possibly in parallel for all Python versions."""
    perform_environment_checks(verbose=verbose)
    if run_in_parallel:
        # One set of pull parameters per requested Python version.
        python_version_list = get_python_version_list(python_versions)
        ci_image_params_list = [
            BuildCiParams(
                image_tag=image_tag,
                python=python,
                github_repository=github_repository,
                github_token=github_token,
            )
            for python in python_version_list
        ]
        run_pull_in_parallel(
            dry_run=dry_run,
            parallelism=parallelism,
            skip_cleanup=skip_cleanup,
            include_success_outputs=include_success_outputs,
            image_params_list=ci_image_params_list,
            python_version_list=python_version_list,
            verbose=verbose,
            verify=verify,
            wait_for_image=wait_for_image,
            tag_as_latest=tag_as_latest,
            extra_pytest_args=extra_pytest_args if extra_pytest_args is not None else (),
        )
    else:
        # Single-version pull; fail the command when the pull fails.
        image_params = BuildCiParams(
            image_tag=image_tag, python=python, github_repository=github_repository, github_token=github_token
        )
        return_code, info = run_pull_image(
            image_params=image_params,
            output=None,
            dry_run=dry_run,
            verbose=verbose,
            wait_for_image=wait_for_image,
            tag_as_latest=tag_as_latest,
        )
        if return_code != 0:
            get_console().print(f"[error]There was an error when pulling CI image: {info}[/]")
            sys.exit(return_code)
@ci_image.command(
    name='verify',
    context_settings=dict(
        ignore_unknown_options=True,
        allow_extra_args=True,
    ),
)
@option_verbose
@option_dry_run
@option_python
@option_github_repository
@option_image_tag_for_verifying
@option_image_name
@option_pull
@click.argument('extra_pytest_args', nargs=-1, type=click.UNPROCESSED)
def verify(
    verbose: bool,
    dry_run: bool,
    python: str,
    github_repository: str,
    image_name: str,
    image_tag: str | None,
    pull: bool,
    extra_pytest_args: tuple,
):
    """Verify CI image."""
    perform_environment_checks(verbose=verbose)
    # Derive the image name from python/tag/repo unless given explicitly.
    if image_name is None:
        build_params = BuildCiParams(python=python, image_tag=image_tag, github_repository=github_repository)
        image_name = build_params.airflow_image_name_with_tag
    # Optionally pull the image first so verification runs against a fresh copy.
    if pull:
        command_to_run = ["docker", "pull", image_name]
        run_command(command_to_run, verbose=verbose, dry_run=dry_run, check=True)
    get_console().print(f"[info]Verifying CI image: {image_name}[/]")
    return_code, info = verify_an_image(
        image_name=image_name,
        output=None,
        verbose=verbose,
        dry_run=dry_run,
        image_type='CI',
        slim_image=False,
        extra_pytest_args=extra_pytest_args,
    )
    # Propagate the verification result as the process exit code.
    sys.exit(return_code)
def should_we_run_the_build(build_ci_params: BuildCiParams) -> bool:
    """
    Check if we should run the build based on what files have been modified since last build and answer from
    the user.

    * If build is needed, the user is asked for confirmation
    * If the branch is not rebased it warns the user to rebase (to make sure latest remote cache is useful)
    * Builds Image/Skips/Quits depending on the answer

    :param build_ci_params: parameters for the build
    :return: True when the caller should proceed with the build
    """
    # We import those locally so that click autocomplete works
    from inputimeout import TimeoutOccurred

    if not md5sum_check_if_build_is_needed(md5sum_cache_dir=build_ci_params.md5sum_cache_dir):
        return False
    try:
        answer = user_confirm(
            message="Do you want to build the image (this works best when you have good connection and "
            "can take usually from 20 seconds to few minutes depending how old your image is)?",
            timeout=STANDARD_TIMEOUT,
            default_answer=Answer.NO,
        )
        # NOTE: compare against the Answer enum class (was `answer.YES`, which
        # only resolved via attribute lookup on the enum member) — now
        # consistent with the other comparisons below.
        if answer == Answer.YES:
            if is_repo_rebased(build_ci_params.github_repository, build_ci_params.airflow_branch):
                return True
            else:
                get_console().print(
                    "\n[warning]This might take a lot of time (more than 10 minutes) even if you have "
                    "a good network connection. We think you should attempt to rebase first.[/]\n"
                )
                answer = user_confirm(
                    "But if you really, really want - you can attempt it. Are you really sure?",
                    timeout=STANDARD_TIMEOUT,
                    default_answer=Answer.NO,
                )
                if answer == Answer.YES:
                    return True
                else:
                    get_console().print(
                        f"[info]Please rebase your code to latest {build_ci_params.airflow_branch} "
                        "before continuing.[/]\nCheck this link to find out how "
                        "https://github.com/apache/airflow/blob/main/CONTRIBUTING.rst#id15\n"
                    )
                    get_console().print('[error]Exiting the process[/]\n')
                    sys.exit(1)
        elif answer == Answer.NO:
            instruct_build_image(build_ci_params.python)
            return False
        else:  # users_status == Answer.QUIT:
            get_console().print('\n[warning]Quitting the process[/]\n')
            sys.exit()
    except TimeoutOccurred:
        get_console().print('\nTimeout. Considering your response as No\n')
        instruct_build_image(build_ci_params.python)
        return False
    except Exception as e:
        get_console().print(f'\nTerminating the process on {e}')
        sys.exit(1)
def run_build_ci_image(
    ci_image_params: BuildCiParams,
    verbose: bool,
    dry_run: bool,
    output: Output | None,
) -> tuple[int, str]:
    """
    Builds CI image:

    * fixes group permissions for files (to improve caching when umask is 002)
    * converts all the parameters received via kwargs into BuildCIParams (including cache)
    * prints info about the image to build
    * logs int to docker registry on CI if build cache is being executed
    * removes "tag" for previously build image so that inline cache uses only remote image
    * constructs docker-compose command to run based on parameters passed
    * run the build command
    * update cached information that the build completed and saves checksums of all files
      for quick future check if the build is needed

    :param verbose: print commands when running
    :param dry_run: do not execute "write" commands - just print what would happen
    :param ci_image_params: CI image parameters
    :param output: output redirection
    :return: tuple of (exit code, human-readable build description)
    """
    # Multi-platform images cannot stay local: they must be pushed or cached.
    if (
        ci_image_params.is_multi_platform()
        and not ci_image_params.push
        and not ci_image_params.prepare_buildx_cache
    ):
        get_console(output=output).print(
            "\n[red]You cannot use multi-platform build without using --push flag or "
            "preparing buildx cache![/]\n"
        )
        return 1, "Error: building multi-platform image without --push."
    if verbose or dry_run:
        get_console(output=output).print(
            f"\n[info]Building CI image of airflow from {AIRFLOW_SOURCES_ROOT} "
            f"python version: {ci_image_params.python}[/]\n"
        )
    if ci_image_params.prepare_buildx_cache:
        build_command_result = build_cache(
            image_params=ci_image_params, output=output, dry_run=dry_run, verbose=verbose
        )
    else:
        if ci_image_params.empty_image:
            # "Empty" image: a FROM scratch placeholder built from stdin.
            env = os.environ.copy()
            env['DOCKER_BUILDKIT'] = "1"
            get_console(output=output).print(
                f"\n[info]Building empty CI Image for Python {ci_image_params.python}\n"
            )
            build_command_result = run_command(
                prepare_docker_build_from_input(image_params=ci_image_params),
                input="FROM scratch\n",
                verbose=verbose,
                dry_run=dry_run,
                cwd=AIRFLOW_SOURCES_ROOT,
                text=True,
                env=env,
                output=output,
            )
        else:
            get_console(output=output).print(
                f"\n[info]Building CI Image for Python {ci_image_params.python}\n"
            )
            build_command_result = run_command(
                prepare_docker_build_command(
                    image_params=ci_image_params,
                    verbose=verbose,
                ),
                verbose=verbose,
                dry_run=dry_run,
                cwd=AIRFLOW_SOURCES_ROOT,
                text=True,
                check=False,
                output=output,
            )
            # Single retry with upgraded dependencies when the first attempt
            # failed and the user asked for upgrade-on-failure.
            if (
                build_command_result.returncode != 0
                and ci_image_params.upgrade_on_failure
                and not ci_image_params.upgrade_to_newer_dependencies
            ):
                ci_image_params.upgrade_to_newer_dependencies = True
                get_console().print(
                    "[warning]Attempting to build with upgrade_to_newer_dependencies on failure"
                )
                build_command_result = run_command(
                    prepare_docker_build_command(
                        image_params=ci_image_params,
                        verbose=verbose,
                    ),
                    verbose=verbose,
                    dry_run=dry_run,
                    cwd=AIRFLOW_SOURCES_ROOT,
                    text=True,
                    check=False,
                    output=output,
                )
            # On success: optionally tag as latest and refresh the build-hash
            # cache used by the "is a build needed" check.
            if build_command_result.returncode == 0:
                if ci_image_params.tag_as_latest:
                    build_command_result = tag_image_as_latest(
                        image_params=ci_image_params,
                        output=output,
                        dry_run=dry_run,
                        verbose=verbose,
                    )
                if ci_image_params.preparing_latest_image():
                    if dry_run:
                        get_console(output=output).print(
                            "[info]Not updating build hash because we are in `dry_run` mode.[/]"
                        )
                    else:
                        mark_image_as_refreshed(ci_image_params)
    return build_command_result.returncode, f"Image build: {ci_image_params.python}"
def rebuild_or_pull_ci_image_if_needed(
    command_params: ShellParams | BuildCiParams, dry_run: bool, verbose: bool
) -> None:
    """
    Rebuilds CI image if needed and user confirms it.

    :param command_params: parameters of the command to execute
    :param dry_run: whether it's a dry_run
    :param verbose: should we print verbose messages
    """
    # Marker file written after a successful local build for this branch/python.
    build_ci_image_check_cache = Path(
        BUILD_CACHE_DIR, command_params.airflow_branch, f".built_{command_params.python}"
    )
    ci_image_params = BuildCiParams(
        python=command_params.python,
        upgrade_to_newer_dependencies=False,
        image_tag=command_params.image_tag,
        platform=command_params.platform,
        force_build=command_params.force_build,
    )
    # A specific (non-latest) tag was requested: pull that image, never build.
    if command_params.image_tag is not None and command_params.image_tag != "latest":
        return_code, message = run_pull_image(
            image_params=ci_image_params,
            output=None,
            dry_run=dry_run,
            verbose=verbose,
            wait_for_image=True,
            tag_as_latest=False,
        )
        if return_code != 0:
            get_console().print(f"[error]Pulling image with {command_params.image_tag} failed! {message}[/]")
            sys.exit(return_code)
        return
    if build_ci_image_check_cache.exists():
        if verbose:
            get_console().print(f'[info]{command_params.image_type} image already built locally.[/]')
    else:
        # No local build marker: force a (confirmed) rebuild.
        get_console().print(
            f'[warning]{command_params.image_type} image was never built locally or deleted. '
            'Forcing build.[/]'
        )
        ci_image_params.force_build = True
    if check_if_image_building_is_needed(
        ci_image_params=ci_image_params, output=None, dry_run=dry_run, verbose=verbose
    ):
        run_build_ci_image(ci_image_params=ci_image_params, output=None, verbose=verbose, dry_run=dry_run)
| {
"content_hash": "72dcf3776c71bb38db0673fba6bbe308",
"timestamp": "",
"source": "github",
"line_count": 577,
"max_line_length": 110,
"avg_line_length": 36.61178509532063,
"alnum_prop": 0.6256568047337278,
"repo_name": "cfei18/incubator-airflow",
"id": "6cba47a6ef950742af35d09ce5dd84e5053c51c7",
"size": "21910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/breeze/src/airflow_breeze/commands/ci_image_commands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
from collections import defaultdict
from . import Command
from ..benchmarks import Benchmarks
from ..machine import iter_machine_files
from ..results import iter_results_for_machine, iter_results_for_machine_and_hash
from ..runner import format_benchmark_result
from ..repo import get_repo, NoSuchNameError
from ..util import load_json
from ..console import log, color_print
from ..environment import get_environments
from .. import util
from . import common_args
class Show(Command):
    @classmethod
    def setup_arguments(cls, subparsers):
        """Register the 'show' subcommand and its CLI arguments.

        Returns the created argparse parser, with run_from_args set as the
        handler via set_defaults.
        """
        parser = subparsers.add_parser(
            "show", help="Print recorded data.",
            description="""Print saved benchmark results.""")
        parser.add_argument(
            'commit', nargs='?', default=None,
            help="""The commit to show data for.""")
        parser.add_argument(
            '--details', action='store_true', default=False,
            help="""Show all result details.""")
        parser.add_argument(
            '--durations', action='store_true', default=False,
            help="""Show only run durations.""")
        # Shared option groups: benchmark regex, machine and environment spec.
        common_args.add_bench(parser)
        common_args.add_machine(parser)
        common_args.add_environment(parser)
        parser.set_defaults(func=cls.run_from_args)
        return parser
@classmethod
def run_from_conf_args(cls, conf, args, **kwargs):
return cls.run(
conf=conf, commit=args.commit, bench=args.bench,
machine=args.machine, env_spec=args.env_spec,
details=args.details, durations=args.durations,
**kwargs
)
@classmethod
def run(cls, conf, commit=None, bench=None, machine=None, env_spec=None,
details=False, durations=False):
if env_spec:
env_names = ([env.name for env in get_environments(conf, env_spec, verbose=False)]
+ list(env_spec))
else:
env_names = None
machines = []
for path in iter_machine_files(conf.results_dir):
d = load_json(path)
machines.append(d['machine'])
if len(machines) == 0:
raise util.UserError("No results found")
elif machine is None:
pass
elif machine in machines:
machines = [machine]
else:
raise util.UserError(
"Results for machine '{0} not found".format(machine))
benchmarks = Benchmarks.load(conf, regex=bench)
if commit is None:
result_iter = cls._iter_results(conf, machines, env_names)
if durations:
cls._print_commit_durations(conf, result_iter, benchmarks)
else:
cls._print_commits(conf, result_iter, benchmarks)
else:
result_iter = cls._iter_results(conf, machines, env_names, commit)
if durations:
cls._print_result_durations(conf, commit, result_iter, benchmarks)
else:
cls._print_results(conf, commit, result_iter,
benchmarks, show_details=details)
    @classmethod
    def _iter_results(cls, conf, machines, env_names, commit_hash=None):
        """
        Iterate over results for given machines/environments.

        Parameters
        ----------
        conf : Config
            Loaded configuration (provides results_dir and the repo).
        machines : list of str
            Machine names to include.
        env_names : list of str or None
            Environment names to include; None means all environments.
        commit_hash : str, optional
            Restrict results to this commit; branch/tag names are resolved
            to hashes through the repo when possible.

        Yields
        ------
        machine : str
            Machine name
        result : asv.result.Results
            Results
        """
        if commit_hash is not None:
            repo = get_repo(conf)
            try:
                commit_hash = repo.get_hash_from_name(commit_hash)
            except NoSuchNameError:
                # Not a known name; treat the given string as a hash as-is.
                pass
        for machine in machines:
            if commit_hash is not None:
                result_iter = iter_results_for_machine_and_hash(
                    conf.results_dir, machine, commit_hash)
            else:
                result_iter = iter_results_for_machine(
                    conf.results_dir, machine)
            for result in result_iter:
                if env_names is not None and result.env_name not in env_names:
                    continue
                yield machine, result
@classmethod
def _print_commits(cls, conf, result_iter, benchmarks):
commits = defaultdict(lambda: {})
for machine, result in result_iter:
if result.get_result_keys(benchmarks):
commits[(machine, result.env_name)][result.commit_hash] = result.date
log.flush()
color_print("Commits with results:")
color_print("")
for machine, env_name in sorted(commits.keys()):
color_print("Machine : {}".format(machine))
color_print("Environment: {}".format(env_name))
color_print("")
cur_commits = commits[(machine, env_name)]
commit_order = list(cur_commits.keys())
commit_order.sort(key=lambda x: cur_commits[x])
for commit in commit_order:
color_print(" {}".format(commit[:conf.hash_length]))
color_print("")
    @classmethod
    def _print_results(cls, conf, commit_hash, result_iter, benchmarks, show_details=False):
        """Print every recorded benchmark result for a single commit."""
        repo = get_repo(conf)
        log.flush()
        color_print("Commit: {}".format(repo.get_decorated_hash(commit_hash,
                                                                conf.hash_length)),
                    "blue")
        color_print("")
        for machine, result in result_iter:
            for name in sorted(result.get_result_keys(benchmarks)):
                cls._print_benchmark(machine, result, benchmarks[name],
                                     show_details=show_details)
    @classmethod
    def _print_benchmark(cls, machine, result, benchmark, show_details=False):
        """Print one benchmark's result, timing info and (optionally) stats."""
        color_print("{} [{}/{}]".format(benchmark['name'],
                                        machine,
                                        result.env_name),
                    'green')
        info, details = format_benchmark_result(result, benchmark)
        color_print(" {}".format(info), 'red')
        if details:
            color_print(" " + details.replace("\n", "\n "))
        # Start timestamp and duration; either may be absent ("n/a").
        started_at = result.started_at.get(benchmark['name'])
        if started_at is not None:
            started_at = util.js_timestamp_to_datetime(started_at)
            started_at = started_at.strftime('%Y-%m-%d %H:%M:%S')
        else:
            started_at = "n/a"
        duration = result.duration.get(benchmark['name'])
        if duration is not None:
            duration = util.human_time(duration)
        else:
            duration = "n/a"
        if started_at != "n/a" or duration != "n/a":
            color_print(' started: {}, duration: {}'.format(
                started_at, duration))
        if not show_details:
            color_print("")
            return
        stats = result.get_result_stats(benchmark['name'], benchmark['params'])
        def get_stat_info(key):
            # ci_99 is stored as two bounds; return them as a pair.
            if key == 'ci_99':
                return [(x.get('ci_99_a'), x.get('ci_99_b')) if x is not None else None
                        for x in stats]
            return [x.get(key) if x is not None else None for x in stats]
        for key in ['repeat', 'number', 'ci_99', 'mean', 'std', 'min', 'max']:
            values = get_stat_info(key)
            if key == 'ci_99':
                # Render the confidence interval as "(low, high)".
                values = ["({}, {})".format(util.human_value(x[0], benchmark['unit']),
                                            util.human_value(x[1], benchmark['unit']))
                          if x is not None else None
                          for x in values]
            elif any(isinstance(x, float) for x in values):
                values = [util.human_value(x, benchmark['unit']) if x is not None else None
                          for x in values]
            if not all(x is None for x in values):
                color_print(" {}: {}".format(key, ", ".join(map(str, values))))
        samples = result.get_result_samples(benchmark['name'], benchmark['params'])
        if not all(x is None for x in samples):
            color_print(" samples: {}".format(samples))
        color_print("")
@classmethod
def _get_durations(cls, result_iter, benchmarks, commits=False):
    """Collect run durations from the results.

    With ``commits=True`` returns ``{(machine, env): {commit: (date, total)}}``;
    otherwise ``{(machine, env): {key: duration}}`` for each timed step.
    """
    durations = defaultdict(dict)
    for machine, result in result_iter:
        result_keys = list(result.get_result_keys(benchmarks))
        # Besides the benchmarks themselves, the build step and any
        # setup_cache steps also contribute to the total duration.
        tracked = result_keys + ["<build>"]
        for key in result_keys:
            cache_key = benchmarks[key].get('setup_cache_key')
            if cache_key is not None:
                tracked.append("<setup_cache {}>".format(cache_key))
        group = (machine, result.env_name)
        total = None
        for key in tracked:
            duration = result.duration.get(key)
            if duration is None:
                continue
            total = duration if total is None else total + duration
            if not commits:
                durations[group][key] = duration
        if commits:
            # No recorded durations at all -> NaN total for this commit.
            if total is None:
                total = float("nan")
            durations[group][result.commit_hash] = (result.date, total)
    return durations
@classmethod
def _print_commit_durations(cls, conf, result_iter, benchmarks):
    """Print total run duration per commit, grouped by machine/environment."""
    durations = cls._get_durations(result_iter, benchmarks, commits=True)
    log.flush()
    color_print("Run durations:")
    color_print("")
    for machine, env_name in sorted(durations):
        color_print("Machine : {}".format(machine))
        color_print("Environment: {}".format(env_name))
        color_print("")
        per_commit = durations[(machine, env_name)]
        # Values are (date, total_seconds) tuples; sorting on the value
        # therefore orders commits chronologically.
        for commit, (_, seconds) in sorted(per_commit.items(),
                                           key=lambda kv: kv[1]):
            color_print(" {} {}".format(
                commit, util.human_time(seconds)))
        color_print("")
@classmethod
def _print_result_durations(cls, conf, commit_hash, result_iter, benchmarks):
    """Print per-step durations for a single commit, longest first."""
    durations = cls._get_durations(result_iter, benchmarks, commits=False)
    repo = get_repo(conf)
    log.flush()
    color_print("Commit: {}".format(repo.get_decorated_hash(commit_hash,
                                                            conf.hash_length)),
                "blue")
    color_print("")
    for machine, env_name in sorted(durations):
        color_print("Machine : {}".format(machine))
        color_print("Environment: {}".format(env_name))
        color_print("")
        per_key = durations[(machine, env_name)]
        total = 0
        # Longest-running steps first (sorted() is stable, so ties keep order).
        for name in sorted(per_key, key=per_key.get, reverse=True):
            seconds = per_key[name]
            total += seconds
            color_print(" {} {}".format(
                name, util.human_time(seconds)))
        color_print("")
        color_print(" total duration: {}".format(util.human_time(total)))
| {
"content_hash": "10f3241390af367031bedef2b1195e7d",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 105,
"avg_line_length": 36.01880877742947,
"alnum_prop": 0.5380330722367276,
"repo_name": "qwhelan/asv",
"id": "4007e3f39cb917dc2fb1bf9741b71e6d2bfd64df",
"size": "11579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asv/commands/show.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "11960"
},
{
"name": "CSS",
"bytes": "4240"
},
{
"name": "HTML",
"bytes": "8621"
},
{
"name": "JavaScript",
"bytes": "112750"
},
{
"name": "Python",
"bytes": "743235"
}
],
"symlink_target": ""
} |
import schunk
import serial
import argparse
import sys
import os
def arg_options(argv=None):
    ''' init options

    Parameters
    ----------
    argv : list of str, optional
        Argument vector to parse. Defaults to ``sys.argv[1:]`` when None,
        which preserves the original ``arg_options()`` call-site behavior
        while allowing explicit argument lists (e.g. for tests).

    Returns
    -------
    argparse.Namespace
        Parsed options; currently only ``config``.
    '''
    parser = argparse.ArgumentParser(
        description='''
        A simple program which checks if all software
        requirements are fulfilled. ''')
    parser.add_argument(
        '--config',
        dest='config',
        default=None,
        metavar='CONFIGFILENAME',
        help='''
        Create a configfile with the given name and fill it
        with default values.''')
    # parse_args(None) falls back to sys.argv[1:], same as before
    return parser.parse_args(argv)
def main():
    """Run the dependency checks and report the results on stdout."""
    # get arguments
    args = arg_options()
    if args.config:
        try:
            from python_core_components.configcreater import ConfigCreater
            print("create configfile")
            config = ConfigCreater()
            config.create_configfile(args.config)
        except Exception:
            # Was a bare `except:` (also caught KeyboardInterrupt/SystemExit);
            # keep the original best-effort message rather than crashing.
            print("\n You need to install setup-tools first !")
            # NOTE(review): exit() placement reconstructed as inside the
            # error branch — confirm --config is not meant to always exit.
            exit()
    print(" Start check \n")
    print(" Your version of python should be > version 2.7 ")
    print(" Your version : " + sys.version)
    # Probe importability of configparser (proxy for setup-tools).
    try:
        import configparser
        test = True
    except ImportError:  # was a bare except; only ImportError is expected
        test = False
    print("\n Check if setup-tools are installed : " + str(test))
    print(
        '\n Check if gphoto2 is installed [@ /usr/bin/gphoto2 ] : ' +
        str(os.path.isfile('/usr/bin/gphoto2'))
    )
    print(
        '\n Check if hugin is installed [@ /usr/bin/hugin ]: ' +
        str(os.path.isfile('/usr/bin/hugin'))
    )
    print(
        '\n Check if enblend is installed [@ /usr/bin/enblend ]: ' +
        str(os.path.isfile('/usr/bin/enblend'))
    )
    try:
        import schunk
        test = True
    except ImportError:  # was a bare except; only ImportError is expected
        test = False
    print("\n Check if the schunk-modul is installed : " + str(test))
    # other checks will follow
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| {
"content_hash": "226c48af9d904e6177694382b3d100e2",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 74,
"avg_line_length": 22.783132530120483,
"alnum_prop": 0.562665256478054,
"repo_name": "spatialaudio/panorama",
"id": "f1e2325cd18608e7c5dd34020ebe64fca33cbca2",
"size": "1891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/checkDependcies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30669"
}
],
"symlink_target": ""
} |
"""
Utility methods for working with WSGI servers redux
"""
import sys
import netaddr
from oslo import i18n
import six
import webob.dec
import webob.exc
from neutron.common import exceptions
from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging
from neutron.openstack.common import policy as common_policy
from neutron import wsgi
LOG = logging.getLogger(__name__)
class Request(wsgi.Request):
    """Neutron API request; currently identical to the base wsgi.Request."""
    pass
def Resource(controller, faults=None, deserializers=None, serializers=None):
    """Represents an API entity resource and the associated serialization and
    deserialization logic
    """
    default_deserializers = {'application/json': wsgi.JSONDeserializer()}
    default_serializers = {'application/json': wsgi.JSONDictSerializer()}
    format_types = {'json': 'application/json'}
    # Non-default HTTP status codes per action; everything else returns 200.
    action_status = dict(create=201, delete=204)
    default_deserializers.update(deserializers or {})
    default_serializers.update(serializers or {})
    deserializers = default_deserializers
    serializers = default_serializers
    faults = faults or {}

    @webob.dec.wsgify(RequestClass=Request)
    def resource(request):
        route_args = request.environ.get('wsgiorg.routing_args')
        if route_args:
            args = route_args[1].copy()
        else:
            args = {}
        # NOTE(jkoelker) by now the controller is already found, remove
        # it from the args if it is in the matchdict
        args.pop('controller', None)
        fmt = args.pop('format', None)
        action = args.pop('action', None)
        content_type = format_types.get(fmt,
                                        request.best_match_content_type())
        language = request.best_match_language()
        deserializer = deserializers.get(content_type)
        serializer = serializers.get(content_type)
        try:
            if request.body:
                args['body'] = deserializer.deserialize(request.body)['body']
            method = getattr(controller, action)
            result = method(request=request, **args)
        except (exceptions.NeutronException,
                netaddr.AddrFormatError,
                common_policy.PolicyNotAuthorized) as e:
            # Map known exception types to the configured fault class,
            # defaulting to HTTP 500 for anything unmapped.
            for fault in faults:
                if isinstance(e, fault):
                    mapped_exc = faults[fault]
                    break
            else:
                mapped_exc = webob.exc.HTTPInternalServerError
            # Client errors (4xx) are logged at info level, server errors
            # with a full traceback.
            if 400 <= mapped_exc.code < 500:
                LOG.info(_LI('%(action)s failed (client error): %(exc)s'),
                         {'action': action, 'exc': e})
            else:
                LOG.exception(_LE('%s failed'), action)
            e = translate(e, language)
            body = serializer.serialize(
                {'NeutronError': get_exception_data(e)})
            kwargs = {'body': body, 'content_type': content_type}
            raise mapped_exc(**kwargs)
        except webob.exc.HTTPException as e:
            # Re-raise the original HTTP exception with a serialized body,
            # preserving the original traceback.
            type_, value, tb = sys.exc_info()
            LOG.exception(_LE('%s failed'), action)
            translate(e, language)
            value.body = serializer.serialize(
                {'NeutronError': get_exception_data(e)})
            value.content_type = content_type
            six.reraise(type_, value, tb)
        except NotImplementedError as e:
            e = translate(e, language)
            # NOTE(armando-migliaccio): from a client standpoint
            # it makes sense to receive these errors, because
            # extensions may or may not be implemented by
            # the underlying plugin. So if something goes south,
            # because a plugin does not implement a feature,
            # returning 500 is definitely confusing.
            body = serializer.serialize(
                {'NotImplementedError': get_exception_data(e)})
            kwargs = {'body': body, 'content_type': content_type}
            raise webob.exc.HTTPNotImplemented(**kwargs)
        except Exception:
            # NOTE(jkoelker) Everything else is 500
            LOG.exception(_LE('%s failed'), action)
            # Do not expose details of 500 error to clients.
            msg = _('Request Failed: internal server error while '
                    'processing your request.')
            msg = translate(msg, language)
            body = serializer.serialize(
                {'NeutronError': get_exception_data(
                    webob.exc.HTTPInternalServerError(msg))})
            kwargs = {'body': body, 'content_type': content_type}
            raise webob.exc.HTTPInternalServerError(**kwargs)
        status = action_status.get(action, 200)
        body = serializer.serialize(result)
        # NOTE(jkoelker) Comply with RFC2616 section 9.7
        if status == 204:
            content_type = ''
            body = None
        return webob.Response(request=request, status=status,
                              content_type=content_type,
                              body=body)
    return resource
def get_exception_data(e):
    """Build the structured error payload for an exception.

    Neutron client for the v2 API expects exceptions to have 'type',
    'message' and 'detail' attributes; this packs them into a dict.

    :param e: the exception to be reraised
    :returns: a structured dict with the exception data
    """
    return {
        'type': type(e).__name__,
        'message': e,
        'detail': '',
    }
def translate(translatable, locale):
    """Translate the object to the given locale.

    Exceptions have their translatable attribute localized in place and are
    returned; a plain translatable string is localized and returned;
    anything else is returned as-is by the final localize call.

    :param translatable: the object to be translated
    :param locale: the locale to translate to
    :returns: the translated object, or the object as-is if it
              was not translated
    """
    localize = i18n.translate
    if isinstance(translatable, exceptions.NeutronException):
        translatable.msg = localize(translatable.msg, locale)
        return translatable
    if isinstance(translatable, webob.exc.HTTPError):
        translatable.detail = localize(translatable.detail, locale)
        return translatable
    if isinstance(translatable, Exception):
        translatable.message = localize(translatable.message, locale)
        return translatable
    return localize(translatable, locale)
| {
"content_hash": "d0c104beac254c7a644d2e9c81e11405",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 78,
"avg_line_length": 38.10588235294118,
"alnum_prop": 0.6167026860142019,
"repo_name": "leeseulstack/openstack",
"id": "ab5618ab856821a775093de3d27eacfd91729a40",
"size": "7115",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "neutron/api/v2/resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60527"
},
{
"name": "Makefile",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "8816599"
},
{
"name": "Shell",
"bytes": "11768"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
"""
https://leetcode.com/problems/maximum-difference-between-node-and-ancestor/
https://leetcode.com/submissions/detail/226688002/
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from common.list_to_tree_node import listToTreeNode
from common.tree_node import TreeNode
class Solution:
    def maxAncestorDiff(self, root: TreeNode) -> int:
        """Return the maximum |ancestor.val - node.val| over all
        ancestor/descendant pairs.

        BFS carrying, for each node, the min and max value seen on its
        root path; the answer is the largest (max - min) encountered.
        Uses a deque because list.pop(0) is O(n) per pop, which made the
        original traversal O(n^2) overall; now O(n) time, O(n) space.
        """
        from collections import deque  # local import; module deps unchanged

        best = 0
        # Each entry: (node, min value on path so far, max value on path so far).
        queue = deque([(root, root.val, root.val)])
        while queue:
            node, lo, hi = queue.popleft()
            lo = min(lo, node.val)
            hi = max(hi, node.val)
            best = max(best, hi - lo)
            if node.left:
                queue.append((node.left, lo, hi))
            if node.right:
                queue.append((node.right, lo, hi))
        return best
import unittest
class Test(unittest.TestCase):
    """Spot-checks for Solution.maxAncestorDiff against the LeetCode examples."""

    def test(self):
        sol = Solution()
        tree = listToTreeNode(
            [8, 3, 10, 1, 6, None, 14, None, None, 4, 7, 13]
        )
        self.assertEqual(sol.maxAncestorDiff(tree), 7)
        tree = listToTreeNode([2, None, 0, 1])
        self.assertEqual(sol.maxAncestorDiff(tree), 2)
# Run the tests when executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "0de09775a6ec17634bdeb344b0f437b3",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 75,
"avg_line_length": 25.75,
"alnum_prop": 0.549663928304705,
"repo_name": "vivaxy/algorithms",
"id": "bfb3d9c5510e45906c9580089566df87d4eeea29",
"size": "1339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/problems/maximum_difference_between_node_and_ancestor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "130225"
},
{
"name": "Python",
"bytes": "272982"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
import csv
import datetime
import random
import time as Time
# Truncate any previous data file before starting to log.
open('data.csv', 'w').close()
# Endlessly append one fake reading per port every 5 seconds.
while True:
    # `with` guarantees the file is closed (and data flushed) each batch,
    # even if a write fails; the original opened/closed it manually and
    # bound the useless return of close() to an unused variable.
    with open('data.csv', 'a', newline='') as csvfile:
        csvwriter = csv.writer(csvfile, dialect='excel')
        for portNum in range(1, 9):
            date = datetime.datetime.now().date().isoformat()
            time = datetime.datetime.now().time().isoformat()
            current = random.random() * 2  # fake current draw in [0, 2)
            csvwriter.writerow([portNum, current, time, date])
        print("Wrote a sequence\n")
    Time.sleep(5)
| {
"content_hash": "7bfa4d2c2bd574e86678dcfcc6f67da4",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 57,
"avg_line_length": 28.36842105263158,
"alnum_prop": 0.6178107606679035,
"repo_name": "jlmart88/mobilechargingmetrics",
"id": "93481e6eb8ca35c4e34d351b894e0d06432b837f",
"size": "539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CSVGenerator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "26487"
},
{
"name": "C",
"bytes": "2015"
},
{
"name": "C++",
"bytes": "17515"
},
{
"name": "CSS",
"bytes": "1738"
},
{
"name": "JavaScript",
"bytes": "361060"
},
{
"name": "Python",
"bytes": "539"
},
{
"name": "Shell",
"bytes": "680"
}
],
"symlink_target": ""
} |
from .color import Color
from .number import Number
from .length import Length
# Maps supported SVG presentation-attribute names to the parser class used
# to convert their raw string values. Commented-out entries are attributes
# that are recognized in SVG but not handled here yet.
_converters = {
    "fill": Color,
    "fill-opacity": Number,
    "stroke": Color,
    "stroke-opacity": Number,
    "opacity": Number,
    "stroke-width": Length,
    # "stroke-miterlimit": Number,
    # "stroke-dasharray": Lengths,
    # "stroke-dashoffset": Length,
}
class Style(object):
    """Parsed subset of an SVG ``style`` attribute.

    Each property in ``_converters`` is exposed as an attribute named after
    the property with ``-`` replaced by ``_`` (None until set by update()).
    """

    def __init__(self):
        # No style text parsed yet; _xml() returns "" while this is True.
        self._unset = True
        for name in _converters:
            setattr(self, name.replace("-", "_"), None)

    def update(self, content):
        """Parse ``content`` (e.g. ``"fill:red;opacity:0.5"``) into attributes."""
        if not content:
            return
        self._unset = False
        declarations = dict(
            item.strip().split(":")
            for item in content.strip().split(";") if item
        )
        for name, raw in declarations.items():
            if name in _converters:
                setattr(self, name.replace("-", "_"), _converters[name](raw))

    @property
    def xml(self):
        return self._xml()

    def _xml(self, prefix=""):
        if self._unset:
            return ""
        out = 'style="'
        for name in _converters:
            value = getattr(self, name.replace("-", "_"))
            if value is not None:
                # NOTE(review): properties end up space-separated; CSS
                # declarations are normally ';'-separated — confirm the
                # consumer accepts this format before changing it.
                out += '%s:%s ' % (name, value)
        out += '"'
        return out
| {
"content_hash": "28c542d43f179d96b0f91f4ea96886de",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 78,
"avg_line_length": 27.03846153846154,
"alnum_prop": 0.49431009957325744,
"repo_name": "duyuan11/glumpy",
"id": "360e34dd1d9d7a6d39e9cf5e3a4d22c1718ee0fc",
"size": "1705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glumpy/graphics/svg/style.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "26075"
},
{
"name": "GLSL",
"bytes": "165997"
},
{
"name": "Makefile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "1201174"
}
],
"symlink_target": ""
} |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLX import _types as _cs
# End users want this...
from OpenGL.raw.GLX._types import *
from OpenGL.raw.GLX import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLX_NV_multisample_coverage'
def _f( function ):
    """Wrap *function* as a GLX_NV_multisample_coverage extension entry point
    with the standard GLX error checker attached."""
    return _p.createFunction( function,_p.PLATFORM.GLX,'GLX_NV_multisample_coverage',error_checker=_errors._error_checker)
# Token constants defined by the GLX_NV_multisample_coverage extension.
GLX_COLOR_SAMPLES_NV=_C('GLX_COLOR_SAMPLES_NV',0x20B3)
GLX_COVERAGE_SAMPLES_NV=_C('GLX_COVERAGE_SAMPLES_NV',100001)
| {
"content_hash": "95be881e36be4462439c5a16adee5e56",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 122,
"avg_line_length": 39.375,
"alnum_prop": 0.7571428571428571,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "b5a6b565abfc7162d8e7b247450c5b96082ddba8",
"size": "630",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/raw/GLX/NV/multisample_coverage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
import wasp as ws
class Nest(object):
    """Tracks Wasp instances, keyed by their torrent info hash."""

    def __init__(self):
        # info_hash -> Wasp
        self.colony = {}

    def hatch(self, meta_data):
        """DESC: generate a new wasp"""
        wasp = ws.Wasp(meta_data)
        self.assimilate(wasp)
        wasp.get_peers()

    def assimilate(self, hatchling):
        # TODO: check if torrent already exists in colony
        if hatchling.info_hash in self.colony:
            print("ALERT: torrent file already exists")
        # Existing entries are overwritten (last one wins).
        self.colony[hatchling.info_hash] = hatchling

    def destroy(self, wasp):
        # TODO: Check if was exists, destroy it and
        # all saved and associated data
        print("destroy")

    def swarm(self):
        # TODO: Activate the swarm. Spin off threads for each wasp
        # in colony to seed or leech
        print("swarm")
| {
"content_hash": "c4aadbdb76bb82bdae9ffb587abfb2aa",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 66,
"avg_line_length": 28.103448275862068,
"alnum_prop": 0.6061349693251534,
"repo_name": "j-crowe/wasp",
"id": "638b6a58f1b9c7f1e37ef5ebea41d2680b85e13d",
"size": "815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wasp/nest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9690"
}
],
"symlink_target": ""
} |
"""
Testing clustering algorithms in Clusterpy
** All the following tests take considerable time to complete **
"""
from unittest import TestCase, skip
from nose.plugins.attrib import attr
import clusterpy
from numpy.random import seed as make_static_random
from clusterpy.core.toolboxes.cluster.componentsAlg import AreaManager
from clusterpy.core.toolboxes.cluster.componentsAlg import RegionMaker
# Test fixture configuration: sample map name, number of target regions,
# and a fixed RNG seed so clustering runs are reproducible.
map_type = 'n100'
into_regions = 10
_seed = 10
sample_input_path = "clusterpy/data_examples/" + map_type
sample_output_path = "tests/sample_output/" + map_type
def _final_regions_are_contiguous_in_instance(instance):
    """Return True when the last experiment's regions satisfy contiguity."""
    last_experiment = instance.fieldNames[-1]
    region_assignment = instance.outputCluster[last_experiment]['r2a']
    manager = AreaManager(instance.Wrook, instance.Y)
    return manager.checkFeasibility(region_assignment)
def _final_objfunction_from_instance(instance):
    """Return the objective-function value of the most recent experiment."""
    last_experiment = instance.fieldNames[-1]
    return instance.outputCluster[last_experiment]['objectiveFunction']
class TestArisel(TestCase):
    """Contiguity and objective-function checks for the arisel algorithm."""

    def setUp(self):
        self.map_instance = clusterpy.importArcData(sample_input_path)

    def tearDown(self):
        pass

    @attr('slow')
    def test_arisel_never_breaks_contiguity(self):
        """
        Tests that the output regions never break the contiguity constraint.
        """
        instance = self.map_instance
        instance.cluster('arisel', ['SAR1'], into_regions,
                         dissolve=1, inits=20)
        self.assertTrue(_final_regions_are_contiguous_in_instance(instance))

    @attr('slow')
    def test_arisel_gives_at_least_same_obj_func(self):
        """
        Tests that the objective function is at least the same, but not worse.
        """
        instance = self.map_instance
        make_static_random(_seed)
        known_obj_func = float(90.1868744781)  # obtained using seed == _seed
        instance.cluster('arisel', ['SAR1'], into_regions,
                         dissolve=1, inits=20)
        self.assertTrue(known_obj_func >= _final_objfunction_from_instance(instance))
class TestMaxPTabu(TestCase):
    """Contiguity, objective-function and threshold checks for maxpTabu."""

    def setUp(self):
        self.map_instance = clusterpy.importArcData(sample_input_path)

    def tearDown(self):
        pass

    @attr('slow')
    def test_maxpt_never_breaks_contiguity(self):
        instance = self.map_instance
        instance.cluster('maxpTabu', ['SAR1', 'Uniform2'],
                         threshold=130, dissolve=1)
        self.assertTrue(_final_regions_are_contiguous_in_instance(instance))

    @attr('slow')
    def test_maxpt_gives_at_least_same_obj_func(self):
        instance = self.map_instance
        make_static_random(_seed)
        known_obj_func = float(140)  # obtained using seed == _seed
        instance.cluster('maxpTabu', ['SAR1', 'Uniform2'],
                         threshold=130, dissolve=1)
        self.assertTrue(known_obj_func >= _final_objfunction_from_instance(instance))

    @attr('slow')
    def test_maxpt_min_num_areas_in_region_threshold(self):
        from collections import Counter as cnt
        instance = self.map_instance
        instance.dataOperation("CONSTANTS = 1")
        # Every region must contain at least `threshold` areas.
        for threshold in [5, 8, 13, 21, 34]:
            instance.cluster('maxpTabu', ['CONSTANTS'], threshold=threshold)
            sizes = cnt(instance.region2areas).values()
            self.assertTrue(all(size >= threshold for size in sizes))
class TestAZPalgorithms(TestCase):
    """ Tests for AZP, AZPrTabu, AZPSA """
    # Each algorithm gets two checks: a full clustering run must keep the
    # output regions contiguous, and one improvement pass on a RegionMaker
    # must never worsen the objective function.

    def setUp(self):
        self.map_instance = clusterpy.importArcData(sample_input_path)

    def tearDown(self):
        pass

    @attr('slow')
    def test_azp_never_breaks_contiguity(self):
        instance = self.map_instance
        instance.cluster('azp',
                         ['SAR1'],
                         into_regions,
                         dissolve=1)
        feasible = _final_regions_are_contiguous_in_instance(instance)
        self.assertTrue(feasible)

    @attr('slow')
    def test_azp_gives_at_least_same_obj_func(self):
        instance = self.map_instance
        aream = AreaManager(instance.Wrook, instance.Y)
        rm = RegionMaker(aream, into_regions)
        ob_before = rm.objInfo
        rm.AZPImproving()
        ob_after = rm.objInfo
        self.assertTrue(ob_before >= ob_after)

    @attr('slow', 'azpsa')
    def test_azpsa_never_breaks_contiguity(self):
        instance = self.map_instance
        instance.cluster('azpSa',
                         ['SAR1'],
                         into_regions,
                         dissolve=1)
        feasible = _final_regions_are_contiguous_in_instance(instance)
        self.assertTrue(feasible)

    @attr('slow', 'azpsa')
    def test_azpsa_gives_at_least_same_obj_func(self):
        instance = self.map_instance
        aream = AreaManager(instance.Wrook, instance.Y)
        rm = RegionMaker(aream, into_regions)
        # Simulated-annealing parameters: cooling factor and iteration cap.
        alpha = 0.85
        maxiterations = 3
        ob_before = rm.objInfo
        rm.AZPSA(alpha, maxiterations)
        ob_after = rm.objInfo
        self.assertTrue(ob_before >= ob_after)

    @attr('slow')
    def test_azptabu_never_breaks_contiguity(self):
        instance = self.map_instance
        instance.cluster('azpTabu',
                         ['SAR1'],
                         into_regions,
                         dissolve=1)
        feasible = _final_regions_are_contiguous_in_instance(instance)
        self.assertTrue(feasible)

    @attr('slow')
    def test_azptabu_gives_at_least_same_obj_func(self):
        instance = self.map_instance
        aream = AreaManager(instance.Wrook, instance.Y)
        rm = RegionMaker(aream, into_regions)
        # Tabu parameters scale with problem size.
        convTabu = max(10, len(instance.Y) / into_regions)
        tabuLength = 10
        ob_before = rm.objInfo
        rm.AZPTabuMove(tabuLength=tabuLength, convTabu=convTabu)
        ob_after = rm.objInfo
        self.assertTrue(ob_before >= ob_after)

    @attr('slow')
    def test_azprtabu_never_breaks_contiguity(self):
        instance = self.map_instance
        instance.cluster('azpRTabu',
                         ['SAR1'],
                         into_regions,
                         dissolve=1)
        feasible = _final_regions_are_contiguous_in_instance(instance)
        self.assertTrue(feasible)

    @attr('slow')
    def test_azprtabu_gives_at_least_same_obj_func(self):
        instance = self.map_instance
        aream = AreaManager(instance.Wrook, instance.Y)
        rm = RegionMaker(aream, into_regions)
        convTabu = len(instance.Y)/into_regions
        ob_before = rm.objInfo
        rm.reactiveTabuMove(convTabu)
        ob_after = rm.objInfo
        self.assertTrue(ob_before >= ob_after)
| {
"content_hash": "c51756d6c0f4264fc70415d88ab9981d",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 78,
"avg_line_length": 30.270042194092827,
"alnum_prop": 0.6034290493448564,
"repo_name": "clusterpy/clusterpy",
"id": "1b5ae7b3b167575db03dbf1ce987a5bdf0cb2e84",
"size": "7174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_clustering.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "490774"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
} |
"""
SMC common functions
"""
import numpy as np
from scipy.linalg import cholesky
def _initial_population(draws, priors):
    """Draw the starting SMC population: one row per sample, one column per prior."""
    samples_per_prior = [prior.rvs(draws) for prior in priors]
    return np.array(samples_per_prior).T
def _metrop_kernel(
    q_old,
    old_tempered_logp,
    proposal,
    scaling,
    accepted,
    n_steps,
    prior_logp,
    likelihood_logp,
    beta,
):
    """
    Metropolis kernel: take *n_steps* scaled proposal steps from *q_old*,
    accepting or rejecting each one against the tempered posterior.
    Returns the final sample and the updated acceptance count.
    """
    # Pre-draw all proposal displacements with a single RNG call.
    deltas = proposal(n_steps) * scaling
    for delta in deltas:
        q_new = q_old + delta
        new_tempered_logp = prior_logp(q_new) + likelihood_logp(q_new) * beta
        q_old, accept = _metrop_select(
            new_tempered_logp - old_tempered_logp, q_new, q_old
        )
        if accept:
            accepted += 1
            old_tempered_logp = new_tempered_logp
    return q_old, accepted
def _metrop_select(mr, q, q0):
    """Perform rejection/acceptance step for Metropolis class samplers.

    Accepts the proposal when log(U) < mr for U ~ Uniform(0, 1) and mr is
    finite; no random number is drawn when mr is not finite.

    Parameters
    ----------
    mr : float, Metropolis acceptance rate
    q : proposed sample
    q0 : current sample

    Returns
    -------
    (sample, accepted) — the chosen sample plus a boolean flag
    """
    # Short-circuit keeps RNG usage identical: uniform() only when mr is finite.
    if np.isfinite(mr) and np.log(np.random.uniform()) < mr:
        return q, True
    return q0, False
def _calc_beta(beta, likelihoods, threshold=0.5):
    """
    Calculate next inverse temperature (beta) and importance weights based on
    current beta and tempered likelihood.

    Parameters
    ----------
    beta : float
        tempering parameter of current stage
    likelihoods : numpy array
        likelihoods computed from the model
    threshold : float
        Determines the change of beta from stage to stage, i.e.indirectly the
        number of stages, the higher the value of threshold the higher the
        number of stage. Defaults to 0.5. It should be between 0 and 1.

    Returns
    -------
    new_beta : float
        tempering parameter of the next stage
    old_beta : float
        tempering parameter of the current stage
    weights : numpy array
        Importance weights (floats)
    sj : float
        Partial marginal likelihood
    """
    low_beta = old_beta = beta
    up_beta = 2.0
    # Target effective sample size: a fixed fraction of the population.
    rN = int(len(likelihoods) * threshold)
    # Bisect on beta until the ESS of the importance weights matches rN.
    # NOTE(review): assumes beta < ~2 on entry so the loop runs at least
    # once; otherwise new_beta would be unbound — confirm callers' range.
    while up_beta - low_beta > 1e-6:
        new_beta = (low_beta + up_beta) / 2.0
        # Log-weights are shifted by the max likelihood for numerical stability.
        weights_un = np.exp((new_beta - old_beta) * (likelihoods - likelihoods.max()))
        weights = weights_un / np.sum(weights_un)
        ESS = int(1 / np.sum(weights ** 2))
        if ESS == rN:
            break
        elif ESS < rN:
            up_beta = new_beta
        else:
            low_beta = new_beta
    # Clamp: the final stage samples the untempered posterior.
    if new_beta >= 1:
        new_beta = 1
    sj = np.exp((new_beta - old_beta) * likelihoods)
    # Recompute the weights for the (possibly clamped) new_beta.
    weights_un = np.exp((new_beta - old_beta) * (likelihoods - likelihoods.max()))
    weights = weights_un / np.sum(weights_un)
    return new_beta, old_beta, weights, np.mean(sj)
def _calc_covariance(posterior, weights):
    """
    Weighted covariance matrix of the trace, regularized with a small
    diagonal jitter; raises if the result contains NaN/inf entries.
    """
    cov = np.atleast_2d(
        np.cov(posterior, aweights=weights.ravel(), bias=False, rowvar=0)
    )
    cov = cov + 1e-6 * np.eye(cov.shape[0])
    if np.isnan(cov).any() or np.isinf(cov).any():
        raise ValueError('Sample covariances not valid! Likely "draws" is too small!')
    return cov
class _MultivariateNormalProposal:
    """Draw N(0, s) proposals via the lower Cholesky factor of *s*."""

    def __init__(self, s):
        rows, cols = s.shape
        if rows != cols:
            raise ValueError("Covariance matrix is not symmetric.")
        self.n = rows
        self.chol = cholesky(s, lower=True)

    def __call__(self, num_draws=None):
        # With num_draws: return an array of shape (num_draws, n);
        # without: a single n-vector.
        if num_draws is None:
            return np.dot(self.chol, np.random.randn(self.n))
        return np.dot(self.chol, np.random.randn(self.n, num_draws)).T
def _tune(
    acc_rate,
    proposed,
    tune_scaling,
    tune_steps,
    scaling,
    n_steps,
    max_steps,
    p_acc_rate,
):
    """
    Tune the proposal scaling and/or the number of MCMC steps based on the
    observed acceptance rate.

    Parameters
    ----------
    acc_rate : float
        Acceptance rate of the previous stage.
    proposed : int
        Total number of proposed steps (draws * n_steps).

    Returns
    -------
    (scaling, n_steps), each updated when its tuning flag is set.
    """
    if tune_scaling:
        # a and b after Muto & Beck 2008.
        a = 1 / 9
        b = 8 / 9
        scaling = (a + b * acc_rate) ** 2
    if tune_steps:
        # Floor the rate so the log below stays finite.
        rate = max(1.0 / proposed, acc_rate)
        target = int(np.log(1 - p_acc_rate) / np.log(1 - rate))
        n_steps = min(max_steps, max(2, target))
    return scaling, n_steps
def _cpu_count():
    """Try to guess the number of CPUs in the system.

    We use the number provided by psutil if that is installed.
    If not, we use the number provided by multiprocessing, but assume
    that half of the cpus are only hardware threads and ignore those.
    """
    try:
        import psutil

        cpus = psutil.cpu_count(False)
    except ImportError:
        # BUG FIX: `multiprocessing` was referenced without ever being
        # imported anywhere in this module, so this fallback raised
        # NameError whenever psutil was missing; import it locally.
        try:
            import multiprocessing

            cpus = multiprocessing.cpu_count() // 2
        except NotImplementedError:
            cpus = 1
    if cpus is None:
        cpus = 1
    return cpus
| {
"content_hash": "e912e56aa5debfcac828faffa7d54377",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 86,
"avg_line_length": 26.57843137254902,
"alnum_prop": 0.5942456658059757,
"repo_name": "bomeba/bomeba0",
"id": "9cf57284f55a8bd7134b588faee2304bf5b81d10",
"size": "5422",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bomeba0/sampling/smc_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "114009"
},
{
"name": "Python",
"bytes": "69584"
}
],
"symlink_target": ""
} |
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import NoReverseMatch
class MockObject(object):
    """Attribute bag for tests; str() lists the kwargs sorted by name."""

    def __init__(self, **kwargs):
        self._kwargs = kwargs
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __str__(self):
        described = ', '.join(
            '%s=%s' % pair for pair in sorted(self._kwargs.items())
        )
        return '<MockObject %s>' % described
class MockQueryset(object):
    """Minimal queryset stand-in supporting get(**lookup) over a list."""

    def __init__(self, iterable):
        self.items = iterable

    def get(self, **lookup):
        def matches(candidate):
            # Missing attributes compare as None, mirroring a failed lookup.
            return all(
                getattr(candidate, field, None) == expected
                for field, expected in lookup.items()
            )

        for candidate in self.items:
            if matches(candidate):
                return candidate
        raise ObjectDoesNotExist()
class BadType(object):
    """
    When used as a lookup with a `MockQueryset`, these objects
    will raise a `TypeError`, as occurs in Django when making
    queryset lookups with an incorrect type for the lookup value.
    """
    def __eq__(self, other):
        # BUG FIX: the original signature `__eq__(self)` omitted `other`,
        # so comparisons raised TypeError only through the arity mismatch
        # and this raise statement was unreachable. Accept the comparand
        # and raise explicitly, which is the documented intent.
        raise TypeError()
def mock_reverse(view_name, args=None, kwargs=None, request=None, format=None):
    """Fake URL reversal: '/<view>/<first arg, kwarg value, or "-">[.fmt]/'.

    A request adds the 'http://example.org' prefix; a format adds a suffix.
    """
    positional = list(args) if args else []
    named = dict(kwargs) if kwargs else {}
    # First positional arg wins, then first kwarg value, then a dash.
    value = (positional + list(named.values()) + ['-'])[0]
    prefix = 'http://example.org' if request else ''
    suffix = '.' + format if format is not None else ''
    return '%s/%s/%s%s/' % (prefix, view_name, value, suffix)
def fail_reverse(view_name, args=None, kwargs=None, request=None, format=None):
    """A ``reverse`` stand-in that always fails to resolve."""
    raise NoReverseMatch()
| {
"content_hash": "6ef9317676c19cd623fb9960af58310a",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 29.90566037735849,
"alnum_prop": 0.5886435331230284,
"repo_name": "werthen/django-rest-framework",
"id": "5b2d75864a1ddfa31495f38545b6d3a12ac7936f",
"size": "1585",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "10024"
},
{
"name": "HTML",
"bytes": "60448"
},
{
"name": "JavaScript",
"bytes": "6793"
},
{
"name": "Python",
"bytes": "945572"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from werkzeug import cached_property
from ua_parser import user_agent_parser
from flask import request
from coaster.utils import buid as make_buid
from coaster.sqlalchemy import make_timestamp_columns
from . import db, BaseMixin
from .user import User
from ..signals import session_revoked
__all__ = ['UserSession']
# Association table linking user sessions to the clients (apps) that have
# used them; make_timestamp_columns() contributes created_at/updated_at.
session_client = db.Table(
    'session_client', db.Model.metadata,
    *(make_timestamp_columns() + (
        db.Column('user_session_id', None, db.ForeignKey('user_session.id'), nullable=False, primary_key=True),
        db.Column('client_id', None, db.ForeignKey('client.id'), nullable=False, primary_key=True))),
    info={'bind_key': 'lastuser'}
)
class UserSession(BaseMixin, db.Model):
    """A user's login session, one row per browser/device login."""
    __tablename__ = 'user_session'
    __bind_key__ = 'lastuser'
    # Random public identifier for the session (exposed in cookies)
    buid = db.Column(db.Unicode(22), nullable=False, unique=True, default=make_buid)
    sessionid = db.synonym('buid')  # legacy alias
    user_id = db.Column(None, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, backref=db.backref('sessions', cascade='all, delete-orphan', lazy='dynamic'))
    ipaddr = db.Column(db.String(45), nullable=False)  # 45 chars fits IPv6
    user_agent = db.Column(db.Unicode(250), nullable=False)
    accessed_at = db.Column(db.DateTime, nullable=False)
    revoked_at = db.Column(db.DateTime, nullable=True)
    # BUG FIX: pass the *callable*, not its result. `datetime.utcnow()` was
    # evaluated once at import time, freezing the default timestamp for the
    # life of the process; `datetime.utcnow` is evaluated per inserted row.
    sudo_enabled_at = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    def __init__(self, **kwargs):
        super(UserSession, self).__init__(**kwargs)
        if not self.buid:
            self.buid = make_buid()
    def access(self, client=None):
        """
        Mark a session as currently active.
        :param client: For API calls from clients, save the client instead of IP address and User-Agent
        """
        # `accessed_at` will be different from the automatic `updated_at` in one
        # crucial context: when the session was revoked remotely. `accessed_at` won't
        # be updated at that time.
        self.accessed_at = datetime.utcnow()
        with db.session.no_autoflush:
            if client:
                if client not in self.clients:  # self.clients is defined via Client.sessions
                    self.clients.append(client)
                else:
                    # If we've seen this client in this session before, only update the timestamp
                    db.session.execute(session_client.update().where(
                        session_client.c.user_session_id == self.id).where(
                        session_client.c.client_id == client.id).values(
                        updated_at=datetime.utcnow()))
            else:
                self.ipaddr = request.remote_addr or u''
                self.user_agent = unicode(request.user_agent.string[:250]) or u''
    @cached_property
    def ua(self):
        """Parsed user-agent structure (cached per instance)."""
        return user_agent_parser.Parse(self.user_agent)
    @property
    def has_sudo(self):
        """True if sudo was enabled within the last hour."""
        return self.sudo_enabled_at > datetime.utcnow() - timedelta(hours=1)
    def set_sudo(self):
        """Enable sudo mode as of now."""
        self.sudo_enabled_at = datetime.utcnow()
    def revoke(self):
        """Revoke this session (idempotent) and notify listeners."""
        if not self.revoked_at:
            self.revoked_at = datetime.utcnow()
            session_revoked.send(self)
    @classmethod
    def get(cls, buid):
        """Return the session with this buid, or None."""
        return cls.query.filter_by(buid=buid).one_or_none()
    @classmethod
    def authenticate(cls, buid):
        """Return a valid (unexpired, unrevoked) session for this buid, or None."""
        return cls.query.filter(
            # Session key must match.
            cls.buid == buid,
            # Sessions are valid for one year...
            cls.accessed_at > datetime.utcnow() - timedelta(days=365),
            # ...unless explicitly revoked (or user logged out)
            cls.revoked_at == None).one_or_none()  # NOQA
# Patch a retriever into the User class. This could be placed in the
# UserSession.user relationship's backref with a custom primaryjoin
# clause and explicit foreign_keys, but we're not sure if we can
# put the datetime.utcnow() in there too.
def active_sessions(self):
    """Return this user's unrevoked sessions accessed in the last 14 days."""
    return self.sessions.filter(
        UserSession.accessed_at > datetime.utcnow() - timedelta(days=14),
        UserSession.revoked_at == None).all()  # NOQA
User.active_sessions = active_sessions
| {
"content_hash": "be87e111d910b104fbf7a28f3192cdcf",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 111,
"avg_line_length": 38.55045871559633,
"alnum_prop": 0.6373155640171347,
"repo_name": "sindhus/lastuser",
"id": "023353046c9704ebc0b986a43e15276d25a6c7ad",
"size": "4227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lastuser_core/models/session.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3623"
},
{
"name": "HTML",
"bytes": "35810"
},
{
"name": "JavaScript",
"bytes": "145"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "349287"
},
{
"name": "Ruby",
"bytes": "404"
},
{
"name": "Shell",
"bytes": "40"
}
],
"symlink_target": ""
} |
from robot.model import SuiteVisitor
class ModelModifier(SuiteVisitor):
    """Test visitor that mutates the suite model according to configured tags.

    Used by Robot Framework's own acceptance tests to exercise the
    --prerunmodifier / --prerebotmodifier hooks.
    """
    def __init__(self, *tags, **extra):
        # Keyword options are folded into the tag list as 'key-value' strings.
        if extra:
            tags += tuple('%s-%s' % item for item in extra.items())
        self.config = tags or ('visited',)
    def start_suite(self, suite):
        # The first config token selects the modification to apply.
        config = self.config
        if config[0] == 'FAIL':
            raise RuntimeError(' '.join(self.config[1:]))
        elif config[0] == 'CREATE':
            # Remaining tokens are 'name-value' attribute pairs for a new test.
            suite.tests.create(**dict(conf.split('-', 1) for conf in config[1:]))
            self.config = []
        elif config == ('REMOVE', 'ALL', 'TESTS'):
            suite.tests = []
        else:
            # Default behavior: drop tests tagged 'fail'.
            suite.tests = [t for t in suite.tests if not t.tags.match('fail')]
    def start_test(self, test):
        # Tag every visited test with the configured tags.
        test.tags.add(self.config)
    def start_for(self, for_):
        if for_.parent.name == 'FOR IN RANGE loop in test':
            for_.flavor = 'IN'
            for_.values = ['FOR', 'is', 'modified!']
    def start_for_iteration(self, iteration):
        # Mutate every loop variable and inject a new one.
        for name, value in iteration.variables.items():
            iteration.variables[name] = value + ' (modified)'
        iteration.variables['${x}'] = 'new'
    def start_if_branch(self, branch):
        if branch.condition == "'IF' == 'WRONG'":
            branch.condition = 'True'
            # With Robot
            if not hasattr(branch, 'status'):
                branch.body[0].config(name='Log', args=['going here!'])
            # With Rebot
            elif branch.status == 'NOT RUN':
                branch.status = 'PASS'
                branch.condition = 'modified'
                branch.body[0].args = ['got here!']
        if branch.condition == '${i} == 9':
            branch.condition = 'False'
| {
"content_hash": "4db1fc18bd5ec3701c24b2543aa477c8",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 81,
"avg_line_length": 36.083333333333336,
"alnum_prop": 0.5254041570438799,
"repo_name": "HelioGuilherme66/robotframework",
"id": "23f8fb5f9c450c7a118d4e7ad9787769f351de55",
"size": "1732",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "atest/robot/cli/model_modifiers/ModelModifier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44706"
},
{
"name": "HTML",
"bytes": "86409"
},
{
"name": "JavaScript",
"bytes": "162950"
},
{
"name": "Python",
"bytes": "2671114"
},
{
"name": "RobotFramework",
"bytes": "1231105"
}
],
"symlink_target": ""
} |
"""
Filesystem backup script
"""
import datetime
import logging
import optparse
import os
import socket
import sys
import stat
import yaml
from Branckenstein.storage import StorageManager
logger = logging.getLogger('Branckenstein')
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter(fmt="%(levelname)s: %(message)s"))
logger.addHandler(handler)
def print_table_data(table, header_rows=0):
    """Print some data, as a table"""
    # NOTE: Python 2 only (basestring, long, itervalues, print statement).
    # First pass: widest cell per column.
    col_lengths = {}
    for row in table:
        for cellid, cell in enumerate(row):
            if not isinstance(cell, basestring):
                cell = str(cell)
            col_lengths[cellid] = max(len(cell), col_lengths.get(cellid, 0))
    # Total width = sum of column widths + one separating space per gap.
    max_length = sum(col_lengths.itervalues()) + (len(col_lengths) - 1)
    for rowid, row in enumerate(table):
        new_row = []
        for cellid, cell in enumerate(row):
            # Strings left-aligned, numbers right-aligned, others left-aligned.
            if isinstance(cell, basestring):
                _cell = cell.ljust(col_lengths[cellid])
            elif isinstance(cell, (int, long, float)):
                _cell = str(cell).rjust(col_lengths[cellid])
            else:
                _cell = str(cell).ljust(col_lengths[cellid])
            new_row.append(_cell)
        print " ".join(new_row)
        # Separator line after the last header row.
        if header_rows > 0 and rowid == (header_rows-1):
            print '-' * max_length
# Defaults applied before the config file and CLI overrides are merged in.
DEFAULT_CONF = {
    'repository': None,
    'client_id': socket.gethostname(),
}
# strftime format for snapshot timestamps ('%F %T' = YYYY-MM-DD HH:MM:SS).
DATE_FORMAT = '%F %T'
# Map stat() file-type constants to their one-letter `ls -l` type codes.
IFMT_DESC = {
    stat.S_IFDIR: "d",
    stat.S_IFCHR: "c",
    stat.S_IFBLK: "b",
    stat.S_IFREG: "-",
    stat.S_IFIFO: "f",
    stat.S_IFLNK: "l",
    stat.S_IFSOCK: "s",
}
class FileColorsDB(object):
    """Resolve ANSI color codes for files, GNU ``LS_COLORS`` style."""
    defalt_ls_colors = \
        'rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:' \
        'bd=40;33;01:cd=40;33;01:or=40;31;01:su=37;41:sg=30;43:ca=30;41:' \
        'tw=30;42:ow=34;42:st=37;44:ex=01;32:'
    def __init__(self, ls_colors=None):
        # Fall back to the environment, then to the built-in default string.
        if ls_colors is None:
            ls_colors = os.environ.get('LS_COLORS') \
                or self.defalt_ls_colors
        self.ls_colors = ls_colors
        self._colors_db = None
    @property
    def colors_db(self):
        """Lazily-parsed mapping of LS_COLORS keys to color codes."""
        if self._colors_db is None:
            parsed = {}
            for entry in self.ls_colors.split(':'):
                entry = entry.strip()
                if not entry:
                    continue
                key, val = entry.split('=', 1)
                parsed[key] = val
            self._colors_db = parsed
        return self._colors_db
    def get_file_color(self, file_name, file_mode):
        """Return the color code for a file, given its name and stat mode."""
        # Special file types (dir, symlink, fifo, socket, block, char dev)
        # take precedence over permission bits and extensions.
        ifmt_keys = {
            stat.S_IFDIR: 'di',
            stat.S_IFLNK: 'ln',
            stat.S_IFIFO: 'pi',
            stat.S_IFSOCK: 'so',
            stat.S_IFBLK: 'bd',
            stat.S_IFCHR: 'cd',
        }
        key = ifmt_keys.get(stat.S_IFMT(file_mode))
        if key is not None:
            return self.colors_db.get(key, '0')
        # Then setuid, setgid, executable -- in that order, like ls.
        for mode_bit, mode_key in ((stat.S_ISUID, 'su'),
                                   (stat.S_ISGID, 'sg'),
                                   (stat.S_IXUSR, 'ex')):
            if file_mode & mode_bit:
                return self.colors_db.get(mode_key, '0')
        # Finally, look the extension up ('*.ext' keys).
        extension = '*' + os.path.splitext(file_name)[1]
        return self.colors_db.get(extension, '0')
class FilesystemBackupTool(object):
    """Create, list and restore filesystem snapshots via a StorageManager."""
    def __init__(self, client_id, conf):
        self.client_id = client_id
        self.conf = conf
        if not self.conf.get('repository'):
            raise ValueError("You must specify a repository location")
        self.storage = StorageManager(self.conf['repository'])
    def create_snapshot(self, source):
        """Walk *source* recursively, store blobs + metadata, return snapshot name."""
        source = os.path.abspath(source)
        start_date = datetime.datetime.now()
        snapshot_name = '{0}_{1:%s}'.format(self.client_id, start_date)
        snapshot_meta = {
            'client_id': self.client_id,
            'date': start_date.strftime(DATE_FORMAT),
            'hostname': socket.gethostname(),
            'root_dir': source,
        }
        snapshot_files = self._get_file_info(source)
        snapshot_meta['end_date'] = \
            datetime.datetime.now().strftime(DATE_FORMAT)
        self.storage.store_table(snapshot_name, snapshot_meta, snapshot_files)
        return snapshot_name
    def _get_file_info(self, filepath):
        """Return a metadata dict for one path; recurses into directories."""
        print "Processing file: {0}".format(filepath)
        filename = os.path.basename(filepath)
        # lstat: don't follow symlinks -- record the link itself.
        file_stat = os.lstat(filepath)
        st_ifmt = stat.S_IFMT(file_stat.st_mode)
        file_info = {
            'type': IFMT_DESC[st_ifmt],
            'name': filename,
            'st_ifmt': st_ifmt,
            'st_mode': file_stat.st_mode,
            'st_ino': file_stat.st_ino,
            'st_dev': file_stat.st_dev,
            'st_nlink': file_stat.st_nlink,
            'st_uid': file_stat.st_uid,
            'st_gid': file_stat.st_gid,
            'st_size': file_stat.st_size,
            'st_atime': file_stat.st_atime,
            'st_ctime': file_stat.st_ctime,
            'st_mtime': file_stat.st_mtime,
        }
        # Regular files: store contents as a blob. Directories: recurse.
        if st_ifmt == stat.S_IFREG:
            with open(filepath, 'rb') as f:
                blob_hash = self.storage.store_blob(f)
            file_info['blob_hash'] = blob_hash
        elif st_ifmt == stat.S_IFDIR:
            file_info['children'] = list(self._get_dir_children(filepath))
        return file_info
    def _get_dir_children(self, dirname):
        # Generator yielding metadata dicts for each entry of *dirname*.
        for filename in os.listdir(dirname):
            filepath = os.path.join(dirname, filename)
            yield self._get_file_info(filepath)
    def list_snapshots(self):
        """Return the ids of the snapshots stored for this client."""
        return self.storage.list_tables(client_id=self.client_id)
    def get_file_info(self, snapshot_id, path=None):
        """Return stored metadata for *path* inside a snapshot (root if None)."""
        ## todo: read the table, list stuff, ...
        table_data = self.storage.get_table_data(snapshot_id)
        if path is None:
            return table_data
        path = filter(None, path.split("/"))
        def find_path(cur, trail):
            # Descend the stored tree following the remaining path components.
            if len(trail) == 0:
                ## We're there
                return cur
            if cur['type'] != 'd':
                raise ValueError("No such file")
            for sub in cur['children']:
                if sub['name'] == trail[0]:
                    return find_path(sub, trail[1:])
            raise ValueError("No such file")
        selected = find_path(table_data, trail=path)
        return selected
    def restore_file(self, snapshot, filename, destination):
        # TODO: not implemented yet
        pass
    def restore_full(self, destination):
        # TODO: not implemented yet
        pass
COMMANDS_HELP = """\
help
Show this help message and exit
backup <directory>
Create a snapshot for the selected directory
ls [ <snapshot-id> [<path>] ]
If no snapshot-id is provided, prints a list of snapshots.
If a snapshot-id is provided, a list of files in the specified
path (or in the backup root) will be printed, in ``ls -l`` format.
restore <snapshot-id> <file> <destination>
Restore a file from the selected snapshot to a destination
"""
def main():
parser = optparse.OptionParser()
parser.add_option(
'--config', dest='config_file', help="Configuration file name")
grp_cfg = optparse.OptionGroup(parser, 'Configuration overrides')
grp_cfg.add_option('--repo', dest='cfg_repository')
grp_cfg.add_option('--client-id', dest='cfg_client_id')
parser.add_option_group(grp_cfg)
opts, args = parser.parse_args()
try:
command = args[0]
except IndexError:
print "You must specify a command!"
command = 'help'
if command not in ('help', 'backup', 'list', 'ls', 'restore'):
print "No such command: {0}".format(command)
command = 'help'
if command == 'help':
print COMMANDS_HELP
return
configuration = DEFAULT_CONF.copy()
if opts.config_file:
configuration.update(yaml.load(opts.config_file))
for key in ('repository', 'client_id'):
value = getattr(opts, 'cfg_{0}'.format(key))
if value:
configuration[key] = value
bcktools = FilesystemBackupTool(socket.gethostname(), configuration)
##==========================================================================
## Command: backup
##==========================================================================
if command == 'backup':
if not len(args) == 2:
raise TypeError("Usage: backup <directory>")
backup_root = args[1]
snapshot_name = bcktools.create_snapshot(backup_root)
print "----"
print "Snapshot id: {0}".format(snapshot_name)
##==========================================================================
## Command: ls
##==========================================================================
elif command == 'ls':
snapshot_id = None
path = None
try:
snapshot_id = args[1]
path = args[2]
except IndexError:
pass
##----------------------------------------------------------------------
## List snapshots
##----------------------------------------------------------------------
if snapshot_id is None:
## List all snapshots
table = [('Backup id', 'Client id', 'Date', 'Root directory')]
for backup_id in sorted(bcktools.list_snapshots()):
backup_index = bcktools.storage.get_table_index(backup_id)
table.append((
backup_id,
backup_index['client_id'],
backup_index['date'],
backup_index['root_dir'],
))
print_table_data(table, header_rows=1)
return
##----------------------------------------------------------------------
## List information on a file/directory
##----------------------------------------------------------------------
file_colors = FileColorsDB()
def get_file_row(record):
st_mode = record['st_mode']
perms_string = ''.join((
IFMT_DESC[record['st_ifmt']],
'r' if st_mode & stat.S_IRUSR else '-',
'w' if st_mode & stat.S_IWUSR else '-',
'x' if st_mode & stat.S_IXUSR else '-',
'r' if st_mode & stat.S_IRGRP else '-',
'w' if st_mode & stat.S_IWGRP else '-',
'x' if st_mode & stat.S_IXGRP else '-',
'r' if st_mode & stat.S_IROTH else '-',
'w' if st_mode & stat.S_IWOTH else '-',
'x' if st_mode & stat.S_IXOTH else '-',
))
file_date = datetime.datetime.fromtimestamp(record['st_mtime'])
file_color = file_colors.get_file_color(
record['name'], record['st_mode'])
file_name = "\033[{fc}m{name}\033[{rs}m".format(
name=record['name'],
fc=file_color,
rs=file_colors.colors_db.get('rs', '0'),
)
return (
perms_string,
record['st_nlink'],
record['st_uid'],
record['st_gid'],
record['st_size'],
file_date.strftime('%F %H:%M'),
# record['name'],
file_name,
)
selected = bcktools.get_file_info(snapshot_id, path)
st_ifmt = selected['st_ifmt']
if st_ifmt == stat.S_IFDIR:
print "total {0}".format(len(selected['children']))
sorting = lambda x: x['name'].lower()
table_data = []
for child in sorted(selected['children'], key=sorting):
table_data.append(get_file_row(child))
print_table_data(table_data)
else:
print_table_data(selected)
##==========================================================================
## Command: restore
##==========================================================================
elif command == 'restore':
if not len(args) == 4:
raise TypeError("Usage: restore <snapshot-id> <file> <destination>")
restore_snapshot = args[1]
restore_file = args[2]
restore_destination = args[3]
bcktools.restore_file(
restore_snapshot, restore_file, restore_destination)
else:
raise ValueError("Something bad happened")
if __name__ == '__main__':
main()
| {
"content_hash": "69a6487019a562f203ab889eee4a64e6",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 80,
"avg_line_length": 32.74747474747475,
"alnum_prop": 0.5046267735965454,
"repo_name": "rshk/branckenstein",
"id": "2e450f1031039f8083e59a659b823138b87feaf1",
"size": "13582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/backupfs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "140143"
},
{
"name": "Shell",
"bytes": "1771"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
    """Deserialize a hex-encoded raw transaction into a CTransaction."""
    stream = BytesIO(hex_str_to_bytes(hexstring))
    tx = CTransaction()
    tx.deserialize(stream)
    return tx
def check_array_result(object_array, to_match, expected):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.

    Every object matching *to_match* must also carry every *expected*
    pair; raises AssertionError on a mismatch or if nothing matched.
    """
    num_matched = 0
    for entry in object_array:
        # Test every to_match key (not short-circuited, like the original,
        # so a missing key always raises KeyError).
        matched = True
        for key, value in to_match.items():
            if entry[key] != value:
                matched = False
        if not matched:
            continue
        for key, value in expected.items():
            if entry[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(entry), str(key), str(value)))
        num_matched += 1
    if num_matched == 0:
        raise AssertionError("No objects matched %s" % (str(to_match)))
class ListTransactionsTest(BitcoinTestFramework):
    """Functional test for the listtransactions / gettransaction RPCs,
    including BIP125 opt-in replace-by-fee signaling."""
    def setup_nodes(self):
        #This test requires mocktime
        enable_mocktime()
        return start_nodes(4, self.options.tmpdir)
    def run_test(self):
        """Check send/receive/sendmany/watch-only accounting, then RBF flags."""
        # Simple send, 0 to 1:
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
        check_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
        # mine a block, confirmations should change:
        self.nodes[0].generate(1)
        self.sync_all()
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
        check_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
        # send-to-self:
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"send"},
                           {"amount":Decimal("-0.2")})
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"receive"},
                           {"amount":Decimal("0.2")})
        # sendmany from node1: twice to self, twice to node2:
        send_to = { self.nodes[0].getnewaddress() : 0.11,
                    self.nodes[1].getnewaddress() : 0.22,
                    self.nodes[0].getaccountaddress("from1") : 0.33,
                    self.nodes[1].getaccountaddress("toself") : 0.44 }
        txid = self.nodes[1].sendmany("", send_to)
        self.sync_all()
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.11")},
                           {"txid":txid} )
        check_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.11")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.22")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.22")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.33")},
                           {"txid":txid} )
        check_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.33")},
                           {"txid":txid, "account" : "from1"} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.44")},
                           {"txid":txid, "account" : ""} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.44")},
                           {"txid":txid, "account" : "toself"} )
        # Watch-only address: its transactions appear only when the
        # includeWatchonly flag is passed to listtransactions.
        multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
        self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
        txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
        self.nodes[1].generate(1)
        self.sync_all()
        assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
        check_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
                           {"category":"receive","amount":Decimal("0.1")},
                           {"txid":txid, "account" : "watchonly"} )
        self.run_rbf_opt_in_test()
    # Check that the opt-in-rbf flag works properly, for sent and received
    # transactions.
    def run_rbf_opt_in_test(self):
        """Verify the bip125-replaceable field for a chain of transactions."""
        # Check whether a transaction signals opt-in RBF itself
        def is_opt_in(node, txid):
            # BIP125: any input sequence below 0xfffffffe signals replaceability.
            rawtx = node.getrawtransaction(txid, 1)
            for x in rawtx["vin"]:
                if x["sequence"] < 0xfffffffe:
                    return True
            return False
        # Find an unconfirmed output matching a certain txid
        def get_unconfirmed_utxo_entry(node, txid_to_match):
            utxo = node.listunspent(0, 0)
            for i in utxo:
                if i["txid"] == txid_to_match:
                    return i
            return None
        # 1. Chain a few transactions that don't opt-in.
        txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        assert(not is_opt_in(self.nodes[0], txid_1))
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
        # Tx2 will build off txid_1, still not opting in to RBF.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
        # Create tx2 using createrawtransaction
        inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.999}
        tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
        txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
        # ...and check the result
        assert(not is_opt_in(self.nodes[1], txid_2))
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
        # Tx3 will opt-in to RBF
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
        inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[1].getnewaddress(): 0.998}
        tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
        tx3_modified = txFromHex(tx3)
        tx3_modified.vin[0].nSequence = 0
        tx3 = bytes_to_hex_str(tx3_modified.serialize())
        tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
        txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
        assert(is_opt_in(self.nodes[0], txid_3))
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
        # Tx4 will chain off tx3.  Doesn't signal itself, but depends on one
        # that does.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
        inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.997}
        tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
        txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
        assert(not is_opt_in(self.nodes[1], txid_4))
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
        # Replace tx3, and check that tx4 becomes unknown
        tx3_b = tx3_modified
        tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
        tx3_b = bytes_to_hex_str(tx3_b.serialize())
        tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
        txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
        assert(is_opt_in(self.nodes[0], txid_3b))
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
        # Check gettransaction as well:
        for n in self.nodes[0:2]:
            assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
        # After mining a transaction, it's no longer BIP125-replaceable
        self.nodes[0].generate(1)
        assert(txid_3b not in self.nodes[0].getrawmempool())
        assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
        assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
# Run the test directly when invoked as a script.
if __name__ == '__main__':
    ListTransactionsTest().main()
| {
"content_hash": "aea0a6fb11f52c659bef89620aca3eae",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 112,
"avg_line_length": 49.425233644859816,
"alnum_prop": 0.5733194667675144,
"repo_name": "jimmykiselak/lbrycrd",
"id": "0783a1f3d37179566e69eba81b43ee06374d7beb",
"size": "10829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/listtransactions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "692473"
},
{
"name": "C++",
"bytes": "4490033"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "3792"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "156814"
},
{
"name": "Makefile",
"bytes": "101253"
},
{
"name": "Objective-C",
"bytes": "2162"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "717633"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Shell",
"bytes": "31540"
}
],
"symlink_target": ""
} |
import colander
from deform import (
Form,
ValidationFailure,
widget,
)
@colander.deferred
def deferred_status(node, kw):
    """Select widget populated from the binding's 'daftar_status' choices."""
    choices = kw.get('daftar_status', [])
    return widget.SelectWidget(values=choices)
# Fallback (value, label) choices for the status dropdown.
STATUS = (
    (1, 'Active'),
    (0, 'Inactive'),
)
@colander.deferred
def deferred_periode(node, kw):
    """Select widget populated from the binding's 'daftar_periode' choices."""
    choices = kw.get('daftar_periode', [])
    return widget.SelectWidget(values=choices)
# Fallback (value, label) choices for the period dropdown.
PERIODE = (
    (1, 'Tahunan'),
    (0, 'Bulanan'),
)
@colander.deferred
def deferred_bayar(node, kw):
    """Select widget populated from the binding's 'daftar_bayar' choices."""
    choices = kw.get('daftar_bayar', [])
    return widget.SelectWidget(values=choices)
# Fallback (value, label) choices for the payment-method dropdown.
BAYAR = (
    (1, 'Kartu Kredit'),
    (0, 'Transfer'),
)
@colander.deferred
def deferred_propinsi(node, kw):
    """Select widget populated from the binding's 'daftar_propinsi' choices."""
    choices = kw.get('daftar_propinsi', [])
    return widget.SelectWidget(values=choices)
@colander.deferred
def deferred_dati2(node, kw):
    """Select widget populated from the binding's 'daftar_dati2' choices."""
    choices = kw.get('daftar_dati2', [])
    return widget.SelectWidget(values=choices)
| {
"content_hash": "788b869ac8fe2c435a08b088998fefc0",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 45,
"avg_line_length": 22.25531914893617,
"alnum_prop": 0.5927342256214149,
"repo_name": "aagusti/i-pbb",
"id": "642f119a9ed785d1915ff3eeee26a10a0f851712",
"size": "1046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipbb/views/tools.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "103882"
},
{
"name": "HTML",
"bytes": "1278135"
},
{
"name": "JavaScript",
"bytes": "808922"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "116825"
},
{
"name": "Shell",
"bytes": "39"
}
],
"symlink_target": ""
} |
'''
Created on 21 Sep 2015
@author: mawa
'''
import os
# Directory containing the per-run stat files to aggregate.
INPUT_DIR = '/home/mawa/Downloads/stats/hd'
# Destination for the merged tab-separated output.
OUTPUT_FILE = '/home/mawa/Downloads/stats_hd.csv'
# Accumulator: stat_name -> {file_name: stat_value}
TOTAL_STATS = {}
def get_files(path):
    """Return the names of the entries in *path*, sorted alphabetically."""
    entries = os.listdir(path)
    return sorted(entries)
def add_stat(stat_name, stat_value, file_name):
    """Record *stat_value* for *stat_name* under *file_name* in TOTAL_STATS."""
    # FIX: dict.has_key() was removed in Python 3; setdefault works on both
    # Python 2 and 3 and avoids the double lookup.
    bag = TOTAL_STATS.setdefault(stat_name, {})
    bag[file_name] = stat_value
def parse_file(file_path):
    """Parse alternating name/value lines from *file_path* via add_stat().

    The file format is pairs of lines: a stat name followed by its value.
    """
    # FIX: use a context manager so the file is closed even if add_stat()
    # raises; `not stat_name` covers the EOF case (readline returns '').
    with open(file_path, 'r') as file_desc:
        while True:
            stat_name = file_desc.readline()
            stat_value = file_desc.readline()
            if not stat_name:
                break
            add_stat(stat_name.strip(), stat_value.strip(),
                     os.path.basename(file_path))
def export_stats():
    """Write TOTAL_STATS to OUTPUT_FILE: stat name, then tab-separated rows."""
    # FIX: the original did `keys = d.keys(); keys.sort()`, which fails on
    # Python 3 where dict.keys() returns a view without .sort(); sorted()
    # works identically on both versions. Also use a context manager so the
    # file is flushed/closed even on error.
    with open(OUTPUT_FILE, 'w') as fwrite:
        for stat_name in sorted(TOTAL_STATS):
            fwrite.write(stat_name)
            fwrite.write('\n')
            stat_values = TOTAL_STATS[stat_name]
            for file_name in sorted(stat_values):
                fwrite.write(file_name)
                fwrite.write('\t')
                fwrite.write(stat_values[file_name])
                fwrite.write('\n')
if __name__ == '__main__':
    # Aggregate every stats file found in INPUT_DIR, then export the table.
    files = get_files(INPUT_DIR)
    for file_item in files:
        parse_file(os.path.join(INPUT_DIR, file_item))
    export_stats()
"content_hash": "1851a5efa98350f2f772cccbdda75637",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 84,
"avg_line_length": 24.491803278688526,
"alnum_prop": 0.5890227576974565,
"repo_name": "mwach/transcoderwebapi",
"id": "14210119f2c8f8ce6110579a42f59fdf7534e729",
"size": "1494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/stats_to_xsl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "25441"
},
{
"name": "Python",
"bytes": "9220"
},
{
"name": "Shell",
"bytes": "248"
}
],
"symlink_target": ""
} |
from apps.data.users.models import User
from apps.data.songs.models import Song
from apps.data.userPlaySong.models import UserPlaySong
from apps.similarities.Cosine.benchmark.models import BenchCosine_SongTitle
from apps.recommenders.UserAverage.algorithm.models import UserAverage_Life
from django.db.models import Sum
from apps.CONSTANTS import SET_SIZE_LIST, START_VALIDE_RUN, TOTAL_RUN, INTERVAL
def system_statistical():
    """Print total listen counts, and the averages per user and per song."""
    total_users = User.objects.count()
    total_songs = Song.objects.count()
    # aggregate() returns {'play_count__sum': <value>}
    total_plays = UserPlaySong.objects.aggregate(Sum('play_count'))['play_count__sum']
    print('Heard : ' + str(total_plays))
    print('+ Heard/User : ' + str(total_plays/total_users))
    print('+ Heard/Song : ' + str(total_plays/total_songs))
def cosine_overview():
    """Print mean similarity and mean latency per set size for the
    BenchCosine_SongTitle benchmarks."""
    print('Similarities')
    meanSimilarities = {}
    for size in SET_SIZE_LIST:
        # Mean similarity over the valid-run window [START_VALIDE_RUN:TOTAL_RUN].
        meanSimilarities[size] = BenchCosine_SongTitle.objects.filter(setSize=size)[START_VALIDE_RUN:TOTAL_RUN].aggregate(total=Sum('similarity'))['total']/INTERVAL
        print(str(size) + ': ' + str(meanSimilarities[size]))
    print('Time Latency')
    allBenchmarks = {}
    for runner in SET_SIZE_LIST:
        allBenchmarks.setdefault(runner, [])
        # Collect wall-clock durations (seconds) for each benchmark run.
        for benchmark in BenchCosine_SongTitle.objects.filter(
            setSize=runner
        )[START_VALIDE_RUN:TOTAL_RUN]:
            allBenchmarks[runner].append(
                (
                    benchmark.finished_at - benchmark.started_at
                ).total_seconds()
            )
        print(str(runner) + ': ' + str(sum(allBenchmarks[runner])/INTERVAL))
def userAverage_overview():
    """Print, per configured set size, the mean similarity (over the latest
    INTERVAL rows) and mean runtime of the UserAverage recommender."""
    print('Similarities')
    for set_size in SET_SIZE_LIST:
        # Latest INTERVAL rows by id, averaged by INTERVAL.
        latest = UserAverage_Life.objects.filter(
            setSize=set_size
        ).order_by('-id')[:INTERVAL]
        mean_similarity = latest.aggregate(total=Sum('similarity'))['total'] / INTERVAL
        print(str(set_size) + ': ' + str(mean_similarity))
    print('Time Latency')
    for set_size in SET_SIZE_LIST:
        durations = [
            (
                life.benchuseraverage.finished_at -
                life.benchuseraverage.started_at
            ).total_seconds()
            for life in UserAverage_Life.objects.filter(setSize=set_size)
        ]
        # Only the last INTERVAL runs contribute to the reported mean.
        print(str(set_size) + ': ' + str(sum(durations[-INTERVAL:]) / INTERVAL))
| {
"content_hash": "024a6f3a3963ecce256b7b4fbf97ffc0",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 164,
"avg_line_length": 41.813559322033896,
"alnum_prop": 0.652614511552493,
"repo_name": "DiegoCorrea/ouvidoMusical",
"id": "a73b93902aa83bba257d4f2b8faff77716e7b7b6",
"size": "2467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/system_overview.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182332"
},
{
"name": "Shell",
"bytes": "51486"
}
],
"symlink_target": ""
} |
from circuits.web import JSONRPC as JSONRPCDispatcher
from ..plugin import BasePlugin
class JSONRPC(BasePlugin):
    """JSONRPC Plugin
    This plugin provides no user commands. This plugin gives
    JSON-RPC support to the system allowing other systems to
    interact with the system and other loaded plugins.
    The "notify" plugin is one such plugin that uses this
    to allow remote machines to send notification messages
    to a configured channel.
    Depends on: web
    """
    __version__ = "0.1"
    __author__ = "James Mills, prologic at shortcircuit dot net dot au"
    def init(self, *args, **kwargs):
        # Let BasePlugin do its own initialisation first.
        super(JSONRPC, self).init(*args, **kwargs)
        # Mount a circuits.web JSON-RPC dispatcher at /json-rpc with UTF-8
        # encoding, and register it on this component so it is wired into
        # (and torn down with) this plugin. NOTE(review): the third
        # argument is presumably the events channel ("rpc") -- confirm
        # against the circuits.web JSONRPC dispatcher documentation.
        JSONRPCDispatcher("/json-rpc", "utf-8", "rpc").register(self)
| {
"content_hash": "8065f0e2bf90676d28600a35b85defa4",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 71,
"avg_line_length": 27.77777777777778,
"alnum_prop": 0.6893333333333334,
"repo_name": "prologic/kdb",
"id": "d5eba03a4376b7158c8b313a01c67f4706e82a25",
"size": "750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kdb/plugins/jsonrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3097"
},
{
"name": "HTML",
"bytes": "562"
},
{
"name": "JavaScript",
"bytes": "274"
},
{
"name": "Python",
"bytes": "125919"
}
],
"symlink_target": ""
} |
import os
import six
# Public API of this module. 'strip_last_newline_char' is defined below as a
# public helper but was missing here, so `from ... import *` silently dropped
# it; keep this list in sync with the module-level functions.
__all__ = [
    'prefix_dict_keys',
    'compare_path_file_name',
    'strip_last_newline_char'
]
def prefix_dict_keys(dictionary, prefix='_'):
    """
    Prefix dictionary keys with a provided prefix.

    :param dictionary: Dictionary whose keys to prefix.
    :type dictionary: ``dict``

    :param prefix: Key prefix.
    :type prefix: ``str``

    :rtype: ``dict``
    """
    # dict.items() is available and correct on both Python 2 and 3, so the
    # six.iteritems() indirection is unnecessary; a dict comprehension
    # builds the result in a single pass.
    return {'%s%s' % (prefix, key): value
            for key, value in dictionary.items()}
def compare_path_file_name(file_path_a, file_path_b):
    """
    Order two file paths by their base file names alone, ignoring the
    directory portion of each path.

    Intended as a comparison helper for ``sorted`` / ``list.sort``.

    NOTE(review): this returns a boolean, not the -1/0/1 an old-style cmp
    callable yields -- confirm callers use it in a way that tolerates that.
    """
    return os.path.basename(file_path_a) < os.path.basename(file_path_b)
def strip_last_newline_char(input_str):
    """
    Remove a single trailing newline character, if one is present.

    Falsy inputs (empty string, ``None``) are returned unchanged.

    :param input_str: Input string to be stripped.
    :type input_str: ``str``

    :rtype: ``str``
    """
    if input_str and input_str.endswith('\n'):
        return input_str[:-1]
    return input_str
| {
"content_hash": "50a93b3cd0eb1767d9d1c08eee890b17",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 78,
"avg_line_length": 20.64406779661017,
"alnum_prop": 0.6141215106732348,
"repo_name": "Itxaka/st2",
"id": "7be5013a92bb59673ac11594d8d82dd906009bf5",
"size": "1998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "st2common/st2common/util/misc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "35769"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "2673739"
},
{
"name": "Shell",
"bytes": "16008"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
"""
Ironic host manager.
This host manager will consume all cpu's, disk space, and
ram from a host / node as it is supporting Baremetal hosts, which can not be
subdivided into multiple instances.
"""
from oslo.config import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
import nova.scheduler.base_baremetal_host_manager as bbhm
from nova.scheduler import host_manager
# oslo.config options controlling scheduler filtering for bare metal hosts.
host_manager_opts = [
    cfg.ListOpt('baremetal_scheduler_default_filters',
                default=[
                    'RetryFilter',
                    'AvailabilityZoneFilter',
                    'ComputeFilter',
                    'ComputeCapabilitiesFilter',
                    'ImagePropertiesFilter',
                    'ExactRamFilter',
                    'ExactDiskFilter',
                    'ExactCoreFilter',
                ],
                help='Which filter class names to use for filtering '
                     'baremetal hosts when not specified in the request.'),
    cfg.BoolOpt('scheduler_use_baremetal_filters',
                default=False,
                help='Flag to decide whether to use '
                     'baremetal_scheduler_default_filters or not.'),
]
# Register the options on the global config object at import time so they
# are usable as soon as this module is loaded.
CONF = cfg.CONF
CONF.register_opts(host_manager_opts)
LOG = logging.getLogger(__name__)
class IronicNodeState(bbhm.BaseBaremetalNodeState):
    """Mutable and immutable information tracked for a host.
    This is an attempt to remove the ad-hoc data structures
    previously used and lock down access.
    """
    def update_from_compute_node(self, compute):
        """Update information about a host from its compute_node info."""
        super(IronicNodeState, self).update_from_compute_node(compute)
        # Expose the node's entire local disk: bare metal hosts cannot be
        # subdivided between instances (see module docstring), so there is
        # no per-instance disk accounting.
        self.total_usable_disk_gb = compute['local_gb']
        self.updated = compute['updated_at']
    def consume_from_instance(self, instance):
        """Consume nodes entire resources regardless of instance request."""
        super(IronicNodeState, self).consume_from_instance(instance)
        # Stamp the state as freshly updated so the scheduler does not
        # treat it as stale.
        self.updated = timeutils.utcnow()
class IronicHostManager(bbhm.BaseBaremetalHostManager):
    """Host manager aware of Ironic (bare metal) compute nodes."""

    def __init__(self):
        """Initialise the base manager and, when configured, swap the
        default scheduler filters for the bare metal specific set."""
        super(IronicHostManager, self).__init__()
        if CONF.scheduler_use_baremetal_filters:
            CONF.scheduler_default_filters = (
                CONF.baremetal_scheduler_default_filters)

    def host_state_cls(self, host, node, **kwargs):
        """Factory returning the HostState flavour matching the node type."""
        node_info = kwargs.get('compute')
        # Ironic nodes advertise a literal 'baremetal cpu' marker in their
        # cpu_info; everything else is handled as a regular compute host.
        if node_info and node_info.get('cpu_info') == 'baremetal cpu':
            return IronicNodeState(host, node, **kwargs)
        return host_manager.HostState(host, node, **kwargs)
| {
"content_hash": "f2ff143d00af3031caf4f577ec5b1a7f",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 76,
"avg_line_length": 36.298701298701296,
"alnum_prop": 0.6415026833631485,
"repo_name": "srajag/nova",
"id": "409c6dd1cc1b8ed1dc7b7f2478202cab9f44e844",
"size": "3478",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nova/scheduler/ironic_host_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from .javacls import JavaClass
class Serializable:
    """Marker class carrying no behavior of its own.

    NOTE(review): presumably mirrors Java's ``java.io.Serializable``
    marker interface for the Java object (de)serialisation code in this
    package -- confirm against the callers before relying on that.
    """
    pass
| {
"content_hash": "660166ead2093d946313d60dcdf683ce",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 30,
"avg_line_length": 15.25,
"alnum_prop": 0.7704918032786885,
"repo_name": "lodevil/javaobject",
"id": "4cc2fd88944abe1348469e60b12b3541234c310d",
"size": "61",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "javaobject/java/ser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "43428"
}
],
"symlink_target": ""
} |
"""Illustrates a method to intercept changes on objects, turning
an UPDATE statement on a single row into an INSERT statement, so that a new
row is inserted with the new data, keeping the old row intact.
"""
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import attributes
from sqlalchemy.orm import backref
from sqlalchemy.orm import make_transient
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
class Versioned:
    """Mixin that turns an UPDATE of a row into an INSERT of a new row,
    leaving the previously persisted row untouched."""

    def new_version(self, session):
        # Detach this object from the identity map so SQLAlchemy stops
        # treating it as the persistent row it was loaded from.
        make_transient(self)
        # Clearing the primary key forces a fresh value to be generated
        # on the upcoming INSERT.
        self.id = None
@event.listens_for(Session, "before_flush")
def before_flush(session, flush_context, instances):
    """Intercept dirty ``Versioned`` objects before flush and re-insert
    them as brand new rows instead of updating in place."""
    for obj in session.dirty:
        # Only versioned, actually-modified objects that already have a
        # database identity are candidates (short-circuits in the same
        # order as the original guard clauses).
        eligible = (
            isinstance(obj, Versioned)
            and session.is_modified(obj)
            and attributes.instance_state(obj).has_identity
        )
        if not eligible:
            continue
        # Detach from the old row, then schedule the object for INSERT.
        obj.new_version(session)
        session.add(obj)
Base = declarative_base()
# In-memory SQLite; echo=True logs the generated SQL for the example output.
engine = create_engine("sqlite://", echo=True)
Session = sessionmaker(engine)
# example 1, simple versioning
class Example(Versioned, Base):
    __tablename__ = "example"
    id = Column(Integer, primary_key=True)
    data = Column(String)
Base.metadata.create_all(engine)
session = Session()
e1 = Example(data="e1")
session.add(e1)
session.commit()
# Modifying e1 triggers the before_flush hook: row 1 is left intact and a
# new row 2 is inserted with the changed data.
e1.data = "e2"
session.commit()
assert session.query(Example.id, Example.data).order_by(Example.id).all() == (
    [(1, "e1"), (2, "e2")]
)
# example 2, versioning with a parent
class Parent(Base):
    __tablename__ = "parent"
    id = Column(Integer, primary_key=True)
    child_id = Column(Integer, ForeignKey("child.id"))
    child = relationship("Child", backref=backref("parent", uselist=False))
class Child(Versioned, Base):
    __tablename__ = "child"
    id = Column(Integer, primary_key=True)
    data = Column(String)
    def new_version(self, session):
        # expire parent's reference to us
        session.expire(self.parent, ["child"])
        # create new version
        Versioned.new_version(self, session)
        # re-add ourselves to the parent
        self.parent.child = self
Base.metadata.create_all(engine)
session = Session()
p1 = Parent(child=Child(data="c1"))
session.add(p1)
session.commit()
# Editing the child inserts a new child row (id 2) and repoints the
# parent's FK at it, as asserted below.
p1.child.data = "c2"
session.commit()
assert p1.child_id == 2
assert session.query(Child.id, Child.data).order_by(Child.id).all() == (
    [(1, "c1"), (2, "c2")]
)
| {
"content_hash": "e68be7dca1f45eef5a67b71407a5af55",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 78,
"avg_line_length": 24.369747899159663,
"alnum_prop": 0.6844827586206896,
"repo_name": "zzzeek/sqlalchemy",
"id": "96d2e399ec14c8c01a8c0eaf6c0f045006803fce",
"size": "2900",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "examples/versioned_rows/versioned_rows.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "21698"
},
{
"name": "Python",
"bytes": "16838583"
}
],
"symlink_target": ""
} |
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.states import rbenv
# Globals normally injected by the Salt loader; stub them out so the state
# module can be imported and patched in isolation by these tests.
rbenv.__opts__ = {}
rbenv.__salt__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class RbenvTestCase(TestCase):
    '''
    Test cases for salt.states.rbenv
    '''
    # 'installed' function tests: 1
    def test_installed(self):
        '''
        Test to verify that the specified ruby is installed with rbenv.
        '''
        name = 'rbenv-deps'
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': ''}
        # side_effect sequence: first call False (install fails branch),
        # then True twice (successful install branch).
        mock_t = MagicMock(side_effect=[False, True, True])
        mock_f = MagicMock(return_value=False)
        mock_def = MagicMock(return_value='2.7')
        mock_ver = MagicMock(return_value=['2.7'])
        with patch.dict(rbenv.__salt__,
                        {'rbenv.is_installed': mock_f,
                         'rbenv.install': mock_t,
                         'rbenv.default': mock_def,
                         'rbenv.versions': mock_ver,
                         'rbenv.install_ruby': mock_t}):
            # test=True: state should only report what it would do.
            with patch.dict(rbenv.__opts__, {'test': True}):
                comt = ('Ruby rbenv-deps is set to be installed')
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(rbenv.installed(name), ret)
            # test=False: first call hits the failed-install path, the
            # second (mock now returns True) succeeds.
            with patch.dict(rbenv.__opts__, {'test': False}):
                comt = ('Rbenv failed to install')
                ret.update({'comment': comt, 'result': False})
                self.assertDictEqual(rbenv.installed(name), ret)
                comt = ('Successfully installed ruby')
                ret.update({'comment': comt, 'result': True, 'default': False,
                            'changes': {name: 'Installed'}})
                self.assertDictEqual(rbenv.installed(name), ret)
    # 'absent' function tests: 1
    def test_absent(self):
        '''
        Test to verify that the specified ruby is not installed with rbenv.
        '''
        name = 'myqueue'
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': ''}
        # is_installed: False on the first call, True on the second.
        mock = MagicMock(side_effect=[False, True])
        mock_def = MagicMock(return_value='2.7')
        mock_ver = MagicMock(return_value=['2.7'])
        with patch.dict(rbenv.__salt__,
                        {'rbenv.is_installed': mock,
                         'rbenv.default': mock_def,
                         'rbenv.versions': mock_ver}):
            with patch.dict(rbenv.__opts__, {'test': True}):
                comt = ('Ruby myqueue is set to be uninstalled')
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(rbenv.absent(name), ret)
            with patch.dict(rbenv.__opts__, {'test': False}):
                comt = ('Rbenv not installed, myqueue not either')
                ret.update({'comment': comt, 'result': True})
                self.assertDictEqual(rbenv.absent(name), ret)
                comt = ('Ruby myqueue is already absent')
                ret.update({'comment': comt, 'result': True})
                self.assertDictEqual(rbenv.absent(name), ret)
    # 'install_rbenv' function tests: 1
    def test_install_rbenv(self):
        '''
        Test to install rbenv if not installed.
        '''
        name = 'myqueue'
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': ''}
        with patch.dict(rbenv.__opts__, {'test': True}):
            comt = ('Rbenv is set to be installed')
            ret.update({'comment': comt, 'result': None})
            self.assertDictEqual(rbenv.install_rbenv(name), ret)
        with patch.dict(rbenv.__opts__, {'test': False}):
            # Same mock serves both is_installed (False) and install (True).
            mock = MagicMock(side_effect=[False, True])
            with patch.dict(rbenv.__salt__,
                            {'rbenv.is_installed': mock,
                             'rbenv.install': mock}):
                comt = ('Rbenv installed')
                ret.update({'comment': comt, 'result': True})
                self.assertDictEqual(rbenv.install_rbenv(name), ret)
# Allow running this test module directly via Salt's test runner.
if __name__ == '__main__':
    from integration import run_tests
    run_tests(RbenvTestCase, needs_daemon=False)
| {
"content_hash": "d7a3e46fae83a365a04e1ed30d263bc4",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 78,
"avg_line_length": 34.33582089552239,
"alnum_prop": 0.5205390132579873,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "56ef9714f042ff1e6562cd2bbb90daca4d84759c",
"size": "4625",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.3/tests/unit/states/rbenv_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
} |
import sqlalchemy as sql
def upgrade(migrate_engine):
    """Repoint group_project_metadata's FK from 'tenant' to 'project'
    (sqlite only), via a copy-to-temp / drop / copy-back table dance."""
    # The group_project_metadata table was not updated in terms of its
    # FK to the tenant table when the tenant->project change was made at
    # the 015 migration for sqlite. This upgrade fixes that.
    # We need to create a fake tenant table so that we can first load
    # the group_project_metadata at all, then do a dance of copying tables
    # to get us to the correct schema.
    meta = sql.MetaData()
    meta.bind = migrate_engine
    # Other engines handled the FK change correctly in migration 015.
    if migrate_engine.name != 'sqlite':
        return
    temp_tenant_table = sql.Table(
        'tenant',
        meta,
        sql.Column('id', sql.String(64), primary_key=True))
    temp_tenant_table.create(migrate_engine, checkfirst=True)
    sql.Table('user', meta, autoload=True)
    old_group_metadata_table = sql.Table('group_project_metadata',
                                         meta, autoload=True)
    # OK, we now have the table loaded, create a first
    # temporary table of a different name with the correct FK
    sql.Table('project', meta, autoload=True)
    temp_group_project_metadata_table = sql.Table(
        'temp_group_project_metadata',
        meta,
        sql.Column(
            'group_id',
            sql.String(64),
            primary_key=True),
        sql.Column(
            'project_id',
            sql.String(64),
            sql.ForeignKey('project.id'),
            primary_key=True),
        sql.Column('data', sql.Text()))
    temp_group_project_metadata_table.create(migrate_engine, checkfirst=True)
    # Populate the new temporary table, and then drop the old one
    session = sql.orm.sessionmaker(bind=migrate_engine)()
    for metadata in session.query(old_group_metadata_table):
        q = temp_group_project_metadata_table.insert().values(
            group_id=metadata.group_id,
            project_id=metadata.project_id,
            data=metadata.data)
        session.execute(q)
    session.commit()
    old_group_metadata_table.drop()
    temp_tenant_table.drop()
    # Now do a final table copy to get the table of the right name.
    # Re-init the metadata so that sqlalchemy does not get confused with
    # multiple versions of the same named table.
    meta2 = sql.MetaData()
    meta2.bind = migrate_engine
    sql.Table('project', meta2, autoload=True)
    new_group_project_metadata_table = sql.Table(
        'group_project_metadata',
        meta2,
        sql.Column(
            'group_id',
            sql.String(64),
            primary_key=True),
        sql.Column(
            'project_id',
            sql.String(64),
            sql.ForeignKey('project.id'),
            primary_key=True),
        sql.Column('data', sql.Text()))
    new_group_project_metadata_table.create(migrate_engine, checkfirst=True)
    for metadata in session.query(temp_group_project_metadata_table):
        q = new_group_project_metadata_table.insert().values(
            group_id=metadata.group_id,
            project_id=metadata.project_id,
            data=metadata.data)
        session.execute(q)
    session.commit()
    temp_group_project_metadata_table.drop()
def downgrade(migrate_engine):
    """Restore the pre-upgrade (broken) FK pointing at 'tenant' (sqlite
    only), using the same copy-to-temp / drop / copy-back sequence."""
    # Put the group_project_metadata table back the way it was in its rather
    # broken state. We don't try and re-write history, since otherwise people
    # get out of step.
    meta = sql.MetaData()
    meta.bind = migrate_engine
    # Only sqlite had the broken FK; nothing to undo elsewhere.
    if migrate_engine.name != 'sqlite':
        return
    sql.Table('user', meta, autoload=True)
    sql.Table('project', meta, autoload=True)
    group_metadata_table = sql.Table('group_project_metadata',
                                     meta, autoload=True)
    # We want to create a temp group meta table with the FK
    # set to the wrong place.
    temp_tenant_table = sql.Table(
        'tenant',
        meta,
        sql.Column('id', sql.String(64), primary_key=True))
    temp_tenant_table.create(migrate_engine, checkfirst=True)
    temp_group_project_metadata_table = sql.Table(
        'temp_group_project_metadata',
        meta,
        sql.Column(
            'group_id',
            sql.String(64),
            primary_key=True),
        sql.Column(
            'project_id',
            sql.String(64),
            sql.ForeignKey('tenant.id'),
            primary_key=True),
        sql.Column('data', sql.Text()))
    temp_group_project_metadata_table.create(migrate_engine, checkfirst=True)
    # Now populate the temp table and drop the real one
    session = sql.orm.sessionmaker(bind=migrate_engine)()
    for metadata in session.query(group_metadata_table):
        q = temp_group_project_metadata_table.insert().values(
            group_id=metadata.group_id,
            project_id=metadata.project_id,
            data=metadata.data)
        session.execute(q)
    session.commit()
    group_metadata_table.drop()
    # Now copy again into the correctly named table. Re-init the metadata
    # so that sqlalchemy does not get confused with multiple versions of the
    # same named table.
    meta2 = sql.MetaData()
    meta2.bind = migrate_engine
    sql.Table('tenant', meta2, autoload=True)
    new_group_project_metadata_table = sql.Table(
        'group_project_metadata',
        meta2,
        sql.Column(
            'group_id',
            sql.String(64),
            primary_key=True),
        sql.Column(
            'project_id',
            sql.String(64),
            sql.ForeignKey('tenant.id'),
            primary_key=True),
        sql.Column('data', sql.Text()))
    new_group_project_metadata_table.create(migrate_engine, checkfirst=True)
    for metadata in session.query(temp_group_project_metadata_table):
        q = new_group_project_metadata_table.insert().values(
            group_id=metadata.group_id,
            project_id=metadata.project_id,
            data=metadata.data)
        session.execute(q)
    session.commit()
    temp_group_project_metadata_table.drop()
    # Note that 'tenant' is deliberately left dropped here in the original
    # ordering as well -- the temp table is removed last.
    temp_tenant_table.drop()
| {
"content_hash": "9bedb0b34eb08571e6d1a1b81a276cea",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 77,
"avg_line_length": 34.39655172413793,
"alnum_prop": 0.6163742690058479,
"repo_name": "UTSA-ICS/keystone-SID",
"id": "61fce39eb360cb59e66d57f80073eb85cb848bad",
"size": "6571",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "keystone/common/sql/migrate_repo/versions/028_fixup_group_metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "2961841"
},
{
"name": "Shell",
"bytes": "10512"
}
],
"symlink_target": ""
} |
from .dataset import Dataset
class SparkObjectDataset(Dataset):
    """Dataset referencing a Spark Server object.

    All dataset-level properties are forwarded verbatim to the ``Dataset``
    base class; this subclass only fixes the polymorphic discriminator.

    :param additional_properties: Unmatched properties from the message are
     deserialized this collection
    :type additional_properties: dict[str, object]
    :param description: Dataset description.
    :type description: str
    :param structure: Columns that define the structure of the dataset.
     Type: array (or Expression with resultType array), itemType:
     DatasetDataElement.
    :type structure: object
    :param linked_service_name: Linked service reference.
    :type linked_service_name:
     ~azure.mgmt.datafactory.models.LinkedServiceReference
    :param parameters: Parameters for dataset.
    :type parameters: dict[str,
     ~azure.mgmt.datafactory.models.ParameterSpecification]
    :param annotations: List of tags that can be used for describing the
     Dataset.
    :type annotations: list[object]
    :param type: Constant filled by server.
    :type type: str
    """

    _validation = {
        'linked_service_name': {'required': True},
        'type': {'required': True},
    }

    def __init__(self, linked_service_name, additional_properties=None, description=None, structure=None, parameters=None, annotations=None):
        super(SparkObjectDataset, self).__init__(
            additional_properties=additional_properties,
            description=description,
            structure=structure,
            linked_service_name=linked_service_name,
            parameters=parameters,
            annotations=annotations)
        # Polymorphic discriminator used by the service-side deserializer.
        self.type = 'SparkObject'
| {
"content_hash": "48fde4a9431def56735f25d69062e194",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 228,
"avg_line_length": 43.34285714285714,
"alnum_prop": 0.7277521423862887,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "0f998f03375c1aba07ec97cf79362c152d5eb425",
"size": "1991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-datafactory/azure/mgmt/datafactory/models/spark_object_dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
"""
This bot resets a (user) sandbox with predefined text.
This script understands the following command-line arguments:
¶ms;
Furthermore, the following command line parameters are supported:
-hours:# Use this parameter if to make the script repeat itself
after # hours. Hours can be defined as a decimal. 0.01
hours are 36 seconds; 0.1 are 6 minutes.
-delay:# Use this parameter for a wait time after the last edit
was made. If no parameter is given it takes it from
hours and limits it between 5 and 15 minutes.
The minimum delay time is 5 minutes.
-text The text that substitutes in the sandbox, you can use this
when you haven't configured clean_candbox for your wiki.
-summary Summary of the edit made by bot.
"""
#
# (C) Leonardo Gregianin, 2006
# (C) Wikipedian, 2006-2007
# (C) Andre Engels, 2007
# (C) Siebrand Mazeland, 2007
# (C) xqt, 2009-2014
# (C) Dr. Trigon, 2012
# (C) Pywikibot team, 2012-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import division, unicode_literals
__version__ = '$Id: b6d9824feba3246bb9ef2c41134696378c2e84e0 $'
#
import time
import datetime
import pywikibot
from pywikibot import i18n, Bot, pagegenerators
content = {
'commons': u'{{Sandbox}}\n<!-- Please edit only below this line. -->',
'als': u'{{subst:/Vorlage}}',
'ar': u'{{عنوان الملعب}}\n<!-- مرحبا! خذ راحتك في تجربة مهارتك في التنسيق '
u'والتحرير أسفل هذا السطر. هذه الصفحة لتجارب التعديل ، سيتم تفريغ '
u'هذه الصفحة كل 12 ساعة. -->',
'arz': u'{{عنوان السبوره}}\n<!-- مرحبا! خد راحتك فى تجريب مهاراتك فى\n'
u'التحرير تحت الخط ده. بما إن الصفحه دى لتجارب التعديل، فالصفحه دى '
u'حيتم تنضيفها\nاوتوماتيكيا كل 12 ساعه. -->',
'az': u'<!--- LÜTFƏN, BU SƏTRƏ TOXUNMAYIN --->\n{{Qaralama dəftəri}}\n'
u'<!-- AŞAĞIDAKI XƏTTİN ALTINDAN YAZA BİLƏRSİNİZ --->',
'bar': u'{{Bitte erst NACH dieser Zeile schreiben! (Begrüßungskasten)}}\r\n',
'cs': u'{{subst:/uhrabat}}',
'da': u'{{subst:Sandkasse tekst}}',
'de': u'{{subst:Wikipedia:Spielwiese/Vorlage}}',
'en': u'{{Sandbox heading}}\n<!-- Hello! Feel free to try your formatting '
u'and editing skills below this line. As this page is for editing '
u'experiments, this page will automatically be cleaned every 12 '
u'hours. -->',
'fa': u'{{subst:Wikipedia:ربات/sandbox}}',
'fi': u'{{subst:Hiekka}}',
'he': u'{{ארגז חול}}\n<!-- נא לערוך מתחת לשורה זו בלבד, תודה. -->',
'id': u'{{Bakpasir}}\n<!-- Uji coba dilakukan di baris di bawah ini -->',
'it': u'{{sandbox}}<!-- Scrivi SOTTO questa riga senza cancellarla. Grazie. -->',
'ja': u'{{subst:サンドボックス}}',
'ko': u'{{연습장 안내문}}',
'ksh': u'{{subst:/Schablon}}',
'mzn': u'{{ویکیپدیا:چنگمویی صفحه/پیغوم}}\n<!-- سلام!اگه '
u'خواننی شه دچیین مهارتون وسه تمرین هاکنین بتوننی اینتا صفحه جا '
u'ایستفاده هاکنین، اته لطف هاکنین اینتا پیغوم ره شه بقیه رفقون وسه '
u'بیلین. اینتا صفحه هرچند ساعت ربوت جا پاک بونه.-->',
'nds': u'{{subst:/Vörlaag}}',
'nl': u'{{subst:Wikipedia:Zandbak/schoon zand}}',
'nn': u'{{sandkasse}}\n<!-- Ver snill og IKKJE FJERN DENNE LINA OG LINA '
u'OVER ({{sandkasse}}) Nedanføre kan du derimot ha det artig og '
u'prøve deg fram! Lykke til! :-) -->',
'no': u'{{Sandkasse}}\n<!-- VENNLIGST EKSPERIMENTER NEDENFOR DENNE '
u'SKJULTE TEKSTLINJEN! SANDKASSEMALEN {{Sandkasse}} SKAL IKKE '
u'FJERNES! -->}}',
'pl': u'{{Prosimy - NIE ZMIENIAJ, NIE KASUJ, NIE PRZENOŚ tej linijki - pisz niżej}}',
'pt': u'<!--não apague esta linha-->{{página de testes}}<!--não apagar-->\r\n',
'ru': u'{{/Пишите ниже}}\n<!-- Не удаляйте, пожалуйста, эту строку, тестируйте ниже -->',
'simple': u'{{subst:/Text}}',
'sco': u'Feel free tae test here',
'sr': u'{{песак}}\n<!-- Молимо, испробавајте испод ове линије. Хвала. -->',
'sv': u'{{subst:Sandlådan}}',
'th': u'{{กระบะทราย}}\n<!-- กรุณาอย่าแก้ไขบรรทัดนี้ ขอบคุณครับ/ค่ะ -- '
u'Please leave this line as they are. Thank you! -->',
'tr': u'{{/Bu satırı değiştirmeden bırakın}}',
'zh': u'{{subst:User:Sz-iwbot/sandbox}}\r\n',
}
sandboxTitle = {
'commons': u'Project:Sandbox',
'als': u'Project:Sandchaschte',
'ar': u'Project:ملعب',
'arz': u'Project:السبوره',
'az': u'Vikipediya:Qaralama dəftəri',
'bar': u'Project:Spuiwiesn',
'cs': u'Project:Pískoviště',
'da': u'Project:Sandkassen',
'de': u'Project:Spielwiese',
'en': u'Project:Sandbox',
'fa': [u'Project:صفحه تمرین', u'Project:آشنایی با ویرایش'],
'fi': u'Project:Hiekkalaatikko',
'fr': u'Project:Bac à sable',
'he': u'Project:ארגז חול',
'id': u'Project:Bak pasir',
'it': u'Project:Pagina delle prove',
'ja': u'Project:サンドボックス',
'ko': u'Project:연습장',
'ksh': u'Project:Shpillplaz',
'mzn': u'Project:چنگمویی صفحه',
'nds': u'Project:Speelwisch',
'nl': u'Project:Zandbak',
'no': u'Project:Sandkasse',
'pl': u'Project:Brudnopis',
'pt': u'Project:Página de testes',
'ru': u'Project:Песочница',
'simple': u'Project:Sandbox',
'sco': u'Project:Saundpit',
'sr': u'Project:Песак',
'sv': u'Project:Sandlådan',
'th': u'Project:ทดลองเขียน',
'tr': u'Vikipedi:Deneme tahtası',
'zh': u'Project:沙盒',
}
# This is required for the text that is shown when you run this script
# with the parameter -help.
# NOTE(review): the key below reads '¶ms;' -- it looks like a mangled
# '&params;' placeholder (matching the marker in the module docstring);
# confirm against an upstream copy before changing it, since it is used
# as a literal substitution key at runtime.
docuReplacements = {
    '¶ms;': pagegenerators.parameterHelp,
}
class SandboxBot(Bot):
    """Sandbox reset bot."""
    # Option defaults; overridable via **kwargs / command line flags.
    # 'delay_td' is derived in __init__, never passed by the caller.
    availableOptions = {
        'hours': 1,
        'no_repeat': True,
        'delay': None,
        'delay_td': None,
        'text': "",
        'summary': "",
    }
    def __init__(self, **kwargs):
        """Constructor."""
        super(SandboxBot, self).__init__(**kwargs)
        # Derive the minimum wait after the sandbox's last edit: either
        # clamped from 'hours' (5..15 min) or at least 5 min from 'delay'.
        # NOTE(review): this assigns into availableOptions, which is a
        # class-level dict shared by all instances -- confirm this is
        # intended before creating more than one bot per process.
        if self.getOption('delay') is None:
            d = min(15, max(5, int(self.getOption('hours') * 60)))
            self.availableOptions['delay_td'] = datetime.timedelta(minutes=d)
        else:
            d = max(5, self.getOption('delay'))
            self.availableOptions['delay_td'] = datetime.timedelta(minutes=d)
        self.site = pywikibot.Site()
        # Without either a per-language template or an explicit -text
        # there is nothing to write into the sandbox.
        if not content.get(self.site.code) and not self.getOption('text'):
            pywikibot.error(u'No content is given for pages, exiting.')
            raise RuntimeError
        if not self.generator:
            if self.site.code not in sandboxTitle:
                pywikibot.error(u'No generator is given for this site'
                                u'(%s), exiting.' % self.site)
                raise RuntimeError
            # Some languages list several sandbox pages; normalise to a list.
            local_sandbox_title = sandboxTitle[self.site.code]
            if not isinstance(local_sandbox_title, list):
                local_sandbox_title = [local_sandbox_title]
            self.generator = [pywikibot.Page(self.site, page_name) for
                              page_name in local_sandbox_title]
    def run(self):
        """Run bot."""
        self.site.login()
        # Outer loop repeats every 'hours' hours unless no_repeat is set.
        while True:
            wait = False
            now = time.strftime("%d %b %Y %H:%M:%S (UTC)", time.gmtime())
            for sandboxPage in self.generator:
                pywikibot.output(u'Preparing to process sandbox page %s'
                                 % sandboxPage.title(asLink=True))
                if sandboxPage.isRedirectPage():
                    pywikibot.warning(
                        u'%s is a redirect page, cleaning it anyway'
                        % sandboxPage.title(asLink=True))
                try:
                    text = sandboxPage.text
                    if not self.getOption('text'):
                        translatedContent = i18n.translate(self.site, content)
                    else:
                        translatedContent = self.getOption('text')
                    if self.getOption('summary'):
                        translatedMsg = self.getOption('summary')
                    else:
                        translatedMsg = i18n.twtranslate(
                            self.site, 'clean_sandbox-cleaned')
                    # subst: templates expand on save, so the saved page
                    # never equals the raw template text.
                    subst = 'subst:' in translatedContent
                    pos = text.find(translatedContent.strip())
                    if text.strip() == translatedContent.strip():
                        pywikibot.output(
                            u'The sandbox is still clean, no change necessary.')
                    elif subst and \
                            sandboxPage.userName() == self.site.user():
                        # Last editor was this bot; assume its subst'ed
                        # content is still in place.
                        pywikibot.output(
                            u'The sandbox might be clean, no change necessary.')
                    elif pos != 0 and not subst:
                        sandboxPage.put(translatedContent, translatedMsg)
                        pywikibot.showDiff(text, translatedContent)
                        pywikibot.output(u'Standard content was changed, '
                                         u'sandbox cleaned.')
                    else:
                        # Header is still at the top but extra text follows:
                        # only clean once the page has been idle long enough.
                        edit_delta = (datetime.datetime.utcnow() -
                                      sandboxPage.editTime())
                        delta = self.getOption('delay_td') - edit_delta
                        # Is the last edit more than 'delay' minutes ago?
                        if delta <= datetime.timedelta(0):
                            sandboxPage.put(translatedContent, translatedMsg)
                            pywikibot.showDiff(text, translatedContent)
                            pywikibot.output(u'Standard content was changed, '
                                             u'sandbox cleaned.')
                        else:  # wait for the rest
                            pywikibot.output(
                                u'Sandbox edited %.1f minutes ago...'
                                % (edit_delta.seconds / 60.0))
                            pywikibot.output(u'Sleeping for %d minutes.'
                                             % (delta.seconds // 60))
                            time.sleep(delta.seconds)
                            wait = True
                except pywikibot.EditConflict:
                    pywikibot.output(
                        u'*** Loading again because of edit conflict.\n')
                except pywikibot.NoPage:
                    pywikibot.output(
                        u'*** The sandbox is not existent, skipping.')
                    continue
            if self.getOption('no_repeat'):
                pywikibot.output(u'\nDone.')
                return
            elif not wait:
                # We did not already sleep inside the page loop, so sleep
                # the full repeat interval here.
                if self.getOption('hours') < 1.0:
                    pywikibot.output('\nSleeping %s minutes, now %s'
                                     % ((self.getOption('hours') * 60), now))
                else:
                    pywikibot.output('\nSleeping %s hours, now %s'
                                     % (self.getOption('hours'), now))
                time.sleep(self.getOption('hours') * 60 * 60)
def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    options = {}
    factory = pagegenerators.GeneratorFactory()
    for parameter in pywikibot.handle_args(args):
        if parameter.startswith('-hours:'):
            # Repeat mode: run again every <hours> hours.
            options['hours'] = float(parameter[len('-hours:'):])
            options['no_repeat'] = False
        elif parameter.startswith('-delay:'):
            options['delay'] = int(parameter[len('-delay:'):])
        elif parameter.startswith('-text'):
            # Bare '-text' prompts interactively; '-text:...' takes the value.
            if len(parameter) == len('-text'):
                options['text'] = pywikibot.input(
                    u'What text do you want to substitute?')
            else:
                options['text'] = parameter[len('-text:'):]
        elif parameter.startswith('-summary'):
            if len(parameter) == len('-summary'):
                options['summary'] = pywikibot.input(u'Enter the summary:')
            else:
                options['summary'] = parameter[len('-summary:'):]
        else:
            # Anything else is a page-generator argument.
            factory.handleArg(parameter)
    SandboxBot(generator=factory.getCombinedGenerator(), **options).run()
if __name__ == "__main__":
main()
| {
"content_hash": "0850db91ef297f8cd47691c396d62e18",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 93,
"avg_line_length": 42.284246575342465,
"alnum_prop": 0.537053535271726,
"repo_name": "hperala/kontuwikibot",
"id": "40229713376f093a927da091258287d23317bc96",
"size": "13198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/clean_sandbox.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "C",
"bytes": "137889"
},
{
"name": "C++",
"bytes": "4113"
},
{
"name": "Python",
"bytes": "3758566"
}
],
"symlink_target": ""
} |
from django.db import models
class Building(models.Model):
    """Campus building with its display name and geographic coordinates."""
    # Short building code (max 6 chars); indexed to support lookups by code.
    code = models.CharField(max_length=6, db_index=True)
    # NOTE(review): "latititude" is a misspelling of "latitude". Renaming it
    # would change the DB column name and break existing callers, so it is
    # documented rather than fixed. Coordinates are stored as strings here.
    latititude = models.CharField(max_length=40)
    longitude = models.CharField(max_length=40)
    name = models.CharField(max_length=200)
    class Meta:
        # Explicit legacy table name and owning app label.
        db_table = "myuw_mobile_building"
        app_label = "myuw"
| {
"content_hash": "710b2cd810b3798915dee21057fe213b",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 56,
"avg_line_length": 28.75,
"alnum_prop": 0.6840579710144927,
"repo_name": "fanglinfang/myuw",
"id": "9fbdb90f0e298918469f4c13913f375f4b93cdbc",
"size": "345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myuw/models/building.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "54427"
},
{
"name": "HTML",
"bytes": "169387"
},
{
"name": "JavaScript",
"bytes": "226000"
},
{
"name": "Python",
"bytes": "403286"
}
],
"symlink_target": ""
} |
import argparse
import copy
from textwrap import dedent
from polygraphy import constants, mod
from polygraphy.exception import PolygraphyException
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import (
ComparatorCompareArgs,
ComparatorPostprocessArgs,
ComparatorRunArgs,
CompareFuncIndicesArgs,
CompareFuncSimpleArgs,
DataLoaderArgs,
LoggerArgs,
ModelArgs,
OnnxFromTfArgs,
OnnxInferShapesArgs,
OnnxLoadArgs,
OnnxrtRunnerArgs,
OnnxrtSessionArgs,
OnnxSaveArgs,
PluginRefRunnerArgs,
RunnerSelectArgs,
TfConfigArgs,
TfLoadArgs,
TfRunnerArgs,
TfTrtArgs,
TrtConfigArgs,
TrtLegacyRunnerArgs,
TrtLoadEngineArgs,
TrtLoadNetworkArgs,
TrtLoadPluginsArgs,
TrtRunnerArgs,
TrtSaveEngineArgs,
)
from polygraphy.tools.base import Tool
from polygraphy.tools.script import Script, safe
try:
# No need to lazy import since this is part of the standard library
from importlib import metadata
except:
# importlib.metadata may not exist in older versions of Python.
metadata = mod.lazy_import("importlib_metadata")
PLUGIN_ENTRY_POINT = "polygraphy.run.plugins"
def generate_summary(model_file, runners, load_results):
    """Build a human-readable summary of what the generated script will do.

    Args:
        model_file: Path to the model being run, or None/empty to omit it.
        runners: Display names of the runners involved.
        load_results: Paths of previously-saved outputs to compare against.

    Returns:
        A summary string; empty when there are no runners and no load paths.
    """

    def humanize(items):
        # "a", "a and b", "a, b, and c" — Oxford comma from three items up.
        words = list(items)
        if len(words) > 1:
            words[-1] = f"and {words[-1]}"
        separator = ", " if len(words) > 2 else " "
        return separator.join(words)

    summary = ""
    if runners:
        verb = "compares " if len(runners) > 1 else "runs "
        summary += "This script " + verb
        if model_file:
            summary += f"{model_file} "
        preposition = "between " if len(runners) > 1 else "using "
        summary += preposition + humanize(runners) + "."
    if load_results:
        summary += f"\nIt will check against outputs stored in {humanize(load_results)}\n"
    return summary
class Run(Tool):
    """
    Run inference and compare results across backends.
    The typical usage of `run` is:
    polygraphy run [model_file] [runners...] [runner_options...]
    `run` will then run inference on the specified model with all the specified runners
    and compare inference outputs between them.
    TIP: You can use `--gen-script` to generate a Python script that does exactly what the `run`
    command would otherwise do.
    """
    def __init__(self):
        super().__init__("run")
    def get_subscriptions_impl(self):
        # Argument groups this tool subscribes to; each contributes its own
        # CLI options and is later accessible through ``self.arg_groups``.
        deps = [
            RunnerSelectArgs(),
            ModelArgs(guess_model_type_from_runners=True),
            TfTrtArgs(),
            TfLoadArgs(allow_tftrt=True),
            TfConfigArgs(),
            TfRunnerArgs(),
            OnnxFromTfArgs(),
            OnnxSaveArgs(output_opt="save-onnx", output_short_opt=False),
            OnnxInferShapesArgs(),
            OnnxLoadArgs(allow_saving=True, allow_from_tf=True),
            OnnxrtSessionArgs(),
            OnnxrtRunnerArgs(),
            PluginRefRunnerArgs(),
            # We run calibration/inference with the same data, so it doesn't really matter if it's random.
            TrtConfigArgs(allow_random_data_calib_warning=False),
            TrtLoadPluginsArgs(),
            TrtLoadNetworkArgs(),
            TrtSaveEngineArgs(output_opt="save-engine", output_short_opt=False),
            TrtLoadEngineArgs(allow_saving=True),
            TrtRunnerArgs(),
            TrtLegacyRunnerArgs(),
            DataLoaderArgs(),
            ComparatorRunArgs(),
            ComparatorPostprocessArgs(),
            ComparatorCompareArgs(),
            CompareFuncSimpleArgs(),
            CompareFuncIndicesArgs(),
        ]
        # Initialize plugins
        self.loaded_plugins = []
        try:
            # ``metadata`` is importlib.metadata on Python >= 3.8, otherwise a
            # lazy import of the importlib_metadata backport (see module top).
            entry_points = metadata.entry_points()
        except PolygraphyException as err:
            # NOTE(review): presumably raised by the lazy-import fallback when
            # the backport is unavailable — confirm against ``mod.lazy_import``.
            G_LOGGER.warning(
                f"Could not load extension modules since `importlib.metadata` and `importlib_metadata` are missing."
            )
        else:
            if isinstance(entry_points, dict):
                # For compatibility with older versions of importlib_metadata
                plugins = entry_points.get(PLUGIN_ENTRY_POINT, [])
            else:
                # Newer selectable API: filter by group, then resolve by name.
                entry_points = entry_points.select(group=PLUGIN_ENTRY_POINT)
                plugins = [entry_points[name] for name in entry_points.names]
            for plugin in plugins:
                try:
                    # Each entry point resolves to a zero-argument callable that
                    # returns extra argument groups for this tool to register.
                    get_arg_groups_func = plugin.load()
                    plugin_arg_groups = get_arg_groups_func()
                except Exception as err:
                    # A broken plugin must not take down the whole tool.
                    G_LOGGER.warning(f"Failed to load plugin: {plugin.name}.\nNote: Error was:\n{err}")
                else:
                    deps.extend(plugin_arg_groups)
                    self.loaded_plugins.append(plugin.name)
        return deps
    def add_parser_args_impl(self, parser):
        # Both spellings write into ``args.gen_script``.
        parser.add_argument(
            "--gen",
            "--gen-script",
            help="Path to save a generated Python script, that will do exactly "
            "what `run` would. When this option is enabled, `run` will save the script and exit. "
            "Use a value of `-` to print the script to the standard output instead of saving it to a file",
            type=argparse.FileType("w"),
            dest="gen_script",
        )
    def show_start_end_logging_impl(self, args):
        # No need to print start/end messages when we're just creating a script
        return not args.gen_script
    def run_impl(self, args):
        G_LOGGER.verbose(f"Loaded extension modules: {self.loaded_plugins}")
        # Runners without a model file is almost certainly a CLI parsing
        # mistake (the path was eaten by another option); abort early.
        if self.arg_groups[ModelArgs].path is None and self.arg_groups[RunnerSelectArgs].runners:
            G_LOGGER.critical(
                "One or more runners was specified, but no model file was provided. Make sure you've specified the model path, "
                "and also that it's not being consumed as an argument for another parameter"
            )
        # Assemble the Python script that performs the requested comparison.
        script = Script(
            summary=generate_summary(
                self.arg_groups[ModelArgs].path,
                list(self.arg_groups[RunnerSelectArgs].runners.values()),
                self.arg_groups[ComparatorCompareArgs].load_outputs_paths,
            )
        )
        self.arg_groups[LoggerArgs].add_to_script(script)
        self.arg_groups[RunnerSelectArgs].add_to_script(script)
        RESULTS_VAR_NAME = self.arg_groups[ComparatorRunArgs].add_to_script(script)
        SUCCESS_VAR_NAME = self.arg_groups[ComparatorCompareArgs].add_to_script(script, results_name=RESULTS_VAR_NAME)
        script.add_import(imports=["PolygraphyException"], frm="polygraphy.exception")
        # Make the generated script exit non-zero when the comparison fails.
        exit_status = safe(
            dedent(
                f"""
            # Report Results
            if not {{success}}:
            {constants.TAB}raise PolygraphyException('FAILED')"""
            ),
            success=SUCCESS_VAR_NAME,
        )
        script.append_suffix(exit_status)
        # Either save the script for the user, or execute it immediately.
        if args.gen_script:
            script.save(args.gen_script)
        else:
            exec(str(script))
| {
"content_hash": "c224ccc6dafd564e87cac3e6ffe621a3",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 128,
"avg_line_length": 34.38942307692308,
"alnum_prop": 0.6075772403187474,
"repo_name": "NVIDIA/TensorRT",
"id": "131f482b22ccb84db23a82805e8e0a78db8b63fd",
"size": "7841",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/Polygraphy/polygraphy/tools/run/run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "804"
},
{
"name": "C",
"bytes": "26267"
},
{
"name": "C++",
"bytes": "174835683"
},
{
"name": "CMake",
"bytes": "73882"
},
{
"name": "Cuda",
"bytes": "713094"
},
{
"name": "Dockerfile",
"bytes": "21378"
},
{
"name": "HTML",
"bytes": "266"
},
{
"name": "Jupyter Notebook",
"bytes": "2284036"
},
{
"name": "Makefile",
"bytes": "9128"
},
{
"name": "PowerShell",
"bytes": "162"
},
{
"name": "PureBasic",
"bytes": "388"
},
{
"name": "Python",
"bytes": "2541976"
},
{
"name": "Shell",
"bytes": "20007"
}
],
"symlink_target": ""
} |
"""Allow bots to build and deploy themselves. So like Rapid in ~300 lines."""
# In general, we want to catch all exceptions, so ignore lint errors for e.g.
# catching Exception
# pylint: disable=broad-except
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from concurrent import futures
from typing import Any, List
from hypebot.core import async_lib
from hypebot.plugins import coin_lib
class DeploymentManager(object):
  """Monitors for changes to hypebot, and automatically deploys them to prod.

  This build ships without a deploy integration wired in: every Request*
  method simply replies 'No deploy integration.' to the requesting channel
  (or directly to the user when no channel is given) and reports success.
  """

  # Maps *bot* names to their deploy configs. This allows for easier addition
  # and makes it clear which bots we know how to act on.
  _BOT_CONFIGS = {}

  def __init__(self,
               bot_name: str,
               # Forward-reference string avoids evaluating coin_lib at
               # definition time for tooling; the module imports it anyway.
               bookie: 'coin_lib.Bookie',
               # This dep creates a cycle in the build graph, so just Any it.
               output_util: Any,  # hypecore.OutputUtil
               executor: futures.Executor) -> None:
    self._bot_name = bot_name
    self._bookie = bookie
    self._output_util = output_util
    self._runner = async_lib.AsyncRunner(executor)

  def IsValidBot(self, bot_name: str) -> bool:
    """Returns if bot_name is a bot DeploymentManager can act upon."""
    return bot_name in self._BOT_CONFIGS

  def _ReplyNoIntegration(self, user: str, channel: str) -> bool:
    """Reports the lack of deploy integration to channel (or user)."""
    self._output_util.Output(channel or user, 'No deploy integration.')
    return True

  def RequestBuild(self, user: str, cl: int, bot_name: str,
                   channel: str) -> bool:
    """Requests a build of bot_name on behalf of user."""
    return self._ReplyNoIntegration(user, channel)

  def RequestDeploy(self,
                    user: str,
                    cl: int,
                    bot_name: str,
                    schema_list: List[str],
                    channel: str) -> bool:
    """Requests a deploy (test, build, push) of bot_name on behalf of user."""
    return self._ReplyNoIntegration(user, channel)

  def RequestPush(self,
                  user: str,
                  cl: int,
                  bot_name: str,
                  channel: str) -> bool:
    """Requests a push of bot_name on behalf of user."""
    return self._ReplyNoIntegration(user, channel)

  def RequestSchemaUpdate(self, user: str, cl: int, schema_env: str,
                          channel: str = '') -> bool:
    """Requests a schema update on behalf of user.

    Bug fix: `channel` was referenced here without being a parameter, so
    every call raised NameError. It is now an optional parameter (empty
    string means "reply directly to the user"), which keeps existing call
    sites working unchanged.
    """
    return self._ReplyNoIntegration(user, channel)

  def RequestTest(self,
                  user: str,
                  cl: int,
                  bot_name: str,
                  channel: str) -> bool:
    """Requests to run bot_name tests on behalf of user."""
    return self._ReplyNoIntegration(user, channel)
| {
"content_hash": "c38d2f706500c85a30b2632294a2fa4e",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 80,
"avg_line_length": 34.9875,
"alnum_prop": 0.6152197213290461,
"repo_name": "google/hypebot",
"id": "9beb4fc3f766e54e7437e62d9b6b479e85387fc8",
"size": "3418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hypebot/plugins/deploy_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "581383"
},
{
"name": "Starlark",
"bytes": "16083"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class TimeSeriesInsightsClientConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for TimeSeriesInsightsClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Azure Subscription ID. Required.
    :type subscription_id: str
    :keyword api_version: Api Version. Default value is "2021-03-31-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        api_version = kwargs.pop("api_version", "2021-03-31-preview")  # type: Literal["2021-03-31-preview"]
        # Fail fast on missing required parameters.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = api_version
        self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
        kwargs.setdefault("sdk_moniker", "mgmt-timeseriesinsights/{}".format(VERSION))
        self._configure(**kwargs)

    def _configure(self, **kwargs: Any) -> None:
        """Install the default pipeline policies, honoring caller-supplied overrides."""
        supplied = kwargs.get
        self.user_agent_policy = supplied("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = supplied("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = supplied("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = supplied("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = supplied("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = supplied("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = supplied("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = supplied("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = supplied("authentication_policy")
        # Build a default auth policy only when credentials exist and the
        # caller did not provide one.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
                self.credential, *self.credential_scopes, **kwargs
            )
| {
"content_hash": "0663c17024a6d73d60e3d7b02f5ff006",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 108,
"avg_line_length": 52.375,
"alnum_prop": 0.722255369928401,
"repo_name": "Azure/azure-sdk-for-python",
"id": "9cd67a515ff753dca320d22e85cc40633dcffb8c",
"size": "3820",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/timeseriesinsights/azure-mgmt-timeseriesinsights/azure/mgmt/timeseriesinsights/aio/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from time import strftime, localtime
from cme.protocols.smb.remotefile import RemoteFile
from impacket.smb3structs import FILE_READ_DATA
from impacket.smbconnection import SessionError
import logging
import re
import traceback
class SMBSpider:
    """Recursively walks SMB shares, reporting files (and optionally
    directories) whose names — or contents — match the configured substring
    patterns or regexes. Every match path is accumulated in ``self.results``.
    """
    def __init__(self, smbconnection, logger):
        # Live impacket SMBConnection plus the CME logger used for output.
        self.smbconnection = smbconnection
        self.logger = logger
        self.share = None
        self.regex = []
        self.pattern = []
        self.folder = None
        self.exclude_dirs = []
        self.onlyfiles = True
        self.content = False
        self.results = []
    def spider(self, share, folder='.', pattern=[], regex=[], exclude_dirs=[], depth=None, content=False, onlyfiles=True):
        """Entry point: spider `share` (or every readable share when '*').

        Returns the accumulated list of matching paths.
        NOTE(review): the mutable default arguments are shared across calls;
        harmless only as long as they are never mutated in place — confirm.
        """
        if regex:
            try:
                self.regex = [re.compile(rx) for rx in regex]
            except Exception as e:
                # Invalid regexes are reported; spidering continues with
                # whatever (possibly empty) regex list was previously set.
                self.logger.error('Regex compilation error: {}'.format(e))
        self.folder = folder
        self.pattern = pattern
        self.exclude_dirs = exclude_dirs
        self.content = content
        self.onlyfiles = onlyfiles
        if share == "*":
            self.logger.info("Enumerating shares for spidering")
            permissions = []
            try:
                for share in self.smbconnection.listShares():
                    share_name = share['shi1_netname'][:-1]
                    share_remark = share['shi1_remark'][:-1]
                    try:
                        # Listing the share root doubles as a read-access probe.
                        self.smbconnection.listPath(share_name, '*')
                        self.share = share_name
                        self.logger.info("Spidering share: {0}".format(share_name))
                        self._spider(folder, depth)
                    except SessionError:
                        # No read access to this share; skip it silently.
                        pass
            except Exception as e:
                self.logger.error('Error enumerating shares: {}'.format(e))
        else:
            self.share = share
            self.logger.info("Spidering {0}".format(folder))
            self._spider(folder, depth)
        return self.results
    def _spider(self, subfolder, depth):
        '''
        Recursively list `subfolder`, match its entries, and descend into
        subdirectories while the `depth` budget (None = unlimited) lasts.
        Abandon all hope ye who enter here.
        You're now probably wondering if I was drunk and/or high when writing this.
        Getting this to work took a toll on my sanity. So yes. a lot.
        '''
        # The following is some funky shit that deals with the way impacket treats file paths
        if subfolder in ['', '.']:
            subfolder = '*'
        elif subfolder.startswith('*/'):
            subfolder = subfolder[2:] + '/*'
        else:
            subfolder = subfolder.replace('/*/', '/') + '/*'
        # End of the funky shit... or is it? Surprise! This whole thing is funky
        filelist = None
        try:
            filelist = self.smbconnection.listPath(self.share, subfolder)
            self.dir_list(filelist, subfolder)
            if depth == 0:
                # Depth budget exhausted; do not recurse any deeper.
                return
        except SessionError as e:
            if not filelist:
                if 'STATUS_ACCESS_DENIED' not in str(e):
                    logging.debug("Failed listing files on share {} in directory {}: {}".format(self.share, subfolder, e))
                return
        for result in filelist:
            # Recurse into real subdirectories, honoring exclude_dirs.
            if result.is_directory() and result.get_longname() not in ['.','..']:
                if subfolder == '*':
                    self._spider(subfolder.replace('*', '') + result.get_longname(), depth-1 if depth else None)
                elif subfolder != '*' and (subfolder[:-2].split('/')[-1] not in self.exclude_dirs):
                    self._spider(subfolder.replace('*', '') + result.get_longname(), depth-1 if depth else None)
        return
    def dir_list(self, files, path):
        """Match the listed `files` against patterns/regexes by name, then
        optionally hand each file to search_content for content matching."""
        path = path.replace('*', '')
        for result in files:
            for pattern in self.pattern:
                # Case-insensitive substring match on the entry name.
                if result.get_longname().lower().find(pattern.lower()) != -1:
                    if not self.onlyfiles and result.is_directory():
                        self.logger.highlight(u"//{}/{}/{}{} [dir]".format(self.smbconnection.getRemoteHost(), self.share,
                                                                           path,
                                                                           result.get_longname()))
                    else:
                        self.logger.highlight(u"//{}/{}/{}{} [lastm:'{}' size:{}]".format(self.smbconnection.getRemoteHost(), self.share,
                                                                                          path,
                                                                                          result.get_longname(),
                                                                                          'n\\a' if not self.get_lastm_time(result) else self.get_lastm_time(result),
                                                                                          result.get_filesize()))
                    self.results.append('{}{}'.format(path, result.get_longname()))
            for regex in self.regex:
                if regex.findall(result.get_longname()):
                    if not self.onlyfiles and result.is_directory():
                        self.logger.highlight(u"//{}/{}/{}{} [dir]".format(self.smbconnection.getRemoteHost(), self.share, path, result.get_longname()))
                    else:
                        self.logger.highlight(u"//{}/{}/{}{} [lastm:'{}' size:{}]".format(self.smbconnection.getRemoteHost(), self.share,
                                                                                          path,
                                                                                          result.get_longname(),
                                                                                          'n\\a' if not self.get_lastm_time(result) else self.get_lastm_time(result),
                                                                                          result.get_filesize()))
                    self.results.append('{}{}'.format(path, result.get_longname()))
            if self.content:
                if not result.is_directory():
                    self.search_content(path, result)
        return
    def search_content(self, path, result):
        """Stream the remote file in 4 KiB chunks, reporting every chunk
        that matches a pattern or regex (offset included in the output).

        NOTE(review): a match spanning two adjacent chunks is missed, since
        chunks are searched independently — confirm whether acceptable.
        """
        path = path.replace('*', '')
        try:
            rfile = RemoteFile(self.smbconnection, path + result.get_longname(), self.share, access=FILE_READ_DATA)
            rfile.open()
            while True:
                try:
                    contents = rfile.read(4096)
                    if not contents:
                        break
                except SessionError as e:
                    if 'STATUS_END_OF_FILE' in str(e):
                        break
                except Exception:
                    traceback.print_exc()
                    break
                for pattern in self.pattern:
                    if contents.lower().find(pattern.lower()) != -1:
                        self.logger.highlight(u"//{}/{}/{}{} [lastm:'{}' size:{} offset:{} pattern:'{}']".format(self.smbconnection.getRemoteHost(),
                                                                                                                 self.share,
                                                                                                                 path,
                                                                                                                 result.get_longname(),
                                                                                                                 'n\\a' if not self.get_lastm_time(result) else self.get_lastm_time(result),
                                                                                                                 result.get_filesize(),
                                                                                                                 rfile.tell(),
                                                                                                                 pattern))
                        self.results.append('{}{}'.format(path, result.get_longname()))
                for regex in self.regex:
                    if regex.findall(contents):
                        self.logger.highlight(u"//{}/{}/{}{} [lastm:'{}' size:{} offset:{} regex:'{}']".format(self.smbconnection.getRemoteHost(),
                                                                                                               self.share,
                                                                                                               path,
                                                                                                               result.get_longname(),
                                                                                                               'n\\a' if not self.get_lastm_time(result) else self.get_lastm_time(result),
                                                                                                               result.get_filesize(),
                                                                                                               rfile.tell(),
                                                                                                               regex.pattern))
                        self.results.append('{}{}'.format(path, result.get_longname()))
            rfile.close()
            return
        except SessionError as e:
            # File locked by another process; skip it quietly.
            if 'STATUS_SHARING_VIOLATION' in str(e):
                pass
        except Exception:
            traceback.print_exc()
    def get_lastm_time(self, result_obj):
        """Best-effort last-modified time as 'YYYY-MM-DD HH:MM' (local time), or None."""
        lastm_time = None
        try:
            lastm_time = strftime('%Y-%m-%d %H:%M', localtime(result_obj.get_mtime_epoch()))
        except Exception:
            pass
        return lastm_time
| {
"content_hash": "1fd3117eab392d7d5100acf3b8eb9537",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 183,
"avg_line_length": 50.47938144329897,
"alnum_prop": 0.41019095272133155,
"repo_name": "jorik041/CrackMapExec",
"id": "b69866debfbad30a095cb5054817271f516f9b48",
"size": "9793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cme/protocols/smb/smbspider.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "PowerShell",
"bytes": "1120855"
},
{
"name": "Python",
"bytes": "109495"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class ImageDataDisk(Model):
    """Describes a data disk.

    All required parameters must be populated in order to send to Azure.

    :param lun: Required. Specifies the logical unit number of the data disk.
     This value is used to identify data disks within the VM and therefore must
     be unique for each data disk attached to a VM.
    :type lun: int
    :param snapshot: The snapshot.
    :type snapshot: ~azure.mgmt.compute.v2016_04_30_preview.models.SubResource
    :param managed_disk: The managedDisk.
    :type managed_disk:
     ~azure.mgmt.compute.v2016_04_30_preview.models.SubResource
    :param blob_uri: The Virtual Hard Disk.
    :type blob_uri: str
    :param caching: Specifies the caching requirements. <br><br> Possible
     values are: <br><br> **None** <br><br> **ReadOnly** <br><br> **ReadWrite**
     <br><br> Default: **None for Standard storage. ReadOnly for Premium
     storage**. Possible values include: 'None', 'ReadOnly', 'ReadWrite'
    :type caching: str or
     ~azure.mgmt.compute.v2016_04_30_preview.models.CachingTypes
    :param disk_size_gb: Specifies the size of empty data disks in gigabytes.
     This element can be used to overwrite the name of the disk in a virtual
     machine image. <br><br> This value cannot be larger than 1023 GB
    :type disk_size_gb: int
    """

    _validation = {
        'lun': {'required': True},
    }

    _attribute_map = {
        'lun': {'key': 'lun', 'type': 'int'},
        'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
        'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
        'blob_uri': {'key': 'blobUri', 'type': 'str'},
        'caching': {'key': 'caching', 'type': 'CachingTypes'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ImageDataDisk, self).__init__(**kwargs)
        # Every attribute is simply lifted from **kwargs, defaulting to None.
        for field_name in ('lun', 'snapshot', 'managed_disk',
                           'blob_uri', 'caching', 'disk_size_gb'):
            setattr(self, field_name, kwargs.get(field_name, None))
| {
"content_hash": "ba6447ef034df251ed2f0021a9840f72",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 42.90384615384615,
"alnum_prop": 0.6364858807709547,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "d696f2804c4bde7e5db1221e3c419f5af6464b7a",
"size": "2705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/image_data_disk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
# Prefer the Python-3-only generated models (which use py3 syntax); fall back
# to the universal ``_models`` module on interpreters that cannot import them.
try:
    from ._models_py3 import Answer
    from ._models_py3 import CreativeWork
    from ._models_py3 import Error
    from ._models_py3 import ErrorResponse, ErrorResponseException
    from ._models_py3 import Identifiable
    from ._models_py3 import ImageObject
    from ._models_py3 import MediaObject
    from ._models_py3 import PivotSuggestions
    from ._models_py3 import Query
    from ._models_py3 import QueryContext
    from ._models_py3 import Response
    from ._models_py3 import ResponseBase
    from ._models_py3 import SearchResultsAnswer
    from ._models_py3 import Thing
    from ._models_py3 import TrendingVideos
    from ._models_py3 import TrendingVideosCategory
    from ._models_py3 import TrendingVideosSubcategory
    from ._models_py3 import TrendingVideosTile
    from ._models_py3 import VideoDetails
    from ._models_py3 import VideoObject
    from ._models_py3 import Videos
    from ._models_py3 import VideosModule
except (SyntaxError, ImportError):
    from ._models import Answer
    from ._models import CreativeWork
    from ._models import Error
    from ._models import ErrorResponse, ErrorResponseException
    from ._models import Identifiable
    from ._models import ImageObject
    from ._models import MediaObject
    from ._models import PivotSuggestions
    from ._models import Query
    from ._models import QueryContext
    from ._models import Response
    from ._models import ResponseBase
    from ._models import SearchResultsAnswer
    from ._models import Thing
    from ._models import TrendingVideos
    from ._models import TrendingVideosCategory
    from ._models import TrendingVideosSubcategory
    from ._models import TrendingVideosTile
    from ._models import VideoDetails
    from ._models import VideoObject
    from ._models import Videos
    from ._models import VideosModule
# Enumerations are version-independent and live in a separate module.
from ._video_search_client_enums import (
    ErrorCode,
    ErrorSubCode,
    Freshness,
    SafeSearch,
    TextFormat,
    VideoInsightModule,
    VideoLength,
    VideoPricing,
    VideoQueryScenario,
    VideoResolution,
)
# Public API of this package: every model and enum re-exported above.
__all__ = [
    'Answer',
    'CreativeWork',
    'Error',
    'ErrorResponse', 'ErrorResponseException',
    'Identifiable',
    'ImageObject',
    'MediaObject',
    'PivotSuggestions',
    'Query',
    'QueryContext',
    'Response',
    'ResponseBase',
    'SearchResultsAnswer',
    'Thing',
    'TrendingVideos',
    'TrendingVideosCategory',
    'TrendingVideosSubcategory',
    'TrendingVideosTile',
    'VideoDetails',
    'VideoObject',
    'Videos',
    'VideosModule',
    'VideoQueryScenario',
    'ErrorCode',
    'ErrorSubCode',
    'Freshness',
    'VideoLength',
    'VideoPricing',
    'VideoResolution',
    'SafeSearch',
    'TextFormat',
    'VideoInsightModule',
]
| {
"content_hash": "f7ea22229ba92605576147110ca2e95b",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 66,
"avg_line_length": 29.78494623655914,
"alnum_prop": 0.7043321299638989,
"repo_name": "Azure/azure-sdk-for-python",
"id": "482fbb3e796c27cb43ec286a8e486dc7a6f5067e",
"size": "3244",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/cognitiveservices/azure-cognitiveservices-search-videosearch/azure/cognitiveservices/search/videosearch/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import asyncio
from enum import Enum
import os
import traceback
from prompt_toolkit.token import Token
from prompt_toolkit.mouse_events import MouseEventType
class TaskManager:
    """Holds the list of visible tasks and renders them for the UI."""

    def __init__(self, tosh):
        self.tosh = tosh
        self._tasks = []

    def get_tokens(self, _):
        """Return tokens for all tasks, or a placeholder when there are none."""
        if not self._tasks:
            return [(Token.Result, 'No tasks\n')]
        rendered = []
        first = True
        for entry in self._tasks:
            if not first:
                # Blank separator line between consecutive tasks.
                rendered.append((Token.Task.Separator, '\n'))
            first = False
            rendered.extend(entry.tokens())
        return rendered

    def refresh(self):
        """Ask the application to redraw."""
        self.tosh.refresh()
class Task:
    """Base class for UI-visible tasks: tracks a status, renders itself (plus
    child tasks, connected with box-drawing characters) as prompt_toolkit
    token lines, and offers helpers to run child tasks sequentially or in
    parallel."""
    Status = Enum('Status', ['Waiting', 'Running', 'Success', 'Error'])
    def __init__(self, tosh):
        self._tosh = tosh
        self._status = Task.Status.Waiting
        # Tokens shown after the status marker on the task's first line.
        self._status_line_tokens = []
        # Additional free-form output, one token list per rendered line.
        self._output_token_lines = []
        self._children = []
    def _set_output_text(self, text):
        # Each text line becomes one single-token output line.
        self._output_token_lines = [[self._token(line)] for line in text.split('\n')]
    def tokens(self):
        """Flatten this task's token lines into one list, newline-terminated."""
        tokens = []
        for line in self._token_lines():
            tokens += line + [self._token('\n')]
        return tokens
    def _token_lines(self):
        # Status line first, then the child tree, then raw output lines.
        status_line = self._status_tokens() + [self._token(' ')] + self._status_line_tokens
        return [status_line] + self._children_token_lines() + self._output_token_lines
    def _children_token_lines(self):
        """Render children with tree connectors; hidden once all succeed."""
        token_lines = []
        active_children = any(child._status is not Task.Status.Success for child in self._children)
        if self._children and active_children:
            for child in self._children[:-1]:
                lines = child._token_lines()
                # Branch connector for intermediate children, with a vertical
                # bar prefixing their continuation lines.
                token_lines.append([self._token('├╴')] + lines[0])
                for l in lines[1:]:
                    token_lines.append([self._token('│ ')] + l)
                pass
            # Corner connector for the last child; continuation lines are
            # indented with plain spaces since no bar is needed below it.
            last_child = self._children[-1]
            lines = last_child._token_lines()
            token_lines.append([self._token('└╴')] + lines[0])
            for l in lines[1:]:
                token_lines.append([self._token('  ')] + l)
        return token_lines
    def _token(self, text, style=Token.Task.Result):
        # Third tuple element is the mouse handler prompt_toolkit will call.
        return (style, text, self._mouse_handler)
    def _status_tokens(self):
        # Map the current status onto the style template used for its marker.
        STATUS_TEMPLATES = {
            Task.Status.Waiting: 'task.status.waiting',
            Task.Status.Running: 'task.status.running',
            Task.Status.Success: 'task.status.success',
            Task.Status.Error: 'task.status.error'
        }
        template = STATUS_TEMPLATES[self._status]
        return self._tosh.style.get_template(template, mouse_handler=self._mouse_handler)
    def _mouse_handler(self, _, event):
        if event.event_type == MouseEventType.MOUSE_DOWN:
            return self._clicked()
        else:
            # Let prompt_toolkit apply its default handling.
            return NotImplemented
    def _clicked(self):
        # Hook for subclasses; clicking a base task does nothing.
        pass
    async def sub(self, task_or_func, *args, **kwargs):
        """Run a child task (instance or task-returning factory) and await it."""
        if isinstance(task_or_func, Task):
            _task = task_or_func
        else:
            _task = task_or_func(*args, **kwargs, _tosh=self._tosh)
        assert isinstance(_task, Task), str(task_or_func) + ' is not a task'
        self._children.append(_task)
        result = await _task.run()
        return result
    async def parallel(self, tasks):
        """Run (factory, args, kwargs) triples concurrently as child tasks.

        Returns results (or exceptions, per return_exceptions=True) in the
        input order."""
        _parallel_tasks = []
        for (task_func, args, kwargs) in tasks:
            _task = task_func(*args, **kwargs, _tosh=self._tosh)
            assert isinstance(_task, Task), str(task_func) + ' is not a task'
            self._children.append(_task)
            _parallel_tasks.append(asyncio.ensure_future(_task.run()))
        results = await asyncio.gather(*_parallel_tasks, return_exceptions=True)
        return results
class CoroutineTask(Task):
    """Task that tracks the lifecycle of a single awaited coroutine."""

    def __init__(self, tosh, coroutine, title):
        super().__init__(tosh)
        self._coroutine = coroutine
        self._status_line_tokens = [self._token(title)]

    async def run(self):
        """Await the wrapped coroutine, mirroring its outcome in the status."""
        self._status = Task.Status.Running
        self._tosh.refresh()
        try:
            value = await self._coroutine
        except BaseException as exc:
            self._status = Task.Status.Error
            raise exc
        else:
            self._status = Task.Status.Success
            return value
        finally:
            # Redraw regardless of outcome so the status change is visible.
            self._tosh.refresh()
class FakeTask:
    """Stand-in used when no task manager is attached; runs work directly."""

    async def sub(self, task_func, *args, **kwargs):
        """Await either a Task instance or a plain coroutine function."""
        if not isinstance(task_func, Task):
            return await task_func(*args, **kwargs)
        return await task_func.run()
# Decorator factory: wraps a coroutine function so that, when called with a
# `_tosh` keyword, it returns a UI-visible CoroutineTask; without `_tosh`,
# the coroutine runs unwrapped with a FakeTask stand-in as its `task` kwarg.
def task(title):
    def task_decorator(func):
        def task_method(*args, **kwargs):
            try:
                tosh = kwargs.pop('_tosh')
                # __new__ + explicit __init__ lets us hand the (not yet
                # initialized) task object to `func` as its `task` kwarg —
                # a plain CoroutineTask(...) call could not do that.
                _task = CoroutineTask.__new__(CoroutineTask)
                _task.__init__(tosh, func(*args, **kwargs, task=_task), title.format(pos=args, kw=kwargs))
                return _task
            except KeyError:
                # NOTE(review): a KeyError raised inside func() or
                # title.format() is also swallowed here and silently triggers
                # the FakeTask path — confirm this is intended.
                return func(*args, **kwargs, task=FakeTask())
        task_method._returns_task = True # Ugly hack, see variable.AttributeAccessTask._is_task_function
        return task_method
    return task_decorator
| {
"content_hash": "0593db408a97f4e41a5106f46d3521a7",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 106,
"avg_line_length": 34,
"alnum_prop": 0.5645905420991926,
"repo_name": "javitonino/tosh",
"id": "d929fc62fe145878df6b49c093915d57c3a54f9d",
"size": "5212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tosh/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "68848"
}
],
"symlink_target": ""
} |
"""Populate compound fields"""
# pylint: disable=missing-function-docstring
import logging
import os
import time
from datetime import datetime
from typing import Any, Generator, List, Optional
from urllib.parse import quote
import pandas as pd
import pubchempy as pcp
import requests
from rdkit import Chem
from rdkit.Chem.Descriptors import ExactMolWt
from metatlas.datastructures import metatlas_objects as metob
from metatlas.plots import dill2plots as dp
from metatlas.tools import cheminfo
logger = logging.getLogger(__name__)
def generate_template_atlas(
    raw_file_name: str, confidence_levels: List[str], polarity: str, name: str, mz_tolerance: float = 10
) -> metob.Atlas:
    """Build a template atlas from a TSV of compound identifications.

    Keeps only rows whose confidence_category is in *confidence_levels*
    and whose polarity matches, then converts the result to an atlas with
    compound metadata filled in from PubChem/CTS.
    """
    data = pd.read_csv(raw_file_name, sep="\t")
    acceptable = data[data["confidence_category"].isin(confidence_levels)]
    by_polarity = acceptable[acceptable["polarity"] == polarity]
    by_polarity = by_polarity.assign(label=None)
    # make_atlas_from_df performs the spreadsheet conversion and the
    # PubChem/CTS field population that was previously duplicated inline here.
    return make_atlas_from_df(by_polarity, name, polarity, mz_tolerance)
def count_non_empty(compound: metob.Compound) -> int:
    """Count how many of *compound*'s traits hold a value other than ''."""
    non_empty = 0
    for value in compound.trait_values().values():
        if value != "":
            non_empty += 1
    return non_empty
def flatten_inchi(mol: Chem.rdchem.Mol) -> str:
    """Return the InChI of *mol* with stereochemistry stripped ('' on failure)."""
    achiral_smiles = Chem.MolToSmiles(mol).replace("@", "")
    achiral_mol = Chem.MolFromSmiles(achiral_smiles)
    try:
        return Chem.MolToInchi(achiral_mol)
    except Exception:  # This fails when can't kekulize mol  # pylint: disable=broad-except
        logger.warning("failed to flatten a molecule")
        return ""
def chunks(data: List[Any], num: int) -> Generator[List[Any], None, None]:
    """Yield successive *num*-sized chunks of *data*; the last may be shorter."""
    for start in range(0, len(data), num):
        yield data[start:start + num]
def query_pubchem(inchi_key_list: List[str], items_per_query: int = 50) -> List[pcp.Compound]:
    """Look up compounds on PubChem by InChIKey, in batches.

    Sleeps for one second after every fifth batch to moderate request rate.
    """
    results = []
    for batch_number, batch in enumerate(chunks(inchi_key_list, items_per_query), start=1):
        results.extend(pcp.get_compounds(batch, "inchikey"))
        if batch_number % 5 == 0:
            time.sleep(1)
    return results
def get_pubchem_compound(inchi_key: str, pub_chem_results: List[pcp.Compound]) -> Optional[pcp.Compound]:
    """Return the first PubChem result whose inchikey matches, or None."""
    matches = (compound for compound in pub_chem_results if compound.inchikey == inchi_key)
    return next(matches, None)
def convert_id(input_id_type: str, output_id_type: str, query: str) -> str:
    """Translate a compound identifier via the Fiehn lab CTS web service.

    Raises requests.HTTPError on a failed request and IndexError when the
    service returns no results for *query*.
    """
    base_url = "https://cts.fiehnlab.ucdavis.edu/rest/convert/"
    path = "/".join((quote(input_id_type), quote(output_id_type), quote(query)))
    response = requests.get(base_url + path)
    response.raise_for_status()
    return response.json()[0]["results"][0]
def set_id(rec: metob.Compound, metatlas_name: str, cts_name: str, base_url: str, inchi_key: str) -> None:
    """Fill rec.<metatlas_name>_id and _url traits if currently blank.

    Existing non-empty values are kept. When the CTS lookup yields no
    result (IndexError), both traits are left untouched.
    """
    id_name = f"{metatlas_name}_id"
    url_name = f"{metatlas_name}_url"
    try:
        current = rec.trait_values()
        rec.set_trait(id_name, current[id_name] or convert_id("InChIKey", cts_name, inchi_key))
        # Re-read trait_values so the URL uses the id we may have just set.
        rec.set_trait(
            url_name,
            rec.trait_values()[url_name] or f"{base_url}{rec.trait_values()[id_name]}",
        )
    except IndexError:
        # CTS had no mapping for this InChIKey; leave the fields blank.
        pass
def set_all_ids(comp: metob.Compound) -> None:
    """Populate external database ids/urls (HMDB, ChEBI, LipidMaps, KEGG)."""
    registries = (
        ("hmdb", "Human Metabolome Database", "https://www.hmdb.ca/metabolites/"),
        ("chebi", "ChEBI", "https://www.ebi.ac.uk/chebi/searchId.do?chebiId="),
        ("lipidmaps", "LipidMAPS", "https://www.lipidmaps.org/databases/lmsd/"),
        ("kegg", "KEGG", "https://www.genome.jp/dbget-bin/www_bget?"),
    )
    for metatlas_name, cts_name, base_url in registries:
        set_id(comp, metatlas_name, cts_name, base_url, comp.inchi_key)
def fill_neutralized_fields(comp: metob.Compound, mol: Chem.rdchem.Mol) -> None:
    """Fill comp's neutralized inchi/inchi-key fields from a normalized *mol*.

    Only fields that are currently falsy (blank) are written; existing
    values are preserved. Returns silently if normalization fails.
    """
    try:
        norm_mol = cheminfo.normalize_molecule(mol)
    except Exception:  # pylint: disable=broad-except
        logger.warning("failed to normalized %s", comp.name)
        return
    assert norm_mol is not None
    if not comp.neutralized_inchi:
        comp.neutralized_inchi = Chem.inchi.MolToInchi(norm_mol)
    if not comp.neutralized_inchi_key:
        # Derived from the (possibly just-computed) neutralized inchi above.
        comp.neutralized_inchi_key = Chem.inchi.InchiToInchiKey(comp.neutralized_inchi)
    if not comp.neutralized_2d_inchi:
        comp.neutralized_2d_inchi = flatten_inchi(norm_mol)  # type: ignore
    if not comp.neutralized_2d_inchi_key:
        comp.neutralized_2d_inchi_key = Chem.inchi.InchiToInchiKey(comp.neutralized_2d_inchi)
def fill_calculated_fields(comp: metob.Compound, mol: Chem.rdchem.Mol) -> None:
    """Fill chemistry fields derivable from comp.inchi/mol via RDKit.

    Uses ``or`` guards, so only falsy (blank/zero) fields are recomputed.
    """
    assert mol is not None
    comp.inchi_key = comp.inchi_key or Chem.inchi.InchiToInchiKey(comp.inchi)
    comp.formula = comp.formula or Chem.rdMolDescriptors.CalcMolFormula(mol)
    comp.mono_isotopic_molecular_weight = comp.mono_isotopic_molecular_weight or ExactMolWt(mol)
    # NOTE(review): a legitimately-zero charge is falsy and gets recomputed
    # on every call; the recomputed value is the same, so this only costs work.
    comp.permanent_charge = comp.permanent_charge or Chem.GetFormalCharge(mol)
    comp.number_components = comp.number_components or 1  # type: ignore
    comp.num_free_radicals = comp.num_free_radicals or Chem.Descriptors.NumRadicalElectrons(mol)
    fill_neutralized_fields(comp, mol)
def first_all_ascii(list_of_strings: List[str]) -> str:
    """Return the first string containing only ASCII characters.

    Raises ValueError when no such string exists.
    """
    match = next((s for s in list_of_strings if s.isascii()), None)
    if match is None:
        raise ValueError("No strings found with only ASCII characters")
    return match
def filter_out_strings_with_non_ascii(list_of_strings: List[str]) -> List[str]:
    """Return only the strings composed entirely of ASCII characters."""
    return list(filter(str.isascii, list_of_strings))
def fill_fields(comp: metob.Compound, pubchem_results: List[pcp.Compound]) -> None:
    """
    Populate blank fields that can be inferred from other fields.

    Does not overwrite any existing values that are not None, '', or 'Untitled'.
    """
    mol = Chem.inchi.MolFromInchi(comp.inchi)
    if mol is None:
        # Cannot parse the InChI; nothing can be derived.
        return
    fill_calculated_fields(comp, mol)
    set_all_ids(comp)
    pubchem = get_pubchem_compound(comp.inchi_key, pubchem_results)
    if pubchem is not None:
        if not comp.pubchem_compound_id:
            comp.pubchem_compound_id = pubchem.cid
        if not comp.pubchem_url:
            comp.pubchem_url = (
                f"https://pubchem.ncbi.nlm.nih.gov/compound/{comp.pubchem_compound_id}"
            )  # type: ignore[assignment]
        if not comp.synonyms:
            comp.synonyms = "///".join(
                filter_out_strings_with_non_ascii(pubchem.synonyms)
            )  # type: ignore[assignment]
        if not comp.iupac_name:
            comp.iupac_name = pubchem.iupac_name
    if comp.name in ["", "Untitled"] or "///" in comp.name:
        # Prefer the first all-ASCII synonym; fall back to the IUPAC name.
        # Bug fix: the old code built a fallback list but always took
        # element 0, so the IUPAC name was never used and first_all_ascii
        # raised ValueError when no ASCII-only synonym existed.
        try:
            comp.name = first_all_ascii(comp.synonyms.split("///"))  # type: ignore[assignment]
        except ValueError:
            comp.name = comp.iupac_name  # type: ignore[assignment]
def create_c18_template_atlases(source: os.PathLike, polarity: str) -> None:
    """Generate and store a C18 template atlas for 'negative' or 'positive'."""
    assert polarity in ["negative", "positive"]
    date_str = datetime.today().strftime("%Y%m%d")
    atlas_name = f"C18_{date_str}_TPL_{polarity[:3].upper()}"
    template_atlas = generate_template_atlas(source, ["Gold", "Platinum"], polarity, atlas_name)
    metob.store(template_atlas)
# pylint: disable=too-many-arguments
def generate_stds_atlas(
    raw_file_name: str,
    inchi_keys: List[str],
    polarity: str,
    name: str,
    mz_tolerance: float = 10,
    more_rows: Optional[pd.DataFrame] = None,
) -> metob.Atlas:
    """Build a standards atlas limited to *inchi_keys* and *polarity*.

    more_rows: optional extra rows added to the TSV data before filtering
    (e.g. compounds missing from the source file).
    """
    data = pd.read_csv(raw_file_name, sep="\t")
    if more_rows is not None:
        # DataFrame.append was removed in pandas 2.0; concat is the
        # drop-in replacement (same default index behavior).
        data = pd.concat([data, more_rows])
    acceptable = data[data["inchi_key"].isin(inchi_keys)]
    by_polarity = acceptable[acceptable["polarity"] == polarity]
    by_polarity = by_polarity.assign(label=None)
    return make_atlas_from_df(by_polarity, name, polarity, mz_tolerance)
def fill_atlas_compound_fields(atlas: metob.Atlas) -> metob.Atlas:
    """Populate compound metadata for every identification in *atlas* (in place)."""
    keys = [cid.compound[0].inchi_key for cid in atlas.compound_identifications]
    pubchem_results = query_pubchem(keys)
    for cid in atlas.compound_identifications:
        compound = cid.compound[0]
        fill_fields(compound, pubchem_results)
        cid.name = compound.name
    return atlas
def make_atlas_from_df(data: pd.DataFrame, name: str, polarity: str, mz_tolerance: float) -> metob.Atlas:
    """Convert a DataFrame to an atlas and fill in compound metadata."""
    new_atlas = dp.make_atlas_from_spreadsheet(
        data,
        name,
        filetype="dataframe",
        polarity=polarity,
        store=False,
        mz_tolerance=mz_tolerance,
    )
    return fill_atlas_compound_fields(new_atlas)
def create_c18_stds_atlases(source: os.PathLike, polarity: str, mz_tolerance: float = 10) -> None:
    """Generate and store the C18 QC standards atlas for *polarity*.

    Reads the source TSV, adds ABMBA (not present in the C18 data), keeps
    one row per compound (preferring Platinum confidence) and stores the
    resulting atlas.
    """
    data = pd.read_csv(source, sep="\t")
    std_inchi_keys = {
        "Phenylalanine": "COLNVLDHVKWLRT-QMMMGPOBSA-N",
        "L-Tryptophan": "QIVBCDIJIAJPQS-SECBINFHSA-N",
        "Salicylic acid": "YGSDEFSMJLZEOE-UHFFFAOYSA-N",
        # this one will not be found in c18_data...
        "2-Amino-3-bromo-5-methylbenzoic acid": "LCMZECCEEOQWLQ-UHFFFAOYSA-N",
    }
    abmba = "2-Amino-3-bromo-5-methylbenzoic acid"
    more_rows = pd.DataFrame(
        {
            "inchi_key": [std_inchi_keys[abmba]],
            "label": [abmba],
            "adduct": ["[M+H]+" if polarity == "positive" else "[M-H]-"],
            "polarity": [polarity],
            "rt_min": [4.5],
            "rt_peak": [4.7],
            "rt_max": [4.9],
            # neutral mass +/- one proton mass depending on polarity
            "mz": [228.97384 + (1.00727647 * (1 if polarity == "positive" else -1))],
            "confidence_category": "Platinum",
        }
    )
    # DataFrame.append was removed in pandas 2.0; concat is the drop-in
    # replacement. (more_rows is always constructed above, so no guard.)
    data = pd.concat([data, more_rows])
    acceptable = data[data["inchi_key"].isin(std_inchi_keys.values())]
    by_polarity = acceptable[acceptable["polarity"] == polarity]
    by_polarity = by_polarity.assign(label=None)
    # Prefer Platinum rows when a compound appears more than once.
    by_polarity["rank"] = by_polarity["confidence_category"] == "Platinum"
    single = by_polarity.loc[by_polarity.groupby(["inchi_key"])["rank"].idxmax()]
    name = f"C18_{datetime.today().strftime('%Y%m%d')}_QC_{polarity[:3].upper()}"
    atlas = make_atlas_from_df(single, name, polarity, mz_tolerance)
    metob.store(atlas)
| {
"content_hash": "2dd4e13cb1bb9f8fa94fff1c1c66c5b6",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 106,
"avg_line_length": 39.237037037037034,
"alnum_prop": 0.645365301113838,
"repo_name": "biorack/metatlas",
"id": "aac42d1b2e9b344acd7e61f12c6392964fbc7499",
"size": "10594",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "metatlas/interfaces/compounds/populate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "4850"
},
{
"name": "Jupyter Notebook",
"bytes": "1233246"
},
{
"name": "Python",
"bytes": "1501450"
},
{
"name": "Shell",
"bytes": "66479"
},
{
"name": "wdl",
"bytes": "18796"
}
],
"symlink_target": ""
} |
from base_memory import *
import os, sys
import random
class TestMemoryAllocDoesNotReturnNull():
    """ri_mem_alloc() must return a usable (non-NULL) pointer."""

    def setup(self):
        pass

    def teardown(self):
        pass

    def test(self):
        l = ri_mem_alloc(128)
        # 'is not None' instead of '!= None': identity check cannot be
        # fooled by a custom __eq__ on the returned pointer object.
        assert l is not None
class TestMemoryFreeReturnsZero():
    """Freeing a block from ri_mem_alloc() must succeed (return code 0)."""

    def setup(self):
        # Allocate a small block for test() to free.
        self.mem = ri_mem_alloc(128)

    def teardown(self):
        pass

    def test(self):
        status = ri_mem_free(self.mem)
        assert (status == 0), status
class TestAlignedMallocWithRandomArg():
    """Fuzz aligned_malloc with many random (size, alignment) pairs.

    Python 2 test module (uses the print statement).
    """

    def setup(self):
        pass

    def teardown(self):
        pass

    def test(self):
        nTests = 10000
        for i in range(nTests):
            sz = random.randint(0, 1024*1024)
            align = random.randint(0, 1024)
            print "sz = %d, align = %d" % (sz, align)
            p = aligned_malloc(sz, align);
            # NOTE(review): only 16-byte alignment is asserted even though
            # 'align' is randomized up to 1024 -- confirm aligned_malloc's
            # contract (minimum alignment vs requested alignment).
            assert int(p) % 16 == 0, "Not 16-byte aligned: addr(p) = %d" % int(p)
            aligned_free(p)
# class TestMemoryAllocWithLargeSizeWillReturnNull():
#
# def setup(self):
# pass
#
# def teardown(self):
# pass
#
# def test(self):
# try:
# l = ri_mem_alloc(1024*1024*1024)
# except Error:
# print "OSError"
#
# pass
| {
"content_hash": "c3c77e0faf3d05d2d162fde90cc2d01f",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 81,
"avg_line_length": 16.986842105263158,
"alnum_prop": 0.5189775367931836,
"repo_name": "chiyama/lucille",
"id": "f82427ff208ff33c167badf24086ef02bfd2c579",
"size": "1291",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/testBase/testMemory/test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import unittest
from .. import Stopwords
class TestStopwords(unittest.TestCase):
    """Stopwords() with no argument must equal the default English list."""

    def test_default(self):
        default_stopwords = Stopwords()
        english_stopwords = Stopwords('arakhne/stopwords/defaults/english.txt')
        return self.assertEqual(default_stopwords, english_stopwords)
| {
"content_hash": "0b1a171b65858095b8fc2c4d80c0602c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 69,
"avg_line_length": 23.363636363636363,
"alnum_prop": 0.7003891050583657,
"repo_name": "thePortus/arakhne",
"id": "22abaabb09d1d413c391c0f535519b5f3de2d32f",
"size": "257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arakhne/stopwords/tests/test_stopwords.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84723"
}
],
"symlink_target": ""
} |
class Solution(object):
    def isValid(self, s):
        """
        Return True if every bracket in *s* is closed in the right order.

        Fix: the original iterated with xrange, which does not exist in
        Python 3; iterate the characters directly instead.

        :type s: str
        :rtype: bool
        """
        # Map each closer to the opener it must match.
        pairs = {")": "(", "]": "[", "}": "{"}
        stack = []
        for char in s:
            if char in "([{":
                # Opening bracket: one level deeper.
                stack.append(char)
            else:
                # Closing bracket must match the most recent opener.
                if not stack:
                    return False
                opener = stack.pop()
                if char in pairs and pairs[char] != opener:
                    return False
        # Valid only if every opener was closed.
        return len(stack) == 0
| {
"content_hash": "b1b882326ee2acab55033b51ba858df9",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 72,
"avg_line_length": 34.76,
"alnum_prop": 0.4430379746835443,
"repo_name": "young-geng/leet_code",
"id": "f776efe76b9587466ad0e9e382f6761f256d1fd7",
"size": "920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problems/20_valid-parentheses/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "599"
},
{
"name": "Python",
"bytes": "111519"
}
],
"symlink_target": ""
} |
"""
This module implements the main Evennia server process, the core of
the game engine.
This module should be started with the 'twistd' executable since it
sets up all the networking features. (this is done automatically
by evennia/server/server_runner.py).
"""
from __future__ import print_function
from builtins import object
import time
import sys
import os
from twisted.web import static
from twisted.application import internet, service
from twisted.internet import reactor, defer
from twisted.internet.task import LoopingCall
import django
django.setup()
import evennia
evennia._init()
from django.db import connection
from django.conf import settings
from evennia.players.models import PlayerDB
from evennia.scripts.models import ScriptDB
from evennia.server.models import ServerConfig
from evennia.server import initial_setup
from evennia.utils.utils import get_evennia_version, mod_import, make_iter
from evennia.comms import channelhandler
from evennia.server.sessionhandler import SESSIONS
# Shortcut for setting attributes without triggering typeclass descriptors.
_SA = object.__setattr__

if os.name == 'nt':
    # For Windows we need to handle pid files manually.
    SERVER_PIDFILE = os.path.join(settings.GAME_DIR, "server", 'server.pid')

# a file with a flag telling the server to restart after shutdown or not.
SERVER_RESTART = os.path.join(settings.GAME_DIR, "server", 'server.restart')

# module containing hook methods called during start_stop
SERVER_STARTSTOP_MODULE = mod_import(settings.AT_SERVER_STARTSTOP_MODULE)

# modules containing plugin services
SERVER_SERVICES_PLUGIN_MODULES = [mod_import(module) for module in make_iter(settings.SERVER_SERVICES_PLUGIN_MODULES)]

try:
    WEB_PLUGINS_MODULE = mod_import(settings.WEB_PLUGINS_MODULE)
except ImportError:
    # Missing plugin module is non-fatal; web plugins are simply disabled.
    WEB_PLUGINS_MODULE = None
    print ("WARNING: settings.WEB_PLUGINS_MODULE not found - "
           "copy 'evennia/game_template/server/conf/web_plugins.py to mygame/server/conf.")

#------------------------------------------------------------
# Evennia Server settings
#------------------------------------------------------------

SERVERNAME = settings.SERVERNAME
VERSION = get_evennia_version()

AMP_ENABLED = True
AMP_HOST = settings.AMP_HOST
AMP_PORT = settings.AMP_PORT
AMP_INTERFACE = settings.AMP_INTERFACE

WEBSERVER_PORTS = settings.WEBSERVER_PORTS
WEBSERVER_INTERFACES = settings.WEBSERVER_INTERFACES

GUEST_ENABLED = settings.GUEST_ENABLED

# server-channel mappings
WEBSERVER_ENABLED = settings.WEBSERVER_ENABLED and WEBSERVER_PORTS and WEBSERVER_INTERFACES
IRC_ENABLED = settings.IRC_ENABLED
RSS_ENABLED = settings.RSS_ENABLED
WEBCLIENT_ENABLED = settings.WEBCLIENT_ENABLED

# Maintenance function - this is called repeatedly by the server

# State shared with _server_maintenance() below.
_MAINTENANCE_COUNT = 0
_FLUSH_CACHE = None
_IDMAPPER_CACHE_MAXSIZE = settings.IDMAPPER_CACHE_MAXSIZE
_GAMETIME_MODULE = None
def _server_maintenance():
    """
    This maintenance function handles repeated checks and updates that
    the server needs to do. It is called once per minute by the
    LoopingCall started below.
    """
    global EVENNIA, _MAINTENANCE_COUNT, _FLUSH_CACHE, _GAMETIME_MODULE
    # Late imports: these modules need Django/Evennia fully initialized.
    if not _FLUSH_CACHE:
        from evennia.utils.idmapper.models import conditional_flush as _FLUSH_CACHE
    if not _GAMETIME_MODULE:
        from evennia.utils import gametime as _GAMETIME_MODULE
    _MAINTENANCE_COUNT += 1
    now = time.time()
    if _MAINTENANCE_COUNT == 1:
        # first call after a reload
        _GAMETIME_MODULE.SERVER_START_TIME = now
        _GAMETIME_MODULE.SERVER_RUNTIME = ServerConfig.objects.conf("runtime", default=0.0)
    else:
        _GAMETIME_MODULE.SERVER_RUNTIME += 60.0
    # update game time and save it across reloads
    _GAMETIME_MODULE.SERVER_RUNTIME_LAST_UPDATED = now
    ServerConfig.objects.conf("runtime", _GAMETIME_MODULE.SERVER_RUNTIME)
    if _MAINTENANCE_COUNT % 300 == 0:
        # flush the idmapper cache if it has grown too large.
        # NOTE(review): with one call per minute this fires every 300
        # minutes (5 hours), not "every 5 minutes" as previously
        # commented -- confirm the intended cadence.
        _FLUSH_CACHE(_IDMAPPER_CACHE_MAXSIZE)
    if _MAINTENANCE_COUNT % 3600 == 0:
        # validate scripts.
        # NOTE(review): fires every 3600 minutes (2.5 days), not hourly
        # as previously commented -- confirm the intended cadence.
        evennia.ScriptDB.objects.validate()
    if _MAINTENANCE_COUNT % 3700 == 0:
        # validate channels off-sync with scripts
        evennia.CHANNEL_HANDLER.update()
    ## Commenting this out, it is probably not needed
    ## with CONN_MAX_AGE set. Keeping it as a reminder
    ## if database-gone-away errors appears again /Griatch
    #if _MAINTENANCE_COUNT % 18000 == 0:
    #    connection.close()
# Start the repeating maintenance task; it fires once per minute.
maintenance_task = LoopingCall(_server_maintenance)
maintenance_task.start(60, now=True)  # call every minute

#------------------------------------------------------------
# Evennia Main Server object
#------------------------------------------------------------
class Evennia(object):
    """
    The main Evennia server handler. This object sets up the database and
    tracks and interlinks all the twisted network services that make up
    evennia.
    """

    def __init__(self, application):
        """
        Setup the server.

        application - an instantiated Twisted application
        """
        sys.path.insert(1, '.')

        # create a store of services
        self.services = service.IServiceCollection(application)
        self.amp_protocol = None  # set by amp factory
        self.sessions = SESSIONS
        self.sessions.server = self

        # Database-specific startup optimizations.
        self.sqlite3_prep()

        self.start_time = time.time()

        # Run the initial setup if needed
        self.run_initial_setup()

        # initialize channelhandler
        channelhandler.CHANNELHANDLER.update()

        # set a callback if the server is killed abruptly,
        # by Ctrl-C, reboot etc.
        reactor.addSystemEventTrigger('before', 'shutdown',
                                      self.shutdown, _reactor_stopping=True)
        self.game_running = True

        # track the server time
        self.run_init_hooks()

    # Server startup methods

    def sqlite3_prep(self):
        """
        Optimize some SQLite stuff at startup since we
        can't save it to the database.
        """
        # NOTE(review): the Django version test is a *string* comparison
        # (e.g. "1.10" < "1.2" is True); harmless in practice since modern
        # setups match the DATABASES branch instead -- confirm.
        if ((".".join(str(i) for i in django.VERSION) < "1.2" and settings.DATABASE_ENGINE == "sqlite3")
            or (hasattr(settings, 'DATABASES')
                and settings.DATABASES.get("default", {}).get('ENGINE', None)
                == 'django.db.backends.sqlite3')):
            cursor = connection.cursor()
            cursor.execute("PRAGMA cache_size=10000")
            cursor.execute("PRAGMA synchronous=OFF")
            cursor.execute("PRAGMA count_changes=OFF")
            cursor.execute("PRAGMA temp_store=2")

    def update_defaults(self):
        """
        We make sure to store the most important object defaults here, so
        we can catch if they change and update them on-objects automatically.
        This allows for changing default cmdset locations and default
        typeclasses in the settings file and have them auto-update all
        already existing objects.
        """
        # setting names
        settings_names = ("CMDSET_CHARACTER", "CMDSET_PLAYER",
                          "BASE_PLAYER_TYPECLASS", "BASE_OBJECT_TYPECLASS",
                          "BASE_CHARACTER_TYPECLASS", "BASE_ROOM_TYPECLASS",
                          "BASE_EXIT_TYPECLASS", "BASE_SCRIPT_TYPECLASS",
                          "BASE_CHANNEL_TYPECLASS")
        # get previous and current settings so they can be compared
        # NOTE(review): under Python 3, zip() returns a lazy iterator that
        # the 'mismatches' comprehension below exhausts, so the later loops
        # over settings_compare would see nothing. Works under Python 2
        # (zip returns a list) -- confirm py3 behavior.
        settings_compare = zip([ServerConfig.objects.conf(name) for name in settings_names],
                               [settings.__getattr__(name) for name in settings_names])
        mismatches = [i for i, tup in enumerate(settings_compare) if tup[0] and tup[1] and tup[0] != tup[1]]
        if len(mismatches):  # can't use any() since mismatches may be [0] which reads as False for any()
            # we have a changed default. Import relevant objects and
            # run the update
            from evennia.objects.models import ObjectDB
            from evennia.comms.models import ChannelDB
            #from evennia.players.models import PlayerDB
            for i, prev, curr in ((i, tup[0], tup[1]) for i, tup in enumerate(settings_compare) if i in mismatches):
                # update the database
                print(" %s:\n '%s' changed to '%s'. Updating unchanged entries in database ..." % (settings_names[i], prev, curr))
                if i == 0:
                    ObjectDB.objects.filter(db_cmdset_storage__exact=prev).update(db_cmdset_storage=curr)
                if i == 1:
                    PlayerDB.objects.filter(db_cmdset_storage__exact=prev).update(db_cmdset_storage=curr)
                if i == 2:
                    PlayerDB.objects.filter(db_typeclass_path__exact=prev).update(db_typeclass_path=curr)
                if i in (3, 4, 5, 6):
                    ObjectDB.objects.filter(db_typeclass_path__exact=prev).update(db_typeclass_path=curr)
                if i == 7:
                    ScriptDB.objects.filter(db_typeclass_path__exact=prev).update(db_typeclass_path=curr)
                if i == 8:
                    ChannelDB.objects.filter(db_typeclass_path__exact=prev).update(db_typeclass_path=curr)
                # store the new default and clean caches
                ServerConfig.objects.conf(settings_names[i], curr)
                ObjectDB.flush_instance_cache()
                PlayerDB.flush_instance_cache()
                ScriptDB.flush_instance_cache()
                ChannelDB.flush_instance_cache()
        # if this is the first start we might not have a "previous"
        # setup saved. Store it now.
        [ServerConfig.objects.conf(settings_names[i], tup[1])
         for i, tup in enumerate(settings_compare) if not tup[0]]

    def run_initial_setup(self):
        """
        This attempts to run the initial_setup script of the server.
        It returns if this is not the first time the server starts.
        Once finished the last_initial_setup_step is set to -1.
        """
        last_initial_setup_step = ServerConfig.objects.conf('last_initial_setup_step')
        if not last_initial_setup_step:
            # None is only returned if the config does not exist,
            # i.e. this is an empty DB that needs populating.
            print(' Server started for the first time. Setting defaults.')
            initial_setup.handle_setup(0)
            print('-' * 50)
        elif int(last_initial_setup_step) >= 0:
            # a positive value means the setup crashed on one of its
            # modules and setup will resume from this step, retrying
            # the last failed module. When all are finished, the step
            # is set to -1 to show it does not need to be run again.
            print(' Resuming initial setup from step %(last)s.' % \
                  {'last': last_initial_setup_step})
            initial_setup.handle_setup(int(last_initial_setup_step))
            print('-' * 50)

    def run_init_hooks(self):
        """
        Called every server start
        """
        from evennia.objects.models import ObjectDB
        #from evennia.players.models import PlayerDB
        # update eventual changed defaults
        self.update_defaults()
        [o.at_init() for o in ObjectDB.get_all_cached_instances()]
        [p.at_init() for p in PlayerDB.get_all_cached_instances()]
        with open(SERVER_RESTART, 'r') as f:
            mode = f.read()
        if mode in ('True', 'reload'):
            from evennia.scripts.monitorhandler import MONITOR_HANDLER
            MONITOR_HANDLER.restore()
        from evennia.scripts.tickerhandler import TICKER_HANDLER
        TICKER_HANDLER.restore(mode in ('True', 'reload'))
        # call correct server hook based on start file value
        if mode in ('True', 'reload'):
            # True was the old reload flag, kept for compatibilty
            self.at_server_reload_start()
        elif mode == 'reset':
            # only run hook, don't purge sessions
            self.at_server_cold_start()
        elif mode in ('reset', 'shutdown'):
            # NOTE(review): 'reset' is unreachable here (handled by the
            # branch above); this branch effectively runs only for
            # 'shutdown' -- confirm against upstream intent.
            self.at_server_cold_start()
            # clear eventual lingering session storages
            ObjectDB.objects.clear_all_sessids()
        # always call this regardless of start type
        self.at_server_start()

    def set_restart_mode(self, mode=None):
        """
        This manages the flag file that tells the runner if the server is
        reloading, resetting or shutting down. Valid modes are
        'reload', 'reset', 'shutdown' and None.
        If mode is None, no change will be done to the flag file.
        Either way, the active restart setting (Restart=True/False) is
        returned so the server knows which mode it's in.
        """
        if mode is None:
            with open(SERVER_RESTART, 'r') as f:
                # mode is either shutdown, reset or reload
                mode = f.read()
        else:
            with open(SERVER_RESTART, 'w') as f:
                f.write(str(mode))
        return mode

    @defer.inlineCallbacks
    def shutdown(self, mode=None, _reactor_stopping=False):
        """
        Shuts down the server from inside it.

        mode - sets the server restart mode.
               'reload' - server restarts, no "persistent" scripts
                          are stopped, at_reload hooks called.
               'reset' - server restarts, non-persistent scripts stopped,
                         at_shutdown hooks called but sessions will not
                         be disconnected.
               'shutdown' - like reset, but server will not auto-restart.
               None - keep currently set flag from flag file.
        _reactor_stopping - this is set if server is stopped by a kill
                            command OR this method was already called
                            once - in both cases the reactor is
                            dead/stopping already.
        """
        if _reactor_stopping and hasattr(self, "shutdown_complete"):
            # this means we have already passed through this method
            # once; we don't need to run the shutdown procedure again.
            defer.returnValue(None)
        mode = self.set_restart_mode(mode)

        from evennia.objects.models import ObjectDB
        #from evennia.players.models import PlayerDB
        from evennia.server.models import ServerConfig

        if mode == 'reload':
            # call restart hooks
            ServerConfig.objects.conf("server_restart_mode", "reload")
            yield [o.at_server_reload() for o in ObjectDB.get_all_cached_instances()]
            yield [p.at_server_reload() for p in PlayerDB.get_all_cached_instances()]
            yield [(s.pause(manual_pause=False), s.at_server_reload()) for s in ScriptDB.get_all_cached_instances() if s.is_active]
            yield self.sessions.all_sessions_portal_sync()
            self.at_server_reload_stop()
            # only save monitor state on reload, not on shutdown/reset
            from evennia.scripts.monitorhandler import MONITOR_HANDLER
            MONITOR_HANDLER.save()
        else:
            if mode == 'reset':
                # like shutdown but don't unset the is_connected flag and don't disconnect sessions
                yield [o.at_server_shutdown() for o in ObjectDB.get_all_cached_instances()]
                yield [p.at_server_shutdown() for p in PlayerDB.get_all_cached_instances()]
                if self.amp_protocol:
                    yield self.sessions.all_sessions_portal_sync()
            else:  # shutdown
                yield [_SA(p, "is_connected", False) for p in PlayerDB.get_all_cached_instances()]
                yield [o.at_server_shutdown() for o in ObjectDB.get_all_cached_instances()]
                yield [(p.unpuppet_all(), p.at_server_shutdown())
                       for p in PlayerDB.get_all_cached_instances()]
                yield ObjectDB.objects.clear_all_sessids()
            # common to both reset and shutdown
            yield [(s.pause(manual_pause=False), s.at_server_shutdown()) for s in ScriptDB.get_all_cached_instances()]
            ServerConfig.objects.conf("server_restart_mode", "reset")
            self.at_server_cold_stop()

        # tickerhandler state should always be saved.
        from evennia.scripts.tickerhandler import TICKER_HANDLER
        TICKER_HANDLER.save()

        # always called, also for a reload
        self.at_server_stop()

        # if _reactor_stopping is true, reactor does not need to
        # be stopped again.
        if os.name == 'nt' and os.path.exists(SERVER_PIDFILE):
            # for Windows we need to remove pid files manually
            os.remove(SERVER_PIDFILE)
        if not _reactor_stopping:
            # this will also send a reactor.stop signal, so we set a
            # flag to avoid loops.
            self.shutdown_complete = True
            # kill the server
            reactor.callLater(0, reactor.stop)

    # server start/stop hooks

    def at_server_start(self):
        """
        This is called every time the server starts up, regardless of
        how it was shut down.
        """
        if SERVER_STARTSTOP_MODULE:
            SERVER_STARTSTOP_MODULE.at_server_start()

    def at_server_stop(self):
        """
        This is called just before a server is shut down, regardless
        of whether it is for a reload, reset or shutdown.
        """
        if SERVER_STARTSTOP_MODULE:
            SERVER_STARTSTOP_MODULE.at_server_stop()

    def at_server_reload_start(self):
        """
        This is called only when server starts back up after a reload.
        """
        if SERVER_STARTSTOP_MODULE:
            SERVER_STARTSTOP_MODULE.at_server_reload_start()

    def at_server_reload_stop(self):
        """
        This is called only time the server stops before a reload.
        """
        if SERVER_STARTSTOP_MODULE:
            SERVER_STARTSTOP_MODULE.at_server_reload_stop()

    def at_server_cold_start(self):
        """
        This is called only when the server starts "cold", i.e. after a
        shutdown or a reset.
        """
        # We need to do this just in case the server was killed in a way where
        # the normal cleanup operations did not have time to run.
        from evennia.objects.models import ObjectDB
        ObjectDB.objects.clear_all_sessids()
        # Remove non-persistent scripts
        from evennia.scripts.models import ScriptDB
        for script in ScriptDB.objects.filter(db_persistent=False):
            script.stop()
        if GUEST_ENABLED:
            # Delete guest accounts (and their characters) left over from
            # the previous run.
            for guest in PlayerDB.objects.all().filter(db_typeclass_path=settings.BASE_GUEST_TYPECLASS):
                for character in guest.db._playable_characters:
                    if character: character.delete()
                guest.delete()
        if SERVER_STARTSTOP_MODULE:
            SERVER_STARTSTOP_MODULE.at_server_cold_start()

    def at_server_cold_stop(self):
        """
        This is called only when the server goes down due to a shutdown or reset.
        """
        if SERVER_STARTSTOP_MODULE:
            SERVER_STARTSTOP_MODULE.at_server_cold_stop()
#------------------------------------------------------------
#
# Start the Evennia game server and add all active services
#
#------------------------------------------------------------

# Tell the system the server is starting up; some things are not available yet
ServerConfig.objects.conf("server_starting_mode", True)

# twistd requires us to define the variable 'application' so it knows
# what to execute from.
application = service.Application('Evennia')

# The main evennia server program. This sets up the database
# and is where we store all the other services.
EVENNIA = Evennia(application)

print('-' * 50)
print(' %(servername)s Server (%(version)s) started.' % {'servername': SERVERNAME, 'version': VERSION})

if AMP_ENABLED:

    # The AMP protocol handles the communication between
    # the portal and the mud server. Only reason to ever deactivate
    # it would be during testing and debugging.

    ifacestr = ""
    if AMP_INTERFACE != '127.0.0.1':
        ifacestr = "-%s" % AMP_INTERFACE
    print(' amp (to Portal)%s: %s' % (ifacestr, AMP_PORT))

    from evennia.server import amp
    factory = amp.AmpServerFactory(EVENNIA)
    amp_service = internet.TCPServer(AMP_PORT, factory, interface=AMP_INTERFACE)
    amp_service.setName("EvenniaPortal")
    EVENNIA.services.addService(amp_service)

if WEBSERVER_ENABLED:
    # Start a django-compatible webserver.
    from twisted.python import threadpool
    from evennia.server.webserver import DjangoWebRoot, WSGIWebServer, Website

    # start a thread pool and define the root url (/) as a wsgi resource
    # recognized by Django
    threads = threadpool.ThreadPool(minthreads=max(1, settings.WEBSERVER_THREADPOOL_LIMITS[0]),
                                    maxthreads=max(1, settings.WEBSERVER_THREADPOOL_LIMITS[1]))
    web_root = DjangoWebRoot(threads)
    # point our media resources to url /media
    web_root.putChild("media", static.File(settings.MEDIA_ROOT))
    # point our static resources to url /static
    web_root.putChild("static", static.File(settings.STATIC_ROOT))

    if WEB_PLUGINS_MODULE:
        # custom overloads
        web_root = WEB_PLUGINS_MODULE.at_webserver_root_creation(web_root)

    web_site = Website(web_root, logPath=settings.HTTP_LOG_FILE)

    for proxyport, serverport in WEBSERVER_PORTS:
        # create the webserver (we only need the port for this)
        webserver = WSGIWebServer(threads, serverport, web_site, interface='127.0.0.1')
        webserver.setName('EvenniaWebServer%s' % serverport)
        EVENNIA.services.addService(webserver)
        print(" webserver: %s" % serverport)

# Summary of optional channel connections enabled in settings.
ENABLED = []

if IRC_ENABLED:
    # IRC channel connections
    ENABLED.append('irc')

if RSS_ENABLED:
    # RSS feed channel connections
    ENABLED.append('rss')

if ENABLED:
    print(" " + ", ".join(ENABLED) + " enabled.")

for plugin_module in SERVER_SERVICES_PLUGIN_MODULES:
    # external plugin protocols
    plugin_module.start_plugin_services(EVENNIA)

print('-' * 50)  # end of terminal output

# clear server startup mode
ServerConfig.objects.conf("server_starting_mode", delete=True)

if os.name == 'nt':
    # Windows only: Set PID file manually
    with open(SERVER_PIDFILE, 'w') as f:
        f.write(str(os.getpid()))
| {
"content_hash": "67d8047c3844291f1502605bf85d35fe",
"timestamp": "",
"source": "github",
"line_count": 559,
"max_line_length": 131,
"avg_line_length": 39.991055456171736,
"alnum_prop": 0.6279132185193469,
"repo_name": "titeuf87/evennia",
"id": "028447daa9abe23d33443138a7689d29e110ad03",
"size": "22355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evennia/server/server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "40125"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "HTML",
"bytes": "13723"
},
{
"name": "JavaScript",
"bytes": "28374"
},
{
"name": "Python",
"bytes": "2307118"
},
{
"name": "Shell",
"bytes": "4237"
}
],
"symlink_target": ""
} |
"""RFC 3548: Base16, Base32, Base64 Data Encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
#import re
import struct
import binascii
__all__ = [
# Legacy interface exports traditional RFC 1521 Base64 encodings
'encode', 'decode', 'encodestring', 'decodestring',
# Generalized interface for other encodings
'b64encode', 'b64decode', 'b32encode', 'b32decode',
'b16encode', 'b16decode',
# Standard Base64 encoding
'standard_b64encode', 'standard_b64decode',
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
# starting at:
#
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
'urlsafe_b64encode', 'urlsafe_b64decode',
]
_translation = [chr(_x) for _x in range(256)]
EMPTYSTRING = ''
def _translate(s, altchars):
translation = _translation[:]
for k, v in altchars.items():
translation[ord(k)] = v
#return s.translate(''.join(translation))
t = ''
for c in s:
t += translation[ord(c)]
return t
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
    """Encode the string *s* using Base64 and return the result.

    Optional *altchars* must be a string of at least two characters
    (extras are ignored) giving replacements for '+' and '/', e.g. to
    produce URL- or filesystem-safe output.
    """
    # b2a_base64 appends a trailing newline; strip it off.
    encoded = binascii.b2a_base64(s)[:-1]
    if altchars is None:
        return encoded
    return _translate(encoded, {'+': altchars[0], '/': altchars[1]})
def b64decode(s, altchars=None):
    """Decode a Base64 encoded string *s* and return the result.

    Optional *altchars* must be a string of at least two characters
    (extras are ignored) naming the alternative alphabet used instead of
    the '+' and '/' characters.

    Raises TypeError if *s* is incorrectly padded or contains characters
    outside the alphabet.
    """
    if altchars is not None:
        s = _translate(s, {altchars[0]: '+', altchars[1]: '/'})
    try:
        return binascii.a2b_base64(s)
    # ``except E as name`` (valid since Python 2.6) replaces the
    # Python-2-only ``except E, name`` form.
    except binascii.Error as msg:
        # Transform this exception for consistency
        raise TypeError(msg)
def standard_b64encode(s):
    """Encode *s* with the standard Base64 alphabet; return the result."""
    # Plain delegation: b64encode's default alphabet is the standard one.
    return b64encode(s)
def standard_b64decode(s):
    """Decode a string *s* encoded with the standard Base64 alphabet.

    Raises TypeError on incorrect padding or non-alphabet characters.
    """
    # Plain delegation: b64decode's default alphabet is the standard one.
    return b64decode(s)
def urlsafe_b64encode(s):
    """Encode *s* using the URL-safe Base64 alphabet.

    The alphabet substitutes '-' for '+' and '_' for '/'.
    """
    return b64encode(s, '-_')
def urlsafe_b64decode(s):
    """Decode a string encoded with the URL-safe Base64 alphabet,
    which uses '-' instead of '+' and '_' instead of '/'.
    (The original docstring said "standard" alphabet -- a copy-paste slip.)

    s is the string to decode. The decoded string is returned. A TypeError
    is raised if the string is incorrectly padded or if there are
    non-alphabet characters present in the string.
    """
    return b64decode(s, '-_')
# Base32 encoding/decoding must be done in Python
_b32alphabet = {
0: 'A', 9: 'J', 18: 'S', 27: '3',
1: 'B', 10: 'K', 19: 'T', 28: '4',
2: 'C', 11: 'L', 20: 'U', 29: '5',
3: 'D', 12: 'M', 21: 'V', 30: '6',
4: 'E', 13: 'N', 22: 'W', 31: '7',
5: 'F', 14: 'O', 23: 'X',
6: 'G', 15: 'P', 24: 'Y',
7: 'H', 16: 'Q', 25: 'Z',
8: 'I', 17: 'R', 26: '2',
}
_b32tab = _b32alphabet.items()
_b32tab.sort()
_b32tab = [v for k, v in _b32tab]
_b32rev = dict([(v, k) for k, v in _b32alphabet.items()])
def b32encode(s):
    """Encode a string using Base32 (RFC 3548).

    s is the string to encode; it is processed in 5-byte (40-bit) quanta,
    each emitting eight 5-bit alphabet characters.  The encoded string is
    returned, '='-padded to a multiple of 8 characters.
    """
    parts = []
    # Number of full 5-byte groups, and how many bytes spill over.
    quanta, leftover = divmod(len(s), 5)
    # Pad the last quantum with zero bits if necessary
    if leftover:
        # ljust is used instead of ('\0' * n) -- presumably a pyjs
        # portability workaround; verify before simplifying.
        s += ("".ljust(5 - leftover, '\0'))
        quanta += 1
    for i in range(quanta):
        # c1 and c2 are 16 bits wide, c3 is 8 bits wide. The intent of this
        # code is to process the 40 bits in units of 5 bits. So we take the 1
        # leftover bit of c1 and tack it onto c2. Then we take the 2 leftover
        # bits of c2 and tack them onto c3. The shifts and masks are intended
        # to give us values of exactly 5 bits in width.
        c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
        c2 += (c1 & 1) << 16 # 17 bits wide
        c3 += (c2 & 3) << 8 # 10 bits wide
        parts.extend([_b32tab[c1 >> 11], # bits 1 - 5
                      _b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
                      _b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
                      _b32tab[c2 >> 12], # bits 16 - 20 (1 - 5)
                      _b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
                      _b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
                      _b32tab[c3 >> 5], # bits 31 - 35 (1 - 5)
                      _b32tab[c3 & 0x1f], # bits 36 - 40 (1 - 5)
                      ])
    encoded = EMPTYSTRING.join(parts)
    # Adjust for any leftover partial quanta: the trailing characters that
    # encode only the zero-padding bits are replaced with '=' signs.
    if leftover == 1:
        return encoded[:-6] + '======'
    elif leftover == 2:
        return encoded[:-4] + '===='
    elif leftover == 3:
        return encoded[:-3] + '==='
    elif leftover == 4:
        return encoded[:-1] + '='
    return encoded
def b32decode(s, casefold=False, map01=None):
    """Decode a Base32 encoded string.

    s is the string to decode. Optional casefold is a flag specifying whether
    a lowercase alphabet is acceptable as input. For security purposes, the
    default is False.

    RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
    (oh), and for optional mapping of the digit 1 (one) to either the letter I
    (eye) or letter L (el). The optional argument map01 when not None,
    specifies which letter the digit 1 should be mapped to (when map01 is not
    None, the digit 0 is always mapped to the letter O). For security
    purposes the default is None, so that 0 and 1 are not allowed in the
    input.

    The decoded string is returned. A TypeError is raised if s were
    incorrectly padded or if there are non-alphabet characters present in the
    string.
    """
    # Valid Base32 input is always a whole number of 8-character quanta.
    quanta, leftover = divmod(len(s), 8)
    if leftover:
        raise TypeError('Incorrect padding')
    # Handle section 2.4 zero and one mapping. The flag map01 will be either
    # False, or the character to map the digit 1 (one) to. It should be
    # either L (el) or I (eye).
    if map01:
        s = _translate(s, {'0': 'O', '1': map01})
    if casefold:
        s = s.upper()
    # Strip off pad characters from the right. We need to count the pad
    # characters because this will tell us how many null bytes to remove from
    # the end of the decoded string.
    padchars = 0
    # The original regex search for trailing '=' was replaced with rstrip --
    # presumably because the pyjs runtime lacks the re module.
    padchars = len(s)
    s = s.rstrip('=')
    padchars -= len(s)
    # Now decode the full quanta: accumulate eight 5-bit values into a
    # 40-bit integer, then emit it as 5 bytes via unhexlify.
    parts = []
    acc = 0
    shift = 35
    for c in s:
        val = _b32rev.get(c)
        if val is None:
            raise TypeError('Non-base32 digit found')
        # Multiplication by 2**shift instead of a left shift -- presumably
        # because JS bitwise operators are limited to 32 bits; verify
        # before changing back to ``<< shift``.
        acc += _b32rev[c] * (2**shift)
        shift -= 5
        if shift < 0:
            parts.append(binascii.unhexlify('%010x' % acc))
            acc = 0
            shift = 35
    # Process the last, partial quanta: the pad count determines how many
    # trailing zero bytes of the final 5-byte group are not real data.
    last = binascii.unhexlify('%010x' % acc)
    if padchars == 0:
        last = '' # No characters
    elif padchars == 1:
        last = last[:-1]
    elif padchars == 3:
        last = last[:-2]
    elif padchars == 4:
        last = last[:-3]
    elif padchars == 6:
        last = last[:-4]
    else:
        raise TypeError('Incorrect padding')
    parts.append(last)
    return EMPTYSTRING.join(parts)
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
    """Return the Base16 (uppercase hex) encoding of the string *s*."""
    hexed = binascii.hexlify(s)
    # RFC 3548 specifies the uppercase alphabet; hexlify emits lowercase.
    return hexed.upper()
def b16decode(s, casefold=False):
    """Decode a Base16 encoded string.

    s is the string to decode. Optional casefold is a flag specifying whether
    a lowercase alphabet is acceptable as input. For security purposes, the
    default is False.

    The decoded string is returned. A TypeError is raised if s were
    incorrectly padded or if there are non-alphabet characters present in the
    string.
    """
    if casefold:
        s = s.upper()
    # The original stdlib used re.search('[^0-9A-F]', s) here.  It was
    # replaced with a native JS regular expression:  __javascript__ is a
    # pyjs pseudo-module mapping to the browser's RegExp -- this import
    # will fail on CPython, so this function only works when translated
    # to JavaScript by pyjs.
    from __javascript__ import RegExp
    r = RegExp('[^0-9A-F]')
    if r.test(s):
        raise TypeError('Non-base16 digit found')
    return binascii.unhexlify(s)
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though.
# Legacy line-oriented interface limits.
MAXLINESIZE = 76  # characters per encoded line, excluding the CRLF
MAXBINSIZE = MAXLINESIZE // 4 * 3  # raw bytes that encode to one full line
def encode(input, output):
    """Encode a file (legacy interface): read from *input*, write to *output*.

    File operations are not available in this port, so this stub always
    raises NotImplementedError.  (The dead commented-out implementation
    carried over from the stdlib has been removed.)
    """
    raise NotImplementedError("encode: no file operations possible")
def decode(input, output):
    """Decode a file (legacy interface): read from *input*, write to *output*.

    File operations are not available in this port, so this stub always
    raises NotImplementedError.  (The dead commented-out implementation
    carried over from the stdlib has been removed.)
    """
    raise NotImplementedError("decode: no file operations possible")
def encodestring(s):
    """Encode the string *s*, emitting MAXBINSIZE input bytes per line."""
    pieces = [binascii.b2a_base64(s[pos:pos + MAXBINSIZE])
              for pos in range(0, len(s), MAXBINSIZE)]
    return "".join(pieces)
def decodestring(s):
    """Decode the Base64 string *s* in one shot and return the result."""
    return binascii.a2b_base64(s)
# Useable as a script...
def test():
    """Small command-line test program.

    Options: -e encode (default), -d or -u decode, -t run the round-trip
    self-test (test1).  Reads from the named file, or stdin when no file
    (or '-') is given; writes to stdout.

    NOTE(review): this function uses Python 2 syntax (print statements and
    ``except exc, name``) and will not parse under Python 3.
    """
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'deut')
    except getopt.error, msg:
        # Report usage on stderr and exit with a non-zero status.
        sys.stdout = sys.stderr
        print msg
        print """usage: %s [-d|-e|-u|-t] [file|-]
        -d, -u: decode
        -e: encode (default)
        -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
        sys.exit(2)
    func = encode
    for o, a in opts:
        if o == '-e': func = encode
        if o == '-d': func = decode
        if o == '-u': func = decode
        if o == '-t': test1(); return
    if args and args[0] != '-':
        func(open(args[0], 'rb'), sys.stdout)
    else:
        func(sys.stdin, sys.stdout)
def test1():
    """Round-trip 'Aladdin:open sesame' through encode/decode and print it.

    NOTE(review): uses a Python 2 print statement.
    """
    s0 = "Aladdin:open sesame"
    s1 = encodestring(s0)
    s2 = decodestring(s1)
    print s0, repr(s1), s2
# Run the command-line test driver when executed as a script.
if __name__ == '__main__':
    test()
| {
"content_hash": "108b966c92b690a9720f8bdf651cb6c9",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 78,
"avg_line_length": 31.337730870712402,
"alnum_prop": 0.5932474530605372,
"repo_name": "gpitel/pyjs",
"id": "07e12bc05a3505b410867a0a9994ac65387fe79c",
"size": "12134",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pyjs/lib/base64.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4640"
},
{
"name": "Groff",
"bytes": "6633"
},
{
"name": "HTML",
"bytes": "10106"
},
{
"name": "JavaScript",
"bytes": "63385"
},
{
"name": "Makefile",
"bytes": "453"
},
{
"name": "Python",
"bytes": "5517085"
},
{
"name": "Shell",
"bytes": "4264"
}
],
"symlink_target": ""
} |
from dbobj.dbobj import dbRecord
from dbobj.dbobj import dbTable
class lng_lex_lex(dbRecord):
    """A single record of the lng_lex_lex table (a lexeme-to-lexeme
    relation).
    """
    def __init__(self, app, **args):
        # Accept either an explicit "fields" mapping or the field values
        # passed directly as keyword arguments ("fields" wins if present).
        # ``in`` replaces dict.has_key(), which was removed in Python 3
        # and deprecated in Python 2.
        if "fields" in args:
            dbRecord.__init__(self, app, "lng_lex_lex", args["fields"])
        else:
            dbRecord.__init__(self, app, "lng_lex_lex", args)
class lng_lex_lexemes(dbTable):
    """Collection class for all lexeme-relation (lng_lex_lex) records."""
    def __init__(self, app):
        # Bind the table name and its record class to the generic dbTable.
        dbTable.__init__(self, app, "lng_lex_lex", lng_lex_lex)
# Copyright notice placeholder (left empty in this file).
__copyright__="""
"""
| {
"content_hash": "74e8c9c0cb611c4f96cddb0979c46564",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 65,
"avg_line_length": 24.047619047619047,
"alnum_prop": 0.6158415841584158,
"repo_name": "boudewijnrempt/kura",
"id": "a32dd62c4e78a1fdb93e3513a1a3e1c43abd5069",
"size": "923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kuralib/lng_lxlx.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "54679"
},
{
"name": "Makefile",
"bytes": "1138"
},
{
"name": "Python",
"bytes": "522300"
}
],
"symlink_target": ""
} |
from python_qt_binding.QtCore import *
from python_qt_binding.QtGui import *
from diarc.util import *
import sys
import logging
log = logging.getLogger('qt_view.SpacerContainter')
class SpacerContainer(QGraphicsWidget):
    """ A SpacerContainer is a specialized widget for creating artificial
    spacing between other widgets "inside" it. These spaces consist of Spacer
    objects, which are usually drawn blank to give the same effect as margins.
    Items and spacers occur in a linear arrangement, but the direction is
    unspecified.

    The items and spacers are intended to be linked together using an
    AnchoredLayout, as part of a two level layout process. The first level is
    called 'linking', in which we actually reassign new AnchoredLayout
    properties to objects that are defined to be beside each other. The second
    level is for the layout mechanism to perform the actual layout, with items
    being children of the SpacerContainer's parent object and Spacers being
    children of the SpacerContainer itself.

    Spacer objects can be used as targets for drag and drop operations.
    This code is a generalization of repeated code used in qtview.

    +--------+          +---------+          +--------+
    | Item A | Spacer A | Current | Spacer B | Item B |
    +--------+          +---------+          +--------+

    +--------+          +--------+
    | Item A | Spacer   | Item B |
    +--------+          +--------+
    """
    def __init__(self, parent):
        super(SpacerContainer, self).__init__(parent=parent)
        # Parent needs to be of type "DrawingBoard" to make sure that
        # self.parent = typecheck(parent,DrawingBoard,"parent")
        self.parent = parent
        # All live Spacer objects currently managed by this container.
        self._spacers = list()
        # We need to know what specific type of spacer we are using, since
        # all new spacers are instantiated inside getSpacerA or getSpacerB.
        self._spacerType = None  # a subclass of SpacerContainer.Spacer

    def _release(self):
        """ Releases all spacer objects and disassociates from parent. """
        self.setVisible(False)
        self.setParent(None)
        self.parent = None
        # Release every spacer, then drop the whole list at once.  The
        # original removed elements from self._spacers while iterating over
        # it, which silently skips every other spacer.
        for spacer in self._spacers:
            spacer._release()
        self._spacers = list()

    def removeItemSpacers(self, item):
        """ Removes spacers that touch a particular item. Used by
        SpacerContainer.Item when it is being released.
        """
        # Collect each affected spacer exactly once; the original could
        # append a spacer twice (once per side) and then release it twice.
        removalList = [spacer for spacer in self._spacers
                       if (spacer.itemA is not None and spacer.itemA == item)
                       or (spacer.itemB is not None and spacer.itemB == item)]
        log.debug("... removing %d spacers linked to item"%len(removalList))
        for spacer in removalList:
            spacer._release()
            self._spacers.remove(spacer)

    def getSpacerA(self, item):
        """ Return the current spacer, or create a new one, in the direction of
        the current item's 'itemA'. This is used by SpacerContainer.Item objects
        during the linking process.
        """
        # Determine if the item is currently being used
        isUsed = item.isUsed()
        # Find spacers where item is itemB (making this spacer A).  List
        # comprehensions are used instead of filter() so len()/indexing
        # work on both Python 2 and 3.
        ret = [x for x in self._spacers if x.itemB == item]
        # Delete old unused spacers. Remove them from the QLayout system so
        # they don't try to draw anymore and from our list of spacers so we
        # don't try to search it anymore.
        for spacer in ret:
            if (not spacer.itemA == item.itemA()) or (not isUsed):
                spacer.setParent(None)
                spacer._release()
                self._spacers.remove(spacer)
        ret = [x for x in self._spacers if x.itemB == item]
        # Once we have deleted old spacers, make sure we are using the band.
        # If we are not, don't return anything (just None)
        if not isUsed:
            return None
        # Return existing spacer if only one exists. There should not be extras
        if len(ret) == 1 and ret[0].itemA == item.itemA():
            return ret[0]
        elif len(ret) >= 1:
            raise Exception("Too many spacers found %d" % len(ret))
        # No existing spacers fit - create a new spacer in direction A
        spacer = self.spacerType(self)
        spacer.itemB = item
        spacer.itemA = item.itemA()
        self._spacers.append(spacer)
        return spacer

    def getSpacerB(self, item):
        """ Finds the spacer for an item in direction B (see getSpacerA). """
        # Determine if the item is currently being used
        isUsed = item.isUsed()
        # Find spacers where item is itemA (making this spacer B)
        ret = [x for x in self._spacers if x.itemA == item]
        # Delete old unused spacers. Remove them from the QLayout system so
        # they don't try to draw anymore and from our list of spacers so we
        # don't try to search it anymore.
        for spacer in ret:
            if (not spacer.itemB == item.itemB()) or (not isUsed):
                spacer.setParent(None)
                spacer._release()
                self._spacers.remove(spacer)
        ret = [x for x in self._spacers if x.itemA == item]
        # Once we have deleted old spacers, make sure we are using the band.
        # If we are not, don't return anything (just None)
        if not isUsed:
            return None
        # Return existing spacer if only one exists. There should not be extras
        if len(ret) == 1 and ret[0].itemB == item.itemB():
            return ret[0]
        elif len(ret) >= 1:
            raise Exception("Too many spacers found %d" % len(ret))
        # No existing spacers fit - create a new spacer in direction B
        spacer = self.spacerType(self)
        spacer.itemA = item
        spacer.itemB = item.itemB()
        self._spacers.append(spacer)
        return spacer

    def _get_spacerType(self):
        # ``is None`` replaces isinstance(..., types.NoneType), which breaks
        # on Python 3 where types.NoneType was removed.
        if self._spacerType is None:
            raise Exception("you must set the spacerType for the SpacerContainer %r"%type(self))
        return self._spacerType

    def _set_spacerType(self, spacerType):
        # self._spacerType = typecheck(spacerType,SpacerContainer.Spacer,"spacerType")
        self._spacerType = spacerType
    spacerType = property(_get_spacerType, _set_spacerType)

    class Spacer(QGraphicsWidget):
        """ A Spacer between two items.

        Spacers are automatically created and removed by the SpacerContainer
        to separate adjacent Items. You must create your own Spacer object
        that implements the link() method to define how the spacers connect
        to the Items on either side of it. The implementation may also
        contain hooks for receiving drag and drop events.
        """
        def __init__(self, parent):
            # typecheck() both validates and returns parent, so the
            # original's second raw ``self.parent = parent`` was redundant.
            self.parent = typecheck(parent, SpacerContainer, "parent")
            super(SpacerContainer.Spacer, self).__init__(parent=parent)
            # Items on either side of this spacer; set by the container.
            self.itemA = None
            self.itemB = None

        def _release(self):
            """ Hide this spacer and detach it from its items and parent. """
            self.setVisible(False)
            self.itemA = None
            self.itemB = None
            self.setParent(None)
            self.parent = None

        def layout(self):
            """ Returns the QGraphicsLayout that is being used. """
            return self.parent.parent.layout()

        def link(self):
            """ Must be implemented by the subclass. This method should consist
            of self.layout().addAnchor(self, ..., self.itemA/B, ...) calls
            anchoring the sides of itemA and itemB to this spacer.
            """
            raise Exception("You must implement the linking to the spacersA and B")

    class Item(QGraphicsWidget):
        """ An Item with spacers around it. """
        def __init__(self, parent, container):
            # self.parent = typecheck(parent,DrawingBoard,"parent")
            self.parent = parent
            self.container = typecheck(container, SpacerContainer, "container")
            super(SpacerContainer.Item, self).__init__(parent=parent)

        def _release(self):
            """ Hide this item and remove all spacers that touch it. """
            self.setVisible(False)
            self.setParent(None)
            self.parent = None
            self.container.removeItemSpacers(self)
            self.container = None

        def itemA(self):
            raise Exception("You must implement a way to return itemA")

        def itemB(self):
            raise Exception("You must implement a way to return itemB")

        def isUsed(self):
            """ return if the item is currently being used or not - determines
            if the item will be visible or not
            """
            raise Exception("You must implement a way to return if the item is used")

        def link(self):
            """ This method manages the spacer objects linked on either side of
            the item. When the spacers link() method is called, the anchored
            layout will hook into the object.
            """
            # Calculate Spacers A and B - deleting old spacers to this item
            # when necessary, reusing existing spacers if possible, and
            # otherwise creating new spacers.  (An unused local
            # ``l = self.parent.layout()`` lookup was removed here.)
            spacerA = self.container.getSpacerA(self)
            spacerB = self.container.getSpacerB(self)
            # ``is None`` replaces isinstance(..., types.NoneType) for
            # Python 3 compatibility.
            if spacerA is None or spacerB is None:
                self.setVisible(False)
                return
            self.setVisible(True)
            spacerA.link()
            spacerB.link()
| {
"content_hash": "b614692b046f7dd2664593b48945cbf4",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 97,
"avg_line_length": 43.1858407079646,
"alnum_prop": 0.6111680327868853,
"repo_name": "mrdanbrooks/diarc",
"id": "041f49d2532db358e5fa0694d7bb2b22599d3bb3",
"size": "10361",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qt_view/SpacerContainer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "185063"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import unittest
from calc import views
class CalcTestBase(unittest.TestCase):
    """Shared base class for calc test cases.

    Provides ``self.app``, a test client for the calc application
    (``views.app.test_client()``), recreated before every test.
    """
    def setUp(self):
        # A fresh client per test keeps tests isolated from each other.
        self.app = views.app.test_client()
| {
"content_hash": "0481802341aa81632ce76ee5bfa8f159",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 42,
"avg_line_length": 18.5,
"alnum_prop": 0.7189189189189189,
"repo_name": "saadbinakhlaq/prove-it",
"id": "9d2627035005270f4071f49470ea42b064649625",
"size": "185",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "calc/tests/base.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.