| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|
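Each row below is one sample: six metadata columns followed by a Python source file split into `prefix`, `middle`, and `suffix`. As a minimal sketch of how a consumer might rebuild a file and format a fill-in-the-middle training example (the sentinel token strings are illustrative assumptions, not defined by this dataset):

```python
def reassemble(prefix, middle, suffix):
    # The original source file is simply the three pieces concatenated in order.
    return prefix + middle + suffix

def to_fim_example(prefix, middle, suffix,
                   pre="<fim_prefix>", suf="<fim_suffix>", mid="<fim_middle>"):
    # PSM ordering: the model is shown prefix and suffix, then learns to
    # produce the middle. The sentinel names above are placeholders; real
    # tokenizers define their own special tokens.
    return f"{pre}{prefix}{suf}{suffix}{mid}{middle}"
```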
DayGitH/Python-Challenges | DailyProgrammer/DP20150713A.py | Python | mit | 1,597 | 0.005009 | """
[2015-07-13] Challenge #223 [Easy] Garland words
https://www.reddit.com/r/dailyprogrammer/comments/3d4fwj/20150713_challenge_223_easy_garland_words/
# Description
A [_garland word_](http://blog.vivekhaldar.com/post/89763722591/garland-words) is one that starts and ends with the
same N letters in the same order, for some N greater than 0, but less than the length of the word. I'll call the
maximum N for which this works the garland word's _degree_. For instance, "onion" is a garland word of degree 2,
because its first 2 letters "on" are the same as its last 2 letters. The name "garland word" comes from the fact that
you can make chains of the word in this manner:
onionionionionionionionionionion...
Today's challenge is to write a function `garland` that, given a lowercase word, returns the degree of the word if it's
a garland word, and 0 otherwise.
# Examples
garland("programmer") -> 0
garland("ceramic") -> 1
garland("onion") -> 2
garland("alfalfa") -> 4
# Optional challenges
1. Given a garland word, print out the chain using that word, as with "onion" above. You can make it as long or short
as you like, even infinite.
1. Find the largest degree of any garland word in the [enable1 English word
list](https://code.google.com/p/dotnetperls-controls/downloads/detail?name=enable1.txt).
1. Find a word list for some other language, and see if you can find a language with a garland word with a higher
degree.
*Thanks to /u/skeeto for submitting this challenge on /r/dailyprogrammer_ideas!*
"""
def main():
pass
if __name__ == "__main__":
main()
|
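The row above leaves `main` as a stub; a minimal sketch of the `garland` function the challenge describes (the `chain` helper for the first optional challenge is an addition for illustration, not part of the original file) could look like this:

```python
def garland(word):
    # Largest N, 0 < N < len(word), where the first N letters equal the last N.
    for n in range(len(word) - 1, 0, -1):
        if word[:n] == word[-n:]:
            return n
    return 0

def chain(word, repeats=5):
    # Optional challenge 1: overlap the garland letters between repetitions.
    n = garland(word)
    return word + word[n:] * (repeats - 1) if n else word

assert garland("programmer") == 0
assert garland("ceramic") == 1
assert garland("onion") == 2
assert garland("alfalfa") == 4
print(chain("onion"))  # onionionionionion
```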
priyankarani/trytond-shipping-dhl-de | tests/test_shipment.py | Python | bsd-3-clause | 29,744 | 0.000067 | # -*- coding: utf-8 -*-
"""
test_shipment
Test dhl de Integration
"""
from decimal import Decimal
from time import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
import sys
import os
import unittest
import trytond.tests.test_tryton
from trytond.tests.test_tryton import POOL, DB_NAME, USER, CONTEXT
from trytond.transaction import Transaction
from trytond.config import config
from trytond.exceptions import UserError
config.set('database', 'path', '.')
DIR = os.path.abspath(os.path.normpath(
os.path.join(__file__, '..', '..', '..', '..', '..', 'trytond')
))
if os.path.isdir(DIR):
sys.path.insert(0, os.path.dirname(DIR))
class TestDHLDEShipment(unittest.TestCase):
"""Test DHL DE Integration
"""
def setUp(self):
trytond.tests.test_tryton.install_module('shipping_dhl_de')
self.Address = POOL.get('party.address')
self.Sale = POOL.get('sale.sale')
self.SaleLine = POOL.get('sale.line')
self.SaleConfig = POOL.get('sale.configuration')
self.Product = POOL.get('product.product')
self.Uom = POOL.get('product.uom')
self.Account = POOL.get('account.account')
self.Category = POOL.get('product.category')
self.Carrier = POOL.get('carrier')
self.Party = POOL.get('party.party')
self.PartyContact = POOL.get('party.contact_mechanism')
self.PaymentTerm = POOL.get('account.invoice.payment_term')
self.Country = POOL.get('country.country')
self.Subdivision = POOL.get('country.subdivision')
self.PartyAddress = POOL.get('party.address')
self.StockLocation = POOL.get('stock.location')
self.StockShipmentOut = POOL.get('stock.shipment.out')
self.Currency = POOL.get('currency.currency')
self.Company = POOL.get('company.company')
self.IrAttachment = POOL.get('ir.attachment')
self.User = POOL.get('res.user')
self.Template = POOL.get('product.template')
self.GenerateLabel = POOL.get('shipping.label', type="wizard")
        assert 'DHL_DE_USERNAME' in os.environ, \
            "DHL_DE_USERNAME missing. Hint: Use export DHL_DE_USERNAME=<string>"
        assert 'DHL_DE_PASSWORD' in os.environ, \
            "DHL_DE_PASSWORD missing. Hint: Use export DHL_DE_PASSWORD=<string>"
        self.username = os.environ['DHL_DE_USERNAME']
self.password = os.environ['DHL_DE_PASSWORD']
def _create_coa_minimal(self, company):
"""Create a minimal chart of accounts
"""
AccountTemplate = POOL.get('account.account.template')
Account = POOL.get('account.account')
        account_create_chart = POOL.get(
'account.create_chart', type="wizard"
)
account_template, = AccountTemplate.search(
[('parent', '=', None)]
)
session_id, _, _ = account_create_chart.create()
create_chart = account_create_chart(session_id)
create_chart.account.account_template = account_template
create_chart.account.company = company
create_chart.transition_create_account()
receivable, = Account.search([
('kind', '=', 'receivable'),
('company', '=', company),
])
payable, = Account.search([
('kind', '=', 'payable'),
('company', '=', company),
])
create_chart.properties.company = company
create_chart.properties.account_receivable = receivable
create_chart.properties.account_payable = payable
create_chart.transition_create_properties()
def _create_fiscal_year(self, date_=None, company=None):
"""
        Creates a fiscal year and required sequences
"""
FiscalYear = POOL.get('account.fiscalyear')
Sequence = POOL.get('ir.sequence')
SequenceStrict = POOL.get('ir.sequence.strict')
Company = POOL.get('company.company')
if date_ is None:
date_ = datetime.utcnow().date()
if not company:
company, = Company.search([], limit=1)
invoice_sequence, = SequenceStrict.create([{
'name': '%s' % date_.year,
'code': 'account.invoice',
'company': company
}])
fiscal_year, = FiscalYear.create([{
'name': '%s' % date_.year,
'start_date': date_ + relativedelta(month=1, day=1),
'end_date': date_ + relativedelta(month=12, day=31),
'company': company,
'post_move_sequence': Sequence.create([{
'name': '%s' % date_.year,
'code': 'account.move',
'company': company,
}])[0],
'out_invoice_sequence': invoice_sequence,
'in_invoice_sequence': invoice_sequence,
'out_credit_note_sequence': invoice_sequence,
'in_credit_note_sequence': invoice_sequence,
}])
FiscalYear.create_period([fiscal_year])
return fiscal_year
def _get_account_by_kind(self, kind, company=None, silent=True):
"""Returns an account with given spec
:param kind: receivable/payable/expense/revenue
        :param silent: don't raise an error if the account is not found
"""
Account = POOL.get('account.account')
Company = POOL.get('company.company')
if company is None:
company, = Company.search([], limit=1)
accounts = Account.search([
('kind', '=', kind),
('company', '=', company)
], limit=1)
if not accounts and not silent:
raise Exception("Account not found")
return accounts[0] if accounts else None
def _create_payment_term(self):
"""Create a simple payment term with all advance
"""
PaymentTerm = POOL.get('account.invoice.payment_term')
return PaymentTerm.create([{
'name': 'Direct',
'lines': [('create', [{'type': 'remainder'}])]
}])
def setup_defaults(self):
"""Method to setup defaults
"""
# Create currency
self.currency, = self.Currency.create([{
'name': 'Euro',
'code': 'EUR',
'symbol': 'EUR',
}])
country_de, country_tw = self.Country.create([{
'name': 'Germany',
'code': 'DE',
}, {
'name': 'Taiwan',
'code': 'TW',
}])
subdivision_bw, = self.Subdivision.create([{
'name': 'Baden-Württemberg',
'code': 'DE-BW',
'type': 'state',
'country': country_de.id,
}])
with Transaction().set_context(company=None):
company_party, = self.Party.create([{
'name': 'Deutsche Post IT Brief GmbH',
'vat_number': '123456',
'addresses': [('create', [{
'name': 'Max Muster',
'street': 'Heinrich-Bruening-Str.',
'streetbis': '7',
'zip': '53113',
'city': 'Bonn',
'country': country_de.id,
}])],
'contact_mechanisms': [('create', [{
'type': 'phone',
'value': '030244547777778',
}, {
'type': 'email',
'value': 'max@muster.de',
}, {
'type': 'fax',
'value': '030244547777778',
}, {
'type': 'mobile',
'value': '9876543212',
}, {
'type': 'website',
'value': 'example.com',
}])],
}])
self.company, = self.Company.create([{
'party': company_party.id,
'currency': self.currency.id,
}])
self.User.write(
[self.User(USER)], {
'main_company': self.company.id,
'company': self.company.id,
}
)
CONTEXT.update(self.User.get_preferences(context_only=True))
self._create_ |
cloudera/hue | desktop/core/ext-py3/requests-2.27.1/requests/help.py | Python | apache-2.0 | 3,920 | 0 | """Module containing bug report helper(s)."""
from __future__ import print_function
import json
import platform
import sys
import ssl
import idna
import urllib3
from . import __version__ as requests_version
try:
import charset_normalizer
except ImportError:
charset_normalizer = None
chardet = None
try:
from urllib3.contrib import pyopenssl
except ImportError:
pyopenssl = None
OpenSSL = None
cryptography = None
else:
import OpenSSL
import cryptography
def _implementation():
"""Return a dict with the Python implementation and version.
Provide both the name and the version of the Python implementation
currently running. For example, on CPython 2.7.5 it will return
{'name': 'CPython', 'version': '2.7.5'}.
This function works best on CPython and PyPy: in particular, it probably
doesn't work for Jython or IronPython. Future investigation should be done
to work out the correct shape of the code for those platforms.
"""
implementation = platform.python_implementation()
if implementation == 'CPython':
implementation_version = platform.python_version()
elif implementation == 'PyPy':
implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
implementation_version = ''.join([
implementation_version, sys.pypy_version_info.releaselevel
])
elif implementation == 'Jython':
implementation_version = platform.python_version() # Complete Guess
elif implementation == 'IronPython':
implementation_version = platform.python_version() # Complete Guess
else:
implementation_version = 'Unknown'
return {'name': implementation, 'version': implementation_version}
def info():
"""Generate information for a bug report."""
try:
platform_info = {
'system': platform.system(),
'release': platform.release(),
}
except IOError:
platform_info = {
'system': 'Unknown',
'release': 'Unknown',
}
implementation_info = _implementation()
urllib3_info = {'version': urllib3.__version__}
charset_normalizer_info = {'version': None}
chardet_info = {'version': None}
    if charset_normalizer:
        charset_normalizer_info = {'version': charset_normalizer.__version__}
if chardet:
chardet_info = {'version': chardet.__version__}
pyopenssl_info = {
'version': None,
'openssl_version': '',
}
if OpenSSL:
pyopenssl_info = {
'version': OpenSSL.__version__,
'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
}
cryptography_info = {
'version': getattr(cryptography, '__version__', ''),
}
idna_info = {
'version': getattr(idna, '__version__', ''),
}
system_ssl = ssl.OPENSSL_VERSION_NUMBER
system_ssl_info = {
'version': '%x' % system_ssl if system_ssl is not None else ''
}
return {
'platform': platform_info,
'implementation': implementation_info,
'system_ssl': system_ssl_info,
'using_pyopenssl': pyopenssl is not None,
'using_charset_normalizer': chardet is None,
'pyOpenSSL': pyopenssl_info,
'urllib3': urllib3_info,
'chardet': chardet_info,
'charset_normalizer': charset_normalizer_info,
'cryptography': cryptography_info,
'idna': idna_info,
'requests': {
'version': requests_version,
},
}
def main():
"""Pretty-print the bug information as JSON."""
print(json.dumps(info(), sort_keys=True, indent=2))
if __name__ == '__main__':
main()
|
RannyeriDev/Solfege | solfege/fpeditor.py | Python | gpl-3.0 | 34,999 | 0.002972 | # vim: set fileencoding=utf-8 :
# GNU Solfege - free ear training software
# Copyright (C) 2009, 2011 Tom Cato Amundsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import logging
import os
import StringIO
import subprocess
from gi.repository import Gtk
from solfege.esel import SearchView
if __name__ == '__main__':
    from solfege import i18n
i18n.setup(".", "C")
import solfege.statistics
solfege.db = solfege.statistics.DB()
import solfege
from solfege import cfg
from solfege import filesystem
from solfege import gu
from solfege import frontpage as pd
from solfege import lessonfile
from solfege import osutils
class LessonFilePreviewWidget(Gtk.VBox):
def __init__(self, model):
Gtk.VBox.__init__(self)
self.m_model = model
self.set_size_request(200, 200)
l = Gtk.Label()
l.set_alignment(0.0, 0.5)
l.set_markup("<b>Title:</b>")
self.pack_start(l, False, False, 0)
self.g_title = Gtk.Label()
self.g_title.set_alignment(0.0, 0.5)
self.pack_start(self.g_title, False, False, 0)
l = Gtk.Label()
l.set_alignment(0.0, 0.5)
l.set_markup("<b>Module:</b>")
self.pack_start(l, False, False, 0)
self.g_module = Gtk.Label()
self.g_module.set_alignment(0.0, 0.5)
self.pack_start(self.g_module, False, False, 0)
l = Gtk.Label()
l.set_alignment(0.0, 0.5)
l.set_markup("<b>Used in topcis:</b>")
self.pack_start(l, False, False, 0)
self.g_topic_box = Gtk.VBox()
self.pack_start(self.g_topic_box, False, False, 0)
self.show_all()
def update(self, dlg):
fn = dlg.get_preview_filename()
if fn:
fn = gu.decode_filename(fn)
for child in self.g_topic_box.get_children():
child.destroy()
fn = lessonfile.mk_uri(fn)
try:
self.set_sensitive(True)
self.g_title.set_text(lessonfile.infocache.get(fn, 'title'))
self.g_module.set_text(lessonfile.infocache.get(fn, 'module'))
self.g_ok_button.set_sensitive(True)
for x in self.m_model.iterate_topics_for_file(fn):
l = Gtk.Label(label=x)
l.set_alignment(0.0, 0.5)
self.g_topic_box.pack_start(l, False, False, 0)
if not self.g_topic_box.get_children():
l = Gtk.Label(label=u"-")
l.set_alignment(0.0, 0.5)
self.g_topic_box.pack_start(l, False, False, 0)
except (lessonfile.InfoCache.FileNotFound,
lessonfile.InfoCache.FileNotLessonfile), e:
self.g_title.set_text(u'')
self.g_module.set_text(u'')
self.g_ok_button.set_sensitive(False)
self.set_sensitive(False)
self.show_all()
return True
class SelectLessonFileDialog(Gtk.FileChooserDialog):
def __init__(self, parent):
Gtk.FileChooserDialog.__init__(self, _("Select lesson file"),
parent=parent,
buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,))
self.set_select_multiple(True)
pv = LessonFilePreviewWidget(parent.m_model)
pv.g_ok_button = self.add_button("gtk-ok", Gtk.ResponseType.OK)
pv.g_ok_button.set_sensitive(False)
pv.show()
self.set_preview_widget(pv)
self.connect('selection-changed', pv.update)
class SelectLessonfileBySearchDialog(Gtk.Dialog):
def __init__(self):
Gtk.Dialog.__init__(self, buttons=(Gtk.STOCK_CLOSE, Gtk.ResponseType.ACCEPT))
view = SearchView(_('Search for exercises. Each exercise you click will be added to the section of the front page.'),
fields=['link-with-filename-tooltip', 'module'])
view.on_link_clicked = self.on_link_clicked
self.vbox.pack_start(view, True, True, 0)
self.show_all()
def on_link_clicked(self, widget, filename):
self.m_filename = filename
self.response(Gtk.ResponseType.OK)
def editor_of(obj):
"""
Return the toplevel page, the one that is a Editor object.
"""
p = obj
while not isinstance(p, Editor):
p = p.m_parent
return p
def parent_page(obj):
"""
Return the parent page of obj. Return None if this is the toplevel page.
"""
p = obj
while True:
try:
p = p.m_parent
except AttributeError:
return None
if isinstance(p, Page):
return p
if p is None:
return None
class Section(Gtk.VBox):
"""
A section consists of a heading and a list of links.
self.g_link_box is a vbox that contains the links.
"""
def __init__(self, model, parent):
Gtk.VBox.__init__(self)
self.m_model = model
self.m_parent = parent
assert isinstance(model, pd.LinkList)
hbox = Gtk.HBox()
hbox.set_spacing(6)
self.pack_start(hbox, False, False, 0)
# This is displayed and used when we edit the heading
self.g_heading_entry = Gtk.Entry()
self.g_heading_entry.set_no_show_all(True)
hbox.pack_start(self.g_heading_entry, True, True, 0)
self.g_heading = Gtk.Label()
self.g_heading.set_alignment(0.0, 0.5)
# FIXME escape m_name
self.g_heading.set_markup("<b>%s</b>" % model.m_name)
hbox.pack_start(self.g_heading, False, False, 0)
###
button_hbox = Gtk.HBox()
button_hbox.set_spacing(0)
hbox.pack_start(button_hbox, False, False, 0)
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_EDIT, Gtk.IconSize.MENU)
button = Gtk.Button()
button.add(im)
button.connect('clicked', self.on_edit_heading)
button_hbox.pack_start(button, False, False, 0)
###
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_ADD, Gtk.IconSize.MENU)
button = Gtk.Button()
button.add(im)
button.connect('button-release-event', self.on_add)
button_hbox.pack_start(button, False, False, 0)
###
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_REMOVE, Gtk.IconSize.MENU)
button = Gtk.Button()
button.add(im)
button.connect('button-release-event', self.on_remove)
button_hbox.pack_start(button, False, False, 0)
###
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_CUT, Gtk.IconSize.MENU)
b = Gtk.Button()
b.add(im)
b.connect('clicked', self.on_cut)
button_hbox.pack_start(b, False, False, 0)
###
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_PASTE, Gtk.IconSize.MENU)
b = Gtk.Button()
b.add(im)
b.connect('clicked', self.on_paste, -1)
Editor.clipboard.register_paste_button(b, (pd.LinkList, pd.Page, unicode))
button_hbox.pack_start(b, False, False, 0)
###
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_GO_DOWN, Gtk.IconSize.MENU)
self.g_move_down_btn = Gtk.Button()
self.g_move_down_btn.add(im)
self.g_move_down_btn.connect('clicked',
self.m_parent.move_section_down, self)
button_hbox.pack_start(self.g_move_down_btn, False, False, 0)
###
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_GO_UP, Gtk.IconSize.MENU)
self.g_move_up_btn = Gtk.Button()
self.g_move_up_btn.add(im)
self.g_move |
lifeinoppo/littlefishlet-scode | RES/REF/python_sourcecode/ipython-master/docs/source/conf.py | Python | gpl-2.0 | 8,853 | 0.003615 | # -*- coding: utf-8 -*-
#
# IPython documentation build configuration file.
# NOTE: This file has been edited manually from the auto-generated one from
# sphinx. Do NOT delete and re-generate. If any changes from sphinx are
# needed, generate a scratch one and merge by hand any new fields needed.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
ON_RTD = os.environ.get('READTHEDOCS', None) == 'True'
if ON_RTD:
# Mock the presence of matplotlib, which we don't have on RTD
# see
# http://read-the-docs.readthedocs.org/en/latest/faq.html
tags.add('rtd')
# RTD doesn't use the Makefile, so re-run autogen_{things}.py here.
for name in ('config', 'api', 'magics'):
fname = 'autogen_{}.py'.format(name)
fpath = os.path.abspath(os.path.join('..', fname))
with open(fpath) as f:
exec(compile(f.read(), fname, 'exec'), {
'__file__': fpath,
'__name__': '__main__',
})
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('../sphinxext'))
# We load the ipython release info into a dict by explicit execution
iprelease = {}
exec(compile(open('../../IPython/core/release.py').read(), '../../IPython/core/release.py', 'exec'),iprelease)
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'matplotlib.sphinxext.mathmpl',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
'sphinx.ext.napoleon', # to preprocess docstrings
'github', # for easy GitHub links
'magics',
]
if ON_RTD:
# Remove extensions not currently supported on RTD
extensions.remove('matplotlib.sphinxext.only_directives')
extensions.remove('matplotlib.sphinxext.mathmpl')
extensions.remove('matplotlib.sphinxext.plot_directive')
extensions.remove('IPython.sphinxext.ipython_directive')
extensions.remove('IPython.sphinxext.ipython_console_highlighting')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
if iprelease['_version_extra'] == 'dev':
rst_prolog = """
.. note::
This documentation is for a development version of IPython. There may be
significant differences from the latest stable release.
"""
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'IPython'
copyright = 'The IPython Development Team'
# ghissue config
github_project_url = "https://github.com/ipython/ipython"
# numpydoc config
numpydoc_show_class_members = False # Otherwise Sphinx emits thousands of warnings
numpydoc_class_members_toctree = False
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The full version, including alpha/beta/rc tags.
release = "%s" % iprelease['version']
# Just the X.Y.Z part, no '-dev'
version = iprelease['version'].split('-', 1)[0]
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# Exclude these glob-style patterns when looking for source files. They are
# relative to the source/ directory.
exclude_patterns = ['whatsnew/pr']
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Set the default role so we can use `foo` instead of ``foo``
default_role = 'literal'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
html_favicon = 'favicon.ico'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
'interactive/htmlnotebook': 'notebook_redirect.html',
'interactive/notebook': 'notebook_redirect.html',
'interactive/nbconvert': 'notebook_redirect.html',
'interactive/public_server': 'notebook_redirect.html',
}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'ipythondoc'
intersphinx_mapping = {'python': ('http://docs.python.org/2/', None),
'rpy2': ('http://rpy.sourceforge.net/rpy2/doc-2.4/html/', None),
'traitlets': ('http://traitlets.readthedocs.org/en/latest/', None),
'jupyterclient': ('http://jupyter-client.readthedocs.org/en/latest/', None),
'ipyparallel': ('http://ipyparallel.readthedocs.org/en/latest/', None),
}
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '11pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'ipython.tex', 'IPython Documentation',
u"""The IPython Development Team""", 'manual', True),
('parallel/winhpc_index', 'winhpc_whitepaper.tex',
'Using IPython on Windows HPC Server 2008',
u"Brian E. Granger", 'manual', True)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if |
JoelBender/bacpypes | samples/RecurringWriteProperty.py | Python | mit | 4,423 | 0.000452 | #!/usr/bin/env python
"""
This application demonstrates writing a series of values at a regular interval.
$ python RecurringWriteProperty.py 1.2.3.4 analogValue:1 \
presentValue 1.2 3.4 5.6
"""
import sys
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.core import run, deferred
from bacpypes.task import RecurringTask
from bacpypes.iocb import IOCB
from bacpypes.pdu import Address
from bacpypes.primitivedata import Real, ObjectIdentifier
from bacpypes.constructeddata import Any
from bacpypes.basetypes import PropertyIdentifier
from bacpypes.apdu import WritePropertyRequest, SimpleAckPDU
from bacpypes.app import BIPSimpleApplication
from bacpypes.local.device import LocalDeviceObject
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# globals
args = None
this_application = None
@bacpypes_debugging
class PrairieDog(RecurringTask):
"""
An instance of this class pops up out of the ground every once in a
while and write out the next value.
"""
def __init__(self, interval):
        if _debug:
PrairieDog._debug("__init__ %r", interval)
RecurringTask.__init__(self, interval)
# install it
self.install_task()
def process_task(self):
if _debug:
PrairieDog._debug("process_task")
global args, this_application
if _debug:
PrairieDog._debug(" - args.values: %r", args.values)
# pick up the next value
value = args.values.pop(0)
args.values.append(value)
# make a primitive value out of it
value = Real(float(value))
# build a request
request = WritePropertyRequest(
destination=args.daddr,
objectIdentifier=args.objid,
propertyIdentifier=args.propid,
)
# save the value, application tagged
request.propertyValue = Any()
request.propertyValue.cast_in(value)
if _debug:
PrairieDog._debug(" - request: %r", request)
# make an IOCB
iocb = IOCB(request)
iocb.add_callback(self.write_complete)
if _debug:
PrairieDog._debug(" - iocb: %r", iocb)
# give it to the application to process
deferred(this_application.request_io, iocb)
def write_complete(self, iocb):
if _debug:
PrairieDog._debug("write_complete %r", iocb)
# do something for success
if iocb.ioResponse:
# should be an ack
if not isinstance(iocb.ioResponse, SimpleAckPDU):
if _debug:
PrairieDog._debug(" - not an ack")
else:
sys.stdout.write("ack\n")
# do something for error/reject/abort
elif iocb.ioError:
sys.stdout.write(str(iocb.ioError) + "\n")
def main():
global args, this_application
# parse the command line arguments
parser = ConfigArgumentParser(description=__doc__)
# add an argument for seconds per dog
parser.add_argument("daddr", help="destination address")
parser.add_argument("objid", help="object identifier")
parser.add_argument("propid", help="property identifier")
# list of values to write
parser.add_argument("values", metavar="N", nargs="+", help="values to write")
# add an argument for seconds between writes
parser.add_argument(
"--delay", type=float, help="delay between writes in seconds", default=5.0
)
# now parse the arguments
args = parser.parse_args()
# convert the parameters
args.daddr = Address(args.daddr)
args.objid = ObjectIdentifier(args.objid).value
args.propid = PropertyIdentifier(args.propid).value
if _debug:
_log.debug("initialization")
if _debug:
_log.debug(" - args: %r", args)
# make a device object
this_device = LocalDeviceObject(ini=args.ini)
if _debug:
_log.debug(" - this_device: %r", this_device)
# make a simple application
this_application = BIPSimpleApplication(this_device, args.ini.address)
# make a dog, task scheduling is in milliseconds
dog = PrairieDog(args.delay * 1000)
if _debug:
_log.debug(" - dog: %r", dog)
_log.debug("running")
run()
_log.debug("fini")
if __name__ == "__main__":
main()
|
jeremiah-c-leary/vhdl-style-guide | vsg/rules/number_of_lines_between_tokens.py | Python | gpl-3.0 | 1,091 | 0.00275 |
from vsg.rule_group import length
from vsg import violation
class number_of_lines_between_tokens(length.Rule):
'''
Checks the number of lines between tokens do not exceed a specified number
Parameters
----------
name : string
        The group the rule belongs to.
identifier : string
unique identifier. Usually in the form of 00N.
'''
def __init__(self, name, identifier, oLeftToken, oRightToken, iLines):
length.Rule.__init__(self, name=name, identifier=identifier)
self.length = iLines
self.oLeftToken = oLeftToken
        self.oRightToken = oRightToken
def _get_tokens_of_interest(self, oFile):
return oFile.get_line_count_between_tokens(self.oLeftToken, self.oRightToken)
def _analyze(self, lToi):
for oToi in lToi:
if oToi.get_token_value() > self.length:
sSolution = 'Reduce process to less than ' + str(self.length) + ' lines'
oViolation = violation.New(oToi.get_line_number(), None, sSolution)
self.add_violation(oViolation)
|
yenliangl/bitcoin | test/functional/feature_help.py | Python | mit | 2,481 | 0.000806 | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Verify that starting bitcoin with -h works as expected."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class HelpTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes)
# Don't start the node
def get_node_output(self, *, ret_code_expected):
ret_code = self.nodes[0].process.wait(timeout=60)
assert_equal(ret_code, ret_code_expected)
self.nodes[0].stdout.seek(0)
self.nodes[0].stderr.seek(0)
out = self.nodes[0].stdout.read()
err = self.nodes[0].stderr.read()
self.nodes[0].stdout.close()
self.nodes[0].stderr.close()
# Clean up TestNode state
self.nodes[0].running = False
        self.nodes[0].process = None
self.nodes[0].rpc_connected = False
self.nodes[0].rpc = None
return out, err
def run_test(self):
self.log.info("Start bitcoin with -h for help text" | )
self.nodes[0].start(extra_args=['-h'])
# Node should exit immediately and output help to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'Options' in output
self.log.info(f"Help text received: {output[0:60]} (...)")
self.log.info("Start bitcoin with -version for version information")
self.nodes[0].start(extra_args=['-version'])
# Node should exit immediately and output version to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'version' in output
self.log.info(f"Version text received: {output[0:60]} (...)")
# Test that arguments not in the help results in an error
self.log.info("Start bitcoind with -fakearg to make sure it does not start")
self.nodes[0].start(extra_args=['-fakearg'])
# Node should exit immediately and output an error to stderr
_, output = self.get_node_output(ret_code_expected=1)
assert b'Error parsing command line arguments' in output
self.log.info(f"Error message received: {output[0:60]} (...)")
if __name__ == '__main__':
HelpTest().main()
|
PavlovVitaly/python__homework_ITMO | task_04_01.py | Python | gpl-3.0 | 722 | 0 | n = int(input())
p = int(input())
with open('data.txt') as input_file:
content = str(input_file.read())
content = list(content.split(' '))
content = list(map(int, content))
content_of_out_file_1 = list(filter(lambda x: (x % n) == 0, content))
content_of_out_file_1 = list(map(str, content_of_out_file_1))
content_of_out_file_1 = ' '.join(content_of_out_file_1)
with open('out-1.txt', 'w') as out_file_1:
out_file_1.write(content_of_out_file_1)
content_of_out_file_2 = list(map(lambda x: x ** p, content))
content_of_out_file_2 = list(map(str, content_of_out_file_2))
content_of_out_file_2 = ' '.join(content_of_out_file_2)
with open('out-2.txt', 'w') as out_file_2:
out_file_2.write(content_of_out_file_2)
|
doctoryes/project_euler | prob9.py | Python | mit | 983 | 0.020346 |
#
# A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
# a^2 + b^2 = c^2
#
# For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
#
# There exists exactly one Pythagorean triplet for which a + b + c = 1000.
# Find the product abc.
for x in range(1, 1000):
for y in range(2, 1000):
if x > y:
# Increase y until greater than x.
continue
for z in range(3, 1000):
if y > z:
# Increase z until greater than y.
continue
elif x + y + z != 1000:
# Not the answer.
continue
x_sq = x * x
y_sq = y * y
lval = x_sq + y_sq
z_sq = z * z
            if lval == z_sq:
print "%5d^2 + %5d^2 = %5d^2" % (x, y, z)
if x + y + z == 1000:
print "ANSWER!!!! Product abc = ", x*y*z
exit()
elif lval < z_sq:
# No point in increasing z anymore.
break
|
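The brute-force triple loop in the row above scans on the order of 10^9 candidate triples; as a hedged sketch, the usual refinement eliminates the inner loop by fixing the third value to `total - a - b` (the `find_triplet` helper is hypothetical, not part of the original file):

```python
def find_triplet(total=1000):
    # a < b < c and a + b + c == total, so c is fully determined by a and b.
    for a in range(1, total):
        for b in range(a + 1, total - a):
            c = total - a - b
            if c > b and a * a + b * b == c * c:
                return a, b, c
    return None

triplet = find_triplet()
if triplet:
    a, b, c = triplet
    print(a * b * c)  # 31875000
```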
commtrack/commtrack-core | apps/transformers/zip.py | Python | bsd-3-clause | 3,024 | 0.007606 | import os, tempfile, zipfile, tarfile, logging
from django.core.servers.basehttp import FileWrapper
from django.http import HttpResponse
def get_zipfile(file_list):
"""
Create a ZIP file on disk and transmit it in chunks of 8KB,
without loading the whole file into memory.
"""
temp = tempfile.TemporaryFile()
archive = zipfile.ZipFile(temp, 'w', zipfile.ZIP_DEFLATED)
for file in file_list:
file = file.encode("utf-8")
if os.path.exists(file):
            archive.write(file, os.path.basename(file))
else:
logging.warn("zipfile could not | find %s" % file)
archive.close()
wrapper = FileWrapper(temp)
response = HttpResponse(wrapper, content_type='application/zip')
response['Content-Disposition'] = 'attachment; filename=commcarehq.zip'
response['Content-Length'] = temp.tell()
# this seek is required for 'response' to work
temp.seek(0)
return response
def build_tarfile(file_list, output_file):
"""
Creates a tarfile on disk, given a list of input files
"""
tar = tarfile.open(name=output_file, mode="w:bz2")
if len (file_list) == 0:
logging.info("No submissions could be found.")
return HttpResponse("No submissions could be found.")
for file in file_list:
tar.add(file, os.path.basename(file) )
tar.close()
return tar
def get_tarfile(file_list, output_file):
"""
Creates a tarfile on disk, given a list of input files,
and returns it as an http response.
"""
tar = build_tarfile(file_list, output_file)
fin = open(output_file, 'rb')
wrapper = FileWrapper(fin)
response = HttpResponse(wrapper, content_type='application/tar')
response['Content-Disposition'] = 'attachment; filename=commcarehq.tar'
response['Content-Length'] = os.path.getsize(output_file)
# this seek is required for 'response' to work
return response
class Compressor(object):
""" Interface to create a compressed file on disk, given streams """
def open(self, output_file):
raise NotImplementedError()
def add_stream(self, stream, size=0, name=None ):
raise NotImplementedError()
def close(self):
raise NotImplementedError()
class TarCompressor(Compressor):
""" Interface to create a tarfile on disk, given various input streams """
def __init__(self):
self._tar = None
def open(self, name=None, fileobj=None):
if name == None and fileobj == None:
raise ValueError('Either name or fileobj must be supplied to TarCompressor')
self._tar = tarfile.open(name=name, fileobj=fileobj, mode="w:bz2")
def add_stream(self, stream, size=0, name=None):
tar_info = tarfile.TarInfo( name=name )
tar_info.size = size
self._tar.addfile(tar_info, fileobj=stream)
def add_file(self, file):
self._tar.add(file, os.path.basename(file))
def close(self):
self._tar.close()
|
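A brief usage sketch of the `TarCompressor` streaming interface from the row above (the buffer and entry name are invented for illustration; this assumes the class definition above is importable):

```python
import io

compressor = TarCompressor()
buf = io.BytesIO()
compressor.open(fileobj=buf)
# Stream an in-memory payload into the archive without a temporary file.
payload = b"<xml>example submission</xml>"
compressor.add_stream(io.BytesIO(payload), size=len(payload),
                      name="submission_1.xml")
compressor.close()
buf.seek(0)  # buf now holds a bz2-compressed tar stream
```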
cklein/wtforms | tests/locale_babel.py | Python | bsd-3-clause | 2,927 | 0.000342 | from __future__ import unicode_literals
import babel
from decimal import Decimal, ROUND_UP
from unittest import TestCase
from wtforms import Form
from wtforms.fields import DecimalField
from wtforms.utils import unset_value
from tests.common import DummyPostData
class TestLocaleDecimal(TestCase):
class F(Form):
class Meta:
locales = ['hi_IN', 'en_US']
a = DecimalField(use_locale=True)
def _format_test(self, expected, val, locales=unset_value):
meta = None
if locales is not unset_value:
meta = {'locales': locales}
form = self.F(meta=meta, a=Decimal(val))
self.assertEqual(form.a._value(), expected)
    def test_typeerror(self):
def build(**kw):
form = self.F()
DecimalField(
use_locale=True,
_form=form,
                _name='a',
_translations=form.meta.get_translations(form),
**kw
)
self.assertRaises(TypeError, build, places=2)
self.assertRaises(TypeError, build, rounding=ROUND_UP)
def test_formatting(self):
val = Decimal('123456.789')
neg = Decimal('-5.2')
self._format_test('1,23,456.789', val)
self._format_test('-12,52,378.2', '-1252378.2')
self._format_test('123,456.789', val, ['en_US'])
self._format_test('-5.2', neg, ['en_US'])
self._format_test('123.456,789', val, ['es_ES'])
self._format_test('123.456,789', val, ['de_DE'])
self._format_test('-5,2', neg, ['de_DE'])
self._format_test("-12'345.2", '-12345.2', ['de_CH'])
def _parse_test(self, raw_val, expected, locales=unset_value):
meta = None
if locales is not unset_value:
meta = {'locales': locales}
form = self.F(DummyPostData(a=raw_val), meta=meta)
if not form.validate():
raise AssertionError(
'Expected value %r to parse as a decimal, instead got %r' % (
raw_val, form.a.errors,
)
)
self.assertEqual(form.a.data, expected)
def _fail_parse(self, raw_val, expected_error, locales=unset_value):
meta = None
if locales is not unset_value:
meta = {'locales': locales}
form = self.F(DummyPostData(a=raw_val), meta=meta)
assert not form.validate()
self.assertEqual(form.a.errors[0], expected_error)
def test_parsing(self):
expected = Decimal('123456.789')
self._parse_test('1,23,456.789', expected)
self._parse_test('1,23,456.789', expected, ['en_US'])
self._parse_test('1.23.456,789', expected, ['de_DE'])
self._parse_test("1'23'456.789", expected, ['de_CH'])
self._fail_parse('1,23,456.5', 'Keine g\xfcltige Dezimalzahl', ['de_DE'])
self._fail_parse('1.234.567,5', 'Not a valid decimal value', ['en_US'])
|
jolyonb/edx-platform | lms/djangoapps/verify_student/models.py | Python | agpl-3.0 | 40,982 | 0.001611 | # -*- coding: utf-8 -*-
"""
Models for Student Identity Verification
This is where we put any models relating to establishing the real-life identity
of a student over a period of time. Right now, the only models are the abstract
`PhotoVerification`, and its one concrete implementation
`SoftwareSecurePhotoVerification`. The hope is to keep as much of the
photo verification process as generic as possible.
"""
import functools
import json
import logging
import os.path
import uuid
from datetime import timedelta
from email.utils import formatdate
import requests
import six
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.files.base import ContentFile
from django.urls import reverse
from django.db import models
from django.dispatch import receiver
from django.utils.functional import cached_property
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy
from model_utils import Choices
from model_utils.models import StatusModel, TimeStampedModel
from opaque_keys.edx.django.models import CourseKeyField
from lms.djangoapps.verify_student.ssencrypt import (
encrypt_and_encode,
generate_signed_message,
random_aes_key,
rsa_encrypt
)
from openedx.core.djangoapps.signals.signals import LEARNER_NOW_VERIFIED
from openedx.core.storage import get_storage
from .utils import earliest_allowed_verification_date
log = logging.getLogger(__name__)
def generateUUID(): # pylint: disable=invalid-name
""" Utility function; generates UUIDs """
return str(uuid.uuid4())
class VerificationException(Exception):
pass
def status_before_must_be(*valid_start_statuses):
"""
Helper decorator with arguments to make sure that an object with a `status`
attribute is in one of a list of acceptable status states before a method
is called. You could use it in a class definition like:
@status_before_must_be("submitted", "approved", "denied")
def refund_user(self, user_id):
# Do logic here...
If the object has a status that is not listed when the `refund_user` method
is invoked, it will throw a `VerificationException`. This is just to avoid
distracting boilerplate when looking at a Model that needs to go through a
workflow process.
"""
def decorator_func(func):
"""
Decorator function that gets returned
"""
@functools.wraps(func)
def with_status_check(obj, *args, **kwargs):
if obj.status not in valid_start_statuses:
exception_msg = (
u"Error calling {} {}: status is '{}', must be one of: {}"
).format(func, obj, obj.status, valid_start_statuses)
raise VerificationException(exception_msg)
return func(obj, *args, **kwargs)
return with_status_check
return decorator_func
class IDVerificationAttempt(StatusModel):
"""
Each IDVerificationAttempt represents a Student's attempt to establish
their identity through one of several methods that inherit from this Model,
including PhotoVerification and SSOVerification.
.. pii: The User's name is stored in this and sub-models
.. pii_types: name
.. pii_retirement: retained
"""
STATUS = Choices('created', 'ready', 'submitted', 'must_retry', 'approved', 'denied')
user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
# They can change their name later on, so we want to copy the value here so
# we always preserve what it was at the time they requested. We only copy
# this value during the mark_ready() step. Prior to that, you should be
# displaying the user's name from their user.profile.name.
name = models.CharField(blank=True, max_length=255)
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
updated_at = models.DateTimeField(auto_now=True, db_index=True)
class Meta(object):
app_label = "verify_student"
abstract = True
ordering = ['-created_at']
@property
def expiration_datetime(self):
"""Datetime that the verification will expire. """
days_good_for = settings.VERIFY_STUDENT["DAYS_GOOD_FOR"]
return self.created_at + timedelta(days=days_good_for)
def should_display_status_to_user(self):
"""Whether or not the status from this attempt should be displayed to the user."""
raise NotImplementedError
def active_at_datetime(self, deadline):
"""Check whether the verification was active at a particular datetime.
Arguments:
deadline (datetime): The date at which the verification was active
(created before and expiration datetime is after today).
Returns:
bool
"""
return (
self.created_at < deadline and
self.expiration_datetime > now()
)
class ManualVerification(IDVerificationAttempt):
"""
Each ManualVerification represents a user's verification that bypasses the need for
any other verification.
.. pii: The User's name is stored in the parent model
.. pii_types: name
.. pii_retirement: retained
"""
reason = models.CharField(
max_length=255,
blank=True,
help_text=(
'Specifies the reason for manual verification of the user.'
)
)
class Meta(object):
app_label = 'verify_student'
def __unicode__(self):
return 'ManualIDVerification for {name}, status: {status}'.format(
name=self.name,
status=self.status,
)
def should_display_status_to_user(self):
"""
Whether or not the status should be displayed to the user.
"""
return False
class SSOVerification(IDVerificationAttempt):
"""
Each SSOVerification represents a Student's attempt to establish their identity
by signing in with SSO. ID verification through SSO bypasses the need for
photo verification.
.. no_pii:
"""
OAUTH2 = 'third_party_auth.models.OAuth2ProviderConfig'
SAML = 'third_party_auth.models.SAMLProviderConfig'
LTI = 'third_party_auth.models.LTIProviderConfig'
IDENTITY_PROVIDER_TYPE_CHOICES = (
(OAUTH2, 'OAuth2 Provider'),
(SAML, 'SAML Provider'),
(LTI, 'LTI Provider'),
)
identity_provider_type = models.CharField(
max_length=100,
blank=False,
choices=IDENTITY_PROVIDER_TYPE_CHOICES,
default=SAML,
help_text=(
'Specifies which type of Identity Provider this verification originated from.'
)
)
identity_provider_slug = models.SlugField(
max_length=30, db_index=True, default='default',
help_text=(
'The slug uniquely identifying the Identity Provider this verification originated from.'
))
class Meta(object):
app_label = "verify_student"
def __unicode__(self):
return 'SSOIDVerification for {name}, status: {status}'.format(
name=self.name,
status=self.status,
)
def should_display_status_to_user(self):
"""Whether or not the status from this attempt should be displayed to the user."""
return False
class PhotoVerification(IDVerificationAttempt):
"""
Each PhotoVerification represents a Student's attempt to establish
their identity by uploading a photo of themselves and a picture ID. An
attempt actually has a number of fields that need to be filled out at
different steps of the approval process. While it's useful as a Django Model
for the querying facilities, **you should only edit a `PhotoVerification`
object through the methods provided**. Initialize them with a user:
attempt = PhotoVerification(user=user)
We track this attempt through various states:
`created`
Initial creation and state we're in after uploading the images.
`ready`
The user has uploaded their images and checked that they can read the
images. There's a separate state here because it m |
willyrv/microSDcheck | microSDcheck.py | Python | mit | 3,837 | 0.005734 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 26 23:44:04 2015
@author: willy
"""
import random
import hashlib
import argparse
import shutil, os, time
def write_a_file(destination, character, size=1):
"""
    Create a random file of the specified size (in GBytes)
    and copy it to "destination". The file is temporarily
    created in the current folder; we keep the md5 hash of
    the file and then delete it.
"""
#just a random number for the first line
line1 = 20000000 - int(random.random()*10000000)
    with open(destination, 'w') as f:
f.write('{}\n'.format(line1))
number_of_characters = (1024**3)*size -10
characters_to_write = character * number_of_characters
f.write(characters_to_write)
print('File {} copied on disk. Computing md5 hash value ...'.format(destination))
md5_value = md5sum(destination)
print("md5 hash computation done. ")
return md5_value
def md5sum(filename):
hash = hashlib.md5()
    blocksize = 256*128
with open(filename, 'rb') as f:
for block in iter(lambda: f.read(blocksize), b''):
hash.update(block)
return hash.hexdigest()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Check a microSD memory')
parser.add_argument("microSDpath")
parser.add_argument("-s", "--size", type=int, help="total size of the microSD memory (in GB).",
default=25)
args = parser.parse_args()
original_md5_sums = []
copied_md5_sums = []
writing_time = 0
reading_time = 0
# writing the files of size 1GB in the current folder
current_dir = os.getcwd()
for i in range(1, args.size+1):
newfilename = 'test_data_{}.data'.format(i)
new_file_path = os.path.join(current_dir, newfilename)
new_md5 = write_a_file(new_file_path, '0')
original_md5_sums.append(new_md5)
# try to coppy to the microSDcard
try:
print("Coppying file {} to {} ...".format(new_file_path, args.microSDpath))
start = time.time()
shutil.copy(new_file_path, args.microSDpath)
end = time.time()
writing_time += (end-start)
print('File {} copied to the microSD card'.format(i))
except:
pass
os.remove(new_file_path)
# Get the md5 hash of the files in the microSD card
for i in range(1, args.size+1):
newfilename = 'test_data_{}.data'.format(i)
copied_file_path = os.path.join(args.microSDpath, newfilename)
print("Computing the md5 hash value of {}".format(copied_file_path))
start = time.time()
copied_md5_sums.append(md5sum(copied_file_path))
end = time.time()
reading_time += (end-start)
# writting final report
results = []
if len(original_md5_sums) != len(copied_md5_sums):
d = len(original_md5_sums) - len(copied_md5_sums)
results.append('There were {} files not copied'.format(d))
m = min(len(original_md5_sums), len(copied_md5_sums))
for i in range(m):
if original_md5_sums[i] == copied_md5_sums[i]:
line = '{}\t{}\tOK'.format(original_md5_sums[i], copied_md5_sums[i])
else:
line = '{}\t{}\tERROR'.format(original_md5_sums[i], copied_md5_sums[i])
results.append(line)
    writing_speed = round(float(args.size*1024)/writing_time, 2)
    reading_speed = round(float(args.size*1024)/reading_time, 2)
    results.append("Writing speed {} MB/s".format(writing_speed))
results.append("Read speed {} MB/s".format(reading_speed))
with open(os.path.join(current_dir, 'results.txt'), 'w') as f:
f.write('\n'.join(results))
print('done\n{}'.format('\n'.join(results)))
|
parksandwildlife/observations | manage.py | Python | apache-2.0 | 297 | 0.003367 | #!/usr/bin/env python
import os
import sys
import confy
confy.read_environment_file()
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "incredibus | .settings")
from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
UbiCastTeam/gst-gtklaunch-1.0 | main.py | Python | lgpl-2.1 | 941 | 0.003188 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# * This Program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU Lesser General Public
# * License as published by the Free Software Foundation; either
# * version 2.1 of the License, or (at your option) any later version.
# *
# * Libav is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# * Lesser General Public License for more details.
# *
# * You should have received a copy of the GNU Lesser General Public
# * License along with Libav; if not, write to the Free Software
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
GstGengui: runner for non-installed package
"""
from gst_gtklaunch import gst_gtklaunch
if __name__ == '__main__':
gst_gtklaunch.main()
|
r-kitaev/lucid-python-werkzeug | tests/test_exceptions.py | Python | bsd-3-clause | 2,680 | 0 | # -*- coding: utf-8 -*-
"""
    werkzeug.exceptions test
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD license.
"""
from nose.tools import assert_raises
from werkzeug import exceptions
from werkzeug.exceptions import Aborter, abort
from werkzeug.wrappers import Response
def test_proxy_exception():
"""Proxy exceptions"""
orig_resp = Response('Hello World')
try:
abort(orig_resp)
except exceptions.HTTPException, e:
resp = e.get_response({})
else:
assert False, 'exception not raised'
assert resp is orig_resp
assert resp.data == 'Hello World'
def test_aborter():
"""Exception aborter"""
assert_raises(exceptions.BadRequest, abort, 400)
assert_raises(exceptions.Unauthorized, abort, 401)
assert_raises(exceptions.Forbidden, abort, 403)
assert_raises(exceptions.NotFound, abort, 404)
assert_raises(exceptions.MethodNotAllowed, abort, 405, ['GET', 'HEAD'])
assert_raises(exceptions.NotAcceptable, abort, 406)
assert_raises(exceptions.RequestTimeout, abort, 408)
assert_raises(exceptions.Gone, abort, 410)
assert_raises(exceptions.LengthRequired, abort, 411)
assert_raises(exceptions.PreconditionFailed, abort, 412)
assert_raises(exceptions.RequestEntityTooLarge, abort, 413)
assert_raises(exceptions.RequestURITooLarge, abort, 414)
    assert_raises(exceptions.UnsupportedMediaType, abort, 415)
assert_raises(exceptions.InternalServerError, abort, 500)
assert_raises(exceptions.NotImplemented, abort, 501)
assert_raises(exceptions.BadGateway, abort, 502)
assert_raises(exceptions.ServiceUnavailable, abort, 503)
    myabort = Aborter({1: exceptions.NotFound})
assert_raises(LookupError, myabort, 404)
assert_raises(exceptions.NotFound, myabort, 1)
myabort = Aborter(extra={1: exceptions.NotFound})
assert_raises(exceptions.NotFound, myabort, 404)
assert_raises(exceptions.NotFound, myabort, 1)
def test_exception_repr():
"""Repr and unicode of exceptions"""
exc = exceptions.NotFound()
assert unicode(exc) == '404: Not Found'
assert repr(exc) == "<NotFound '404: Not Found'>"
exc = exceptions.NotFound('Not There')
assert unicode(exc) == '404: Not There'
assert repr(exc) == "<NotFound '404: Not There'>"
def test_special_exceptions():
"""Special HTTP exceptions"""
exc = exceptions.MethodNotAllowed(['GET', 'HEAD', 'POST'])
h = dict(exc.get_headers({}))
assert h['Allow'] == 'GET, HEAD, POST'
assert 'The method DELETE is not allowed' in exc.get_description({
'REQUEST_METHOD': 'DELETE'
})
|
rschnapka/odoo | addons/account_asset/account_asset.py | Python | agpl-3.0 | 29,456 | 0.008759 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from tools.translate import _
class account_asset_category(osv.osv):
_name = 'account.asset.category'
_description = 'Asset category'
_columns = {
'name': fields.char('Name', size=64, required=True, select=1),
'note': fields.text('Note'),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic account'),
'account_asset_id': fields.many2one('account.account', 'Asset Account', required=True, domain=[('type','=','other')]),
'account_depreciation_id': fields.many2one('account.account', 'Depreciation Account', required=True, domain=[('type','=','other')]),
'account_expense_depreciation_id': fields.many2one('account.account', 'Depr. Expense Account', required=True, domain=[('type','=','other')]),
'journal_id': fields.many2one('account.journal', 'Journal', required=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
'method': fields.selection([('linear','Linear'),('degressive','Degressive')], 'Computation Method', required=True, help="Choose the method to use to compute the amount of depreciation lines.\n"\
" * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n" \
" * Degressive: Calculated on basis of: Residual Value * Degressive Factor"),
'method_number': fields.integer('Number of Depreciations', help="The number of depreciations needed to depreciate your asset"),
'method_period': fields.integer('Period Length', help="State here the time between 2 depreciations, in months", required=True),
'method_progress_factor': fields.float('Degressive Factor'),
        'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True,
                                  help="Choose the method to use to compute the dates and number of depreciation lines.\n"\
                                       "  * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
                                       "  * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
'method_end': fields.date('Ending date'),
        'prorata':fields.boolean('Prorata Temporis', help='Indicates that the first depreciation entry for this asset has to be done from the purchase date instead of the first January'),
'open_asset': fields.boolean('Skip Draft State', help="Check this if you want to automatically confirm the assets of this category when created by invoices."),
}
_defaults = {
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'account.asset.category', context=context),
'method': 'linear',
'method_number': 5,
'method_time': 'number',
'method_period': 12,
'method_progress_factor': 0.3,
}
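    # Worked example (illustrative figures, not from the original module):
    # for a gross value of 1000.0 with method_number = 5 and
    # method_progress_factor = 0.3,
    #   linear:     every line books 1000.0 / 5 = 200.0
    #   degressive: line 1 books 1000.0 * 0.3 = 300.0, line 2 books
    #               (1000.0 - 300.0) * 0.3 = 210.0, and so on.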
def onchange_account_asset(self, cr, uid, ids, account_asset_id, context=None):
res = {'value':{}}
if account_asset_id:
res['value'] = {'account_depreciation_id': account_asset_id}
return res
account_asset_category()
class account_asset_asset(osv.osv):
_name = 'account.asset.asset'
_description = 'Asset'
def unlink(self, cr, uid, ids, context=None):
for asset in self.browse(cr, uid, ids, context=context):
if asset.account_move_line_ids:
raise osv.except_osv(_('Error!'), _('You cannot delete an asset that contains posted depreciation lines.'))
return super(account_asset_asset, self).unlink(cr, uid, ids, context=context)
def _get_period(self, cr, uid, context=None):
ctx = dict(context or {}, account_period_prefer_normal=True)
periods = self.pool.get('account.period').find(cr, uid, context=ctx)
if periods:
return periods[0]
else:
return False
def _get_last_depreciation_date(self, cr, uid, ids, context=None):
"""
        @param ids: ids of account.asset.asset objects
        @return: Returns a dictionary of the effective dates of the last depreciation entry made for the given asset ids. If there isn't any, returns the purchase date of the asset.
"""
cr.execute("""
SELECT a.id as id, COALESCE(MAX(l.date),a.purchase_date) AS date
FROM account_asset_asset a
LEFT JOIN account_move_line l ON (l.asset_id = a.id)
WHERE a.id IN %s
GROUP BY a.id, a.purchase_date """, (tuple(ids),))
return dict(cr.fetchall())
def _compute_board_amount(self, cr, uid, asset, i, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date, context=None):
#by default amount = 0
amount = 0
if i == undone_dotation_number:
amount = residual_amount
else:
if asset.method == 'linear':
amount = amount_to_depr / (undone_dotation_number - len(posted_depreciation_line_ids))
if asset.prorata:
amount = amount_to_depr / asset.method_number
days = total_days - float(depreciation_date.strftime('%j'))
if i == 1:
amount = (amount_to_depr / asset.method_number) / total_days * days
elif i == undone_dotation_number:
amount = (amount_to_depr / asset.method_number) / total_days * (total_days - days)
elif asset.method == 'degressive':
amount = residual_amount * asset.method_progress_factor
if asset.prorata:
days = total_days - float(depreciation_date.strftime('%j'))
if i == 1:
amount = (residual_amount * asset.method_progress_factor) / total_days * days
elif i == undone_dotation_number:
amount = (residual_amount * asset.method_progress_factor) / total_days * (total_days - days)
return amount
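    # Illustrative prorata example (assumed figures, not from the original
    # module): amount_to_depr = 1200.0, method_number = 12, total_days = 365.0
    # and a purchase on day-of-year 200 give days = 165, so the first linear
    # line books (1200.0 / 12) / 365.0 * 165 ~= 45.21; the final line simply
    # books whatever residual_amount is left (first branch above).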
def _compute_board_undone_dotation_nb(self, cr, uid, asset, depreciation_date, total_days, context=None):
undone_dotation_number = asset.method_number
if asset.method_time == 'end':
end_date = datetime.strptime(asset.method_end, '%Y-%m-%d')
undone_dotation_number = 0
while depreciation_date <= end_date:
depreciation_date = (datetime(depreciation_date.year, depreciation_date.month, depreciation_date.day) + relativedelta(months=+asset.method_period))
undone_dotation_number += 1
if asset.prorata:
undone_dotation_number += 1
return undone_dotation_number
def compute_depreciation_board(self, cr, uid, ids, context=None):
depreciation_lin_obj = self.pool.get('account.asset.depreciation.line')
currency_obj = self.pool.get('res.currency') |
e-koch/VLA_Lband | 16B/pipeline4.7.1_custom/EVLA_pipe_testgains.py | Python | mit | 8,921 | 0.01872 | ######################################################################
#
# Copyright (C) 2013
# Associated Universities, Inc. Washington DC, USA,
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning VLA Pipelines should be addressed as follows:
# Please register and submit helpdesk tickets via: https://help.nrao.edu
# Postal address:
# National Radio Astronomy Observatory
# VLA Pipeline Support Office
# PO Box O
# Socorro, NM, USA
#
######################################################################
# DO TEST GAIN CALIBRATIONS TO SEE IF MORE FLAGGING IS NEEDED AND TO
# ESTABLISH SHORT AND LONG SOLINTS
# (Needs some work to automate; note also that plotcal holds onto
# testgaincal.g in the table cache unless it has been exited using
# the gui, so only plot the final versions)
logprint ("Starting EVLA_pipe_testgains.py", logfileout='logs/testgains.log')
time_list=runtiming('testgains', 'start')
QA2_testgains='Pass'
print ""
print "Finding a reference antenna | for gain calibrations"
print ""
refantspw=''
refantfield=calibrator_field_select_string
# NB: would use ms_active below instead of calibrators.ms when selection
# to exclude flagged data is implemented
findrefant=RefAntHeuristics(vis='calibrators.ms',field=refantfield,geometry=True,flagging=True)
RefAntOutput=findrefant.calculate()
refAnt=str(RefAntOutput[0])+','+str(RefAntOutput[1])+','+str(RefAntOutput[2])+','+str(RefAntOutput[3])
logprint ("The pipeline will use antenna(s) "+refAnt+" as the reference", logfileout='logs/testgains.log')
logprint ("Doing test gain calibration", logfileout='logs/testgains.log')
# First determine short solint for gain calibrator, and see if it is
# shorter or longer than gain_solint1 (determined on BPd cals)
# Start with solint='int'
syscommand='rm -rf testgaincal.g'
os.system(syscommand)
soltime=int_time
solint='int'
tst_gcal_spw=''
combtime='scan'
flaggedSolnResult1=testgains('calibrators.ms','testgaincal.g',tst_gcal_spw,calibrator_scan_select_string,solint,refAnt,minBL_for_cal,combtime)
logprint("For solint = "+solint+" fraction of flagged solutions = "+str(flaggedSolnResult1['all']['fraction']), logfileout='logs/testBPdcals.log')
logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResult1['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
if (flaggedSolnResult1['all']['total'] > 0):
fracFlaggedSolns1=flaggedSolnResult1['antmedian']['fraction']
else:
fracFlaggedSolns1=1.0
shortsol2=soltime
if (fracFlaggedSolns1 > 0.05):
soltime=3.0*int_time
solint=str(soltime)+'s'
flaggedSolnResult3=testgains('calibrators.ms','testgaincal3.g',tst_gcal_spw,calibrator_scan_select_string,solint,refAnt,minBL_for_cal,combtime)
logprint("For solint = "+solint+" fraction of flagged solutions = "+str(flaggedSolnResult3['all']['fraction']), logfileout='logs/testBPdcals.log')
logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResult3['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
if (flaggedSolnResult3['all']['total'] > 0):
fracFlaggedSolns3=flaggedSolnResult3['antmedian']['fraction']
else:
fracFlaggedSolns3=1.0
if (fracFlaggedSolns3 < fracFlaggedSolns1):
shortsol2=soltime
syscommand='rm -rf testgaincal.g'
os.system(syscommand)
syscommand='mv testgaincal3.g testgaincal.g'
os.system(syscommand)
if (fracFlaggedSolns3 > 0.05):
soltime=10.0*int_time
solint=str(soltime)+'s'
flaggedSolnResult10=testgains('calibrators.ms','testgaincal10.g',tst_gcal_spw,calibrator_scan_select_string,solint,refAnt,minBL_for_cal,combtime)
logprint("For solint = "+solint+" fraction of flagged solutions = "+str(flaggedSolnResult10['all']['fraction']), logfileout='logs/testBPdcals.log')
logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResult10['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
if (flaggedSolnResult10['all']['total'] > 0):
fracFlaggedSolns10=flaggedSolnResult10['antmedian']['fraction']
else:
fracFlaggedSolns10=1.0
if (fracFlaggedSolns10 < fracFlaggedSolns3):
shortsol2=soltime
syscommand='rm -rf testgaincal.g'
os.system(syscommand)
syscommand='mv testgaincal10.g testgaincal.g'
os.system(syscommand)
if (fracFlaggedSolns10 > 0.05):
solint='inf'
combtime=''
flaggedSolnResultScan=testgains('calibrators.ms','testgaincalscan.g',tst_gcal_spw,calibrator_scan_select_string,solint,refAnt,minBL_for_cal,combtime)
logprint("For solint = "+solint+" fraction of flagged solutions = "+str(flaggedSolnResultScan['all']['fraction']), logfileout='logs/testBPdcals.log')
logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResultScan['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
if (flaggedSolnResultScan['all']['total'] > 0):
fracFlaggedSolnsScan=flaggedSolnResultScan['antmedian']['fraction']
else:
fracFlaggedSolnsScan=1.0
if (fracFlaggedSolnsScan < fracFlaggedSolns10):
shortsol2=longsolint
syscommand='rm -rf testgaincal.g'
os.system(syscommand)
syscommand='mv testgaincalscan.g testgaincal.g'
os.system(syscommand)
if (fracFlaggedSolnsScan > 0.05):
logprint ("Warning, large fraction of flagged solutions, there might be something wrong with your data", logfileout='logs/testBPdcals.log')
# determine max (shortsol1, shortsol2)
short_solint=max(shortsol1,shortsol2)
new_gain_solint1=str(short_solint)+'s'
logprint ("Using short solint = "+new_gain_solint1, logfileout='logs/testBPdcals.log')
# Plot solutions
logprint ("Plotting gain solutions", logfileout='logs/testgains.log')
nplots=int(numAntenna/3)
if ((numAntenna%3)>0):
nplots = nplots + 1
tb.open('testgaincal.g')
cpar=tb.getcol('CPARAM')
flgs=tb.getcol('FLAG')
tb.close()
amps=np.abs(cpar)
good=np.logical_not(flgs)
maxamp=np.max(amps[good])
plotmax=maxamp
for ii in range(nplots):
filename='testgaincal_amp'+str(ii)+'.png'
syscommand='rm -rf '+filename
os.system(syscommand)
#
antPlot=str(ii*3)+'~'+str(ii*3+2)
#
default('plotcal')
caltable='testgaincal.g'
xaxis='time'
yaxis='amp'
poln=''
field=''
antenna=antPlot
spw=''
timerange=''
subplot=311
overplot=False
clearpanel='Auto'
iteration='antenna'
plotrange=[0,0,0,plotmax]
showflags=False
plotsymbol='o'
plotcolor='blue'
markersize=5.0
fontsize=10.0
showgui=False
figfile=filename
plotcal()
for ii in range(nplots):
filename='testgaincal_phase'+str(ii)+'.png'
syscommand='rm -rf '+filename
os.system(syscommand)
#
antPlot=str(ii*3)+'~'+str(ii*3+2)
#
default('plotcal')
caltable='testgaincal.g'
xaxis='time'
yaxis='phase'
poln=''
field=''
antenna=antPlot
spw=''
timerange=''
subplot=311
overplot=False
clearpanel='Auto'
|
Genesis30/GeDRA | ds.py | Python | gpl-2.0 | 6,580 | 0.040578 | #!/usr/bin/python
#############################
# Function "calculateRisk"
# Given "data", calls the required function to compute the risk state of the
# system, and returns it.
#############################
def calculateRisk(data):
risk = computeRiskState(data[0], data[1], data[2], data[3], data[4], data[5], data[6])
return risk
#############################
# Function "computeRiskState"
# Given all the parameters required to use the DST, calls the functions
# required to compute risk index and risk distribution.
#############################
def computeRiskState(priority,AK, CK0, BK, RS0, priority_IDS, IDS_name):
    # Risk state = Risk Index [+] Risk Distribution
riskIndex = computeRiskIndex(AK, CK0, BK, RS0, priority_IDS, IDS_name)
riskState = computeRiskDistribution(priority,riskIndex)
return riskState
#############################
# Function "computeRiskDistribution"
# Given a riskIndex and the priority of the element in the system,
# computes the actual risk of the element.
#############################
def computeRiskDistribution(priority, riskIndex):
# Risk Distribution = Target Importance
#
# medium = [0,0.5][0.5,0.8][0.8,1.0]
# high = [0,0.4][0.4,0.7][0.7,1.0]
if priority <= 3:
if riskIndex <= 0.5:
#print 'Low risk: %s' % riskIndex
return riskIndex#0.3
elif riskIndex <=0.8:
#print 'Medium risk: %s' % riskIndex
return riskIndex#0.6
else:
#print 'High risk: %s' % riskIndex
return riskIndex#1.0
else:
if riskIndex <= 0.4:
#print 'Low risk: %s' % riskIndex
return riskIndex#0.3
elif riskIndex <=0.7:
#print 'Medium risk: %s' % riskIndex
return riskIndex#0.6
else:
#print 'High risk: %s' % riskIndex
return riskIndex#1.0
#############################
# Function "computeRiskIndex"
# Provided the correct parameters, calls the functions to calculate
# the factors involved in risk calculations, and correlates them.
#############################
def computeRiskIndex(AK, CK0, BK, RS0, priority_IDS, IDS_name):
# Risk Index = Alert Amount [+] Alert Confidence [+] Alter Type Number
# [+] Alert Severity [+] Alert Relevance Score
#
if IDS_name == "snort":
PIDS0 = 3
else:
PIDS0 = 1
mu = calculateMu(AK, CK0, BK, RS0, priority_IDS)
mk = calculateMk(mu,PIDS0)
prob = mk[0][1] + mk[1][1] + mk[2][1] + mk[3][1] + mk[4][1]
tmp = mk[0][0] + mk[1][0] + mk[2][0] + mk[3][0] + mk[4][0]
conflict = tmp + prob
result = prob/conflict
return result
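# Self-check sketch (illustrative, not part of the original module): a burst
# of alerts against a relevant, high-priority target should land near the top
# of the [0, 1] risk range for a Snort source.
def _demo_risk_index():
    return computeRiskIndex(12.0, 0.7, 9.0, 0.9, 1, "snort")  # ~0.9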
#############################
# Function "calculateMk"
# Factors involved in DST.
#############################
def calculateMk(mu, PIDS0):
mk = [[0]*2 for i in range(5)]
w = [0,0.1,0.2,0.3,0.4]
for i in range(5):
for j in range(2):
if j == 0:
mk[i][j] = mu[i][j] / ( mu[i][j] + mu[i][j+1] + 1 - w[i] * PIDS0)
else:
mk[i][j] = mu[i][j] / ( mu[i][j] + mu[i][j-1] + 1 - w[i] * PIDS0)
return mk
#############################
# Function "calculateMu"
# Given the parameters of the system, it will return the factors of risk/no risk
#
# AK : alert amount of an alert thread (not only attack strength but also attack confidence).
#
# CK0 : updated alert confidence [0,1] ; probability that an abnormal activity is a true attack.
#
# BK : attack confident situation & severity of the corresponding intrusion.
#
# RS0 : likelihood of a successful intrusion. Updated alert in an alert thread. [0,1]
#
#############################
def calculateMu(AK, CK0, BK, RS0, priority_IDS):
# alpha1 [5,15] ; alpha2 [10,20] ; alpha3 [15,30]
mu = [[0.0]*2 for i in range(5)]
#----------------
alpha1 = 5
alpha2 = 10
alpha3 = 15
#----------------
if AK <= alpha2:
mu[0][0] = float((alpha2-AK) / alpha2)
else:
mu[0][0] = 0.0
if alpha1 >= AK:
mu[0][1] = 0.0
elif alpha3 < AK:
mu[0][1] = 1.0
else:
mu[0][1] = float((AK - alpha1) / (alpha3 - alpha1))
# CK0 [0,1]
mu[1][0] = 1.0 - CK0
mu[1][1] = float(CK0)
# lambda1 [1,5] ; lambda2 [5,9] ; lambda3 [6,10]
#--------------
lambda1 = 1
lambda2 = 5
lambda3 = 6
#--------------
if BK <= lambda2:
mu[2][0] = float((lambda2 - BK) / lambda2)
else:
mu[2][0] = 0.0
if lambda1 >= BK:
mu[2][1] = 0.0
elif lambda3 < BK:
mu[2][1] = 1.0
else:
mu[2][1] = float((BK - lambda1) / (lambda3 - lambda1))
# phi = 3 ; PR0 = 4 - priority_IDS
PR0 = 4.0 - float(priority_IDS)
phi = 3.0
if PR0 <= phi:
mu[3][0] = float((phi - PR0) / phi)
mu[3][1] = float(PR0 / phi)
else:
mu[3][0] = 0.0
mu[3][1] = 1.0
# RS0 relevance score
mu[4][0] = 1.0 - RS0
mu[4][1] = float(RS0)
return mu
#############################
# Dictionary used to store information related to the system.
#############################
alert_num_dict = { 'web_server': 0,
'database_server': 0,
'voip_server': 0,
'router': 0,
'switch': 0,
'computer': 0,
'firewall': 0,
'printer': 0,
}
#############################
# Function "restartAlertNumber"
# Restarts the number of alerts recorded
#############################
def restartAlertNumber():
for element in alert_num_dict:
alert_num_dict[element] = 0
#############################
# Function "calculateParams"
# Given some information about the attack, computes it and
# decides the factors to compute the risk index.
#############################
def calculateParams(params, affected_element, affected_element_relevance, attack_rating):
IDS_name = 'snort'
step, classification, priority_IDS, protocol, sourceIp, destinationIp = params[1],params[2],params[3],params[4],params[5],params[6]
#priority_IDS = params
    alert_num_dict[affected_element] = alert_num_dict[affected_element]+1
# AK : alert amount of an alert thread (not only attack strength but also attack confidence).
# Calculate AK
    alert_number = alert_num_dict[affected_element]
AK = alert_number * 3.0 + float(affected_element_relevance) + float(attack_rating)
# CK0 : updated alert confidence [0,1] ; probability that an abnormal activity is a true attack.
# Calculate CK0
if alert_number == 0:
CK0 = 0.3
elif alert_number >=2:
CK0 = 0.7
else:
CK0 = 0.4
# BK : attack confident situation & severity of the corresponding intrusion.
# Calculate BK
BK = alert_number * 3.0 + float(affected_element_relevance)
    # RS0 : likelihood of a successful intrusion. Updated alert in an alert thread. [0,1]
# Calculate RS0
if alert_number <=3:
RS0 = CK0 + alert_number/10.0
else:
RS0 = 1.0
data = [0.0 for i in range(7)]
data[0], data[1], data[2], data[3], data[4], data[5], data[6] = affected_element_relevance, AK, CK0, BK, RS0, priority_IDS, IDS_name
return data |
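# End-to-end usage sketch (illustrative; the params layout mirrors the Snort
# alert fields unpacked in calculateParams above, and the values are made up):
def _demo_pipeline():
    params = [None, 1, "attempted-recon", 2, "TCP", "10.0.0.5", "10.0.0.80"]
    data = calculateParams(params, 'web_server', 8.0, 6.0)
    return calculateRisk(data)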
ds-hwang/deeplearning_udacity | tensorflow_examples/word2vec.py | Python | mit | 14,317 | 0.004051 | # coding: utf-8
# The goal of this assignment is to train a Word2Vec skip-gram model over [Text8](http://mattmahoney.net/dc/textdata) data.
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
import collections
import math
import numpy as np
import os
import random
import tensorflow as tf
import zipfile
from matplotlib import pylab
from six.moves import range
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
from sklearn.manifold import TSNE
def get_word2vec(epochs, override):
pickle_file = 'word2vec.pickle'
if os.path.exists(pickle_file) and not override:
print('%s already present' % pickle_file)
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
data = save['data']
count = save['count']
dictionary = save['dictionary']
embeddings = save['embeddings']
normalized_embeddings = save['normalized_embeddings']
weights = save['weights']
biases = save['biases']
del save # hint to help gc free up memory
return data, count, dictionary, embeddings, normalized_embeddings, weights, biases
# Download the data from the source website if necessary.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
        if not os.path.exists(filename):
            filename, _ = urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified %s' % filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a string.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
words = read_data(filename)
# 17005207
print('Data size %d' % len(words))
# Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count = unk_count + 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10])
del words # Hint to reduce memory.
def generate_batch(index, batch_size, num_skips, skip_window):
data_index = index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
windows_buffer = collections.deque(maxlen=span)
for _ in range(span):
windows_buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the windows_buffer
targets_to_avoid = [ skip_window ]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = windows_buffer[skip_window]
labels[i * num_skips + j, 0] = windows_buffer[target]
windows_buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels, data_index
print('data:', [reverse_dictionary[di] for di in data[:8]])
for batch_size, num_skips, skip_window in [(8, 2, 1), (8, 4, 2), (12, 6, 3), (16, 8, 4)]:
batch, labels, _ = generate_batch(0, batch_size=batch_size, num_skips=num_skips, skip_window=skip_window)
print('\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))
print(' batch:', [reverse_dictionary[bi] for bi in batch])
print(' labels:', [reverse_dictionary[li] for li in labels.reshape(batch_size)])
# ('Most common words (+UNK)', [['UNK', 418391], ('the', 1061396), ('of', 593677), ('and', 416629), ('one', 411764)])
# ('Sample data', [5239, 3084, 12, 6, 195, 2, 3137, 46, 59, 156])
# ('data:', ['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse', 'first'])
#
# with num_skips = 2 and skip_window = 1:
# (' batch:', ['originated', 'originated', 'as', 'as', 'a', 'a', 'term', 'term'])
# (' labels:', ['anarchism', 'as', 'originated', 'a', 'as', 'term', 'a', 'of'])
#
# with num_skips = 4 and skip_window = 2:
# (' batch:', ['as', 'as', 'as', 'as', 'a', 'a', 'a', 'a'])
# (' labels:', ['originated', 'term', 'anarchism', 'a', 'originated', 'of', 'as', 'term'])
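# Toy check of the windowing above (illustrative, pure Python): enumerate the
# (center, context) pairs that generate_batch samples from for skip_window = 1.
def _toy_skipgram_pairs(seq, skip_window=1):
    pairs = []
    for i in range(skip_window, len(seq) - skip_window):
        for j in range(i - skip_window, i + skip_window + 1):
            if j != i:
                pairs.append((seq[i], seq[j]))
    return pairs
# e.g. _toy_skipgram_pairs(['anarchism', 'originated', 'as'])
#      -> [('originated', 'anarchism'), ('originated', 'as')]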
# Train a skip-gram model.
def run_skip_gram():
batch_size = 126
num_sampled = 64 # Number of negative examples to sample.
# according to http://cs224d.stanford.edu/lectures/CS224d-Lecture3.pdf
# 17005207 (i.e 17M) -> 1B
embedding_size = 300 # Dimension of the embedding vector.
skip_window = 3 # How many words to consider left and right.
num_skips = 6 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(range(valid_window), valid_size))
graph = tf.Graph()
with graph.as_default(), tf.device('/cpu:0'):
# Input data.
train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
# random 16 samples
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Variables.
# vocabulary_size = 50000, embedding_size = 128
# embeddings == U (i.e. input vector) in cs224d
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
# weights == V (i.e. output vector) in cs224d
softmax_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Model.
# Look up embeddings for inputs.
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
# Compute the softmax loss, using a sample of the negative labels each time.
loss = tf.reduce_mean(
tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, embed,
train_labels, num_sampled, vocabulary_size))
# Optimizer.
optimizer = tf.tr |
IDragonfire/modular-client | src/fa/replayserver.py | Python | gpl-3.0 | 8,647 | 0.011218 | #-------------------------------------------------------------------------------
# Copyright (c) 2012 Gael Honorez.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Public License v3.0
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/gpl.html
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#-------------------------------------------------------------------------------
from PyQt4 import QtCore, QtNetwork, QtGui
import os
import logging
import util
import fa
import json
import time
INTERNET_REPLAY_SERVER_HOST = "faforever.com"
INTERNET_REPLAY_SERVER_PORT = 15000
class ReplayRecorder(QtCore.QObject):
'''
This is a simple class that takes all the FA replay data input from its inputSocket, writes it to a file,
and relays it to an internet server via its relaySocket.
'''
__logger = logging.getLogger("faf.fa.replayrecorder")
__logger.setLevel(logging.DEBUG)
def __init__(self, parent, login, local_socket, *args, **kwargs):
QtCore.QObject.__init__(self, *args, **kwargs)
self.parent = parent
self.inputSocket = local_socket
self.inputSocket.setSocketOption(QtNetwork.QTcpSocket.KeepAliveOption, 1)
self.inputSocket.readyRead.connect(self.readDatas)
self.inputSocket.disconnected.connect(self.inputDisconnected)
self.__logger.info("FA connected locally.")
#Create a file to write the replay data into
self.replayData = QtCore.QByteArray()
self.replayInfo = fa.exe.instance.info
# Open the relay socket to our server
self.relaySocket = QtNetwork.QTcpSocket(self.parent)
self.relaySocket.connectToHost(INTERNET_REPLAY_SERVER_HOST, INTERNET_REPLAY_SERVER_PORT)
if self.relaySocket.waitForConnected(1000): #Maybe make this asynchronous
self.__logger.debug("internet replay server " + self.relaySocket.peerName() + ":" + str(self.relaySocket.peerPort()))
else:
self.__logger.error("no connection to internet replay server")
def __del__(self):
# Clean up our socket objects, in accordance to the hint from the Qt docs (recommended practice)
self.__logger.debug("destructor entered")
self.inputSocket.deleteLater()
self.relaySocket.deleteLater()
def readDatas(self):
read = self.inputSocket.read(self.inputSocket.bytesAvailable()) #CAVEAT: readAll() was seemingly truncating data here
if not isinstance(read, basestring):
self.__logger.warning("Read failure on inputSocket: " + str(bytes))
return
#Convert data into a bytearray for easier processing
datas = QtCore.QByteArray(read)
# Record locally
if self.replayData.isEmpty():
#This prefix means "P"osting replay in the livereplay protocol of FA, this needs to be stripped from the local file
if datas.startsWith("P/"):
                rest = datas.indexOf("\x00") + 1
self.__logger.info("Stripping prefix '" + str(datas.left(rest)) + "' from replay.")
self.replayData.append(datas.right(datas.size() - rest))
else:
self.replayData.append(datas)
else:
#Write to buffer
self.replayData.append(datas)
# Relay to faforever.com
if self.relaySocket.isOpen():
self.relaySocket.write(datas)
def done(self):
self.__logger.info("closing replay file")
self.parent.removeRecorder(self)
@QtCore.pyqtSlot()
def inputDisconnected(self):
self.__logger.info("FA disconnected locally.")
# Part of the hardening - ensure all buffered local replay data is read and relayed
if self.inputSocket.bytesAvailable():
self.__logger.info("Relaying remaining bytes:" + str(self.inputSocket.bytesAvailable()))
self.readDatas()
# Part of the hardening - ensure successful sending of the rest of the replay to the server
if self.relaySocket.bytesToWrite():
self.__logger.info("Waiting for replay transmission to finish: " + str(self.relaySocket.bytesToWrite()) + " bytes")
progress = QtGui.QProgressDialog("Finishing Replay Transmission", "Cancel", 0, 0)
progress.show()
while self.relaySocket.bytesToWrite() and progress.isVisible():
QtGui.QApplication.processEvents()
progress.close()
self.relaySocket.disconnectFromHost()
self.writeReplayFile()
self.done()
def writeReplayFile(self):
# Update info block if possible.
if fa.exe.instance.info and fa.exe.instance.info['uid'] == self.replayInfo['uid']:
if fa.exe.instance.info.setdefault('complete', False):
self.__logger.info("Found Complete Replay Info")
else:
self.__logger.warn("Replay Info not Complete")
self.replayInfo = fa.exe.instance.info
self.replayInfo['game_end'] = time.time()
filename = os.path.join(util.REPLAY_DIR, str(self.replayInfo['uid']) + "-" + self.replayInfo['recorder'] + ".fafreplay")
self.__logger.info("Writing local replay as " + filename + ", containing " + str(self.replayData.size()) + " bytes of replay data.")
replay = QtCore.QFile(filename)
replay.open(QtCore.QIODevice.WriteOnly | QtCore.QIODevice.Text)
replay.write(json.dumps(self.replayInfo))
replay.write('\n')
replay.write(QtCore.qCompress(self.replayData).toBase64())
replay.close()
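    def readReplayFile(self, filename):
        # Read-back sketch (illustrative helper, not in the original class;
        # it mirrors the format written above): the first line is the JSON
        # info block, the remainder is the base64 of the qCompress()ed stream.
        replay = QtCore.QFile(filename)
        replay.open(QtCore.QIODevice.ReadOnly | QtCore.QIODevice.Text)
        info = json.loads(str(replay.readLine()))
        data = QtCore.qUncompress(QtCore.QByteArray.fromBase64(replay.readAll()))
        replay.close()
        return info, data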
class ReplayServer(QtNetwork.QTcpServer):
'''
This is a local listening server that FA can send its replay data to.
It will instantiate a fresh ReplayRecorder for each FA instance that launches.
'''
__logger = logging.getLogger("faf.fa.replayserver")
__logger.setLevel(logging.INFO)
def __init__(self, client, *args, **kwargs):
QtNetwork.QTcpServer.__init__(self, *args, **kwargs)
self.recorders = []
self.client = client
self.__logger.debug("initializing...")
self.newConnection.connect(self.acceptConnection)
def doListen(self,local_port):
while not self.isListening():
self.listen(QtNetwork.QHostAddress.LocalHost, local_port)
if (self.isListening()):
self.__logger.info("listening on address " + self.serverAddress().toString() + ":" + str(self.serverPort()))
else:
self.__logger.error("cannot listen, port probably used by another application: " + str(local_port))
answer = QtGui.QMessageBox.warning(None, "Port Occupied", "FAF couldn't start its local replay server, which is needed to play Forged Alliance online. Possible reasons:<ul><li><b>FAF is already running</b> (most likely)</li><li>another program is listening on port {port}</li></ul>".format(port=local_port), QtGui.QMessageBox.Retry, QtGui.QMessageBox.Abort)
if answer == QtGui.QMessageBox.Abort:
return False
|
sparkslabs/kamaelia_ | Sketches/AB/Bookmarks-Dev/App/Bookmarks.py | Python | apache-2.0 | 7,466 | 0.023306 | #! /usr/bin/python
'''
Bookmarks.py - Main Executable
- Identifies current BBC programmes and generates keywords based on them.
- Collects Twitter streaming API data based on generated keywords.
- Analyses the collected data to identify frequent words, hence allowing the web interface to generate bookmarks.
'''
### Danger area: Adding OAuth to both Twitter components will result in them both trying to renew the received key and secret
### To avoid this, there needs to be a way to pass received keys and secrets to components needing them before they try to make requests too.
### Also need to farm out access to config file from OAuth utilising components so they're more generic
# This program requires a config based on the included twitter-login.conf.dist saving to /home/<yourusername>/twitter-login.conf
# During the running of the program, it will create a file called tempRDF.txt in the running directory
# It will also create files called namecache.conf, linkcache.conf and oversizedtweets.conf in your home directory
# See the README for more information
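# Illustrative shape of twitter-login.conf (keys taken from the reads below;
# values are placeholders, not real credentials):
#   {"username": "...", "password": "...", "dbuser": "...", "dbpass": "...",
#    "bitlyusername": "...", "bitlyapikey": "...",
#    "consumerkey": "...", "consumersecret": "...",
#    "proxy": "http://proxy:8080", "key": "...", "secret": "..."}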
import os
import sys
from Kamaelia.Apps.SocialBookmarks.BBCProgrammes import WhatsOn
from Kamaelia.Apps.SocialBookmarks.DataCollector import DataCollector, RawDataCollector
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Util.TwoWaySplitter import TwoWaySplitter
from Kamaelia.Apps.SocialBookmarks.LiveAnalysis import FinalAnalysisNLTK, LiveAnalysis, LiveAnalysisNLTK
from Kamaelia.Apps.SocialBookmarks.Requester import Requester
from Kamaelia.Apps.SocialBookmarks.TweetFixer import LinkResolver, RetweetCorrector, RetweetFixer, TweetCleaner
from Kamaelia.Apps.SocialBookmarks.TwitterSearch import PeopleSearch
from Kamaelia.Apps.SocialBookmarks.TwitterStream import TwitterStream
from Kamaelia.Apps.SocialBookmarks.URLGetter import HTTPGetter
import cjson
if __name__ == "__main__":
# Load Config
try:
homedir = os.path.expanduser("~")
file = open(homedir + "/twitter-login.conf")
except IOError, e:
print ("Failed to load login data - exiting")
sys.exit(0)
raw_config = file.read()
file.close()
# Read Config
config = cjson.decode(raw_config)
username = config['username']
password = config['password']
dbuser = config['dbuser']
dbpass = config['dbpass']
bitlyusername = config['bitlyusername']
bitlyapikey = config['bitlyapikey']
# Set proxy server if available
if config.has_key('proxy'):
proxy = config['proxy']
else:
proxy = False
# Set OAuth consumer keypair
consumerkeypair = [config['consumerkey'],config['consumersecret']]
# Set OAuth secret keypair if available - if not it will be sourced from Twitter
if config.has_key('key') and config.has_key('secret'):
keypair = [config['key'],config['secret']]
else:
keypair = False
# Linker component for LiveAnalysis
LINKER = Graphline(LINKRESOLVE = LinkResolver(bitlyusername,bitlyapikey),
LINKREQUESTER = HTTPGetter(proxy, "BBC R&D Grabber", 10),
linkages = {("self", "inbox") : ("LINKRESOLVE", "inbox"),
("LINKRESOLVE", "outbox") : ("self", "outbox"),
("LINKRESOLVE", "urlrequests") : ("LINKREQUESTER", "inbox"),
("LINKREQUESTER", "outbox") : ("LINKRESOLVE", "responses")}).activate()
# Linker component for FinalAnalysis
# This duplication could probably be avoided by doing some tagging/filtering TODO
LINKERFINAL = Graphline(LINKRESOLVE = LinkResolver(bitlyusername,bitlyapikey),
LINKREQUESTER = HTTPGetter(proxy, "BBC R&D Grabber", 10),
linkages = {(" | self", "inbox") : ("LINKRESOLVE", "inbox"),
("LINKRESOLVE", "outbox") : ("self", "outbox"),
("LINKRESOLVE", "urlrequests") : ("LINKREQUESTER", "inbox"),
| ("LINKREQUESTER", "outbox") : ("LINKRESOLVE", "responses")}).activate()
system = Graphline(CURRENTPROG = WhatsOn(proxy),
REQUESTER = Requester("all",dbuser,dbpass), # Can set this for specific channels to limit Twitter requests whilst doing dev
FIREHOSE = TwitterStream(username, password, proxy, True, 40), # Twitter API sends blank lines every 30 secs so timeout of 40 should be fine
SEARCH = PeopleSearch(consumerkeypair, keypair, proxy),
COLLECTOR = DataCollector(dbuser,dbpass),
RAWCOLLECTOR = RawDataCollector(dbuser,dbpass),
HTTPGETTER = HTTPGetter(proxy, "BBC R&D Grabber", 10),
HTTPGETTERRDF = HTTPGetter(proxy, "BBC R&D Grabber", 10),
TWOWAY = TwoWaySplitter(),
ANALYSIS = LiveAnalysis(dbuser,dbpass),
NLTKANALYSIS = LiveAnalysisNLTK(dbuser,dbpass),
TWEETCLEANER = Pipeline(LINKER,RetweetFixer(),RetweetCorrector(dbuser,dbpass),TweetCleaner(['user_mentions','urls','hashtags'])),
NLTKANALYSISFINAL = FinalAnalysisNLTK(dbuser,dbpass),
TWEETCLEANERFINAL = Pipeline(LINKERFINAL,RetweetFixer(),RetweetCorrector(dbuser,dbpass),TweetCleaner(['user_mentions','urls','hashtags'])),
linkages = {("REQUESTER", "whatson") : ("CURRENTPROG", "inbox"), # Request what's currently broadcasting
("CURRENTPROG", "outbox") : ("REQUESTER", "whatson"), # Pass back results of what's on
("REQUESTER", "outbox") : ("FIREHOSE", "inbox"), # Send generated keywords to Twitter streaming API
("FIREHOSE", "outbox") : ("TWOWAY" , "inbox"),
("TWOWAY", "outbox") : ("COLLECTOR" , "inbox"),
("TWOWAY", "outbox2") : ("RAWCOLLECTOR" , "inbox"),
("REQUESTER", "search") : ("SEARCH", "inbox"), # Perform Twitter people search based on keywords
("SEARCH", "outbox") : ("REQUESTER", "search"), # Return Twitter people search results
("REQUESTER", "dataout") : ("HTTPGETTERRDF", "inbox"),
("CURRENTPROG", "dataout") : ("HTTPGETTER", "inbox"),
("HTTPGETTER", "outbox") : ("CURRENTPROG", "datain"),
("HTTPGETTERRDF", "outbox") : ("REQUESTER", "datain"),
("ANALYSIS", "nltk") : ("NLTKANALYSIS", "inbox"),
("NLTKANALYSIS", "outbox") : ("ANALYSIS", "nltk"),
("NLTKANALYSIS", "tweetfixer") : ("TWEETCLEANER", "inbox"),
("TWEETCLEANER", "outbox") : ("NLTKANALYSIS", "tweetfixer"),
("ANALYSIS", "nltkfinal") : ("NLTKANALYSISFINAL", "inbox"),
("NLTKANALYSISFINAL", "outbox") : ("ANALYSIS", "nltkfinal"),
("NLTKANALYSISFINAL", "tweetfixer") : ("TWEETCLEANERFINAL", "inbox"),
("TWEETCLEANERFINAL", "outbox") : ("NLTKANALYSISFINAL", "tweetfixer"),
}
).run()
|
fifengine/fifengine | engine/python/fife/extensions/fife_compat.py | Python | lgpl-2.1 | 2,742 | 0.016411 | # -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2019 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
"""
FIFE Backwards Combatibility Layer
==================================
This module can be imported if you want to
run code that wasn't adapted to API changes in FIFE.
2008.1
------
- Animation.addFrame now expects a fife.ResourcePtr instead of an fife.Image
- Pool.getIndex is just an alias for Pool.addResourceFromFile.
- EventManager.setNonConsumableKeys is superseded by EventManager.setKeyFilter
"""
from __future__ import print_function
from fife import fife
# Utility functions
def deprecated(revision,message):
print("fife_compat: Deprecation warning - See revision %d " % revision)
print(" - ",message)
def this_is_deprecated(func,revision=0,message=None):
if message is None:
message = repr(func) + " is deprecated."
def wrapped_func(*args,**kwargs):
deprecated(revision,message)
return func(*args,**kwargs)
return wrapped_func
def _compat_NonConsumableKeys():
class CompatKeyFilter(fife.IKeyFilter):
def __init__(self, keys):
fife.IKeyFilter.__init__(self)
self.keys = keys
def isFiltered(self, event):
return event.getKey().getValue() in self.keys
    def _setNonConsumableKeys(self,keys):
deprecated(2636, "Write an IKeyFilter instead of using EventManager.setNonConsumableKeys.\n" +
"You probably don't need it anyway")
self.compat_keyfilter = CompatKeyFilter(keys)
self.compat_keyfilter.__disown__()
self.setKeyFilter(self.compat_keyfilter)
def _getNonConsumableKeys(self,keys):
deprecated(2636, "Write an IKeyFilter instead of using EventManager.getNonConsumableKeys.")
return self.compat_keyfilter.keys
fife.EventManager.setNonConsumableKeys = _setNonConsumableKeys
fife.EventManager.getNonConsumableKeys = _getNonConsumableKeys
_compat_NonConsumableKeys()
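# Example of the replacement the deprecation messages point to (illustrative
# sketch, reusing the fife.IKeyFilter interface exercised above):
class ExampleKeyFilter(fife.IKeyFilter):
    """Filters out the key codes passed to the constructor."""
    def __init__(self, keys):
        fife.IKeyFilter.__init__(self)
        self.keys = keys
    def isFiltered(self, event):
        return event.getKey().getValue() in self.keys
# Typical wiring (commented out; needs a live engine/event manager):
#   keyfilter = ExampleKeyFilter([fife.Key.ESCAPE])
#   keyfilter.__disown__()
#   engine.getEventManager().setKeyFilter(keyfilter)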
|
huggingface/transformers | src/transformers/models/realm/tokenization_realm_fast.py | Python | apache-2.0 | 14,317 | 0.003772 | # coding=utf-8
# Copyright 2022 The REALM authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Tokenization classes for REALM."""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...file_utils import PaddingStrategy
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt",
"google/realm-cc-news-pretrained-encoder": "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt",
"google/realm-cc-news-pretrained-scorer": "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt",
"google/realm-cc-news-pretrained-openqa": "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt",
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont",
"google/realm-cc-news-pretrained-encoder": "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json",
"google/realm-cc-news-pretrained-scorer": "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json",
"google/realm-cc-news-pretrained-openqa": "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json",
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
r"""
Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
[`RealmTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation
splitting and wordpiece.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
clean_text (`bool`, *optional*, defaults to `True`):
Whether or not to clean the text before tokenization by removing any control characters and replacing all
whitespaces by the classic one.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
The prefix for subwords.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = RealmTokenizer
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
do_lower_case=True,
unk_token="[UNK]",
sep_token="[SEP]",
| pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
tokenize_chinese_chars=True,
strip_accents=None,
**kwargs
):
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
do_lower_case=do_lower_case,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
            mask_token=mask_token,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
**kwargs,
)
normalizer_state = |
tvlive/tv-crawler | tvChannelLoader.py | Python | apache-2.0 | 5,139 | 0.003308 | #!/usr/bin/env python
import urllib2
from bs4 import BeautifulSoup
from parsingLibrary import loadHtmlTags, parseChannel, remove_duplicate_elements
from pymongo import MongoClient
from mongoConfiguration import load_mongo_configuration
mongo_address, mongo_port = load_mongo_configuration()
client = MongoClient(mongo_address, mongo_port)
db = client['freeview']
channelCollection = db['tvChannel']
channelCollection.drop()
channelCategoryCollection = db['tvChannelCategory']
channelCategoryCollection.drop()
channelProviderCollection = db['tvChannelProvider']
channelProviderCollection.drop()
def get_channels(url):
url_channels = 'http://tvguideuk.telegraph.co.uk/' + url
print url
    a = BeautifulSoup(urllib2.urlopen(url_channels).read())
channels = a.findAll("div", {"class": "channel_name"})
list_channels = []
for channel in channels:
list_channels.append(channel.text)
return list_channels
def add_type_to_channel(channels_classified, channels_by_type, key, value):
for channel in channels_by_type:
if channel not in channels_classified:
channels_classified[channel] = {}
channels_classified[channel][key] = []
channels_classified[channel][key].append(value.upper())
channels_classified[channel]['name'] = parseChannel(channel.upper())
else:
if key in channels_classified[channel]:
channels_classified[channel][key].append(value.upper())
else:
channels_classified[channel][key] = []
channels_classified[channel][key].append(value.upper())
return channels_classified
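# Illustrative shape of the accumulator built above (example values assumed):
#   channels_classified = {
#       'BBC ONE': {'name': <parsed name>,
#                   'provider': ['FREEVIEW', 'TERRESTRIAL'],
#                   'category': ['NEWS & DOCUMENTARY']},
#       ...
#   }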
def find_channel_classifed(tags):
for tag_url in tags:
# if 'All' in tag_url:
# all_channels = getChannels(tag_url)
if 'Freeview' in tag_url:
freeview_channels = get_channels(tag_url)
if 'Terrestrial' in tag_url:
terrestrial_channels = get_channels(tag_url)
if 'Sky & Cable' in tag_url:
cable_all_channels = get_channels(tag_url)
if 'Films' in tag_url:
films_channels = get_channels(tag_url)
if 'Sport' in tag_url:
sport_channels = get_channels(tag_url)
if 'News & Doc' in tag_url:
news_channels = get_channels(tag_url)
if 'Kids' in tag_url:
kids_channels = get_channels(tag_url)
if 'Radio' in tag_url:
radio_channels = get_channels(tag_url)
channels_classified = {}
add_type_to_channel(channels_classified, freeview_channels, "provider", "FREEVIEW")
add_type_to_channel(channels_classified, terrestrial_channels, "provider", "TERRESTRIAL")
add_type_to_channel(channels_classified, cable_all_channels, "provider", "SKY & CABLE")
add_type_to_channel(channels_classified, films_channels, "category", "FILMS")
add_type_to_channel(channels_classified, sport_channels, "category", "SPORTS")
add_type_to_channel(channels_classified, news_channels, "category", "NEWS & DOCUMENTARY")
add_type_to_channel(channels_classified, kids_channels, "category", "KIDS")
add_type_to_channel(channels_classified, radio_channels, "category", "RADIO")
return channels_classified
from datetime import datetime
day = datetime.now().day
month = datetime.now().month
year = datetime.now().year
hours = ['12am', '2am', '4am', '6am', '8am', '10am', '12pm', '2pm', '4pm', '6pm', '8pm', '10pm']
channels_classified = {}
for hour in hours:
tags = loadHtmlTags(year, month, day, hour, 'All')
channels_classified_temp = find_channel_classifed(tags)
print '-------- ' + hour
for channel_classified_temp in channels_classified_temp:
if channel_classified_temp in channels_classified:
if channels_classified_temp[channel_classified_temp] != channels_classified[channel_classified_temp]:
print "-------- DIFFERENT"
print channels_classified_temp[channel_classified_temp]
print channels_classified[channel_classified_temp]
else:
channels_classified[channel_classified_temp] = channels_classified_temp[channel_classified_temp]
print channel_classified_temp + ' INSERTED'
for channel in channels_classified:
if 'category' not in channels_classified[channel]:
channels_classified[channel]['category'] = ['GENERIC']
if 'provider' in channels_classified[channel]:
        channels_classified[channel]['provider'] = remove_duplicate_elements(channels_classified[channel]['provider'])
else:
channels_classified[channel]['provider'] = ['UNKNOWN']
channelCollection.insert(channels_classified[channel])
providers = ["FREEVIEW", "TERRESTRIAL", "SKY & CABLE", "UNKOWN"]
categories = ["FILMS", "SPORTS", "NEWS & DOCUMENTARY", "KIDS", "RADIO", "GENERIC"]
for provider in providers:
json_to_insert = {}
json_to_insert['provider'] = provider
channelProviderCollection.insert(json_to_insert)
for category in categories:
json_to_insert = {}
json_to_insert['category'] = category
channelCategoryCollection.insert(json_to_insert)
|
yalcinozhabes/pythonJDFTx | ElectronicMinimize.py | Python | gpl-2.0 | 9,552 | 0.00157 | #!/usr/bin/python3
# Author: Yalcin Ozhabes
# email: yalcinozhabes@gmail.com
import copy
import time
import numpy as np
from mpi4py import MPI
from ase.calculators.calculator import Calculator, all_changes
from ase import Atoms
from ase.units import Bohr, Hartree
from JDFTxCalcCPU import JDFTxCalcCPU
try:
from JDFTxCalcGPU import JDFTxCalcGPU
except ImportError:
JDFTxCalcGPU = JDFTxCalcCPU
class ElectronicMinimize(JDFTxCalcCPU, Calculator):
"""
A calculator derived from JDFTxCalcCPU.
"""
implemented_properties = ['energy', 'forces']
@staticmethod
def _changeOrder(x, indexList):
if isinstance(x, np.ndarray):
out = copy.copy(x)
for i, ind in enumerate(indexList):
out[ind] = x[i]
return out
elif isinstance(x, Atoms):
out = [0] * len(x)
for i, ind in enumerate(indexList):
out[ind] = copy.copy(x[i])
return Atoms(out)
else:
raise TypeError("Can change the order of np.ndarray or ase.Atoms")
@staticmethod
def _createIndexLists(atoms):
"""JDFT has atoms ordered by their symbols so we need conversion tables
of indices:"""
symbols = {} # count number of occurances
species_order = []
for atom in atoms:
try:
symbols[atom.symbol] += 1
except KeyError:
species_order.append(atom.symbol)
symbols[atom.symbol] = 0
i = 0
for sp in species_order:
number_of_sp = symbols[sp] + 1
symbols[sp] = i
i += number_of_sp
toJDFTOrderIndexList = [0] * len(atoms)
fromJDFTOrderIndexList = [0] * len(atoms)
for ind, atom in enumerate(atoms):
toJDFTOrderIndexList[ind] = symbols[atom.symbol]
fromJDFTOrderIndexList[symbols[atom.symbol]] = ind
symbols[atom.symbol] += 1
return (toJDFTOrderIndexList, fromJDFTOrderIndexList)
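    # Round-trip sketch (illustrative, assuming an ase.Atoms object `a`):
    #   to_j, from_j = ElectronicMinimize._createIndexLists(a)
    #   [from_j[to_j[i]] for i in range(len(a))] == list(range(len(a)))
    # i.e. the two lists are inverse permutations built from the same
    # grouping of atoms by chemical symbol.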
def _toJDFTOrder(self, x):
return self._changeOrder(x, self._toJDFTOrderIndexList)
def _fromJDFTOrder(self, x):
return self._changeOrder(x, self._fromJDFTOrderIndexList)
def __init__(self, restart=None, ignore_bad_restart_file=False,
atoms=None, log=True, comm=None, **kwargs):
Calculator.__init__(self, restart, ignore_bad_restart_file,
"JDFT", atoms, **kwargs)
nThreads = kwargs['nThreads'] if 'nThreads' in kwargs else None
super(ElectronicMinimize, self).__init__(comm=comm, nThreads=nThreads,
log = log)
if 'kpts' in kwargs:
self.kpts = kwargs['kpts']
if 'settings' in kwargs:
self.settings = kwargs['settings']
if atoms is None:
return
elif not isinstance(atoms, Atoms):
raise TypeError("atoms should be ase.Atoms type.")
self._toJDFTOrderIndexList, self._fromJDFTOrderIndexList = self._createIndexLists(atoms)
self.cell = atoms.cell
if 'pseudopotential' in atoms.info:
            pspots = [atoms.info['pseudopotential']] * len(atoms)
elif 'pseudopotentials' in atoms.info:
pspots = atoms.info['pseudopotentials']
assert len(pspots) == len(atoms)
else:
pspots = None
for i, atom in enumerate(atoms):
if pspots:
atom.data['pseudopotential'] = pspots[i]
self.add_ion(atom)
t0 = time.time()
c0 = time.clock()
self.setup()
print("Wall Time for e.setup()", time.time()-t0, "seconds")
print("Process Time for e.setup()", time.clock()-c0, "seconds")
def updateAtomicPositions(self):
""""""
dpos = self.atoms.positions - self._fromJDFTOrder(self.getIonicPositions() * Bohr)
super(ElectronicMinimize, self).updateIonicPositions(self._toJDFTOrder(dpos / Bohr))
def calculate(self, atoms=None, properties=['energy'],
system_changes=all_changes):
"""Run one electronic minimize loop"""
super(ElectronicMinimize, self).calculate(atoms, properties, system_changes)
if 'positions' in system_changes:
self.updateAtomicPositions()
else:
print(system_changes)
t0 = time.time()
c0 = time.clock()
self.runElecMin()
print("Wall Time for self.runElecMin()", time.time()-t0, "seconds")
print("Process Time for self.runElecMin()", time.clock()-c0, "seconds")
energy = self.readTotalEnergy() * Hartree
forces = np.asarray(self.readForces(), dtype=np.double)
forces.resize((len(atoms), 3))
forces = self._fromJDFTOrder(forces)
forces = forces * Hartree / Bohr
self.results = {'energy': energy,
'forces': forces,
'stress': np.zeros(6),
'dipole': np.zeros(3),
'charges': np.zeros(len(atoms)),
'magmom': 0.0,
'magmoms': np.zeros(len(atoms))}
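# A minimal usage sketch (assumptions: ASE with ase.build available, and
# 'Si.uspp' standing in for a real pseudopotential path):
#   from ase.build import bulk
#   si = bulk('Si', 'diamond', a=5.43)
#   si.info['pseudopotentials'] = ['Si.uspp'] * len(si)
#   si.set_calculator(ElectronicMinimize(atoms=si, log=False))
#   print(si.get_potential_energy())  # total energy in eV
#   print(si.get_forces())            # forces in eV/Angstrom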
class ElectronicMinimizeGPU(JDFTxCalcGPU, Calculator):
"""
A calculator derived from JDFTxCalcGPU.
"""
implemented_properties = ['energy', 'forces']
@staticmethod
def _changeOrder(x, indexList):
if isinstance(x, np.ndarray):
out = copy.copy(x)
for i, ind in enumerate(indexList):
out[ind] = x[i]
return out
elif isinstance(x, Atoms):
out = [0] * len(x)
for i, ind in enumerate(indexList):
out[ind] = copy.copy(x[i])
return Atoms(out)
else:
raise TypeError("Can change the order of np.ndarray or ase.Atoms")
@staticmethod
def _createIndexLists(atoms):
"""JDFT has atoms ordered by their symbols so we need conversion tables
of indices:"""
symbols = {} # count number of occurances
| species_order = []
for atom | in atoms:
try:
symbols[atom.symbol] += 1
except KeyError:
species_order.append(atom.symbol)
symbols[atom.symbol] = 0
i = 0
for sp in species_order:
number_of_sp = symbols[sp] + 1
symbols[sp] = i
i += number_of_sp
toJDFTOrderIndexList = [0] * len(atoms)
fromJDFTOrderIndexList = [0] * len(atoms)
for ind, atom in enumerate(atoms):
toJDFTOrderIndexList[ind] = symbols[atom.symbol]
fromJDFTOrderIndexList[symbols[atom.symbol]] = ind
symbols[atom.symbol] += 1
return (toJDFTOrderIndexList, fromJDFTOrderIndexList)
def _toJDFTOrder(self, x):
return self._changeOrder(x, self._toJDFTOrderIndexList)
def _fromJDFTOrder(self, x):
return self._changeOrder(x, self._fromJDFTOrderIndexList)
def __init__(self, restart=None, ignore_bad_restart_file=False,
atoms=None, log=True, comm=None, **kwargs):
print "ElecMinGPU init running"
Calculator.__init__(self, restart, ignore_bad_restart_file,
"JDFT", atoms, **kwargs)
        nThreads = kwargs.get('nThreads')
        super(ElectronicMinimizeGPU, self).__init__(comm=comm, nThreads=nThreads,
                                                    log=log)
if 'kpts' in kwargs:
self.kpts = kwargs["kpts"]
if atoms is None:
return
elif not isinstance(atoms, Atoms):
raise TypeError("atoms should be ase.Atoms type.")
self._toJDFTOrderIndexList, self._fromJDFTOrderIndexList = self._createIndexLists(atoms)
self.cell = atoms.cell
for atom in atoms:
self.add_ion(atom)
t0 = time.time()
c0 = time.clock()
self.setup()
print("Wall Time for e.setup()", time.time()-t0, "seconds")
print("Process Time for e.setup()", time.clock()-c0, "seconds")
def updateAtomicPositions(self):
|
tardate/LittleCodingKata | python/random_mac_generation/random_mac.py | Python | mit | 558 | 0 | #! /usr/bin/env python
"""
Generate random MAC address
Inspired | by http://www.linux-kvm.com/sites/default/files/macgen.py
"""
from sys import argv
import random
DEFAULT_OUI = '00-16-3e' # Xens | ource, Inc.
def random_mac(oui=None):
"""returns a random MAC address, with optional +oui+ override"""
mac_parts = [oui or DEFAULT_OUI]
for limit in [0x7f, 0xff, 0xff]:
mac_parts.append("%02x" % random.randint(0x00, limit))
return '-'.join(mac_parts)
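# Example calls (assumed invocations; the random octets differ per run):
#   >>> random_mac()            # e.g. '00-16-3e-4f-22-07'
#   >>> random_mac('52-54-00')  # e.g. '52-54-00-1d-9e-03'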
if __name__ == '__main__':
    print(random_mac(argv[1] if len(argv) > 1 else None))
|
thunderhoser/GewitterGefahr | gewittergefahr/scripts/train_many_cnns_2d3d_myrorss.py | Python | mit | 14,391 | 0.000208 | """Trains many CNNs with 2-D and 3-D MYRORSS images.
The 2-D images contain azimuthal shear, and the 3-D images contain reflectivity.
"""
import os
import pickle
import argparse
import traceback
from multiprocessing import Pool, Manager
import numpy
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import soundings
from gewittergefahr.gg_utils import file_system_utils
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
NUM_GPU_PER_NODE = 8
TIME_FORMAT = '%Y-%m-%d-%H%M%S'
FIRST_BATCH_NUMBER = 0
LAST_BATCH_NUMBER = int(1e12)
REFLECTIVITY_HEIGHTS_M_AGL = numpy.linspace(1000, 12000, num=12, dtype=int)
SOUNDING_HEIGHTS_M_AGL = soundings.DEFAULT_HEIGHT_LEVELS_M_AGL + 0
ARGUMENT_FILES_ARG_NAME = 'argument_file_names'
ARGUMENT_FILES_HELP_STRING = (
'1-D list of paths to input files, each containing a dictionary of '
'arguments for the single-CNN script train_cnn_2d3d_myrorss.py. Each file '
'should be a Pickle file, containing only said dictionary.')
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + ARGUMENT_FILES_ARG_NAME, type=str, nargs='+', required=True,
help=ARGUMENT_FILES_HELP_STRING)
def _write_metadata_one_cnn(model_object, argument_dict):
"""Writes metadata for one CNN to file.
:param model_object: Untrained CNN (instance of `keras.models.Model` or
`keras.models.Sequential`).
:param argument_dict: See doc for `_train_one_cnn`.
:return: metadata_dict: See doc for `cnn.write_model_metadata`.
:return: training_option_dict: Same.
"""
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import input_examples
from gewittergefahr.deep_learning import \
training_validation_io as trainval_io
from gewittergefahr.scripts import deep_learning_helper as dl_helper
# Read input args.
sounding_field_names = argument_dict[dl_helper.SOUNDING_FIELDS_ARG_NAME]
if sounding_field_names[0] in ['', 'None']:
sounding_field_names = None
normalization_type_string = argument_dict[
dl_helper.NORMALIZATION_TYPE_ARG_NAME]
normalization_file_name = argument_dict[
dl_helper.NORMALIZATION_FILE_ARG_NAME]
min_normalized_value = argument_dict[dl_helper.MIN_NORM_VALUE_ARG_NAME]
max_normalized_value = argument_dict[dl_helper.MAX_NORM_VALUE_ARG_NAME]
target_name = argument_dict[dl_helper.TARGET_NAME_ARG_NAME]
downsampling_classes = numpy.array(
argument_dict[dl_helper.DOWNSAMPLING_CLASSES_ARG_NAME],
dtype=int
)
downsampling_fractions = numpy.array(
argument_dict[dl_helper.DOWNSAMPLING_FRACTIONS_ARG_NAME],
dtype=float
)
monitor_string = argument_dict[dl_helper.MONITOR_ARG_NAME]
weight_loss_function = bool(argument_dict[dl_helper.WEIGHT_LOSS_ARG_NAME])
x_translations_pixels = numpy.array(
argument_dict[dl_helper.X_TRANSLATIONS_ARG_NAME], dtype=int
)
y_translations_pixels = numpy.array(
argument_dict[dl_helper.Y_TRANSLATIONS_ARG_NAME], dtype=int
)
ccw_rotation_angles_deg = numpy.array(
argument_dict[dl_helper.ROTATION_ANGLES_ARG_NAME], dtype=float
| )
noise_standard_deviation = argument_dict[dl_helper.NOISE_STDEV_ARG_NAME]
num_noisings = argument_dict[dl_helper.NUM_NOISINGS_ARG_NAME]
flip_in_x = bool(argument_dict[dl_helper.FLIP_X_ARG_NAME])
flip_in_y = bool(argument_dict[dl_helper.FLIP_Y_ARG_NAME])
top_training_dir_name = argument_dict[dl_helper.TRAINING_DIR_ARG_NAME]
first_training_time_string = argument_dict[
| dl_helper.FIRST_TRAINING_TIME_ARG_NAME]
last_training_time_string = argument_dict[
dl_helper.LAST_TRAINING_TIME_ARG_NAME]
num_examples_per_train_batch = argument_dict[
dl_helper.NUM_EX_PER_TRAIN_ARG_NAME]
top_validation_dir_name = argument_dict[
dl_helper.VALIDATION_DIR_ARG_NAME]
first_validation_time_string = argument_dict[
dl_helper.FIRST_VALIDATION_TIME_ARG_NAME]
last_validation_time_string = argument_dict[
dl_helper.LAST_VALIDATION_TIME_ARG_NAME]
num_examples_per_validn_batch = argument_dict[
dl_helper.NUM_EX_PER_VALIDN_ARG_NAME]
num_epochs = argument_dict[dl_helper.NUM_EPOCHS_ARG_NAME]
num_training_batches_per_epoch = argument_dict[
dl_helper.NUM_TRAINING_BATCHES_ARG_NAME]
num_validation_batches_per_epoch = argument_dict[
dl_helper.NUM_VALIDATION_BATCHES_ARG_NAME]
output_dir_name = argument_dict[dl_helper.OUTPUT_DIR_ARG_NAME]
# Process input args.
first_training_time_unix_sec = time_conversion.string_to_unix_sec(
first_training_time_string, TIME_FORMAT)
last_training_time_unix_sec = time_conversion.string_to_unix_sec(
last_training_time_string, TIME_FORMAT)
first_validation_time_unix_sec = time_conversion.string_to_unix_sec(
first_validation_time_string, TIME_FORMAT)
last_validation_time_unix_sec = time_conversion.string_to_unix_sec(
last_validation_time_string, TIME_FORMAT)
if len(downsampling_classes) > 1:
downsampling_dict = dict(list(zip(
downsampling_classes, downsampling_fractions
)))
else:
downsampling_dict = None
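    # Illustrative mapping (hypothetical values): downsampling_classes=[0, 1]
    # with downsampling_fractions=[0.9, 0.1] gives {0: 0.9, 1: 0.1}; a single
    # class leaves downsampling disabled.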
translate_flag = (
len(x_translations_pixels) > 1
or x_translations_pixels[0] != 0 or y_translations_pixels[0] != 0
)
if not translate_flag:
x_translations_pixels = None
y_translations_pixels = None
if len(ccw_rotation_angles_deg) == 1 and ccw_rotation_angles_deg[0] == 0:
ccw_rotation_angles_deg = None
if num_noisings <= 0:
num_noisings = 0
noise_standard_deviation = None
# Find training and validation files.
training_file_names = input_examples.find_many_example_files(
top_directory_name=top_training_dir_name, shuffled=True,
first_batch_number=FIRST_BATCH_NUMBER,
last_batch_number=LAST_BATCH_NUMBER,
raise_error_if_any_missing=False)
validation_file_names = input_examples.find_many_example_files(
top_directory_name=top_validation_dir_name, shuffled=True,
first_batch_number=FIRST_BATCH_NUMBER,
last_batch_number=LAST_BATCH_NUMBER,
raise_error_if_any_missing=False)
# Write metadata.
metadata_dict = {
cnn.NUM_EPOCHS_KEY: num_epochs,
cnn.NUM_TRAINING_BATCHES_KEY: num_training_batches_per_epoch,
cnn.NUM_VALIDATION_BATCHES_KEY: num_validation_batches_per_epoch,
cnn.MONITOR_STRING_KEY: monitor_string,
cnn.WEIGHT_LOSS_FUNCTION_KEY: weight_loss_function,
cnn.CONV_2D3D_KEY: True,
cnn.VALIDATION_FILES_KEY: validation_file_names,
cnn.FIRST_VALIDN_TIME_KEY: first_validation_time_unix_sec,
cnn.LAST_VALIDN_TIME_KEY: last_validation_time_unix_sec,
cnn.NUM_EX_PER_VALIDN_BATCH_KEY: num_examples_per_validn_batch
}
if isinstance(model_object.input, list):
list_of_input_tensors = model_object.input
else:
list_of_input_tensors = [model_object.input]
upsample_refl = len(list_of_input_tensors) == 2
num_grid_rows = list_of_input_tensors[0].get_shape().as_list()[1]
num_grid_columns = list_of_input_tensors[0].get_shape().as_list()[2]
if upsample_refl:
num_grid_rows = int(numpy.round(num_grid_rows / 2))
num_grid_columns = int(numpy.round(num_grid_columns / 2))
training_option_dict = {
trainval_io.EXAMPLE_FILES_KEY: training_file_names,
trainval_io.TARGET_NAME_KEY: target_name,
trainval_io.FIRST_STORM_TIME_KEY: first_training_time_unix_sec,
trainval_io.LAST_STORM_TIME_KEY: last_training_time_unix_sec,
trainval_io.NUM_EXAMPLES_PER_BATCH_KEY: num_examples_per_train_batch,
trainval_io.RADAR_FIELDS_KEY:
input_examples.AZIMUTHAL_SHEAR_FIELD_NAMES,
trainval_io.RADAR_HEIGHTS_KEY: REFLECTIVITY_HEIGHTS_M_AGL,
trainval_io.SOUNDING_FIELDS_KEY: sounding_field_names,
trainval_io.SOUNDING_HEIGHTS_KEY: SOUNDING_HEIGHTS_M_AGL,
trainval_io.NUM_ROWS_KEY: num_grid_rows,
trainval_io.NUM_COLUMNS_KEY: num_grid_columns |
Royal-Society-of-New-Zealand/NZ-ORCID-Hub | orcid_api_v3/models/education_summary_v30.py | Python | mit | 13,754 | 0.000073 | # coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.created_date_v30 import CreatedDateV30 # noqa: F401,E501
from orcid_api_v3.models.external_i_ds_v30 import ExternalIDsV30 # noqa: F401,E501
from orcid_api_v3.models.fuzzy_date_v30 import FuzzyDateV30 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30 import LastModifiedDateV30 # noqa: F401,E501
from orcid_api_v3.models.organization_v30 import OrganizationV30 # noqa: F401,E501
from orcid_api_v3.models.source_v30 import SourceV30 # noqa: F401,E501
from orcid_api_v3.models.url_v30 import UrlV30 # noqa: F401,E501
class EducationSummaryV30(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'created_date': 'CreatedDateV30',
'last_modified_date': 'LastModifiedDateV30',
'source': 'SourceV30',
'put_code': 'int',
'department_name': 'str',
'role_title': 'str',
'start_date': 'FuzzyDateV30',
'end_date': 'FuzzyDateV30',
'organization': 'OrganizationV30',
'url': 'UrlV30',
'external_ids': 'ExternalIDsV30',
'display_index': 'str',
'visibility': 'str',
'path': 'str'
}
attribute_map = {
'created_date': 'created-date',
'last_modified_date': 'last-modified-date',
'source': 'source',
'put_code': 'put-code',
'department_name': 'department-name',
'role_ | title': 'role-title',
'start_date': 'start-date',
'end_date': 'end-date',
'organization': 'organization',
'url': | 'url',
'external_ids': 'external-ids',
'display_index': 'display-index',
'visibility': 'visibility',
'path': 'path'
}
def __init__(self, created_date=None, last_modified_date=None, source=None, put_code=None, department_name=None, role_title=None, start_date=None, end_date=None, organization=None, url=None, external_ids=None, display_index=None, visibility=None, path=None): # noqa: E501
"""EducationSummaryV30 - a model defined in Swagger""" # noqa: E501
self._created_date = None
self._last_modified_date = None
self._source = None
self._put_code = None
self._department_name = None
self._role_title = None
self._start_date = None
self._end_date = None
self._organization = None
self._url = None
self._external_ids = None
self._display_index = None
self._visibility = None
self._path = None
self.discriminator = None
if created_date is not None:
self.created_date = created_date
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if source is not None:
self.source = source
if put_code is not None:
self.put_code = put_code
if department_name is not None:
self.department_name = department_name
if role_title is not None:
self.role_title = role_title
if start_date is not None:
self.start_date = start_date
if end_date is not None:
self.end_date = end_date
if organization is not None:
self.organization = organization
if url is not None:
self.url = url
if external_ids is not None:
self.external_ids = external_ids
if display_index is not None:
self.display_index = display_index
if visibility is not None:
self.visibility = visibility
if path is not None:
self.path = path
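    # Construction sketch (illustrative values; every field is optional and
    # defaults to None):
    #   summary = EducationSummaryV30(department_name='Physics',
    #                                 role_title='Research Fellow',
    #                                 visibility='public')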
@property
def created_date(self):
"""Gets the created_date of this EducationSummaryV30. # noqa: E501
:return: The created_date of this EducationSummaryV30. # noqa: E501
:rtype: CreatedDateV30
"""
return self._created_date
@created_date.setter
def created_date(self, created_date):
"""Sets the created_date of this EducationSummaryV30.
:param created_date: The created_date of this EducationSummaryV30. # noqa: E501
:type: CreatedDateV30
"""
self._created_date = created_date
@property
def last_modified_date(self):
"""Gets the last_modified_date of this EducationSummaryV30. # noqa: E501
:return: The last_modified_date of this EducationSummaryV30. # noqa: E501
:rtype: LastModifiedDateV30
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this EducationSummaryV30.
:param last_modified_date: The last_modified_date of this EducationSummaryV30. # noqa: E501
:type: LastModifiedDateV30
"""
self._last_modified_date = last_modified_date
@property
def source(self):
"""Gets the source of this EducationSummaryV30. # noqa: E501
:return: The source of this EducationSummaryV30. # noqa: E501
:rtype: SourceV30
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this EducationSummaryV30.
:param source: The source of this EducationSummaryV30. # noqa: E501
:type: SourceV30
"""
self._source = source
@property
def put_code(self):
"""Gets the put_code of this EducationSummaryV30. # noqa: E501
:return: The put_code of this EducationSummaryV30. # noqa: E501
:rtype: int
"""
return self._put_code
@put_code.setter
def put_code(self, put_code):
"""Sets the put_code of this EducationSummaryV30.
:param put_code: The put_code of this EducationSummaryV30. # noqa: E501
:type: int
"""
self._put_code = put_code
@property
def department_name(self):
"""Gets the department_name of this EducationSummaryV30. # noqa: E501
:return: The department_name of this EducationSummaryV30. # noqa: E501
:rtype: str
"""
return self._department_name
@department_name.setter
def department_name(self, department_name):
"""Sets the department_name of this EducationSummaryV30.
:param department_name: The department_name of this EducationSummaryV30. # noqa: E501
:type: str
"""
self._department_name = department_name
@property
def role_title(self):
"""Gets the role_title of this EducationSummaryV30. # noqa: E501
:return: The role_title of this EducationSummaryV30. # noqa: E501
:rtype: str
"""
return self._role_title
@role_title.setter
def role_title(self, role_title):
"""Sets the role_title of this EducationSummaryV30.
:param role_title: The role_title of this EducationSummaryV30. # noqa: E501
:type: str
"""
self._role_title = role_title
@property
def start_date(self):
"""Gets the start_date of this EducationSummaryV30. # noqa: E501
:return: The start_date of this EducationSummaryV30. # noqa: E501
:rtype: FuzzyDateV30
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this EducationSummaryV30.
:param start_date: The start_date of this EducationSummaryV30. # noqa: E501
:type: FuzzyDateV30
"""
self._ |
ZombieNinjaPirate/kvmbuild | KVM/generate/ifobj.py | Python | lgpl-3.0 | 7,981 | 0.022052 | """These functions can be called to manage and generate various objects that are associated with
network interfaces. """
"""
Copyright (c) 2014, Are Hansen
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__author__ = 'Are Hansen'
__date__ = '2014, October 25'
__version__ = '0.0.2'
import random
def macaddress():
"""The function generates a 48 bit MAC address (IEEE 802). The first 24 bits belong to a legit
vendor, the last 24 bits are randomly generated. The function will generate a pool of 30 unique
MAC addresses, from which one is selected at random and returned from the function. """
# Astarte Technology Co: 00:0e:8b
mac01 = [ 0x00, 0x0e, 0x8b,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Xensource, Inc: 00:16:3e
mac02 = [ 0x00, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# ProCurve Networking by HP: 00:1f:fe
mac03 = [ 0x00, 0x1f, 0xfe,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# CISCO SYSTEMS, INC: a4:bc:c3
mac04 = [ 0xa4, 0xbc, 0xc3,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Netgear: 00:26:f2
mac05 = [ 0x00, 0x26, 0xf2,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Cisco-Linksys, LLC: 00:25:9c
mac06 = [ 0x00, 0x25, 0x9c,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# ASUSTek COMPUTER INC: bc:ae:c5
mac07 = [ 0xbc, 0xae, 0xc5,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Oracle Corporation (was: Sun Microsystems Inc.): 08:00:20
mac08 = [ 0x08, 0x00, 0x20,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Schenck Pegasus Corp: 00:05:fc
mac09 = [ 0x00, 0x05, 0xfc,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Cisco-Linksys, LLC (was: Sipura Technology, Inc): 00:0e:08
mac10 = [ 0x00, 0x0e, 0x08,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# NETGEAR INC: 28:c6:8e
mac11 = [ 0x28, 0xc6, 0x8e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Sun Microsystems: 00:03:ba
    mac12 = [ 0x00, 0x03, 0xba,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# ASUSTek COMPUTER INC: f4:6d:d4
mac13 = [ 0xf4, 0x6d, 0xd4,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Cisco-Linksys, LLC: c0:c1:c0
    mac14 = [ 0xc0, 0xc1, 0xc0,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Samsung Electronics Co.,Ltd: c8:7e:75
mac15 = [ 0xc8, 0x7e, 0x75,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# CSUN System Technology Co.,LTD: bc:39:a6
    mac16 = [ 0xbc, 0x39, 0xa6,
random.randint(0x00, 0x7f),
rand | om.randint(0x00, 0xff),
random.randint(0x0 | 0, 0xff) ]
# Belkin International: 94:44:52
mac17 = [ 0x94, 0x44, 0x52,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Oracle Corporation: 00:14:4f
mac18 = [ 0x00, 0x14, 0x4f,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Intel Corporate: 00:13:e8
mac19 = [ 0x00, 0x13, 0xe8,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# AMD: 00:0c:87
mac20 = [ 0x00, 0x0c, 0x87,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Dell Inc: 00:1e:c9
mac21 = [ 0x00, 0x1e, 0xc9,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Intel Corporation: 00:04:23
mac22 = [ 0x00, 0x04, 0x23,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Hewlett-Packard Company: 00:14:38
mac23 = [ 0x00, 0x14, 0x38,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# StarTech.com: e8:ea:6a
mac24 = [ 0xe8, 0xea, 0x6a,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# IBM Corp: 00:22:00
mac25 = [ 0x00, 0x22, 0x00,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Hewlett-Packard Company: 00:11:85
mac26 = [ 0x00, 0x11, 0x85,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Dell Inc: 00:22:19
mac27 = [ 0x00, 0x22, 0x19,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# IBM: 34:40:b5
mac28 = [ 0x34, 0x40, 0xb5,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Shenzhen Aero-Startech. Co.Ltd: 6c:3a:84
mac29 = [ 0x6c, 0x3a, 0x84,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
# Dell Inc: 00:13:72
mac30 = [ 0x00, 0x13, 0x72,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
mac_list = [ mac01, mac02, mac03, mac04, mac05,
mac06, mac07, mac08, mac09, mac10,
mac11, mac12, mac13, mac14, mac15,
mac16, mac17, mac18, mac19, mac20,
mac21, mac22, mac23, mac24, mac25,
mac26, mac27, mac28, mac29, mac30 ]
mac = random.choice(mac_list)
return ':'.join(map(lambda x: "%02x" % x, mac))
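# Example call (assumed; the low three octets vary per run):
#   >>> macaddress()
#   '00:16:3e:1a:2b:3c'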
|
Intel-bigdata/swift | test/unit/common/middleware/test_acl.py | Python | apache-2.0 | 6,539 | 0.000765 | # Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.middleware import acl
class TestACL(unittest.TestCase):
def test_clean_acl(self):
value = acl.clean_acl('header', '.r:*')
self.assertEquals(value, '.r:*')
value = acl.clean_acl('header', '.r:specific.host')
self.assertEquals(value, '.r:specific.host')
value = acl.clean_acl('header', '.r:.ending.with')
self.assertEquals(value, '.r:.ending.with')
value = acl.clean_acl('header', '.r:*.ending.with')
self.assertEquals(value, '.r:.ending.with')
value = acl.clean_acl('header', '.r:-*.ending.with')
self.assertEquals(value, '.r:-.ending.with')
value = acl.clean_acl('header', '.r:one,.r:two')
self.assertEquals(value, '.r:one,.r:two')
value = acl.clean_acl('header', '.r:*,.r:-specific.host')
self.assertEquals(value, '.r:*,.r:-specific.host')
value = acl.clean_acl('header', '.r:*,.r:-.ending.with')
self.assertEquals(value, '.r:*,.r:-.ending.with')
value = acl.clean_acl('header', '.r:one,.r:-two')
self.assertEquals(value, '.r:one,.r:-two')
value = acl.clean_acl('header', '.r:one,.r:-two,account,account:user')
self.assertEquals(value, '.r:one,.r:-two,account,account:user')
value = acl.clean_acl('header', 'TEST_account')
self.assertEquals(value, 'TEST_account')
value = acl.clean_acl('header', '.ref:*')
self.assertEquals(value, '.r:*')
value = acl.clean_acl('header', '.referer:*')
self.assertEquals(value, '.r:*')
value = acl.clean_acl('header', '.referrer:*')
self.assertEquals(value, '.r:*')
value = acl.clean_acl('header',
' .r : one , ,, .r:two , .r : - three ')
self.assertEquals(value, '.r:one,.r:two,.r:-three')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.unknown:test')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r:')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r:*.')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r : * . ')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r:-*.')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r : - * . ')
self.assertRaises(ValueError, acl.clean_acl, 'header', ' .r : ')
self.assertRaises(ValueError, acl.clean_acl, 'header', 'user , .r : ')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r:-')
self.assertRaises(ValueError, acl.clean_acl, 'header', ' .r : - ')
self.assertRaises(ValueError, acl.clean_acl, 'header',
'user , .r : - ')
self.assertRaises(ValueError, acl.clean_acl, 'write-header', '.r:r')
def test_parse_acl(self):
self.assertEquals(acl.parse_acl(None), ([], []))
self.assertEquals(acl.parse_acl(''), ([], []))
self.assertEquals(acl.parse_acl('.r:ref1'), (['ref1'], []))
self.assertEquals(acl.parse_acl('.r:-ref1'), (['-ref1'], []))
self.assertEquals(acl.parse_acl('account:user'),
([], ['account:user']))
self.assertEquals(acl.parse_acl('account'), ([], ['account']))
self.assertEquals(acl.parse_acl('acc1,acc2:usr2,.r:ref3,.r:-ref4'),
(['ref3', '-ref4'], ['acc1', 'acc2:usr2']))
self.assertEquals(acl.parse_acl(
'acc1,acc2:usr2,.r:ref3,acc3,acc4:usr4,.r:ref5,.r:-ref6'),
(['ref3', 'ref5', '-ref6'],
['acc1', 'acc2:usr2', 'acc3', 'acc4:usr4']))
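    # The cases above pin down the ACL string grammar: entries are
    # comma-separated; '.r:<referrer>' grants (or, with a '-' prefix, denies)
    # referrer access, a leading '.' or '*.' matches a domain suffix, and
    # bare 'account' or 'account:user' entries name account/group ACLs.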
def test_referrer_allowed(self):
self.assert_(not acl.referrer_allowed('host', None))
self.assert_(not acl.referrer_allowed('host', []))
self.assert_(acl.referrer_allowed(None, ['*']))
self.assert_(acl.referrer_allowed('', ['*']))
self.assert_(not acl.referrer_allowed(None, ['specific.host']))
self.assert_(not acl.referrer_allowed('', ['specific.host']))
self.assert_(acl.referrer_allowed('http://www.example.com/index.html',
['.example.com']))
self.assert_(acl.referrer_allowed(
'http://user@www.example.com/index.html', ['.example.com']))
self.assert_(acl.referrer_allowed(
'http://user:pass@www.example.com/index.html', ['.example.com']))
self.assert_(acl.referrer_allowed(
'http://www.example.com:8080/index.html', ['.example.com']))
self.assert_(acl.referrer_allowed(
'http://user@www.example.com:8080/index.html', ['.example.com']))
self.assert_(acl.referrer_allowed(
'http://user:pass@www.example.com:8080/index.html',
['.example.com']))
self.assert_(acl.referrer_allowed(
'http://user:pass@www.example.com:8080', ['.example.com']))
self.assert_(acl.referrer_allowed('http://www.example.com',
['.example.com']))
self.assert_(not acl.referrer_allowed('http://thief.example.com',
['.example.com', '-thief.example.com']))
sel | f.assert_(not acl.referrer_allowed('http://thief.example.com',
['*', '-thief.example.com']))
self.assert_(acl.referrer_allowed('http://www.example.com',
['.other.com', 'www.example.com']))
self.assert_(acl.referrer_allowed('http://www.example.com',
['-.example.com', 'www.example.com']))
# This is considered a relative uri to the re | quest uri, a mode not
# currently supported.
self.assert_(not acl.referrer_allowed('www.example.com',
['.example.com']))
self.assert_(not acl.referrer_allowed('../index.html',
['.example.com']))
self.assert_(acl.referrer_allowed('www.example.com', ['*']))
if __name__ == '__main__':
unittest.main()
|
deepmind/detcon | utils/augmentations.py | Python | apache-2.0 | 10,529 | 0.006743 | # Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data preprocessing and augmentations."""
import functools
from typing import Any, Mapping, Text
import dm_pix as pix
import jax
import jax.numpy as jnp
# typing
JaxBatch = Mapping[Text, jnp.ndarray]
ConfigDict = Mapping[Text, Any]
augment_config = dict(
view1=dict(
random_flip=False, # Random left/right flip
color_transform=dict(
apply_prob=1.0,
# Range of jittering
brightness=0.4,
contrast=0.4,
saturation=0.2,
hue=0.1,
# Probability of applying color jittering
color_jitter_prob=0.8,
# Probability of converting to grayscale
to_grayscale_prob=0.2,
# Shuffle the order of color transforms
shuffle=True),
gaussian_blur=dict(
apply_prob=1.0,
# Kernel size ~ image_size / blur_divider
blur_divider=10.,
# Kernel distribution
sigma_min=0.1,
sigma_max=2.0),
solarize=dict(apply_prob=0.0, threshold=0.5),
),
view2=dict(
random_flip=False,
color_transform=dict(
apply_prob=1.0,
brightness=0.4,
contrast=0.4,
saturation=0.2,
hue=0.1,
color_jitter_prob=0.8,
to_grayscale_prob=0.2,
shuffle=True),
gaussian_blur=dict(
apply_prob=0.1, blur_divider=10., sigma_min=0.1, sigma_max=2.0),
solarize=dict(apply_prob=0.2, threshold=0.5),
))
def postprocess(inputs: JaxBatch, rng: jnp.ndarray):
"""Apply the image augmentations to crops in inputs (view1 and view2)."""
def _postprocess_image(
images: jnp.ndarray,
rng: jnp.ndarray,
presets: ConfigDict,
) -> JaxBatch:
"""Applies augmentations in post-processing.
Args:
images: an NHWC tensor (with C=3), with float values in [0, 1].
rng: a single PRNGKey.
presets: a dict of presets for the augmentations.
Returns:
A batch of augmented images with shape NHWC, with keys view1, view2
and labels.
"""
flip_rng, color_rng, blur_rng, solarize_rng = ja | x.random.split(rng, 4)
out = images
if presets['random_flip']:
out = random_flip(out, flip_rng)
| if presets['color_transform']['apply_prob'] > 0:
out = color_transform(out, color_rng, **presets['color_transform'])
if presets['gaussian_blur']['apply_prob'] > 0:
out = gaussian_blur(out, blur_rng, **presets['gaussian_blur'])
if presets['solarize']['apply_prob'] > 0:
out = solarize(out, solarize_rng, **presets['solarize'])
out = jnp.clip(out, 0., 1.)
return jax.lax.stop_gradient(out)
rng1, rng2 = jax.random.split(rng, num=2)
view1 = _postprocess_image(inputs['view1'], rng1, augment_config['view1'])
view2 = _postprocess_image(inputs['view2'], rng2, augment_config['view2'])
outputs = dict(view1=view1, view2=view2, labels=inputs['labels'])
for k in ['fh_segmentations1', 'fh_segmentations2',
'gt_segmentations1', 'gt_segmentations2']:
if k in inputs:
outputs[k] = inputs[k]
return outputs
def _maybe_apply(apply_fn, inputs, rng, apply_prob):
should_apply = jax.random.uniform(rng, shape=()) <= apply_prob
return jax.lax.cond(should_apply, inputs, apply_fn, inputs, lambda x: x)
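# `_maybe_apply` gates `apply_fn` behind a Bernoulli draw using the older
# positional `jax.lax.cond` signature, cond(pred, true_operand, true_fun,
# false_operand, false_fun), which keeps both branches traceable under jit.
# Semantics sketch (hypothetical inputs):
#   out = _maybe_apply(lambda im: im * 0.5, jnp.ones((4, 4)),
#                      jax.random.PRNGKey(0), apply_prob=0.5)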
def _random_gaussian_blur(image, rng, kernel_size, padding, sigma_min,
sigma_max, apply_prob):
"""Applies a random gaussian blur."""
apply_rng, transform_rng = jax.random.split(rng)
def _apply(image):
sigma_rng, = jax.random.split(transform_rng, 1)
sigma = jax.random.uniform(
sigma_rng,
shape=(),
minval=sigma_min,
maxval=sigma_max,
dtype=jnp.float32)
return pix.gaussian_blur(image, sigma, kernel_size, padding=padding)
return _maybe_apply(_apply, image, apply_rng, apply_prob)
def _color_transform_single_image(image, rng, brightness, contrast, saturation,
hue, to_grayscale_prob, color_jitter_prob,
apply_prob, shuffle):
"""Applies color jittering to a single image."""
apply_rng, transform_rng = jax.random.split(rng)
perm_rng, b_rng, c_rng, s_rng, h_rng, cj_rng, gs_rng = jax.random.split(
transform_rng, 7)
# Whether the transform should be applied at all.
should_apply = jax.random.uniform(apply_rng, shape=()) <= apply_prob
# Whether to apply grayscale transform.
should_apply_gs = jax.random.uniform(gs_rng, shape=()) <= to_grayscale_prob
# Whether to apply color jittering.
should_apply_color = jax.random.uniform(cj_rng, shape=()) <= color_jitter_prob
# Decorator to conditionally apply fn based on an index.
def _make_cond(fn, idx):
def identity_fn(unused_rng, x):
return x
def cond_fn(args, i):
def clip(args):
return jax.tree_map(lambda arg: jnp.clip(arg, 0., 1.), args)
out = jax.lax.cond(should_apply & should_apply_color & (i == idx), args,
lambda a: clip(fn(*a)), args,
lambda a: identity_fn(*a))
return jax.lax.stop_gradient(out)
return cond_fn
random_brightness = functools.partial(
pix.random_brightness, max_delta=brightness)
random_contrast = functools.partial(
pix.random_contrast, lower=1-contrast, upper=1+contrast)
random_hue = functools.partial(pix.random_hue, max_delta=hue)
random_saturation = functools.partial(
pix.random_saturation, lower=1-saturation, upper=1+saturation)
to_grayscale = functools.partial(pix.rgb_to_grayscale, keep_dims=True)
random_brightness_cond = _make_cond(random_brightness, idx=0)
random_contrast_cond = _make_cond(random_contrast, idx=1)
random_saturation_cond = _make_cond(random_saturation, idx=2)
random_hue_cond = _make_cond(random_hue, idx=3)
def _color_jitter(x):
if shuffle:
order = jax.random.permutation(perm_rng, jnp.arange(4, dtype=jnp.int32))
else:
order = range(4)
for idx in order:
if brightness > 0:
x = random_brightness_cond((b_rng, x), idx)
if contrast > 0:
x = random_contrast_cond((c_rng, x), idx)
if saturation > 0:
x = random_saturation_cond((s_rng, x), idx)
if hue > 0:
x = random_hue_cond((h_rng, x), idx)
return x
out_apply = _color_jitter(image)
out_apply = jax.lax.cond(should_apply & should_apply_gs, out_apply,
to_grayscale, out_apply, lambda x: x)
return jnp.clip(out_apply, 0., 1.)
def random_flip(images, rng):
rngs = jax.random.split(rng, images.shape[0])
return jax.vmap(pix.random_flip_left_right)(rngs, images)
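# `random_flip` draws one key per image and vmaps the per-image flip over the
# batch. Minimal sketch (hypothetical shapes):
#   images = jnp.zeros((8, 32, 32, 3))
#   flipped = random_flip(images, jax.random.PRNGKey(0))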
def color_transform(images,
rng,
brightness=0.8,
contrast=0.8,
saturation=0.8,
hue=0.2,
color_jitter_prob=0.8,
to_grayscale_prob=0.2,
apply_prob=1.0,
shuffle=True):
"""Applies color jittering and/or grayscaling to a batch of images.
Args:
images: an NHWC tensor, with C=3.
rng: a single PRNGKey.
brightness: the range of jitter on brightness.
contrast: the range of jitter on contrast.
saturation: the range of jitter on saturation.
hue: the range of jitter on hue.
color_jitter_prob: the probability of applying color jittering.
to_grayscale_prob: the probability of converting the |
Havate/havate-openstack | proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/project/vpn/urls.py | Python | apache-2.0 | 1,933 | 0.000517 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Tatiana Mazur
from django.conf.urls.defaults import patterns # noqa
from django.conf.urls.defaults import url # noqa
from openstack_dashboard.dashboards.project.vpn import views
urlpatterns = patterns('openstack_dashboard.dashboards.project.vpn.views',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^addikepolicy$',
views.AddIKEPolicyView.as_view(), name='addikepolicy'),
url(r'^addipsecpolicy$',
views.AddIPSecPolicyView.as_view(), name='addipsecpolicy'),
url(r'^addipsecsitecon | nection$',
views.AddIPSecSiteConnectionView.as_view(),
name='addipsecsiteconnection'),
url(r'^addvpnservice$',
views.AddVPNServiceView.as_view(), name='addvpnservice'),
url(r'^ikepolicy/(?P<ikepolicy_id>[^/]+)/$',
views.IKEPolicyDetailsView.as_view(), name='ikep | olicydetails'),
url(r'^ipsecpolicy/(?P<ipsecpolicy_id>[^/]+)/$',
views.IPSecPolicyDetailsView.as_view(), name='ipsecpolicydetails'),
url(r'^vpnservice/(?P<vpnservice_id>[^/]+)/$',
views.VPNServiceDetailsView.as_view(), name='vpnservicedetails'),
url(r'^ipsecsiteconnection/(?P<ipsecsiteconnection_id>[^/]+)/$',
views.IPSecSiteConnectionDetailsView.as_view(),
name='ipsecsiteconnectiondetails'))
|
RealTimeWeb/wikisite | MoinMoin/web/utils.py | Python | apache-2.0 | 10,269 | 0.002532 | # -*- coding: iso-8859-1 -*-
"""
MoinMoin - Utility functions for the web-layer
@copyright: 2003-2008 MoinMoin:ThomasWaldmann,
2008-2008 MoinMoin:FlorianKrupicka
@license: GNU GPL, see COPYING for details.
"""
import time
from werkzeug import abort, redirect, cookie_date, Response
from MoinMoin import caching
from MoinMoin import log
from MoinMoin import wikiutil
from MoinMoin.Page import Page
from MoinMoin.web.exceptions import Forbidden, SurgeProtection
logging = log.getLogger(__name__)
def check_forbidden(request):
""" Simple action and host access checks
Spider agents are checked against the called actions,
hosts against the blacklist. Raises Forbidden if triggered.
"""
args = request.args
action = args.get('action')
if ((args or request.method != 'GET') and
action not in ['rss_rc', 'show', 'sitemap'] and
not (action == 'AttachFile' and args.get('do') == 'get')):
if request.isSpiderAgent:
raise Forbidden()
if request.cfg.hosts_deny:
remote_addr = request.remote_addr
for host in request.cfg.hosts_deny:
if host[-1] == '.' and remote_addr.startswith(host):
logging.debug("hosts_deny (net): %s" % remote_addr)
raise Forbidden()
if remote_addr == host:
logging.debug("hosts_deny (ip): %s" % remote_addr)
raise Forbidden()
return False
def check_surge_protect(request, kick=False, action=None, username=None):
""" Check for excessive requests
Raises a SurgeProtection exception on wiki overuse.
@param request: a moin request object
@param kick: immedia | tely ban this user
@param action: specify the action explicitly (default: requ | est.action)
@param username: give username (for action == 'auth-name')
"""
limits = request.cfg.surge_action_limits
if not limits:
return False
remote_addr = request.remote_addr or ''
if remote_addr.startswith('127.'):
return False
validuser = request.user.valid
current_action = action or request.action
if current_action == 'auth-ip':
# for checking if some specific ip tries to authenticate too often,
# not considering the username it tries to authenticate as (could
# be many different names)
if current_action not in limits:
# if admin did not add this key to the limits configuration, do nothing
return False
current_id = remote_addr
elif current_action == 'auth-name':
# for checking if some username tries to authenticate too often,
# not considering the ip the request comes from (could be a distributed
# attack on a high-privilege user)
if current_action not in limits:
# if admin did not add this key to the limits configuration, do nothing
return False
current_id = username
else:
# general case
current_id = validuser and request.user.name or remote_addr
default_limit = limits.get('default', (30, 60))
now = int(time.time())
surgedict = {}
surge_detected = False
try:
# if we have common farm users, we could also use scope='farm':
cache = caching.CacheEntry(request, 'surgeprotect', 'surge-log', scope='wiki', use_encode=True)
if cache.exists():
data = cache.content()
data = data.split("\n")
for line in data:
try:
id, t, action, surge_indicator = line.split("\t")
t = int(t)
maxnum, dt = limits.get(action, default_limit)
if t >= now - dt:
events = surgedict.setdefault(id, {})
timestamps = events.setdefault(action, [])
timestamps.append((t, surge_indicator))
except StandardError:
pass
maxnum, dt = limits.get(current_action, default_limit)
events = surgedict.setdefault(current_id, {})
timestamps = events.setdefault(current_action, [])
surge_detected = len(timestamps) > maxnum
surge_indicator = surge_detected and "!" or ""
timestamps.append((now, surge_indicator))
if surge_detected:
if len(timestamps) < maxnum * 2:
timestamps.append((now + request.cfg.surge_lockout_time, surge_indicator)) # continue like that and get locked out
if current_action not in ('cache', 'AttachFile', ): # don't add cache/AttachFile accesses to all or picture galleries will trigger SP
action = 'all' # put a total limit on user's requests
maxnum, dt = limits.get(action, default_limit)
events = surgedict.setdefault(current_id, {})
timestamps = events.setdefault(action, [])
if kick: # ban this guy, NOW
timestamps.extend([(now + request.cfg.surge_lockout_time, "!")] * (2 * maxnum))
surge_detected = surge_detected or len(timestamps) > maxnum
surge_indicator = surge_detected and "!" or ""
timestamps.append((now, surge_indicator))
if surge_detected:
if len(timestamps) < maxnum * 2:
timestamps.append((now + request.cfg.surge_lockout_time, surge_indicator)) # continue like that and get locked out
data = []
for id, events in surgedict.items():
for action, timestamps in events.items():
for t, surge_indicator in timestamps:
data.append("%s\t%d\t%s\t%s" % (id, t, action, surge_indicator))
data = "\n".join(data)
cache.update(data)
except StandardError:
pass
if surge_detected and validuser and request.user.auth_method in request.cfg.auth_methods_trusted:
logging.info("Trusted user %s would have triggered surge protection if not trusted.", request.user.name)
return False
elif surge_detected:
logging.warning("Surge Protection: action=%s id=%s (ip: %s)", current_action, current_id, remote_addr)
raise SurgeProtection(retry_after=request.cfg.surge_lockout_time)
else:
return False
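# Each surge-log cache line is tab-separated, "<id>\t<time>\t<action>\t<flag>",
# where <flag> is "!" while the client counts as surging. Illustrative line
# (hypothetical client): "192.0.2.7\t1300000000\tshow\t!".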
def redirect_last_visited(request):
pagetrail = request.user.getTrail()
if pagetrail:
# Redirect to last page visited
last_visited = pagetrail[-1]
wikiname, pagename = wikiutil.split_interwiki(last_visited)
if wikiname != request.cfg.interwikiname and wikiname != 'Self':
wikitag, wikiurl, wikitail, error = wikiutil.resolve_interwiki(request, wikiname, pagename)
url = wikiurl + wikiutil.quoteWikinameURL(wikitail)
else:
url = Page(request, pagename).url(request)
else:
# Or to localized FrontPage
url = wikiutil.getFrontPage(request).url(request)
url = request.getQualifiedURL(url)
return abort(redirect(url))
class UniqueIDGenerator(object):
def __init__(self, pagename=None):
self.unique_stack = []
self.include_stack = []
self.include_id = None
self.page_ids = {None: {}}
self.pagename = pagename
def push(self):
"""
Used by the TOC macro, this ensures that the ID namespaces
are reset to the status when the current include started.
This guarantees that doing the ID enumeration twice results
in the same results, on any level.
"""
self.unique_stack.append((self.page_ids, self.include_id))
self.include_id, pids = self.include_stack[-1]
self.page_ids = {}
for namespace in pids:
self.page_ids[namespace] = pids[namespace].copy()
def pop(self):
"""
Used by the TOC macro to reset the ID namespaces after
having parsed the page for TOC generation and after
printing the TOC.
"""
self.page_ids, self.include_id = self.unique_stack.pop()
return self.page_ids, self.include_id
def begin(self, base):
"""
Called by the formatter when a document begins, which means
th |
Erotemic/local | depricated/speech/Examples/_foxitreader.py | Python | gpl-3.0 | 2,827 | 0.005664 | #
# This file is a command-module for Dragonfly.
# (c) Copyright 2008 by Christo Butcher
# Licensed under the LGPL, see <http://www.gnu.org/licenses/>
#
"""
Command-module for **Foxit Reader** PDF viewer
============================================================================
This module offers commands to control `Foxit Reader
<http://www.foxitsoftware.com/pdf/rd_intro.php>`_, a free
and lightweight PDF reader.
Installation
----------------------------------------------------------------------------
If you are using DNS and Natlink, simply place this file in you Natlink
macros directory. It will then be automatically loaded by Natlink when
you next toggle your microphone or restart Natlink.
"""
try:
import pkg_resources
pkg_resources.require("dragonfly >= 0.6.5beta1.dev-r76")
except ImportError:
pass
from dragonfly import (Grammar, AppContext, MappingRule, Dictation,
Key, Text, Config, Section, Item, IntegerRef)
#------------------------------------ | ---------------------------------------
# Set up this module's configuration.
config = Config("Foxit reader control")
config.lang = Section("Language section")
config.lang.new_win = Item("new (window | win)")
#config.generate_config_file()
config.load()
#---------------------------------------------------------------------------
# Create the main command rule.
class Com | mandRule(MappingRule):
mapping = {
"zoom in [<n>]": Key("c-equals:%(n)d"),
"zoom out [<n>]": Key("c-hyphen:%(n)d"),
"zoom [one] hundred": Key("c-1"),
"zoom [whole | full] page": Key("c-2"),
"zoom [page] width": Key("c-3"),
"find <text>": Key("c-f") + Text("%(text)s")
+ Key("f3"),
"find next": Key("f3"),
"[go to] page <int>": Key("cs-n") + Text("%(int)d\n"),
"print file": Key("c-p"),
"print setup": Key("a-f, r"),
}
extras = [
IntegerRef("n", 1, 10),
IntegerRef("int", 1, 10000),
Dictation("text"),
]
defaults = {
"n": 1,
}
#---------------------------------------------------------------------------
# Create and load this module's grammar.
context = AppContext(executable="foxitr")
grammar = Grammar("foxit reader", context=context)
grammar.add_rule(CommandRule())
grammar.load()
# Unload function which will be called by natlink at unload time.
def unload():
global grammar
if grammar: grammar.unload()
grammar = None
|
ecologylab/BigSemanticsService | Scripts/utils/crawl.py | Python | apache-2.0 | 3,552 | 0.020552 | #!python
import sys
import io
import re
import urllib
import urllib2
import urlparse
import lxml.etree
def get_outlinks(url):
    '''
    Fetch the page at `url` and return a list of the absolute http:// URLs
    of all anchors found on it; returns an empty list on any failure.

    url: the url to the page to crawl
    '''
result = []
if url is None:
return result
html = None
resp = None
try:
url = url.strip()
resp = urllib2.urlopen(url)
if resp.code == 200:
html = resp.read()
    except Exception as e:
print "can't access {0}: {1}".format(url, e)
finally:
if resp is not None:
resp.close()
if html is None:
return result
html_parser = lxml.etree.HTMLParser()
try:
uhtml = html.decode('utf-8', 'ignore')
tree = lxml.etree.parse(io.StringIO(uhtml), html_parser)
anchors = tree.xpath('//a')
for anchor in anchors:
href = anchor.attrib.get('href', None)
if href is not None:
href = href.strip()
dest = urlparse.urljoin(url, href)
if dest.startswith('http://'):
result.append(dest)
except Exception as e:
print "can't parse {0}: {1}".format(url, e)
return result
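# Sketch of the contract (hypothetical page): for a document containing
# <a href="/a"> and <a href="http://other/b">, get_outlinks('http://h/p')
# returns ['http://h/a', 'http://other/b']; relative hrefs are resolved
# against the page URL via urlparse.urljoin.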
def crawl(urls,
          max_to_handle,
          handle_url,
          crawl_test=None,
          handle_test=None):
    """Breadth-first crawl over `urls`: pages passing `crawl_test` are
    expanded, pages passing `handle_test` are passed to `handle_url`,
    until `max_to_handle` pages have been handled."""
handled = []
visited = set()
i = 0
p = 0
while len(handled) < max_to_handle and i < len(urls):
url = urls[i]
| if url not in visited and crawl_test(url):
outlinks = get_outlinks(url)
visited.add(url)
urls.extend(outlinks)
if handle_test(url) and url not in handled:
handle_url | (url, p + 1, max_to_handle)
handled.append(url)
p += 1
i += 1
return handled
def call_semantics_service(url, i, max_to_handle):
service_pattern = "http://ecology-service.cse.tamu.edu/BigSemanticsService/metadata.xml?url={0}"
qurl = urllib.quote(url)
surl = service_pattern.format(qurl)
resp = urllib2.urlopen(surl)
content = resp.read()
is_downloaded = content.find('DOWNLOAD_DONE') >= 0
is_typed = content.find('</amazon_product>') >= 0
if resp.code == 200 and is_downloaded and is_typed:
print "[{0}/{1}] service called on {2}".format(
i, max_to_handle, url)
else:
print "[{0}/{1}] error calling service: {2}: c={3}, d={4}, t={5}".format(
i, max_to_handle, surl, resp.code, is_downloaded, is_typed)
def call_downloader_service(url, i, max_to_handle):
agent = "Mozilla%2F5.0%20(Windows%20NT%206.2%3B%20Win64%3B%20x64)%20AppleWebKit%2F537.36%20(KHTML%2C%20like%20Gecko)%20Chrome%2F32.0.1667.0%20Safari%2F537.36"
service_pattern = "http://ecology-service.cse.tamu.edu/DownloaderPool/page/download.xml?url={0}&agent={1}"
qurl = urllib.quote(url)
resp = urllib2.urlopen(service_pattern.format(qurl, agent))
if resp.code == 200:
print "[{0}/{1}] successful downloading invocation on {2}".format(
i, max_to_handle, url)
else:
print "[{0}/{1}] downloading error code {2} for {3}".format(
i, max_to_handle, resp.code, url)
if __name__ == '__main__':
if len(sys.argv) < 3:
print "usage: {0} <url_lst_file> <max_to_handle>".format(sys.argv[0])
quit()
f = open(sys.argv[1])
urls = f.readlines()
n = int(sys.argv[2])
crawl_test = lambda(url): url.find('amazon.com') > 0;
p_prod = r'^http://www.amazon.com/([^/]+/)?dp/[^/]+';
handle_test = lambda(url): re.search(p_prod, url) is not None;
handled = crawl(urls, n, call_semantics_service, crawl_test, handle_test);
for url in handled:
print url
|
lenoch/tagsetbench | makefile.py | Python | mit | 5,682 | 0 | from tagsetbench import ShellPath
class Makefile:
def __init__(self, path=None, recipes=None):
        self.path = path  # TODO: all of these will live in working_dir, so 'name' is enough
        # TODO: self.declarations? (as a dict, so that they can be referenced)
# split_corpora=' \\\n\t'.join(self.split_corpora),
# """SPLIT_CORPORA = \
# {split_corpora}""".format(split_corpora)
self.recipes = recipes or []
def write(self):
with self.path.open('w') as f:
delete_on_error_targets = []
for recipe in self.recipes:
if isinstance(recipe, MakefileRecipe) and \
recipe.delete_on_error:
delete_on_error_targets.extend(recipe.targets)
if delete_on_error_targets:
delete_on_error_recipe = MakefileRecipe(
targets=['.DELETE_ON_ERROR'],
dependencies=delete_on_error_targets,
)
for line in delete_on_error_recipe.lines():
print(line, file=f)
print(file=f)
for recipe in self.recipes:
if isinstance(recipe, MakefileRecipe):
for line in recipe.lines():
print(line, file=f)
else:
print(recipe, file=f)
print(file=f)
class MakefileRecipe:
def __init__(self, targets=None, dependencies=None, commands=None,
delete_on_error=True):
self.targets = targets or []
self.dependencies = dependencies or []
self.commands = commands or []
self.delete_on_error = delete_on_error
def lines(self):
"""
Solution to building multiple targets only once was inspired by
https://www.cmcrossroads.com/article/rules-multiple-outputs-gnu-make
"""
        # TODO: 'make' does not cope well with spaces, but I could give ' ' one more try
if len(self.targets) > 1:
for target, next_target in zip(self.targets, self.targets[1:]):
yield '{}: {}'.format(self._get_filename(target),
self._get_filename(next_target))
yield '{}: {}'.format(self._get_filename(self.targets[-1]),
' '.join(self._get_filename(dep) for dep
in self.dependencies))
for command in self.commands:
yield from self._pretty_print_command(command)
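    # Illustrative output for targets=['a', 'b'], dependencies=['c'] and one
    # command (hypothetical names): the chained rules read
    #   a: b
    #   b: c
    #   <TAB>cmd ...
    # so asking make for either target runs the recipe exactly once.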
@classmethod
def _pretty_print_command(cls, command):
tokens = iter(command)
executable = cls._format_token(next(tokens))
param_and_values, next_token = cls._consume_parameter_and_values(
tokens)
if param_and_values:
indentation = ' ' * len(executable)
elif next_token:
action = next_token
param_and_values, next_token = cls._consume_parameter_and_values(
tokens)
param_and_values.insert(0, action)
indentation = ' ' * (len(executable) + 1 + len(action))
yield '\t{} {}{}'.format(executable, ' '.join(
cls._format_token(token) for token in param_and_values),
' \\' if next_token else '')
while next_token:
param_and_values, next_token = cls._consume_parameter_and_values(
tokens, parameter=next_token)
yield '\t{} {}{}'.format(indentation, ' '.join(
cls._format_token(token) for token in param_and_values),
' \\' if next_token else '')
@staticmethod
def _consume_parameter_and_values(tokens, parameter=None):
values = []
token = None
for token in tokens:
if isinstance(token, str) and token.startswith('-'):
if parameter is None: # rename to "argument"?
parameter = token
token = None
else:
break # the next parameter encountered
else:
if parameter is None:
break
else:
values.append(token)
token = None
return ([parameter] if parameter else []) + values, token
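    # E.g. (hypothetical tokens): iter(['-o', 'out.txt', '-v']) consumes
    # (['-o', 'out.txt'], '-v'): the values for '-o' plus the next parameter
    # token, which the caller feeds back in as `parameter`.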
    # TODO: probably merge with the method below (ShellPath will need special
    # handling, because that is exactly how programs end up in a command)
@staticmethod
def _get_filename(obj):
if hasattr(obj, 'path'):
return obj.path.name
elif hasattr(obj, 'name'):
return obj.name
else:
return str(obj)
    # TODO: this looks a bit like a duplicate of _get_filename above
@staticmethod
def _format_token(token):
        # NOTE: going by the extension of executable files (*.py) does not
        #       really work, because there are also system paths
        #       ("rft-annotate") - it will simply be better if read_args (no,
        #       this very function, surely?) checks whether the "path" is
        #       executable, that is the simplest approach (and since all the
        #       programs are linked into the working directory, it is enough
        #       to prefix the "binaries" with ./ and the other files with
        #       nothing)
        # NOTE: of course, most of the time those files will not even exist…
if isinstance(toke | n, ShellPath):
return str(token)
elif hasattr( | token, 'path'):
return token.path.name # str(token.path)?
elif isinstance(token, str):
return token
else:
raise ValueError(token)
|
androportal/f-droid-fdroidserver | setup.py | Python | agpl-3.0 | 443 | 0.009029 | #!/usr/bin/python
import glob
from distutils.core import setup
setup(name='FDroidServer',
version='0.1',
descriptio | n='F-Droid Server Tools',
author='The F-Droid Project',
author_email='admin@f-droid.org',
url='http://f-droid.org',
packages=['fdroidserver'],
scripts=['fdroid'],
data_files = [('', ['COPYING', 'config.sample.py']),
                    ('docs', glob.glob('docs/*.texi'))  # distutils does not expand wildcards
| ]
)
|
mugurrus/eve | examples/security/sha1-hmac.py | Python | bsd-3-clause | 1,400 | 0 | # -*- coding: utf-8 -*-
"""
Auth-SHA1/HMAC
~~~~~~~~~~~~~~
Securing an Eve-powered API with Basic Authentication (RFC2617).
This script assumes that user accounts are stored in a MongoDB collection
('accounts'), and that passwords are stored as SHA1 | /HMAC hashes. All API
resources/methods will be secured unless they are made explicitly public
(by fiddling with some settings you can open one or mor | e resources and/or
methods to public access -see docs).
Since we are using werkzeug we don't need any extra import (werkzeug being
one of Flask/Eve prerequisites).
Checkout Eve at https://github.com/nicolaiarocci/eve
This snippet by Nicola Iarocci can be used freely for anything you like.
Consider it public domain.
"""
from eve import Eve
from eve.auth import BasicAuth
from werkzeug.security import check_password_hash
from settings_security import SETTINGS
class Sha1Auth(BasicAuth):
def check_auth(self, username, password, allowed_roles, resource, method):
# use Eve's own db driver; no additional connections/resources are used
accounts = app.data.driver.db['accounts']
account = accounts.find_one({'username': username})
return account and \
check_password_hash(account['password'], password)
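# Minimal seeding sketch for a compatible account (assumed pymongo session;
# in the old werkzeug versions this snippet targets,
# generate_password_hash(..., method='sha1') emits the salted SHA1/HMAC
# format that check_password_hash verifies):
#   from werkzeug.security import generate_password_hash
#   accounts.insert({'username': 'admin',
#                    'password': generate_password_hash('secret', method='sha1')})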
if __name__ == '__main__':
app = Eve(auth=Sha1Auth, settings=SETTINGS)
app.run()
|
antoinecarme/pyaf | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_MovingMedian_Seasonal_MonthOfYear_LSTM.py | Python | bsd-3-clause | 169 | 0.047337 | import | tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['MovingMedian'] , ['Seasonal_MonthOfYear'] , ['LSTM'] ) | ; |
civet-software/CIVET-Django | djcivet_site/djciv_data/migrations/0001_initial.py | Python | mit | 1,089 | 0.002755 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_ke | y=True)),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created | =True, primary_key=True)),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name=b'date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(to='djciv_data.Question'),
),
]
|
mooniak/hindkit | hindkit/AFDKOPython/generate_designspace.py | Python | mit | 1,566 | 0.019157 | #! AFDKOPython
import sys, pickle, os
from mutatorMath.ufo.document import DesignSpaceDocumentWriter
def main():
family = pickle.load(sys.stdin)
generate_designspace(family, 'font.designspace')
def generate_designspace(family, path):
def normalize_path(path):
return os.path.join(family['working_directory'], path)
doc = DesignSpaceDocumentWriter(normalize_path(path))
for i, master in enumerate(family['masters']):
doc.addSource(
path = normalize_path(master['path']),
name = 'master-' + master['name'],
location = {'weight': master['interpolation_value']},
copyLib = True if i == 0 else False,
copyGroups = True if i == 0 else False,
| copyInfo = True if i == 0 else False,
# muteInfo = False,
# muteKerning = False,
# mutedGlyphNames = None,
)
for style in family['styles']:
doc.startInstance(
name = 'instance-' + style['name'],
location = {'weight': style['interpolation_value']},
familyName = family['output_name'],
styleName = style['name'],
| fileName = normalize_path(style['path']),
postScriptFontName = style['output_full_name_postscript'],
# styleMapFamilyName = None,
# styleMapStyleName = None,
)
doc.writeInfo()
if family['has_kerning']:
doc.writeKerning()
doc.endInstance()
doc.save()
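# Illustrative sketch of the input generate_designspace expects (inferred from
# the lookups above; the concrete values are hypothetical):
#
#   family = {
#       'working_directory': '/tmp/build',
#       'output_name': 'Example Sans',
#       'has_kerning': True,
#       'masters': [
#           {'path': 'masters/Light.ufo', 'name': 'Light',
#            'interpolation_value': 0},
#       ],
#       'styles': [
#           {'path': 'instances/Regular.ufo', 'name': 'Regular',
#            'interpolation_value': 400,
#            'output_full_name_postscript': 'ExampleSans-Regular'},
#       ],
#   }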
if __name__ == '__main__':
main()
|
teonlamont/mne-python | mne/io/bti/tests/test_bti.py | Python | bsd-3-clause | 12,138 | 0 | from __future__ import print_function
# Authors: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
from functools import reduce, partial
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
import pytest
from mne.datasets import testing
from mne.io import read_raw_fif, read_raw_bti
from mne.io.bti.bti import (_read_config, _process_bti_headshape,
_read_bti_header, _get_bti_dev_t,
_correct_trans, _get_bti_info)
from mne.io.tests.test_raw import _test_raw_reader
from mne.tests.common import assert_dig_allclose
from mne.io.pick import pick_info
from mne.io.constants import FIFF
from mne import pick_types
from mne.utils import run_tests_if_main
from mne.transforms import Transform, combine_transforms, invert_transform
from mne.externals import six
base_dir = op.join(op.abspath(op.dirname(__file__)), 'data')
archs = 'linux', 'solaris'
pdf_fnames = [op.join(base_dir, 'test_pdf_%s' % a) for a in archs]
config_fnames = [op.join(base_dir, 'test_config_%s' % a) for a in archs]
hs_fnames = [op.join(base_dir, 'test_hs_%s' % a) for a in archs]
exported_fnames = [op.join(base_dir, 'exported4D_%s_raw.fif' % a)
for a in archs]
tmp_raw_fname = op.join(base_dir, 'tmp_raw.fif')
fname_2500 = op.join(testing.data_path(download=False), 'BTi', 'erm_HFH',
'c,rfDC')
# the 4D exporter doesn't export all channels, so we confine our comparison
NCH = 248
@testing.requires_testing_data
def test_read_2500():
"""Test reading data from 2500 system."""
_test_raw_reader(read_raw_bti, pdf_fname=fname_2500, head_shape_fname=None)
def test_read_config():
"""Test read bti config file."""
# for config in config_fname, config_solaris_fname:
for config in config_fnames:
cfg = _read_config(config)
assert all('unknown' not in block.lower() and block != ''
for block in cfg['user_blocks'])
def test_crop_append():
"""Test crop and append raw."""
| raw = _test_raw_reader(
read_raw_bti, pdf_fname=pdf_fnames[0],
config_fname=config_fnames[0], head_shape_fname=hs_fnames[0])
y, t = raw[:]
t0, t1 = 0.25 * t[-1], 0.75 * t[-1]
mask = (t0 <= t) * (t <= t1)
raw_ = raw.copy().crop(t | 0, t1)
y_, _ = raw_[:]
assert (y_.shape[1] == mask.sum())
assert (y_.shape[0] == y.shape[0])
def test_transforms():
"""Test transformations."""
bti_trans = (0.0, 0.02, 0.11)
bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans))
for pdf, config, hs, in zip(pdf_fnames, config_fnames, hs_fnames):
raw = read_raw_bti(pdf, config, hs, preload=False)
dev_ctf_t = raw.info['dev_ctf_t']
dev_head_t_old = raw.info['dev_head_t']
ctf_head_t = raw.info['ctf_head_t']
# 1) get BTI->Neuromag
bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans))
# 2) get Neuromag->BTI head
t = combine_transforms(invert_transform(bti_dev_t), dev_ctf_t,
'meg', 'ctf_head')
# 3) get Neuromag->head
dev_head_t_new = combine_transforms(t, ctf_head_t, 'meg', 'head')
assert_array_equal(dev_head_t_new['trans'], dev_head_t_old['trans'])
@pytest.mark.slowtest
def test_raw():
"""Test bti conversion to Raw object."""
for pdf, config, hs, exported in zip(pdf_fnames, config_fnames, hs_fnames,
exported_fnames):
# rx = 2 if 'linux' in pdf else 0
pytest.raises(ValueError, read_raw_bti, pdf, 'eggs', preload=False)
pytest.raises(ValueError, read_raw_bti, pdf, config, 'spam',
preload=False)
if op.exists(tmp_raw_fname):
os.remove(tmp_raw_fname)
ex = read_raw_fif(exported, preload=True)
ra = read_raw_bti(pdf, config, hs, preload=False)
assert ('RawBTi' in repr(ra))
assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH])
assert_array_almost_equal(ex.info['dev_head_t']['trans'],
ra.info['dev_head_t']['trans'], 7)
assert len(ex.info['dig']) in (3563, 5154)
assert_dig_allclose(ex.info, ra.info, limit=100)
coil1, coil2 = [np.concatenate([d['loc'].flatten()
for d in r_.info['chs'][:NCH]])
for r_ in (ra, ex)]
assert_array_almost_equal(coil1, coil2, 7)
loc1, loc2 = [np.concatenate([d['loc'].flatten()
for d in r_.info['chs'][:NCH]])
for r_ in (ra, ex)]
assert_allclose(loc1, loc2)
assert_allclose(ra[:NCH][0], ex[:NCH][0])
assert_array_equal([c['range'] for c in ra.info['chs'][:NCH]],
[c['range'] for c in ex.info['chs'][:NCH]])
assert_array_equal([c['cal'] for c in ra.info['chs'][:NCH]],
[c['cal'] for c in ex.info['chs'][:NCH]])
assert_array_equal(ra._cals[:NCH], ex._cals[:NCH])
# check our transforms
for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
if ex.info[key] is None:
pass
else:
assert (ra.info[key] is not None)
for ent in ('to', 'from', 'trans'):
assert_allclose(ex.info[key][ent],
ra.info[key][ent])
ra.save(tmp_raw_fname)
re = read_raw_fif(tmp_raw_fname)
print(re)
for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
assert (isinstance(re.info[key], dict))
this_t = re.info[key]['trans']
assert_equal(this_t.shape, (4, 4))
            # check that the matrix is not the identity
assert (not np.allclose(this_t, np.eye(4)))
os.remove(tmp_raw_fname)
def test_info_no_rename_no_reorder_no_pdf():
"""Test private renaming, reordering and partial construction option."""
for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
info, bti_info = _get_bti_info(
pdf_fname=pdf, config_fname=config, head_shape_fname=hs,
rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
ecg_ch='E31', eog_ch=('E63', 'E64'),
rename_channels=False, sort_by_ch_name=False)
info2, bti_info = _get_bti_info(
pdf_fname=None, config_fname=config, head_shape_fname=hs,
rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
ecg_ch='E31', eog_ch=('E63', 'E64'),
rename_channels=False, sort_by_ch_name=False)
assert_equal(info['ch_names'],
[ch['ch_name'] for ch in info['chs']])
assert_equal([n for n in info['ch_names'] if n.startswith('A')][:5],
['A22', 'A2', 'A104', 'A241', 'A138'])
assert_equal([n for n in info['ch_names'] if n.startswith('A')][-5:],
['A133', 'A158', 'A44', 'A134', 'A216'])
info = pick_info(info, pick_types(info, meg=True, stim=True,
resp=True))
info2 = pick_info(info2, pick_types(info2, meg=True, stim=True,
resp=True))
assert (info['sfreq'] is not None)
assert (info['lowpass'] is not None)
assert (info['highpass'] is not None)
assert (info['meas_date'] is not None)
assert_equal(info2['sfreq'], None)
assert_equal(info2['lowpass'], None)
assert_equal(info2['highpass'], None)
assert_equal(info2['meas_date'], None)
assert_equal(info['ch_names'], info2['ch_names'])
assert_equal(info['ch_names'], info2['ch_names'])
for key in ['dev_ctf_t', 'dev_head_t', 'ctf_head_t']:
assert_array_equal(info[key]['trans'], info2[key]['trans'])
assert_array_equal(
np.array([ch['loc'] for ch in info['chs']]),
np.array([ch['loc'] for ch in info2['chs']]))
# just check reading data | corne |
brain0/archweb | releng/migrations/0003_release_populate_last_modified.py | Python | gpl-2.0 | 494 | 0.006073 | # -*- coding: utf-8 -*-
f | rom __future__ import unicode_literals
from django.db import models, migrations
def forwards(apps, schema_editor):
Release = apps.get_model('releng', 'Release')
Release.objects.update(last_modified=models.F('created'))
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
| ('releng', '0002_release_last_modified'),
]
operations = [
migrations.RunPython(forwards, backwards)
]
|
makinacorpus/ionyweb | ionyweb/page/admin.py | Python | bsd-3-clause | 158 | 0 | # -*- | coding: utf-8 -*-
from django.contrib import admin
from ionyweb.page.models import Page, Layout
| admin.site.register(Page)
admin.site.register(Layout)
|
Curahelper/Cura | plugins/UltimakerMachineActions/UM2UpgradeSelection.py | Python | agpl-3.0 | 3,052 | 0.006881 | # Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from UM.Settings.ContainerRegistry import Con | tainerRegistry
from UM.Settings.InstanceContainer import InstanceContainer
from cura.MachineAction import MachineAction
from PyQt5.QtCore import pyqtSlot, pyqtSignal, pyqtProperty
from UM.i18n import i18nCatalog
from UM.Application import Application
from UM.Util import parseBool
catalog = i | 18nCatalog("cura")
import UM.Settings.InstanceContainer
## The Ultimaker 2 can have a few revisions & upgrades.
class UM2UpgradeSelection(MachineAction):
def __init__(self):
super().__init__("UM2UpgradeSelection", catalog.i18nc("@action", "Select upgrades"))
self._qml_url = "UM2UpgradeSelectionMachineAction.qml"
self._container_registry = ContainerRegistry.getInstance()
def _reset(self):
self.hasVariantsChanged.emit()
hasVariantsChanged = pyqtSignal()
@pyqtProperty(bool, notify = hasVariantsChanged)
def hasVariants(self):
global_container_stack = Application.getInstance().getGlobalContainerStack()
if global_container_stack:
return parseBool(global_container_stack.getMetaDataEntry("has_variants", "false"))
@pyqtSlot(bool)
def setHasVariants(self, has_variants = True):
global_container_stack = Application.getInstance().getGlobalContainerStack()
if global_container_stack:
variant_container = global_container_stack.variant
variant_index = global_container_stack.getContainerIndex(variant_container)
if has_variants:
if "has_variants" in global_container_stack.getMetaData():
global_container_stack.setMetaDataEntry("has_variants", True)
else:
global_container_stack.addMetaDataEntry("has_variants", True)
# Set the variant container to a sane default
empty_container = ContainerRegistry.getInstance().getEmptyInstanceContainer()
if type(variant_container) == type(empty_container):
search_criteria = { "type": "variant", "definition": "ultimaker2", "id": "*0.4*" }
containers = self._container_registry.findInstanceContainers(**search_criteria)
if containers:
global_container_stack.variant = containers[0]
else:
# The metadata entry is stored in an ini, and ini files are parsed as strings only.
# Because any non-empty string evaluates to a boolean True, we have to remove the entry to make it False.
if "has_variants" in global_container_stack.getMetaData():
global_container_stack.removeMetaDataEntry("has_variants")
# Set the variant container to an empty variant
global_container_stack.variant = ContainerRegistry.getInstance().getEmptyInstanceContainer()
Application.getInstance().globalContainerStackChanged.emit()
|
alfredoavanzosc/odoo-addons | stock_orderpoint_filter/models/__init__.py | Python | agpl-3.0 | 164 | 0 | # -*- coding: u | tf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import procurement_orde | r
|
BurtBiel/azure-cli | src/command_modules/azure-cli-resource/azure/cli/command_modules/resource/tests/test_api_check.py | Python | mit | 2,871 | 0.007315 | #---------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
import unittest
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
# pylint: disable=line-too-long
from azure.cli.command_modules.resource._validators import (validate_resource_type,
validate_parent,
_resolve_api_version as resolve_api_version)
class TestApiCheck(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_resolve_api_provider_backup(self):
""" Verifies provider is used as backup if api-version not specified. """
resource_type = validate_resource_type('Mock/test')
self.assertEqual(resolve_api_version(self._get_mock_client(), resource_type), "2016-01-01")
def test_resolve_api_provider_with_parent_backup(self):
""" Verifies provider (with parent) is used as backup if api-version not specified. """
resource_type = validate_resource_type('Mock/bar')
parent = validate_parent('foo/testfoo123')
self.assertEqual(
resolve_api_version(self._get_mock_client(), resource_type, parent),
"1999-01-01"
)
def test_resolve_api_all_previews(self):
""" Verifies most recent preview version returned only if there are no non-preview versions. """
resource_type = validate_resource_type('Mock/preview')
self.assertEqual(
resolve_api_version(self._get_mock_client(), resource_type),
"2005-01-01-preview"
)
def _get_mock_client(self):
| client = MagicMock()
provider = MagicMock()
provider.resource_types = [
self._get_mock_resource_type('skip', ['2000-01-01-preview', '2000-01-01']),
self._get_mock_resource_type('test', ['2016-01-01-preview', '2016-01-01']),
self._get_mock_resource_type('foo/bar', ['1999-01-01-preview', '1999-01-01']),
self._get_mock_resource_type('preview', ['2005-01-01-preview', '2004-01-01-preview'])
]
client.provide | rs.get.return_value = provider
return client
def _get_mock_resource_type(self, name, api_versions): #pylint: disable=no-self-use
rt = MagicMock()
rt.resource_type = name
rt.api_versions = api_versions
return rt
if __name__ == '__main__':
unittest.main()
|
PatteWi/pythonwetter | executables/mailsend.py | Python | mit | 513 | 0.003899 | fr | om boto.ses import SESConnection
import os
def sendmail(name, comment):
source = "patte.wilhelm@googlemail.com"
subject = "Kommentar eingegangen"
body = 'Es wurde ein neues Wetter bewertet. Von: ' + name + ': ' + comment
to_addresses = ["patte.wilhelm@googlemail.com"]
connection = SESConnection(aws_access_key_id=os.environ['AWS_ACCESS_KEY'],
aws_secret_access_key=os.environ['AWS_SECRET_KEY'])
| connection.send_email(source, subject, body, to_addresses) |
lefteye/superroutingchioce | model.py | Python | gpl-2.0 | 2,888 | 0.026518 | # -*- coding:UTF-8 -*-
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import LSTM,Flatten
import numpy as np
import loadpkl
(X_train, y_train),(X_test, y_test) = loadpkl.load_data()
#print "hello"
X_train = np.array(X_train)
y_train = np.array(y_train)
print X_train.shape
print y_train.shape
t0=X_train.shape[0]/300
#print t0
X_train = X_train.reshape(t0,50,6) # number of training-set rows
y_train = y_train.reshape(t0,50)
print X_train.shape
print y_train.shape
X_test = np.array(X_test)
y_test = np.array(y_test)
t1=X_test.shape[0]/300
print t1
X_test = X_test.reshape(t1,50,6) # number of test-set rows
y_test = y_test.reshape(t1,50)
#print (X_train, y_train)
#print (X_test, y_test)
#print X_train
#print y_train
Y | _train = y_train
#Y_test = y_test
#model = Sequential()
#model.add(Dense(200, input_dim = 30))
#model.add(Activation('tanh'))
#model.add(Dense(100))
#model.add(Activation('sigmoid'))
#model.add(Dense( | 50))
#model.add(Activation('tanh'))
#model.add(Dense(30))
#model.add(Activation('tanh'))
#model.add(Dense(20))
#model.add(Activation('tanh'))
#model.add(Dense(6))
#model.add(Activation('softmax'))
#model.compile(optimizer = 'rmsprop', loss = 'mse', metrics=['accuracy'])
#model.fit(X_train, Y_train, batch_size=10, nb_epoch=100, verbose=1, validation_split=0.2, shuffle=True)
#prob = model.predict(X_test)
#prob=model.evaluate(X_test, Y_test, batch_size=32, verbose=1)
#print prob
model = Sequential()
model.add(LSTM(256, input_shape=(50,6),return_sequences=True))# 32
#model.add(Dropout(0.5))
model.add(LSTM(128,return_sequences=True))
#model.add(Dropout(0.5))
model.add(LSTM(64,return_sequences=True))
model.add(Dropout(0.5))
model.add(LSTM(50,return_sequences=True))
#model.add(Flatten())
#model.add(Dense(50))
model.compile(optimizer = 'rmsprop', loss = 'mse', metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=100, nb_epoch=10, verbose=1, validation_split=0.2, shuffle=True)
#json_string = model.to_json()
#open('my_model_architecture.json', 'w').write(json_string)
#model.save_weights('my_model_weights.h5')
prob = model.predict(X_test)
#n=0
#for i in prob:
# if n<100:
# print i
# n=n+1
# else:break
print prob
#model = Sequential()
#model.add(LSTM(32, input_shape=(6,5),return_sequences=True))
#model.add(Activation('sigmoid'))
#model.add(LSTM(16,return_sequences=True))
#model.add(Activation('sigmoid'))
#model.add(LSTM(8,return_sequences=True))
#model.add(Activation('sigmoid'))
#model.add(LSTM(6))
#model.add(Activation('tanh'))
#model.compile(optimizer = 'rmsprop', loss = 'mse', metrics=['accuracy'])
#model.fit(X_train, Y_train, batch_size=100, nb_epoch=100, verbose=1, validation_split=0.2, shuffle=True)
#prob = model.predict(X_test)
#n=0
#for i in prob:
# if n<100:
# print i
# n=n+1
# else:break
#print prob
|
JohnGriffiths/nipype | nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py | Python | bsd-3-clause | 858 | 0.009324 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.algorithms.misc import CalculateNormalizedMoments
def test_CalculateNormalizedMoments_inputs():
input_map = dict(moment=dict(mandatory=True,
),
timeseries_file=dict(mandatory=True,
),
)
inputs = CalculateNormalizedMoments.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_CalculateNormalizedMoments_outputs():
output_map = dict(moments=dict(),
)
outputs = CalculateNorm | alizedMoments.output_spec()
for key, metadata in output_map.items():
for met | akey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
honzajavorek/tipi | tipi/langs/base.py | Python | mit | 2,966 | 0 | # -*- coding: utf-8 -*- #
from __future__ import unicode_literals
"""
Basic typography rules
Most regular expressions inspired by
`texytypo.py <https://gist.github.com/msgre/3805872>`, which is a Python port
of the typography rules from the `Texy! <https://github.com/dg/texy/>`_ project.
"""
import re
__all__ = ('base_replacements',)
ch = r'A-Za-z\u00C0-\u02FF\u0370-\u1EFF'
base_replacements = (
# ellipsis ...
(
re.compile(r'(?<![.\u2026])\.{3,4}(?![.\u2026])', re.M | re.U),
'\u2026'
),
# en dash 123-123
(
re.compile(r'(?<=[\d ])-(?=[\d ]|$)'),
'\u2013'
),
# en dash alphanum--alphanum
(
re.compile(r'(?<=[^!*+,/:;<=>@\\\\_|-])--(?=[^!*+,/:;<=>@\\\\_|-])'),
'\u2013'
),
# en dash ,-
(
re.compile(r',-'),
",\u2013"
),
# em dash ---
(
re.compile(r' --- '),
'\u00a0\u2014 '
),
# before dash (dash stays at line end)
(
re.compile(r' ([\u2013\u2014])', re.U),
'\u00a0\\1'
),
# left right arrow <-->
(
re.compile(r' <-{1,2}> '),
' \u2194 '
),
# right arrow -->
(
re.compile(r' ?-{1,}> '),
' \u2192 '
),
# left arrow <--
(
re.compile(r' <-{1,} ?'),
' \u2190 '
),
# right arrow ==>
(
re.compile(r' ?={1,}> '),
' \u21d2 '
),
# +-
(
re.compile(r'\+-'),
'\u00b1'
),
# dimension sign 123 x 123...
(
re.compile(r'(\d+) x (?=\d)'),
'\\1 \u00d7 '
),
# dimension sign 123x
(
re.compile(r'(?<=\d)x(?= |,|.|$)', re.M),
'\u00d7'
),
# trademark (TM)
(
re.compile(r'((?<=\S)|(?<=\S ))\(TM\)', re.I),
'\u2122'
),
# registered (R)
(
re.compile(r'((?<=\S)|(?<=\S ))\(R\)', re.I),
'\u00ae'
),
# copyright (C)
(
re.compile(r'\(C\)((?=\S)|(?= \S))', re.I),
'\u00a9'
),
# Euro (EUR)
(
re.compile(r'\(EUR\)'),
'\u20ac'
),
# (phone) number 1 123 123 123...
(
re.compile(r'(\d) (?=\d{3})'),
'\\1\u00a0'
),
# space before last short word
(
re.compile(
r'(?<=.{50})\s+(?=[\x17-\x1F]*\S{1,6}[\x17-\x1F]*$)',
re.S | re.U
),
'\u00a0'
),
| # nbsp space between number (optionally followed by dot) and word, symbol,
    # punctuation, currency symbol
(
re.compile(
(r'(?<= |\.|,|-|\+|\x16|\()([\x17-\x1F]*\d+\.?[\x17-\x1F]*)\s+'
r'(?=[\x17-\x1F]*[%{0}\u00B0-\u00be\u2020-\u214f])').format(ch),
re.M | re.U
),
'\\1\u00a0',
),
(
re.compile(
(r'(?<=\d\u00A0)([\x17-\x1F]*\d+\.?[\x17 | -\x1F]*)\s+'
r'(?=[\x17-\x1F]*[%{0}\u00B0-\u00be\u2020-\u214f])').format(ch),
re.M | re.U
),
'\\1\u00a0'
),
)
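# Minimal usage sketch (added illustration; the helper name is hypothetical and
# not part of the original module): the table above is a sequence of
# (compiled pattern, replacement) pairs, so applying it is a simple fold over
# re.sub.
def _apply_base_replacements(text):
    # Run every typography rule over the text, in declaration order.
    for pattern, replacement in base_replacements:
        text = pattern.sub(replacement, text)
    return text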
|
dramatis/dramatis | setup.py | Python | mit | 633 | 0.006319 | from distutils.core import setup
s | etup( name='dramatis',
version='0.1.1',
author='Steven Parkes',
author_email='smparkes@smparkes.net',
url='http://dramatis.mischance.net',
description="an actor library for ruby and python",
package_dir = {'':'lib'},
packages=[
'dramatis',
'dramatis.error',
'dramatis.future_value',
'dramatis.actor',
'dramatis.actor.name',
'dramatis.runtime',
| 'dramatis.runtime.actor',
'dramatis.runtime.continuation',
],
)
|
webus/libthumbor | tests/testproj/settings.py | Python | mit | 2,850 | 0.002105 | # Django settings for testproj project.
import logging
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_SUPPORTS_TRANSACTIONS = True
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'test.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'r2+#7pwvtvou#d_d6*ftt+pud^%s6vl-+2duag37x@xxy@$yu^'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'testproj | .urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
)
logging.basicConfig(level=logging.DEBUG)
THUMBOR | _SECURITY_KEY = 'my-security-key'
THUMBOR_SERVER = 'http://localhost:8888/'
|
plotly/plotly.py | packages/python/plotly/plotly/validators/treemap/marker/colorbar/_showticklabels.py | Python | mit | 477 | 0 | import _plotly_utils.basevalidators
class ShowticklabelsValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name="showticklabels",
parent_name="treemap.m | arker.colorbar",
**kwargs
):
super(ShowticklabelsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
| edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs
)
|
Parbhat/cookiecutter-django-foundation | hooks/post_gen_project.py | Python | bsd-3-clause | 10,586 | 0.000756 | """
NOTE:
the below code is to be maintained Python 2.x-compatible
as the whole Cookiecutter Django project initialization
can potentially be run in Python 2.x environment
(at least so we presume in `pre_gen_project.py`).
TODO: ? restrict Cookiecutter Django project initialization to Python 3.x environments only
"""
from __future__ import print_function
import os
import random
import shutil
import string
try:
# Inspired by
# https://github.com/django/django/blob/master/django/utils/crypto.py
random = random.SystemRandom()
using_sysrandom = True
except NotImplementedError:
using_sysrandom = False
TERMINATOR = "\x1b[0m"
WARNING = "\x1b[1;33m [WARNING]: "
INFO = "\x1b[1;33m [INFO]: "
HINT = "\x1b[3;33m"
SUCCESS = "\x1b[1;32m [SUCCESS]: "
DEBUG_VALUE = "debug"
def remove_open_source_files():
file_names = ["CONTRIBUTORS.txt", "LICENSE"]
for file_name in file_names:
os.remove(file_name)
def remove_gplv3_files():
file_names = ["COPYING"]
for file_name in file_names:
os.remove(file_name)
def remove_pycharm_files():
idea_dir_path = ".idea"
if os.path.exists(idea_dir_path):
shutil.rmtree(idea_dir_path)
docs_dir_path = os.path.join("docs", "pycharm")
if os.path.exists(docs_dir_path):
shutil.rmtree(docs_dir_path)
def remove_docker_files():
shutil.rmtree("compose")
file_names = ["local.yml", "production.yml", ".dockerignore"]
for file_name in file_names:
os.remove(file_name)
def remove_utility_files():
shutil.rmtree("utility")
def remove_heroku_files():
file_names = ["Procfile", "runtime.txt", "requirements.txt"]
for file_name in file_names:
if (
file_name == "requirements.txt"
and "{{ cookiecutter.use_travisci }}".lower() == "y"
):
# don't remove the file if we are using travisci but not using heroku
continue
os.remove(file_name)
def remove_gulp_files():
file_names = ["gulpfile.js"]
for file_name in file_names:
os.remove(file_name)
def remove_packagejson_file():
file_names = ["package.json"]
for file_name in file_names:
os.remove(file_name)
def remove_celery_files():
file_names = [
os.path.join("config", "celery_app.py"),
os.path.join("{{ cookiecutter.project_slug }}", "users", "tasks.py"),
os.path.join(
"{{ cookiecutter.project_slug }}", "users", "tests", "test_tasks.py"
),
]
for file_name in file_names:
os.remove(file_name)
def remove_dottravisyml_file():
os.remove(".travis.yml")
def append_to_project_gitignore(path):
gitignore_file_path = ".gitignore"
with open(gitignore_file_path, "a") as gitignore_file:
gitignore_file.write(path)
gitignore_file.write(os.linesep)
def generate_random_string(
length, using_digits=False, using_ascii_letters=False, using_punctuation=False
):
"""
Example:
        opting for a 50 symbol-long, [a-z][A-Z][0-9] string
        would yield log_2((26+26+10)^50) ~= 298 bit strength.
"""
if not using_sysrandom:
return None
symbols = []
if using_digits:
symbols += string.digits
if using_ascii_letters:
symbols += string.ascii_letters
if using_punctuation:
all_punctuation = set(string.punctuation)
# These symbols can cause issues in environment variables
unsuitable = {"'", '"', "\\", "$"}
suitable = all_punctuation.difference(unsuitable)
symbols += "".join(suitable)
return "".join([random.choice(symbols) for _ in range(length)])
def set_flag(file_path, flag, value=None, formatted=None, *args, **kwargs):
if value is None:
random_string = generate_random_string(*args, **kwargs)
if random_string is None:
print(
"We couldn't find a secure pseudo-random number generator on your system. "
"Please, make sure to manually {} later.".format(flag)
)
random_string = flag
if formatted i | s not None:
random_string = formatted.format(random_string)
value = random_string
with open(file_path, "r+") as f:
file_contents = f.read().replace(flag, value)
f.seek(0)
f.write(file_contents)
f.truncate()
return value
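# Illustrative behaviour (added note): with a file containing the line
# "SECRET=!!!SET DJANGO_SECRET_KEY!!!", calling
#   set_flag(path, "!!!SET DJANGO_SECRET_KEY!!!",
#            length=64, using_digits=True, using_ascii_letters=True)
# rewrites the file in place with a random 64-character value and returns it.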
def set_dj | ango_secret_key(file_path):
django_secret_key = set_flag(
file_path,
"!!!SET DJANGO_SECRET_KEY!!!",
length=64,
using_digits=True,
using_ascii_letters=True,
)
return django_secret_key
def set_django_admin_url(file_path):
django_admin_url = set_flag(
file_path,
"!!!SET DJANGO_ADMIN_URL!!!",
formatted="{}/",
length=32,
using_digits=True,
using_ascii_letters=True,
)
return django_admin_url
def generate_random_user():
return generate_random_string(length=32, using_ascii_letters=True)
def generate_postgres_user(debug=False):
return DEBUG_VALUE if debug else generate_random_user()
def set_postgres_user(file_path, value):
postgres_user = set_flag(file_path, "!!!SET POSTGRES_USER!!!", value=value)
return postgres_user
def set_postgres_password(file_path, value=None):
postgres_password = set_flag(
file_path,
"!!!SET POSTGRES_PASSWORD!!!",
value=value,
length=64,
using_digits=True,
using_ascii_letters=True,
)
return postgres_password
def set_celery_flower_user(file_path, value):
celery_flower_user = set_flag(
file_path, "!!!SET CELERY_FLOWER_USER!!!", value=value
)
return celery_flower_user
def set_celery_flower_password(file_path, value=None):
celery_flower_password = set_flag(
file_path,
"!!!SET CELERY_FLOWER_PASSWORD!!!",
value=value,
length=64,
using_digits=True,
using_ascii_letters=True,
)
return celery_flower_password
def append_to_gitignore_file(s):
with open(".gitignore", "a") as gitignore_file:
gitignore_file.write(s)
gitignore_file.write(os.linesep)
def set_flags_in_envs(postgres_user, celery_flower_user, debug=False):
local_django_envs_path = os.path.join(".envs", ".local", ".django")
production_django_envs_path = os.path.join(".envs", ".production", ".django")
local_postgres_envs_path = os.path.join(".envs", ".local", ".postgres")
production_postgres_envs_path = os.path.join(".envs", ".production", ".postgres")
set_django_secret_key(production_django_envs_path)
set_django_admin_url(production_django_envs_path)
set_postgres_user(local_postgres_envs_path, value=postgres_user)
set_postgres_password(
local_postgres_envs_path, value=DEBUG_VALUE if debug else None
)
set_postgres_user(production_postgres_envs_path, value=postgres_user)
set_postgres_password(
production_postgres_envs_path, value=DEBUG_VALUE if debug else None
)
set_celery_flower_user(local_django_envs_path, value=celery_flower_user)
set_celery_flower_password(
local_django_envs_path, value=DEBUG_VALUE if debug else None
)
set_celery_flower_user(production_django_envs_path, value=celery_flower_user)
set_celery_flower_password(
production_django_envs_path, value=DEBUG_VALUE if debug else None
)
def set_flags_in_settings_files():
set_django_secret_key(os.path.join("config", "settings", "local.py"))
set_django_secret_key(os.path.join("config", "settings", "test.py"))
def remove_envs_and_associated_files():
shutil.rmtree(".envs")
os.remove("merge_production_dotenvs_in_dotenv.py")
def remove_celery_compose_dirs():
shutil.rmtree(os.path.join("compose", "local", "django", "celery"))
shutil.rmtree(os.path.join("compose", "production", "django", "celery"))
def remove_node_dockerfile():
shutil.rmtree(os.path.join("compose", "local", "node"))
def remove_aws_dockerfile():
shutil.rmtree(os.path.join("compose", "production", "aws"))
def main():
debug = "{{ cookiecutter.debug }}".lower() == "y"
set_flags_ |
spennihana/h2o-3 | h2o-py/h2o/backend/cluster.py | Python | apache-2.0 | 11,458 | 0.003927 | # -*- encoding: utf-8 -*-
"""Information about the backend H2O cluster."""
from __future__ import division, print_function, absolute_import, unicode_literals
import sys
import time
import h2o
from h2o.exceptions import H2OConnectionError, H2OServerError
from h2o.display import H2ODisplay
from h2o.utils.compatibility import * # NOQA
from h2o.utils.typechecks import assert_is_type
from h2o.utils.shared_utils import get_human_readable_bytes, get_human_readable_time
class H2OCluster(object):
"""
Information about the backend H2O cluster.
This object is available from ``h2o.cluster()`` or ``h2o.connection().cluster``, and its purpose is to provide
basic information / manipulation methods for the underlying cluster.
"""
# If information is this many seconds old, it will be refreshed next time you call :meth:`status`.
REFRESH_INTERVAL = 1.0
def __init__(self):
"""Initialize new H2OCluster instance."""
self._props = {}
self._retrieved_at = None
@staticmethod
def from_kvs(keyvals):
"""
Create H2OCluster object from a list of key-value pairs.
TODO: This method should be moved into the base H2OResponse class.
"""
obj = H2OCluster()
obj._retrieved_at = time.time()
for k, v in keyvals:
if k in {"__meta", "_exclude_fields", "__schema"}: continue
if k in _cloud_v3_valid_keys:
obj._props[k] = v
else:
raise AttributeError("Attribute %s cannot be set on H2OCluster (= %r)" % (k, v))
return obj
@property
def skip_ticks(self):
return self._props.get("skip_ticks", None)
@property
def bad_nodes(self):
return self._props["bad_nodes"]
@property
def branch_name(self):
return self._props["branch_name"]
@property
def build_number(self):
return self._props["build_number"]
@property
def build_age(self):
# If the build age is unknown, then the cluster is so old it comes from "prehistoric" times when no
# build number reporting was done...
return self._props.get("build_age", "PREHISTORIC")
@property
def build_too_old(self):
# If the prop "build_too_old" wasn't reported by the server, then it's definitely too old :)
return self._props.get("build_too_old", True)
@property
def cloud_healthy(self):
return self._props["cloud_healthy"]
@property
def cloud_name(self):
return self._props["cloud_name"]
@property
def cloud_size(self):
return self._props["cloud_size"]
@property
def cloud_uptime_millis(self):
return self._props["cloud_uptime_millis"]
@property
def cloud_internal_timezone(self):
return self._props["cloud_internal_timezone"]
@property
def datafile_parser_timezone(self):
return self._props["datafile_parser_timezone"]
@property
def consensus(self):
return self._props["consensus"]
@property
def is_client(self):
return self._props["is_client"]
@property
def locked(self):
return self._props["locked"]
@property
def node_idx(self):
return self._props["node_idx"]
@property
def nodes(self):
return self._props["nodes"]
@property
def version(self):
return self._props["version"]
@property
def internal_security_enabled(self):
return self._props["internal_security_enabled"]
def node(self,node_idx):
"""
Get information about a particular node in an H2O cluster (node index is 0 based)
Information includes the following:
nthreads: Number of threads
pid: PID of current H2O process
mem_value_size: Data on Node memory
max_disk: Max disk
free_disk: Free d | isk
        open_fds: Open File Descriptors
swap_mem: Size of data on node's disk
tcps_active: Open TCP connections
num_cpus: Number of cpus
cpus_allowed: CPU's allowed
gflops: Linpack GFlops
fjthrds: F/J Thread count, by priority
        mem_bw: Memory bandwidth
fjqueue: F/J Task count, by priority
my_cpu_pct: System CPU percentage used by this H2O process in last interval
pojo_mem: Temp (non Data) memory
| num_keys: Number of local keys
ip_port: IP address and port in the form a.b.c.d:e
last_ping: Time (in msec) of last ping
rpcs_active: Active Remote Procedure Calls
max_mem: Maximum memory size for node
healthy: (now-last_ping)<HeartbeatThread.TIMEOUT
sys_load: System load; average #runnables/#cores
sys_cpu_pct: System CPU percentage used by everything in last interval
free_mem: Free heap
h2o: IP
:param node_idx: An int value indicating which node to extract information from
:returns: Dictionary containing node info
:examples:
>>>import h2o
>>>h2o.init()
>>>node_one = h2o.cluster().node(0)
>>>node_one["pid"] #Get PID for first node in H2O Cluster
"""
return self.nodes[node_idx]
def shutdown(self, prompt=False):
"""
Shut down the server.
This method checks if the H2O cluster is still running, and if it does shuts it down (via a REST API call).
:param prompt: A logical value indicating whether to prompt the user before shutting down the H2O server.
"""
if not self.is_running(): return
assert_is_type(prompt, bool)
if prompt:
question = "Are you sure you want to shutdown the H2O instance running at %s (Y/N)? " \
% h2o.connection().base_url
response = input(question) # works in Py2 & Py3 because redefined in h2o.utils.compatibility module
else:
response = "Y"
if response.lower() in {"y", "yes"}:
h2o.api("POST /3/Shutdown")
h2o.connection().close()
def is_running(self):
"""
Determine if the H2O cluster is running or not.
:returns: True if the cluster is up; False otherwise
"""
try:
if h2o.connection().local_server and not h2o.connection().local_server.is_running(): return False
h2o.api("GET /")
return True
except (H2OConnectionError, H2OServerError):
return False
def show_status(self, detailed=False):
"""
Print current cluster status information.
:param detailed: if True, then also print detailed information about each node.
"""
if self._retrieved_at + self.REFRESH_INTERVAL < time.time():
# Info is stale, need to refresh
new_info = h2o.api("GET /3/Cloud")
self._fill_from_h2ocluster(new_info)
ncpus = sum(node["num_cpus"] for node in self.nodes)
allowed_cpus = sum(node["cpus_allowed"] for node in self.nodes)
free_mem = sum(node["free_mem"] for node in self.nodes)
unhealthy_nodes = sum(not node["healthy"] for node in self.nodes)
status = "locked" if self.locked else "accepting new members"
if unhealthy_nodes == 0:
status += ", healthy"
else:
status += ", %d nodes are not healthy" % unhealthy_nodes
api_extensions = self.list_api_extensions()
H2ODisplay([
["H2O cluster uptime:", get_human_readable_time(self.cloud_uptime_millis)],
["H2O cluster timezone:", self.cloud_internal_timezone],
["H2O data parsing timezone:", self.datafile_parser_timezone],
["H2O cluster version:", self.version],
["H2O cluster version age:", "{} {}".format(self.build_age, ("!!!" if self.build_too_old else ""))],
["H2O cluster name:", self.cloud_name],
["H2O cluster total nodes:", self.cloud_size],
["H2O cluster free memory:", get_human_readable_bytes(free_mem)],
["H2O cluster total cores:", str(ncpus)],
["H2O cluster al |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/contrib/opt/python/training/drop_stale_gradient_optimizer.py | Python | bsd-2-clause | 4,333 | 0.003462 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper optimizer for checking and dropping stale gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_util
class DropStaleGradientOptimizer(optimizer.Optimizer):
"""Wrapper optimizer that checks and drops stale gradient.
This optimizer records the global step for each worker before computing
gradients and compares it with the global step at the time of applying the
gradients. If the difference is larger than a threshold, it will drop all
the computed gradients.
"""
def __init__(self,
opt,
staleness,
use_locking=False,
name="DropStaleGradient"):
"""Constructs a new DropStaleGradientOptimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
| gradients. Must be one of the Optimizer classes.
staleness: The maximum staleness allowed for the optimizer.
use_locking: If `True` use locks for clip update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "DropStaleGradient".
"""
super(DropStaleGradientOptimizer, self).__init__(use_locking, | name)
self._opt = opt
self._staleness = staleness
def compute_gradients(self, loss, *args, **kwargs):
# Record current global step for worker.
with ops.colocate_with(loss):
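      # The "+ 0" forces a read of the global step variable into a fresh
      # tensor, capturing this worker's step value before the gradients are
      # computed (descriptive note added for clarity).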
self._local_step = training_util.get_global_step() + 0
with ops.control_dependencies([self._local_step]):
loss = gen_array_ops.identity(loss)
return self._opt.compute_gradients(loss, *args, **kwargs)
def get_slot(self, *args, **kwargs):
return self._opt.get_slot(*args, **kwargs)
def get_slot_names(self, *args, **kwargs):
return self._opt.get_slot_names(*args, **kwargs)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
gradients = []
# Number of stale gradients.
stale_counter = variable_scope.get_variable(
"stale_counter", [],
initializer=init_ops.zeros_initializer(),
trainable=False)
def _AcceptGradientOp():
with ops.control_dependencies(
[self._opt.apply_gradients(
grads_and_vars, global_step=global_step, name=name)]):
return gen_array_ops.identity(0.0)
def _DropGradientOp():
return gen_array_ops.identity(1.0)
for grad_and_var in grads_and_vars:
grad = grad_and_var[0]
if isinstance(grad, ops.Tensor):
gradients.append(grad)
elif grad is not None:
gradients.append(grad.op)
with ops.control_dependencies(gradients), ops.colocate_with(global_step):
staleness = gen_array_ops.reshape(
global_step - self._local_step, shape=())
conditional_update = stale_counter.assign_add(control_flow_ops.cond(
gen_math_ops.less_equal(staleness, self._staleness),
_AcceptGradientOp, _DropGradientOp))
summary.scalar(
"Gradient staleness percentage",
stale_counter / (math_ops.cast(global_step + 1, dtypes.float32)))
return conditional_update
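# Hypothetical usage sketch (added illustration, not from the original module):
# wrap a plain optimizer so that gradients applied more than `staleness` steps
# after they were computed are silently dropped.
#
#   base_opt = tf.train.GradientDescentOptimizer(0.01)
#   opt = DropStaleGradientOptimizer(base_opt, staleness=16)
#   train_op = opt.minimize(loss, global_step=training_util.get_global_step())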
|
kalikaneko/leap_mail | src/leap/mail/imap/fields.py | Python | gpl-3.0 | 4,610 | 0.000217 | # -*- coding: utf-8 -*-
# fields.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Fields for Mailbox and Message.
"""
from leap.mail.imap.parser import MBoxParser
class WithMsgFields(object):
"""
Container class for class-attributes to be shared by
several message-related classes.
"""
# indexing
CONTENT_HASH_KEY = "chash"
PAYLOAD_HASH_KEY = "phash"
# Internal representation of Message
# flags doc
UID_KEY = "uid"
MBOX_KEY = "mbox"
SEEN_KEY = "seen"
DEL_KEY = "deleted"
RECENT_KEY = "recent"
FLAGS_KEY = "flags"
MULTIPART_KEY = "multi"
SIZE_KEY = "size"
# headers
HEADERS_KEY = "headers"
DATE_KEY = "date"
SUBJECT_KEY = "subject"
PARTS_MAP_KEY = "part_map"
BODY_KEY = "body" # link to phash of body
MSGID_KEY = "msgid"
# content
LINKED_FROM_KEY = "lkf" # XXX not implemented yet!
RAW_KEY = "raw"
CTYPE_KEY = "ctype"
# Mailbox specific keys
CLOSED_KEY = "closed"
CREATED_KEY = "created"
SUBSCRIBED_KEY = "subscribed"
RW_KEY = "rw"
LAST_UID_KEY = "lastuid"
RECENTFLAGS_KEY = "rct"
HDOCS_SET_KEY = "hdocset"
# Document Type, for indexing
TYPE_KEY = "type"
TYPE_MBOX_VAL = "mbox"
TYPE_FLAGS_VAL = "flags"
TYPE_HEADERS_VAL = "head"
TYPE_CONTENT_VAL = "cnt"
TYPE_RECENT_VAL = "rct"
TYPE_HDOCS_SET_VAL = "hdocset"
INBOX_VAL = "inbox"
# Flags in Mailbox and Message
SEEN_FLAG = "\\Seen"
RECENT_FLAG = "\\Recent"
ANSWERED_FLAG = "\\Answered"
FLAGGED_FLAG = "\\Flagged" # yo dawg
DELETED_FLAG = "\\Deleted"
DRAFT_FLAG = "\\Draft"
NOSELECT_FLAG = "\\Noselect"
LIST_FLAG = "List" # is this OK? (no \. ie, no system flag)
# Fields in mail object
SUBJECT_FIELD = "Subject"
DATE_FIELD = "Date"
# Index types
# --------------
TYPE_IDX = 'by-type'
TYPE_MBOX_IDX = 'by-type-and-mbox'
TYPE_MBOX_UID_IDX = 'by-type-and-mbox-and-uid'
TYPE_SUBS_IDX = 'by-type-and-subscribed'
TYPE_MSGID_IDX = 'by-type-and-message-id'
TYPE_MBOX_SEEN_IDX = 'by-type-and-mbox-and-seen'
TYPE_MBOX_RECT_IDX = 'by-type-and-mbox-and-recent'
TYPE_MBOX_DEL_IDX = 'by-type-and-mbox-and-deleted'
TYPE_MBOX_C_HASH_IDX = 'by-type-and-mbox-and-contenthash'
TYPE_C_HASH_IDX = 'by-type-and-contenthash'
TYPE_C_HASH_PART_IDX = 'by-type-and-contenthash-and-partnumber'
TYPE_P_HASH_IDX = 'by-type-and-payloadhash'
    # Tomas created the `re | cent and seen index`, but the semantics are not
    # quite correct, since the recent flag is volatile.
TYPE_MBOX_RECT_SEEN_IDX = 'by-type-and-mbox-and-recent-and-seen'
KTYPE = TYPE_KEY
MBOX_VAL = TYP | E_MBOX_VAL
CHASH_VAL = CONTENT_HASH_KEY
PHASH_VAL = PAYLOAD_HASH_KEY
INDEXES = {
# generic
TYPE_IDX: [KTYPE],
TYPE_MBOX_IDX: [KTYPE, MBOX_VAL],
TYPE_MBOX_UID_IDX: [KTYPE, MBOX_VAL, UID_KEY],
# mailboxes
TYPE_SUBS_IDX: [KTYPE, 'bool(subscribed)'],
# fdocs uniqueness
TYPE_MBOX_C_HASH_IDX: [KTYPE, MBOX_VAL, CHASH_VAL],
# headers doc - search by msgid.
TYPE_MSGID_IDX: [KTYPE, MSGID_KEY],
# content, headers doc
TYPE_C_HASH_IDX: [KTYPE, CHASH_VAL],
# attachment payload dedup
TYPE_P_HASH_IDX: [KTYPE, PHASH_VAL],
# messages
TYPE_MBOX_SEEN_IDX: [KTYPE, MBOX_VAL, 'bool(seen)'],
TYPE_MBOX_RECT_IDX: [KTYPE, MBOX_VAL, 'bool(recent)'],
TYPE_MBOX_DEL_IDX: [KTYPE, MBOX_VAL, 'bool(deleted)'],
TYPE_MBOX_RECT_SEEN_IDX: [KTYPE, MBOX_VAL,
'bool(recent)', 'bool(seen)'],
}
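    # Illustrative note (hypothetical API, not from this module): each entry
    # above maps an index name to the list of document fields it covers, in
    # the shape consumed by a u1db/Soledad-style call such as
    # db.create_index(index_name, *field_expressions).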
MBOX_KEY = MBOX_VAL
EMPTY_MBOX = {
TYPE_KEY: MBOX_KEY,
TYPE_MBOX_VAL: MBoxParser.INBOX_NAME,
SUBJECT_KEY: "",
FLAGS_KEY: [],
CLOSED_KEY: False,
SUBSCRIBED_KEY: False,
RW_KEY: 1,
LAST_UID_KEY: 0
}
fields = WithMsgFields # alias for convenience
|
timedata-org/expressy | expressy/ast_handlers/expressions.py | Python | mit | 1,421 | 0 | from . operators import operators
def Expr(node): # A container for an expression.
return (lambda x: x), [node.value]
def BinOp(node): # a + b
| return operators(node.op), [node.left, node.right]
def BoolOp(node): # a and b and c
return operators(node.op), node.values
def UnaryOp(node): # -a, not a, +a, ~a
return operators(node.op), [node.operand]
def Compare(node): # a < b < c > d
ops = [operators(o) for o in node.ops]
def compare(left, *values):
assert len(ops) == len(values)
for op, value in zip(ops, values):
if | not op(left, value):
return False
left = value
return True
return compare, [node.left] + node.comparators
def Call(node): # f(a, *b, **c)
if not node.keywords:
def call(caller, *args):
return caller(*args)
return call, [node.func] + node.args
arg_length = len(node.args)
kv = [(k.arg, k.value) for k in node.keywords]
keys, value_nodes = zip(*kv)
def call(caller, *args_values):
args, values = args_values[:arg_length], args_values[arg_length:]
return caller(*args, **dict(zip(keys, values)))
return call, [node.func] + node.args + list(value_nodes)
def IfExp(node): # a if b else c
def if_exp(body, test, orelse):
return body if test else orelse
return if_exp, [node.body, node.test, node.orelse]
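# Illustrative note on the handler contract (inferred from the functions above,
# not from project documentation): every handler returns a pair
# (fn, child_nodes); an evaluator is expected to evaluate the child AST nodes
# recursively and apply fn to the results.  For example:
#
#   import ast
#   node = ast.parse('1 < 2 < 3', mode='eval').body   # an ast.Compare
#   fn, children = Compare(node)
#   # evaluating children gives (1, 2, 3); fn(1, 2, 3) then yields True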
|
tintoy/seqlog | setup.py | Python | mit | 1,510 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'python_dateutil>=2.5.3',
'requests>=2.10.0',
'PyYAML>=3.11',
]
test_requirements = [
'pip>=8.1.2',
'bumpversion>=0.5.3',
'wheel>=0.29.0',
'watchdog>=0.8.3',
'flake8>=2.6.0',
'tox>=2.3.1',
'coverage>=4.1',
'Sphinx>=1.4.4',
'cryptography>=1.4',
'PyYAML>=3.11',
'pytest>=2.9.2',
'httmock>=1.2.5'
]
s | etup(
name='seqlog',
version='0.3.21a1',
description="SeqLog enables logging from Python to Seq.",
lon | g_description=readme + '\n\n' + history,
author="Adam Friedman",
author_email='tintoy@tintoy.io',
url='https://github.com/tintoy/seqlog',
packages=[
'seqlog',
],
package_dir={'seqlog':
'seqlog'},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='seqlog',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements
)
|
adngdb/socorro | socorro/processor/processor_2015.py | Python | mpl-2.0 | 11,998 | 0.000083 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""this file defines the method of converting a raw crash into a processed
crash. In this latest version, all transformations have been reimplemented
as sets of loadable rules. The rules are applied one at a time, each doing
some small part of the transformation process."""
import ujson
from configman import Namespace, RequiredConfig
from configman.dotdict import DotDict as OrderedDotDict
from configman.converters import (
str_to_python_object,
)
from socorro.lib.converters import str_to_classes_in_namespaces_converter
from socorro.lib.datetimeutil import utc_now
from socorro.lib.util import DotDict
# Rule sets are defined as lists of lists (or tuples). As they will be loaded
# from json, they will always come in as lists rather than tuples. Arguably,
# tuples may be more appropriate, but really, they can be anything iterable.
# The outermost sequence is a list of rule sets. There can be any number of
# them and can be organized at will. The example below shows an organization
# by processing stage: pre-processing the raw_crash, converter raw to
# processed, and post-processing the processed_crash.
# Each rule set is defined by five elements:
# rule name: any useful string
# tag: a categorization system, programmer defined system (for future)
# rule set class: the fully qualified name of the class that implements
# the rule application process. On the introduction of
# Processor2015, the only option is the one in the example.
#    action: the name of the rule set class method to run to apply the
#            rules (e.g. "apply_all_rules")
#    rule list: a comma delimited list of fully qualified class names that
#               implement the individual transformation rules. The API that
#               these classes must conform to is defined by the rule base class
#               socorro.lib.transform_rules.Rule
default_rule_set = [
[ # rules to change the internals of the raw crash
"raw_transform", # name of the rule
"processor.json_rewrite", # a tag in a dotted-form
"socorro.lib.transform_rules.TransformRuleSystem", # rule set class
"apply_all_rules", # rule set class method to apply rules
"" # comma delimited list of fully qualified rule class names
],
[ # rules to transform a raw crash into a processed crash
"raw_to_processed_transform",
"processer.raw_to_processed",
"socorro.lib.transform_rules.TransformRuleSystem",
"apply_all_rules",
""
],
[ # post processing of the processed crash
"processed_transform",
"processer.processed",
"socorro.lib.transform_rules.TransformRuleSystem",
"apply_all_rules",
""
],
]
# rules come into Socorro via Configman. Configman defines them as strings
# conveniently, a json module can be used to serialize and deserialize them.
default_rules_set_str = ujson.dumps(default_rule_set)
def rule_sets_from_string(rule_sets_as_string):
"""this configman converter takes a json file in the form of a string,
and converts it into rules sets for use in the processor. See the
default rule set above for the form."""
rule_sets = ujson.loads(rule_sets_as_string)
class ProcessorRuleSets(RequiredConfig):
# why do rules come in sets? Why not just have a big list of rules?
# rule sets are containers for rules with a similar purpose and
# execution mode. For example, there are rule sets for adjusting the
# raw_crash, transforming raw to processed, post processing the
# processed_crash and then all the different forms of classifiers.
# Some rule sets have different execution modes: run all the rules,
# run the rules until one fails, run the rules until one succeeds,
# etc.
required_config = Namespace()
names = []
for (name, tag, rule_set_class_str, action_str, default_rules_str) \
in rule_sets:
names.append(name)
required_config.namespace(name)
required_config[name].add_option(
name='tag',
doc='the lookup tag associated with this rule set',
default=tag
)
required_config[name].add_option(
name='rule_system_class',
default=rule_set_class_str,
doc='the fully qualified name of the rule system class',
from_string_converter=str_to_python_object,
likely_to_be_changed=True,
)
required_config[name].add_option(
name='action',
default=action_str,
doc=(
'the name of the rule set method to run to processes '
'these rules'
),
likely_to_be_changed=True,
)
required_config[name].add_option(
name='rules_list',
doc='a list of fully qualified class names for the rules',
default=default_rules_str,
from_string_converter=str_to_classes_in_namespaces_converter(
name_of_class_option='rule_class'
),
likely_to_be_changed=True,
)
@classmethod
def to_str(klass):
return "'%s'" % rule_sets_as_string
return ProcessorRuleSets
class Processor2015(RequiredConfig):
"""this class is a generalization of the Processor into a rule processing
framework. This class is suitable for use in the 'processor_app'
introducted in 2012."""
required_config = Namespace()
required_config.add_option(
name='rule_sets',
doc="a hierarchy of rules in json form",
default=default_rules_set_str,
from_string_converter=rule_sets_from_string,
likely_to_be_changed=True,
)
def __init__(self, config, quit_check_callback=None):
super(Processor2015, self).__init__()
self.config = config
# the quit checks are components of a system of callbacks used
# primarily by the TaskManager system. This is the system that
# controls the execution model. If the ThreadedTaskManager is in use,
# these callbacks just check the ThreadedTaskManager task manager's
# quit flag. If they detect a quit condition, they raise an exception
# that causes the thread to shut down. For the GreenletTaskMangager,
# using cooperative multitasking, the callbacks do the 'yield' to
# allow another green thread to take over.
# It is perfectly acceptable to hook into this callback system to
# accomplish any task that needs be done periodically.
if quit_check_callback:
self.quit_check = quit_check_callback
else:
self.quit_check = lambda: False
# here we instantiate the rule sets and their rules.
self.rule_system = OrderedDotDict()
for a_rule_set_name in config.rule_sets.names:
self.config.logger.debug(
'setting up rule set: %s',
a_rule_set_name
)
self.rule_system[a_rule_set_name] = (
config[a_rule_set_name].rule_system_class(
config[a_rule_set_name],
self.quit_check
)
)
def process_crash(self, raw_cr | ash, raw_dumps, processed_crash):
"""Take a raw_crash and its associated raw_dumps and return a
processed_crash.
"""
# processor_meta_data will be us | ed to ferry "inside information" to
        # transformation rules. Sometimes rules need a bit of extra
        # information about the transformation process itself.
processor_meta_data = DotDict()
processor_meta_data.processor_notes = [
self.config.processor_name,
self.__class__.__name__
]
processor_meta_data.quit_check = self.quit_check
processor_meta_data.process |
HydrelioxGitHub/home-assistant | homeassistant/components/scene/__init__.py | Python | apache-2.0 | 3,292 | 0 | """Allow users to set and activate scenes."""
import asyncio
import importlib
import logging
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_PLATFORM, SERVICE_TURN_ON)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.state import HASS_DOMAIN
DOMAIN = 'scene'
STATE = 'scening'
STATES = 'states'
def _hass_domain_validator(config):
"""Validate platform in config for homeassistant domain."""
if CONF_PLATFORM not in config:
config = {CONF_PLATFORM: HASS_DOMAIN, STATES: config}
return config
def _platform_validator(config):
"""Validate it is a valid platform."""
try:
platform = importlib.import_module(
'homeassistant.components.scene.{}'.format(
config[CONF_PLATFORM]))
except ImportError:
try:
platform = importlib.import_module(
'homeassistant.components.{}.scene'.format(
config[CONF_PLATFORM]))
except ImportError:
raise vol.Invalid('Invalid platform specified') from None
if not hasattr(platform, 'PLATFORM_SCHEMA'):
return config
return platform.PLATFORM_SCHEMA(config)
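# For example (illustrative): {'platform': 'hue'} is first resolved against the
# legacy location homeassistant.components.scene.hue and then against
# homeassistant.components.hue.scene; whichever module exists supplies the
# PLATFORM_SCHEMA used for validation.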
PLATFORM_SCHEMA = vol.Schema(
vol.All(
_hass_domain_validator, |
vol.Schema({
vol.Required(CONF_PLATFORM): str
}, extra=vol.ALLOW_EXTRA),
_platform_valida | tor
), extra=vol.ALLOW_EXTRA)
SCENE_SERVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
})
async def async_setup(hass, config):
"""Set up the scenes."""
logger = logging.getLogger(__name__)
component = hass.data[DOMAIN] = EntityComponent(logger, DOMAIN, hass)
await component.async_setup(config)
async def async_handle_scene_service(service):
"""Handle calls to the switch services."""
target_scenes = component.async_extract_from_service(service)
tasks = [scene.async_activate() for scene in target_scenes]
if tasks:
await asyncio.wait(tasks, loop=hass.loop)
hass.services.async_register(
DOMAIN, SERVICE_TURN_ON, async_handle_scene_service,
schema=SCENE_SERVICE_SCHEMA)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class Scene(Entity):
"""A scene is a group of entities and the states we want them to be."""
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def state(self):
"""Return the state of the scene."""
return STATE
def activate(self):
"""Activate scene. Try to get entities into requested state."""
raise NotImplementedError()
def async_activate(self):
"""Activate scene. Try to get entities into requested state.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(self.activate)
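# Minimal subclass sketch (illustrative, not part of this file): a platform
# provides scenes by subclassing Scene and implementing activate().
#
#   class AllOffScene(Scene):
#       @property
#       def name(self):
#           return 'all_off'
#
#       def activate(self):
#           # hypothetical service call turning every light off
#           self.hass.services.call('light', 'turn_off')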
|
Jorge-Rodriguez/ansible | lib/ansible/modules/network/netvisor/pn_port_config.py | Python | gpl-3.0 | 12,238 | 0.000735 | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_port_config
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: 2.8
short_description: CLI command to modify port-config
description:
- This module can be used to modify a port configuration.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
state:
description:
- State the action to perform. Use C(update) to modify the port-config.
required: True
type: str
choices: ['update']
pn_intf:
description:
- physical interface.
required: False
type: str
pn_crc_check_enable:
description:
- CRC check on ingress and rewrite on egress.
required: False
type: bool
pn_dscp_map:
description:
- DSCP map name to enable on port.
required: False
type: str
pn_autoneg:
description:
- physical port autonegotiation.
required: False
type: bool
pn_speed:
description:
- physical port speed.
required: False
choices: ['disable', '10m', '100m', '1g',
'2.5g', '10g', '25g', '40g', '50g', '100g']
pn_port:
description:
- physical port.
required: False
type: str
pn_vxlan_termination:
description:
- physical port vxlan termination setting.
required: False
type: bool
pn_pause:
description:
- physical port pause.
required: False
type: bool
pn_loopback:
description:
- physical port loopback.
required: False
type: bool
pn_loop_vlans:
description:
- looping vlans.
required: False
type: str
pn_routing:
description:
- routing.
required: False
type: bool
pn_edge_switch:
description:
- physical port edge switch.
required: False
type: bool
pn_enable:
description:
- physical port enable.
required: False
type: bool
pn_description:
description:
- physical port description.
required: False
type: str
pn_host_enable:
description:
- Host facing port control setting.
required: False
type: bool
pn_allowed_tpid:
description:
- Allowed TPID in addition to 0x8100 on Vlan header.
required: False
type: str
choices: ['vlan', 'q-in-q', 'q-in-q-old']
pn_mirror_only:
description:
- physical port mirror only.
required: False
type: bool
pn_reflect:
description:
- physical port reflection.
required: False
type: bool
pn_jumbo:
description:
- jumbo frames on physical port.
required: False
type: bool
pn_egress_rate_limit:
description:
- max egress port data rate limit.
required: False
type: str
pn_eth_mode:
description:
- physical Ethernet mode.
required: False
choices: ['1000base-x', 'sgmii', 'disabled', 'GMII']
pn_fabric_guard:
description:
- Fabric guard configuration.
required: False
type: bool
pn_local_switching:
description:
- no-local-switching port cannot bridge traffic to
another no-local-switching port.
required: False
type: bool
pn_lacp_priority:
description:
- LACP priority from 1 to 65535.
required: False
type: str
pn_send_port:
description:
- send port.
required: False
type: str
pn_port_mac_address:
description:
- physical port MAC Address.
required: False
type: str
pn_defer_bringup:
description:
- defer port bringup.
required: False
type: bool
"""
EXAMPLES = """
- name: port config modify
pn_port_config:
pn_cliswitch: "sw01"
state: "update"
pn_port: "all"
pn_dscp_map: "foo"
- name: port config modify
pn_port_config:
pn_cliswitch: "sw01"
state: "update"
pn_port: "all"
pn_host_enable: true
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the port-config command.
returned: always
type: list
stderr:
description: set of error responses from the port-config command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs
def check_cli(module, cli):
"""
This method checks for idempotency using the dscp-map-show name command.
    If a dscp map with the given name exists, return True, else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
"""
name = module.params['pn_dscp_map']
cli += ' dscp-map-show name %s format name no-show-headers' % name
out = module.run_command(cli.split(), use_unsafe_shell=True)[1]
out = out.split()
return True if name in out else False
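# For example (illustrative), with pn_dscp_map='foo' the idempotency check runs
#   <cli> dscp-map-show name foo format name no-show-headers
# and reports True exactly when 'foo' appears in the command output.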
def main():
""" This section is for arguments parsing """
state_map = dict(
update='port-config-modify'
)
module = AnsibleModule(
argument_spec=dict(
pn_cliswitch=dict(required=Fals | e, type='str'),
state=dict(required=True, type='str',
| choices=['update']),
pn_intf=dict(required=False, type='str'),
pn_crc_check_enable=dict(required=False, type='bool'),
pn_dscp_map=dict(required=False, type='str'),
pn_autoneg=dict(required=False, type='bool'),
pn_speed=dict(required=False, type='str',
choices=['disable', '10m', '100m',
'1g', '2.5g', '10g', '25g',
'40g', '50g', '100g']),
pn_port=dict(required=False, type='str'),
pn_vxlan_termination=dict(required=False, type='bool'),
pn_pause=dict(required=False, type='bool'),
pn_loopback=dict(required=False, type='bool'),
pn_loop_vlans=dict(required=False, type='str'),
pn_routing=dict(required=False, type='bool'),
pn_edge_switch=dict(required=False, type='bool'),
pn_enable=dict(required=False, type='bool'),
pn_description=dict(required=False, type='str'),
pn_host_enable=dict(required=False, type='bool'),
pn_allowed_tpid=dict(required=False, type='str',
choices=['vlan', 'q-in-q', 'q-in-q-old']),
pn_mirror_only=dict(required=False, type='bool'),
pn_reflect=dict(required=False, type='bool'),
pn_jumbo=dict(required=False, type='bool'),
pn_egress_rate_limit=dict(required=False, type='str'),
pn_eth_mode=dict(required=False, type='str',
choices=['1000base-x', 'sgmii',
'disabled', 'GMII']),
pn_fabric_guard=dict(required=False, type='bool'),
pn_local_switching=dict(required=False, type='bool'),
pn_lacp_priority=dict(required=False, type='str'),
pn_send_port=dict(required=False, type='str'),
pn_port_mac_address=dict(required=False, type='str'),
pn_defer_bringup=dict(required=False, type='bool'),
),
required_if=(
['state', 'update', ['pn_port']],
),
required_one_of=[['pn_intf', 'pn_crc_check_enable', 'pn_dscp_map',
'pn_speed', 'pn_autoneg',
'pn_vxlan_termination', 'pn_pause',
'pn_fec', 'pn_loopback', 'pn_loop_vlans',
'pn_routing', 'pn_edge_switch',
'pn_enable', 'pn_description',
'pn_host_enable', ' |
UMONS-GFA/ardas | docs/source/conf.py | Python | gpl-3.0 | 5,513 | 0.00127 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ardas documentation build configuration file, created by |
# sphinx-quickstart on Mon Mar 20 08:42:32 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values | are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'ardas'
copyright = '2017, Olivier Kaufmann, Christophe Bastin'
author = 'Olivier Kaufmann, Christophe Bastin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1'
# The full version, including alpha/beta/rc tags.
release = '1.1.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ardasdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ardas.tex', 'ardas Documentation',
'Olivier Kaufmann, Christophe Bastin', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ardas', 'ardas Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ardas', 'ardas Documentation',
author, 'ardas', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
urashima9616/Leetcode_Python | Leet415_AddStrings.py | Python | gpl-3.0 | 975 | 0.024615 | def addStrings(s1, s2):
"""
    :type s1: str
    :type s2: str
    :rtype: str
"""
digit2num = {"0":0, "1":1, "2":2, "3":3, "4":4, "5":5, "6":6, "7":7, "8":8, "9":9}
pt1, pt2 = len(s1)-1, len(s2)-1
carry = 0
res = []
while pt1 >= 0 and pt2 >= 0:
digit_sum = digit2num[s1[pt1]] + digit2num[s2[pt2]] + carry
carry = digit_sum/10
res.append(str(digit_sum%10))
pt1 -=1
pt2 -=1
else:
if pt1 < 0:
while pt2 >=0 :
digit_sum = digit2num[s2[pt2]] + carry |
carry = digit_sum/10
res.append(str(digit_sum%10))
pt2 -=1
else:
while pt1 >=0 | :
digit_sum = digit2num[s1[pt1]] + carry
carry = digit_sum/10
res.append(str(digit_sum%10))
pt1 -=1
if carry != 0:
res.append(str(carry))
return "".join(reversed(res))
print addStrings("9919", "12312")
|
borisroman/vdsm | lib/yajsonrpc/stompreactor.py | Python | gpl-2.0 | 19,115 | 0 | # Copyright (C) 2014 Saggi Mizrahi, Red Hat Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import absolute_import
import logging
from collections import deque
from uuid import uuid4
import functools
import threading
from vdsm import utils
from vdsm.config import config
from vdsm.compat import json
from vdsm.sslcompat import SSLSocket
from . import JsonRpcClient, JsonRpcServer
from . import stomp
from .betterAsyncore import Dispatcher, Reactor
_STATE_LEN = "Waiting for message length"
_STATE_MSG = "Waiting for message"
def parseHeartBeatHeader(v):
try:
x, y = v.split(",", 1)
except ValueError:
x, y = (0, 0)
try:
x = int(x)
except ValueError:
x = 0
try:
y = int(y)
except ValueError:
y = 0
return (x, y)
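# For example (illustrative):
#   parseHeartBeatHeader("5000,10000") -> (5000, 10000)
#   parseHeartBeatHeader("bogus")      -> (0, 0)   # malformed values fall back to 0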
class StompAdapterImpl(object):
log = logging.getLogger("Broker.StompAdapter")
"""
    This class is responsible for stomp message processing
    on the server side. It uses two dictionaries to track
    request/response destinations.
    sub_map - maps a destination id to a _Subscription object
    representing a stomp subscription.
req_dest - maps a request id to a destination.
"""
def __init__(self, reactor, sub_map, req_dest):
self._reactor = reactor
self._outbox = deque()
self._sub_dests = sub_map
self._req_dest = req_dest
self._sub_ids = {}
request_queues = config.get('addresses', 'request_queues')
self.request_queues = request_queues.split(",")
self._commands = {
stomp.Command.CONNECT: self._cmd_connect,
stomp.Command.SEND: self._cmd_send,
stomp.Command.SUBSCRIBE: self._cmd_subscribe,
stomp.Command.UNSUBSCRIBE: self._cmd_unsubscribe,
stomp.Command.DISCONNECT: self._cmd_disconnect}
@property
def has_outgoing_messages(self):
return (len(self._outbox) > 0)
def peek_message(self):
return self._outbox[0]
def pop_message(self):
return self._outbox.popleft()
def queue_frame(self, frame):
self._outbox.append(frame)
def remove_subscriptions(self):
for sub in self._sub_ids.values():
self._remove_subscription(sub)
self._sub_ids.clear()
def _cmd_connect(self, dispatcher, frame):
self.log.info("Processing CONNECT request")
version = frame.headers.get(stomp.Headers.ACCEPT_VERSION, None)
if version != "1.2":
resp = stomp.Frame(
stomp.Command.ERROR,
None,
"Version unsupported"
)
else:
resp = stomp.Frame(stomp.Command.CONNECTED, {"version": "1.2"})
cx, cy = parseHeartBeatHeader(
frame.headers.get(stomp.Headers.HEARTEBEAT, "0,0")
)
# Make sure the heart-beat interval is sane
if cy != 0:
cy = max( | cy, 1000)
# The server can send a heart-beat every cy ms and doesn't want
# to receive any heart-beat from the client.
resp.headers[stomp.Headers.HEARTEBEAT] = "%d,0" % (cy,)
di | spatcher.setHeartBeat(cy)
self.queue_frame(resp)
self._reactor.wakeup()
def _cmd_subscribe(self, dispatcher, frame):
self.log.info("Subscribe command received")
destination = frame.headers.get("destination", None)
sub_id = frame.headers.get("id", None)
if not destination or not sub_id:
self._send_error("Missing destination or subscription id header",
dispatcher.connection)
return
ack = frame.headers.get("ack", stomp.AckMode.AUTO)
subscription = stomp._Subscription(dispatcher.connection, destination,
sub_id, ack, None)
self._sub_dests[destination].append(subscription)
self._sub_ids[sub_id] = subscription
def _send_error(self, msg, connection):
res = stomp.Frame(
stomp.Command.ERROR,
None,
msg
)
connection.send_raw(res)
def _cmd_unsubscribe(self, dispatcher, frame):
self.log.info("Unsubscribe command received")
sub_id = frame.headers.get("id", None)
if not sub_id:
self._send_error("Missing id header",
dispatcher.connection)
return
try:
subscription = self._sub_ids.pop(sub_id)
except KeyError:
self.log.debug("No subscription for %s id",
sub_id)
return
else:
self._remove_subscription(subscription)
def _cmd_disconnect(self, dispatcher, frame):
self.log.info("Disconnect command received")
r_id = frame.headers[stomp.Headers.RECEIPT]
if not r_id:
self.log.debug("No receipt id for disconnect frame")
# it is not mandatory to send receipt frame
return
headers = {stomp.Headers.RECEIPT_ID: r_id}
dispatcher.connection.send_raw(stomp.Frame(stomp.Command.RECEIPT,
headers))
def _remove_subscription(self, subscription):
subs = self._sub_dests[subscription.destination]
if len(subs) == 1:
del self._sub_dests[subscription.destination]
else:
if subscription in subs:
subs.remove(subscription)
def _cmd_send(self, dispatcher, frame):
destination = frame.headers.get(stomp.Headers.DESTINATION, None)
if destination in self.request_queues:
# default subscription
self._handle_internal(dispatcher,
frame.headers.get(stomp.Headers.REPLY_TO),
frame.body)
return
elif stomp.LEGACY_SUBSCRIPTION_ID_REQUEST == destination:
self._handle_internal(dispatcher,
stomp.LEGACY_SUBSCRIPTION_ID_RESPONSE,
frame.body)
return
else:
try:
subs = self._sub_dests[destination]
except KeyError:
self._send_error("Subscription not available",
dispatcher.connection)
return
if not subs:
self._send_error("Subscription not available",
dispatcher.connection)
return
for subscription in subs:
            headers = {stomp.Headers.SUBSCRIPTION: subscription.id}
            headers.update(frame.headers)
res = stomp.Frame(
stomp.Command.MESSAGE,
headers,
frame.body
)
subscription.client.send_raw(res)
def _handle_internal(self, dispatcher, req_dest, request):
"""
        We need to build a response dictionary which maps a message id
        to a destination. For legacy mode we use the known 3.5 destination;
        for standard mode we use the 'reply-to' header.
"""
try:
self._handle_destination(dispatcher, req_dest, json.loads(request))
except Exception:
# let json server process issue
pass
dispatcher.connection.handleMessage(request)
def _handle_destination(self, dispatcher, req_dest, requ |
alvason/probability-insighter | code/mutation_drift.py | Python | gpl-2.0 | 7,287 | 0.008508 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
%matplotlib inline
# packages
import numpy as np
import itertools
import matplotlib.pyplot as plt
import matplotlib as mpl
import argparse
# global variables
pop_size = 50
seq_length = 100
alphabet = ['A', 'T', 'G', 'C']
mutation_rate = 0.0001 # per gen per individual per site
generations = 500
# population
base_haplotype = ''.join(["A" for i in range(seq_length)])
pop = {}
pop[base_haplotype] = pop_size
history = []
# mutation
def get_mutation_count():
mean = mutation_rate * pop_size * seq_length
return np.random.poisson(mean)
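# With the defaults above the expected number of mutation events per generation
# is mutation_rate * pop_size * seq_length = 0.0001 * 50 * 100 = 0.5.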
def get_random_haplotype():
haplotypes = pop.keys()
frequencies = [x/float(pop_size) for x in pop.values()]
total = sum(frequencies)
frequencies = [x / total for x in frequencies]
return np.random.choice(haplotypes, p=frequencies)
def get_mutant(haplotype):
site = np.random.randint(seq_length)
possible_mutations = list(alphabet)
possible_mutations.remove(haplotype[site])
mutation = np.random.choice(possible_mutations)
new_haplotype = haplotype[:site] + mutation + haplotype[site+1:]
return new_haplotype
def mutation_event():
haplotype = get_random_haplotype()
if pop[haplotype] > 1:
pop[haplotype] -= 1
new_haplotype = get_mutant(haplotype)
if new_haplotype in pop:
pop[new_haplotype] += 1
else:
pop[new_haplotype] = 1
def mutation_step():
mutation_count = get_mutation_count()
for i in range(mutation_count):
mutation_event()
# genetic drift
def get_offspring_counts():
haplotypes = pop.keys()
frequencies = [x/float(pop_size) for x in pop.values()]
total = sum(frequencies)
frequencies = [x / total for x in frequencies]
return list(np.random.multinomial(pop_size, frequencies))
def offspring_step():
counts = get_of | fspring_counts()
for (haplotype, count) in zip(pop.keys(), counts):
if (count > 0):
pop[haplotype] = count
else:
del pop[haplotype]
# simulate
def time_step():
mutation_step()
offspring_step()
def simulate():
clone_pop = dict(pop)
history.append(clone_pop)
for i in range(generations):
time_step()
clone_pop = dict(pop)
history.append(clone_pop)
|
# plot diversity
def get_distance(seq_a, seq_b):
diffs = 0
length = len(seq_a)
assert len(seq_a) == len(seq_b)
for chr_a, chr_b in zip(seq_a, seq_b):
if chr_a != chr_b:
diffs += 1
return diffs / float(length)
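# e.g. get_distance("AAAA", "AATA") -> 0.25 (one mismatch out of four sites)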
def get_diversity(population):
haplotypes = population.keys()
haplotype_count = len(haplotypes)
diversity = 0
for i in range(haplotype_count):
for j in range(haplotype_count):
haplotype_a = haplotypes[i]
haplotype_b = haplotypes[j]
frequency_a = population[haplotype_a] / float(pop_size)
frequency_b = population[haplotype_b] / float(pop_size)
frequency_pair = frequency_a * frequency_b
diversity += frequency_pair * get_distance(haplotype_a, haplotype_b)
return diversity
def get_diversity_trajectory():
trajectory = [get_diversity(generation) for generation in history]
return trajectory
def diversity_plot(xlabel="generation"):
mpl.rcParams['font.size']=14
trajectory = get_diversity_trajectory()
plt.plot(trajectory, "#447CCD")
plt.ylabel("diversity")
plt.xlabel(xlabel)
# plot divergence
def get_divergence(population):
haplotypes = population.keys()
divergence = 0
for haplotype in haplotypes:
frequency = population[haplotype] / float(pop_size)
divergence += frequency * get_distance(base_haplotype, haplotype)
return divergence
def get_divergence_trajectory():
trajectory = [get_divergence(generation) for generation in history]
return trajectory
def divergence_plot(xlabel="generation"):
mpl.rcParams['font.size']=14
trajectory = get_divergence_trajectory()
plt.plot(trajectory, "#447CCD")
plt.ylabel("divergence")
plt.xlabel(xlabel)
# plot trajectories
def get_frequency(haplotype, generation):
pop_at_generation = history[generation]
if haplotype in pop_at_generation:
return pop_at_generation[haplotype]/float(pop_size)
else:
return 0
def get_trajectory(haplotype):
trajectory = [get_frequency(haplotype, gen) for gen in range(generations)]
return trajectory
def get_all_haplotypes():
haplotypes = set()
for generation in history:
for haplotype in generation:
haplotypes.add(haplotype)
return haplotypes
def stacked_trajectory_plot(xlabel="generation"):
colors_lighter = ["#A567AF", "#8F69C1", "#8474D1", "#7F85DB", "#7F97DF", "#82A8DD", "#88B5D5", "#8FC0C9", "#97C8BC", "#A1CDAD", "#ACD1A0", "#B9D395", "#C6D38C", "#D3D285", "#DECE81", "#E8C77D", "#EDBB7A", "#EEAB77", "#ED9773", "#EA816F", "#E76B6B"]
mpl.rcParams['font.size']=18
haplotypes = get_all_haplotypes()
trajectories = [get_trajectory(haplotype) for haplotype in haplotypes]
plt.stackplot(range(generations), trajectories, colors=colors_lighter)
plt.ylim(0, 1)
plt.ylabel("frequency")
plt.xlabel(xlabel)
# plot snp trajectories
def get_snp_frequency(site, generation):
minor_allele_frequency = 0.0
pop_at_generation = history[generation]
for haplotype in pop_at_generation.keys():
allele = haplotype[site]
frequency = pop_at_generation[haplotype] / float(pop_size)
if allele != "A":
minor_allele_frequency += frequency
return minor_allele_frequency
def get_snp_trajectory(site):
trajectory = [get_snp_frequency(site, gen) for gen in range(generations)]
return trajectory
def get_all_snps():
snps = set()
for generation in history:
for haplotype in generation:
for site in range(seq_length):
if haplotype[site] != "A":
snps.add(site)
return snps
def snp_trajectory_plot(xlabel="generation"):
colors = ["#781C86", "#571EA2", "#462EB9", "#3F47C9", "#3F63CF", "#447CCD", "#4C90C0", "#56A0AE", "#63AC9A", "#72B485", "#83BA70", "#96BD60", "#AABD52", "#BDBB48", "#CEB541", "#DCAB3C", "#E49938", "#E68133", "#E4632E", "#DF4327", "#DB2122"]
mpl.rcParams['font.size']=18
snps = get_all_snps()
trajectories = [get_snp_trajectory(snp) for snp in snps]
data = []
for trajectory, color in itertools.izip(trajectories, itertools.cycle(colors)):
data.append(range(generations))
data.append(trajectory)
data.append(color)
plt.plot(*data)
plt.ylim(0, 1)
plt.ylabel("frequency")
plt.xlabel(xlabel)
if __name__=="__main__":
pop_size = 50
mutation_rate = 0.0001
seq_length = 100
generations = 500
simulate()
plt.figure(num=None, figsize=(14, 10), dpi=80, facecolor='w', edgecolor='k')
plt.subplot2grid((3,2), (0,0), colspan=2)
stacked_trajectory_plot(xlabel="")
plt.subplot2grid((3,2), (1,0), colspan=2)
snp_trajectory_plot(xlabel="")
plt.subplot2grid((3,2), (2,0))
diversity_plot()
plt.subplot2grid((3,2), (2,1))
divergence_plot()
plt.show()
# <codecell>
|
zyantific/IDASkins | plugins/idaskins/objectinspector.py | Python | mit | 2,576 | 0 | from __future__ import absolute_import, division, print_function
import os
from idaskins import UI_DIR
from PyQt5 import uic
from PyQt5.Qt import qApp
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QCursor, QFont, QKeySequence
from PyQt5.QtWidgets import QShortcut, QWidget
Ui_ObjectInspector, ObjectInspectorBase = uic.loadUiType(
os.path.join(UI_DIR, 'ObjectInspector.ui')
)
class ObjectInspector(ObjectInspectorBase):
"""
Rudimentary Qt object inspector.
Allows for easier finding of object names and classes
for usage in QSS stylesheets.
"""
def __init__(self, *args, **kwargs):
super(ObjectInspector, self).__init__(*args, **kwargs)
self._selected_widget = None
self._ui = Ui_ObjectInspector()
self._ui.setupUi(self)
# Make everything monospace.
font = QFont('Monospace')
font.setStyleHint(QFont.TypeWriter)
self._ui.teInspectionResults.setFont(font)
# Register signals.
self._update_key = QShortcut(QKeySequence(Qt.Key_F7), self)
self._ui.btnSelectParent.released.connect(self.select_parent)
self._update_key.activated.connect(self.update_inspection)
def update_inspection(self):
widget = qApp.widgetAt(QCursor.pos())
self.update_selected_widget(widget)
def select_parent(self):
if self._selected_widget:
parent = self._selected_widget.parent()
if parent and parent.inherits('QWidget'):
self.update_selected_widget(parent)
def update_selected_widget(self, widget):
if self._selected_widget:
self._selected_widget.destroyed.disconnect(
self.on_selected_widget_destroyed
)
self._selected_widget = widget
if widget:
self._ui.btnSelectParent.setEnabled(widget.parent() is not None)
self._ui.te | InspectionResults.setText((
"Type: {}\n"
"Name: {}\n"
"Number of children: {}\n"
"QSS: {}"
).format(
widget.metaObject().className(),
widget.objectName() or '<none>',
len(widget.children()),
widget.styleSheet() or '<none>',
))
self._selected_widget.destroyed | .connect(
self.on_selected_widget_destroyed
)
else:
self._ui.teInspectionResults.setText('<no object under cursor>')
def on_selected_widget_destroyed(self, obj):
self._selected_widget = None
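# Typical use (sketch, not from the original file): create the inspector inside
# the host Qt application, show it, hover a widget and press F7 to inspect it.
#
#   inspector = ObjectInspector()
#   inspector.show()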
|
rlr/fjord | fjord/feedback/views.py | Python | bsd-3-clause | 17,044 | 0 | import json
from functools import wraps
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.utils import translation
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.views.decorators.http import require_POST
from mobility.decorators import mobile_template
from statsd import statsd
import waffle
from fjord.base.browsers import UNKNOWN
from fjord.base.urlresolvers import reverse
from fjord.base.utils import (
actual_ip_plus_context,
ratelimit,
smart_int,
smart_str,
translate_country_name
)
from fjord.feedback import config
from fjord.feedback import models
from fjord.feedback.forms import ResponseForm
from fjord.feedback.models import Response
from fjord.feedback.utils import clean_url
from fjord.feedback.config import TRUNCATE_LENGTH
from fjord.suggest.utils import get_suggestions
def happy_redirect(request):
"""Support older redirects from Input v1 era"""
return HttpResponseRedirect(reverse('feedback') + '?happy=1')
def sad_redirect(request):
"""Support older redirects from Input v1 era"""
return HttpResponseRedirect(reverse('feedback') + '?happy=0')
@mobile_template('feedback/{mobile/}download_firefox.html')
def download_firefox(request, template):
return render(request, template)
def thanks(request):
feedback = None
suggestions = None
# FIXME: Hard-coded default product.
product = u'Firefox'
response_id = None
# If the user is an analyzer/admin, then we let them specify
# the response_id via the querystring. This makes debugging
# the system easier.
if ((request.user.is_authenticated()
and request.user.has_perm('analytics.can_view_dashboard'))):
response_id = smart_int(request.GET.get('response_id', None))
# If we don't have a response_id, then pull it from the
# session where it was placed if the user had just left
# feedback.
if not response_id:
response_id = request.session.get('response_id')
if response_id:
try:
feedback = Response.objects.get(id=response_id)
except Response.DoesNotExist:
pass
if feedback:
product = feedback.product
if waffle.flag_is_active(request, ' | thankyou'):
suggestions = get_suggestions(feedback, request)
return render(request, 'feedbac | k/thanks.html', {
'product': product,
'feedback': feedback,
'suggestions': suggestions
})
def requires_firefox(func):
"""Redirects to "download firefox" page if not Firefox.
If it isn't a Firefox browser, then we don't want to deal with it.
This is a temporary solution. See bug #848568.
"""
@wraps(func)
def _requires_firefox(request, *args, **kwargs):
# Note: This is sort of a lie. What's going on here is that
# parse_ua only parses Firefox-y browsers. So if it's UNKNOWN
# at this point, then it's not Firefox-y. If parse_ua ever
# changes, then this will cease to be true.
if request.BROWSER.browser == UNKNOWN:
return HttpResponseRedirect(reverse('download-firefox'))
return func(request, *args, **kwargs)
return _requires_firefox
@ratelimit(rulename='doublesubmit_1p10m',
keyfun=actual_ip_plus_context(
lambda req: req.POST.get('description', u'no description')),
rate='1/10m')
@ratelimit(rulename='50ph', rate='50/h')
def _handle_feedback_post(request, locale=None, product=None,
version=None, channel=None):
"""Saves feedback post to db accounting for throttling
:arg request: request we're handling the post for
:arg locale: locale specified in the url
:arg product: None or the Product
:arg version: validated and sanitized version specified in the url
:arg channel: validated and sanitized channel specified in the url
"""
if getattr(request, 'limited', False):
# If we're throttled, then return the thanks page, but don't
# add the response to the db.
return HttpResponseRedirect(reverse('thanks'))
# Get the form and run is_valid() so it goes through the
# validation and cleaning machinery. We don't really care if it's
# valid, though, since we will take what we got and do the best we
# can with it. Error validation is now in JS.
form = ResponseForm(request.POST)
form.is_valid()
get_data = request.GET.copy()
data = form.cleaned_data
description = data.get('description', u'').strip()
if not description:
# If there's no description, then there's nothing to do here,
# so thank the user and move on.
return HttpResponseRedirect(reverse('thanks'))
opinion = models.Response(
# Data coming from the user
happy=data['happy'],
url=clean_url(data.get('url', u'').strip()),
description=description,
# Pulled from the form data or the url
locale=data.get('locale', locale),
# Data from mobile devices which is probably only
# applicable to mobile devices
manufacturer=data.get('manufacturer', ''),
device=data.get('device', ''),
)
# Add user_agent and inferred data.
user_agent = request.META.get('HTTP_USER_AGENT', '')
if user_agent:
browser = request.BROWSER
opinion.user_agent = user_agent
opinion.browser = browser.browser
opinion.browser_version = browser.browser_version
opinion.browser_platform = browser.platform
if browser.platform == 'Windows':
opinion.browser_platform += ' ' + browser.platform_version
# source is src or utm_source
source = (
get_data.pop('src', [u''])[0] or
get_data.pop('utm_source', [u''])[0]
)
if source:
opinion.source = source[:100]
campaign = get_data.pop('utm_campaign', [u''])[0]
if campaign:
opinion.campaign = campaign[:100]
# If they sent "happy=1"/"happy=0" in the querystring, it will get
# picked up by the javascript in the form and we can just drop it
# here.
get_data.pop('happy', None)
platform = u''
if product:
# If we have a product at this point, then it came from the
# url and it's a Product instance and we need to turn it into
# the product.db_name which is a string.
product_db_name = product.db_name
else:
# Check the POST data for the product.
product_db_name = data.get('product', '')
# For the version, we try the url data, then the POST data.
version = version or data.get('version', '')
# At this point, we have a bunch of values, but we might be
# missing some values, too. We're going to cautiously infer data
# from the user agent where we're very confident it's appropriate
# to do so.
if request.BROWSER != UNKNOWN:
# If we don't have a product, try to infer that from the user
# agent information.
if not product_db_name:
product_db_name = models.Response.infer_product(request.BROWSER)
# If we have a product and it matches the user agent browser,
# then we can infer the version and platform from the user
# agent if they're missing.
if product_db_name:
product = models.Product.objects.get(db_name=product_db_name)
if product.browser and product.browser == request.BROWSER.browser:
if not version:
version = request.BROWSER.browser_version
if not platform:
platform = models.Response.infer_platform(
product_db_name, request.BROWSER)
# Make sure values are at least empty strings--no Nones.
opinion.product = product_db_name or u''
opinion.version = version or u''
opinion.channel = channel or u''
opinion.platform = platform or u''
opinion.save()
# If there was an email address, save that separately.
if data.get('email_ok') and data.get('email'):
e = models.ResponseEmail(email= |
antoinecarme/pyaf | tests/bench/web_traffic_jobs/en.wikipedia.org/test_web_traffic_en.wikipedia.org_pyaf_hierarchical_top_down.py | Python | bsd-3-clause | 447 | 0.002237 | impor | t pyaf.Bench.web_traffic.Forecaster as fo
PROJECTS = ['en.wikipedia.org']
data_dir = 'data/web-traffic-time-series-forecasting'
lForecaster = fo.cProjectForecaster()
lForecaster.mDataDirectory = data_dir
lForecaster.mBackendName = 'pyaf_hierarchical_top_down'
lForecaster.mKeysFileName = 'key_1.csv.zip'
last_date = '2016-12-31'
horizon = 60
lForecaster.mKeysFileName = 'key_1.csv.zip'
lForecaster.forecast(PROJECTS, last_date , horiz | on)
|
mehdidc/py-earth | examples/return_sympy.py | Python | bsd-3-clause | 760 | 0.005263 | """
=====================================================
Exporting a fitted Earth models as a sympy expression
=============== | ======================================
A simple example returning a sympy expression describing the fit of a sine function computed by Earth.
"""
import numpy
fro | m pyearth import Earth
from pyearth import export
# Create some fake data
numpy.random.seed(2)
m = 1000
n = 10
X = 10 * numpy.random.uniform(size=(m, n)) - 40
y = 100 * \
(numpy.sin((X[:, 6])) - 4.0) + \
10 * numpy.random.normal(size=m)
# Fit an Earth model
model = Earth(max_degree=2, minspan_alpha=.5, verbose=False)
model.fit(X, y)
print(model.summary())
#return sympy expression
print('Resulting sympy expression:')
print(export.export_sympy(model))
|
sparkslabs/kamaelia_ | Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/IRCLogger/Support.py | Python | apache-2.0 | 7,622 | 0.010102 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
from Kamaelia.File.Writing import SimpleFileWriter
import os
import Kamaelia.Support.Protocol.IRC
import time
"""
===================================
Supporting Functions for IRC Logger
===================================
BasicLogger and Logger depend heavily on this library. These functions are in a
separate module from the Logger components so we can reload these functions
without stopping Logger
There are no components in this module.
"""
logging = True
lastlog = time. | time()
def cannedResponse():
return [
"Hi, I'm a bot. I've been put here to answer faq's and log the channel.",
"I've not really been configured yet, so I won't do much here yet :-)",
]
def cannedYesTheyreAround():
return [
"Hi, I'm a bot. I've been put here to answer faq's and log the channel.",
"I've not | really been configured yet, so I won't do much here yet :-)",
]
def respondToQueries(self, msg):
"""Takes a BasicLogger as its first argument. If this function recognizes
"msg" as a command, then it sends back the appropriate response to IRC
"""
replyLines = ""
tag = 'PRIVMSG'
if msg[0] == 'PRIVMSG' and msg[3].split(':')[0] == self.name:
words = msg[3].split()
if words[1] == 'logfile':
replyLines = [self.logname]
elif words[1] == 'infofile':
replyLines = [self.infoname]
elif words[1] == 'help':
replyLines = ["Name: %s Channel: %s" % (self.name, self.channel),
"I do a simple job -- recording all channel traffic.",
"Lines prefixed by [off] won't get recorded",
"I respond to the following: 'logfile', 'infofile', 'help', 'date', 'time', 'dance', 'poke', 'slap', 'ecky', 'boo', and 'reload {modulename}'."
]
elif words[1] == 'date':
replyLines = [self.currentDateString()]
elif words[1] == 'time':
replyLines = [self.currentTimeString()]
elif words[1] == 'dance':
tag = 'ME'
replyLines = ['does the macarena']
elif words[1] == 'poke':
replyLines = ['Not the eye! Not the eye!']
elif words[1] == 'slap':
replyLines = ['Ouch!']
elif words[1] == 'ecky':
replyLines = ['Ptang!']
elif words[1] == 'boo':
replyLines = ['Nice try, but that didn\'t scare me']
elif words[1] == 'learn':
replyLines = ['OK, trying, but not ready to do that yet - I will though' + str(len(words)) ]
if msg[0] == 'PRIVMSG':
words = [ x.lower() for x in msg[3].split() ]
if ("any" in words) \
and (("mentors" in words) or ("mentor" in words)):
replyLines = cannedResponse()
elif ("when" in words) \
and (("feedback" in words) or ("expect" in words)):
replyLines = cannedResponse()
elif ("when" in words) \
and (("feedback" in words) or ("expect" in words)):
replyLines = cannedResponse()
elif ("i" in words) \
and (("have" in words) or ("had" in words)) \
and (("question" in words) or ("query" in words) or ("doubt" in words)):
replyLines = cannedResponse()
elif ("who" in words) \
and ("can" in words) \
and ("i" in words) \
and ("ask" in words) \
and (("soc" in words) or ("gcos" in words) or ("gsoc" in words)):
replyLines = cannedResponse()
elif (("about" in words) or ("around" in words)) \
and (("is" in words) or ("are" in words)) \
and (("mentors-" in words) or ("ms-" in words) or ("mhrd" in words) or ("lawouach" in words)):
replyLines = cannedYesTheyreAround()
elif ("anyone" in words) \
and ("seen" in words) \
and (("mentors-" in words) or ("ms-" in words) or ("mhrd" in words) or ("lawouach" in words)):
replyLines = cannedYesTheyreAround()
elif ("hi" in words) \
and (("everybody" in words) or ("evreybody" in words)):
replyLines = [ "hi" ]
elif ("kamaeliabot" in words):
if ("bonjour" in words):
replyLines = ["Bonjour"]
elif ("parrot" in words):
replyLines = [ repr(words) ]
if replyLines == "":
if ("anyone" in words) and ("know" in words):
replyLines = ['Hm?']
if replyLines:
for reply in replyLines:
self.send((tag, self.channel, reply), "irc")
self.send(self.format("Reply: %s \n" % reply), "outbox")
def TimedOutformat(data):
"""\
prepends a timestamp onto formatted data and ignores all privmsgs prefixed
by "[off]"
"""
if data[0] == 'PRIVMSG' and data[3][0:5] == '[off]':
return
if type(data) == type(""):
formatted = data
else:
formatted = Kamaelia.Support.Protocol.IRC.outformat(data)
curtime = time.gmtime()
timestamp = time.strftime("[%H:%M] ", curtime)
if formatted: return timestamp+formatted
def HTMLOutformat(data):
"""each formatted line becomes a line on a table."""
global logging
if logging:
head = " <tr><td>"
end = "</td></tr>\n"
formatted = TimedOutformat(data)
if formatted:
formatted = formatted.replace('<', '< ')
formatted = formatted.replace('>', '>')
return head + formatted.rstrip() + end
def AppendingFileWriter(filename):
"""appends to instead of overwrites logs"""
return SimpleFileWriter(filename, mode='ab')
def LoggerWriter(filename):
"""
puts an html header in front of every file it opens. Does not make well-
formed HTML, as files are closed without closing the HTML tags. However,
most browsers don't have a problem with this. =D
"""
htmlhead = "<html><body><table>\n"
if not os.path.exists(filename):
f = open(filename, "wb")
f.write(htmlhead)
f.close()
return SimpleFileWriter(filename, mode='ab')
def currentDateString():
"""returns the current date in YYYY-MM-DD format"""
curtime = time.gmtime()
return time.strftime("%Y-%m-%d", curtime)
def currentTimeString():
"""returns the current time in hour:minute:second format"""
curtime = time.gmtime()
return time.strftime("%H:%M:%S", curtime)
def getFilenames(logdir, channel):
"""returns tuple (logname, infoname) according to the parameters given"""
name = logdir + channel.lstrip('#') + currentDateString()
return name + "_log.html", name + "_info.html"
outformat = HTMLOutformat
|
mattclay/ansible | test/units/playbook/test_conditional.py | Python | gpl-3.0 | 9,810 | 0.002039 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.mock.loader import DictDataLoader
from mock import MagicMock
from ansible.template import Templar
from ansible import errors
from ansible.playbook import conditional
class TestConditional(unittest.TestCase):
def setUp(self):
self.loader = DictDataLoader({})
self.cond = conditional.Conditional(loader=self.loader)
self.templar = Templar(loader=self.loader, variables={})
def _eval_con(self, when=None, variables=None):
when = when or []
variables = variables or {}
self.cond.when = when
ret = self.cond.evaluate_conditional(self.templar, variables)
return ret
def test_false(self):
when = [u"False"]
ret = self._eval_con(when, {})
self.assertFalse(ret)
def test_true(self):
when = [u"True"]
ret = self._eval_con(when, {})
self.assertTrue(ret)
def test_true_boolean(self):
self.cond.when = [True]
m = MagicMock()
ret = self.cond.evaluate_conditional(m, {})
self.assertTrue(ret)
self.assertFalse(m.is_template.called)
def test_false_boolean(self):
self.cond.when = [False]
m = MagicMock()
ret = self.cond.evaluate_conditional(m, {})
self.assertFalse(ret)
self.assertFalse(m.is_template.called)
def test_undefined(self):
when = [u"{{ some_undefined_thing }}"]
self.assertRaisesRegex(errors.AnsibleError, "The conditional check '{{ some_undefined_thing }}' failed",
self._eval_con, when, {})
def test_defined(self):
variables = {'some_defined_thing': True}
when = [u"{{ some_defined_thing }}"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_dict_defined_values(self):
variables = {'dict_value': 1,
'some_defined_dict': {'key1': 'value1',
'key2': '{{ dict_value }}'}}
when = [u"some_defined_dict"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_dict_defined_values_is_defined(self):
variables = {'dict_value': 1,
'some_defined_dict': {'key1': 'value1',
'key2': '{{ dict_value }}'}}
when = [u"some_defined_dict.key1 is defined"]
| ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_dict_defined_multiple_values_is_defined(self):
variables = | {'dict_value': 1,
'some_defined_dict': {'key1': 'value1',
'key2': '{{ dict_value }}'}}
when = [u"some_defined_dict.key1 is defined",
u"some_defined_dict.key2 is not undefined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_nested_hostvars_undefined_values(self):
variables = {'dict_value': 1,
'hostvars': {'host1': {'key1': 'value1',
'key2': '{{ dict_value }}'},
'host2': '{{ dict_value }}',
'host3': '{{ undefined_dict_value }}',
# no host4
},
'some_dict': {'some_dict_key1': '{{ hostvars["host3"] }}'}
}
when = [u"some_dict.some_dict_key1 == hostvars['host3']"]
# self._eval_con(when, variables)
self.assertRaisesRegex(errors.AnsibleError,
r"The conditional check 'some_dict.some_dict_key1 == hostvars\['host3'\]' failed",
# "The conditional check 'some_dict.some_dict_key1 == hostvars['host3']' failed",
# "The conditional check 'some_dict.some_dict_key1 == hostvars['host3']' failed.",
self._eval_con,
when, variables)
def test_dict_undefined_values_bare(self):
variables = {'dict_value': 1,
'some_defined_dict_with_undefined_values': {'key1': 'value1',
'key2': '{{ dict_value }}',
'key3': '{{ undefined_dict_value }}'
}}
# raises an exception when a non-string conditional is passed to extract_defined_undefined()
when = [u"some_defined_dict_with_undefined_values"]
self.assertRaisesRegex(errors.AnsibleError,
"The conditional check 'some_defined_dict_with_undefined_values' failed.",
self._eval_con,
when, variables)
def test_dict_undefined_values_is_defined(self):
variables = {'dict_value': 1,
'some_defined_dict_with_undefined_values': {'key1': 'value1',
'key2': '{{ dict_value }}',
'key3': '{{ undefined_dict_value }}'
}}
when = [u"some_defined_dict_with_undefined_values is defined"]
self.assertRaisesRegex(errors.AnsibleError,
"The conditional check 'some_defined_dict_with_undefined_values is defined' failed.",
self._eval_con,
when, variables)
def test_is_defined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_undefined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is undefined"]
ret = self._eval_con(when, variables)
self.assertFalse(ret)
def test_is_undefined_and_defined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is undefined", u"some_defined_thing is defined"]
ret = self._eval_con(when, variables)
self.assertFalse(ret)
def test_is_undefined_and_defined_reversed(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is defined", u"some_defined_thing is undefined"]
ret = self._eval_con(when, variables)
self.assertFalse(ret)
def test_is_not_undefined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is not undefined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_not_defined(self):
variables = {'some_defined_thing': True}
when = [u"some_undefined_thing is not defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_hostvars_quotes_is_defined(self):
variables = {'hostvars': {'some_host': {}},
'compare_targets_single': "hostvars['some_host']",
'compare_targets_double': 'hostvars["some_host"]',
'compare_targets': {'double': '{{ compare_targets_double }}',
'single': "{{ compare_targets_single }}"},
}
when = [u"hostvars['some_host'] is defined",
u'hostvars["some_host"] is defined',
u"{{ compare_targets.double }} is defined",
u"{{ compare_targets.single }} is defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_hostvars_quotes_is_defined_but_is_not_defined(self):
variables = {'hostvars': {'some_host': {}},
'compare_targets_single': "hostvars['some_host']",
'compare_targets_double': 'hostvars["some_host"]',
'compare_targets' |
wechatpy/wechatpy | tests/test_constants.py | Python | mit | 527 | 0 | # -*- coding: utf-8 -*-
import unittest
from wechatpy.constants import WeChatErrorCode
class WeChatErrorCodeTestCase(unittest.TestCase):
"""ensure python compatibility"""
def test_error_code(self):
self.assertEqual(-1000, WeChatErrorCode.SYSTEM_ERROR.value)
self.assertEqual(42001, WeChatErrorCode.EXPIRED_ACCESS_TOKEN.value)
self.assertEqual(48001, WeChatErrorCode.UNAUTHORIZED_API.value)
def test_enum(self):
| self.assertEqual(WeC | hatErrorCode.SYSTEM_BUSY, WeChatErrorCode(-1))
|
execunix/vinos | xsrc/external/mit/MesaLib/dist/src/gallium/auxiliary/indices/u_indices_gen.py | Python | apache-2.0 | 10,366 | 0.014663 | #!/usr/bin/env python
copyright = '''
/*
* Copyright 2009 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all c | opies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VMWARE AND | /OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
'''
GENERATE, UBYTE, USHORT, UINT = 'generate', 'ubyte', 'ushort', 'uint'
FIRST, LAST = 'first', 'last'
INTYPES = (GENERATE, UBYTE, USHORT, UINT)
OUTTYPES = (USHORT, UINT)
PVS=(FIRST, LAST)
PRIMS=('points',
'lines',
'linestrip',
'lineloop',
'tris',
'trifan',
'tristrip',
'quads',
'quadstrip',
'polygon')
LONGPRIMS=('PIPE_PRIM_POINTS',
'PIPE_PRIM_LINES',
'PIPE_PRIM_LINE_STRIP',
'PIPE_PRIM_LINE_LOOP',
'PIPE_PRIM_TRIANGLES',
'PIPE_PRIM_TRIANGLE_FAN',
'PIPE_PRIM_TRIANGLE_STRIP',
'PIPE_PRIM_QUADS',
'PIPE_PRIM_QUAD_STRIP',
'PIPE_PRIM_POLYGON')
longprim = dict(zip(PRIMS, LONGPRIMS))
intype_idx = dict(ubyte='IN_UBYTE', ushort='IN_USHORT', uint='IN_UINT')
outtype_idx = dict(ushort='OUT_USHORT', uint='OUT_UINT')
pv_idx = dict(first='PV_FIRST', last='PV_LAST')
def prolog():
print '''/* File automatically generated by indices.py */'''
print copyright
print r'''
/**
* @file
* Functions to translate and generate index lists
*/
#include "indices/u_indices.h"
#include "indices/u_indices_priv.h"
#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
static unsigned out_size_idx( unsigned index_size )
{
switch (index_size) {
case 4: return OUT_UINT;
case 2: return OUT_USHORT;
default: assert(0); return OUT_USHORT;
}
}
static unsigned in_size_idx( unsigned index_size )
{
switch (index_size) {
case 4: return IN_UINT;
case 2: return IN_USHORT;
case 1: return IN_UBYTE;
default: assert(0); return IN_UBYTE;
}
}
static u_translate_func translate[IN_COUNT][OUT_COUNT][PV_COUNT][PV_COUNT][PRIM_COUNT];
static u_generate_func generate[OUT_COUNT][PV_COUNT][PV_COUNT][PRIM_COUNT];
'''
def vert( intype, outtype, v0 ):
if intype == GENERATE:
return '(' + outtype + ')(' + v0 + ')'
else:
return '(' + outtype + ')in[' + v0 + ']'
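# e.g. (illustrative):
#   vert('ubyte',    'ushort', 'i') -> "(ushort)in[i]"
#   vert('generate', 'ushort', 'i') -> "(ushort)(i)"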
def point( intype, outtype, ptr, v0 ):
print ' (' + ptr + ')[0] = ' + vert( intype, outtype, v0 ) + ';'
def line( intype, outtype, ptr, v0, v1 ):
print ' (' + ptr + ')[0] = ' + vert( intype, outtype, v0 ) + ';'
print ' (' + ptr + ')[1] = ' + vert( intype, outtype, v1 ) + ';'
def tri( intype, outtype, ptr, v0, v1, v2 ):
print ' (' + ptr + ')[0] = ' + vert( intype, outtype, v0 ) + ';'
print ' (' + ptr + ')[1] = ' + vert( intype, outtype, v1 ) + ';'
print ' (' + ptr + ')[2] = ' + vert( intype, outtype, v2 ) + ';'
def do_point( intype, outtype, ptr, v0 ):
point( intype, outtype, ptr, v0 )
def do_line( intype, outtype, ptr, v0, v1, inpv, outpv ):
if inpv == outpv:
line( intype, outtype, ptr, v0, v1 )
else:
line( intype, outtype, ptr, v1, v0 )
def do_tri( intype, outtype, ptr, v0, v1, v2, inpv, outpv ):
if inpv == outpv:
tri( intype, outtype, ptr, v0, v1, v2 )
else:
if inpv == FIRST:
tri( intype, outtype, ptr, v1, v2, v0 )
else:
tri( intype, outtype, ptr, v2, v0, v1 )
def do_quad( intype, outtype, ptr, v0, v1, v2, v3, inpv, outpv ):
do_tri( intype, outtype, ptr+'+0', v0, v1, v3, inpv, outpv );
do_tri( intype, outtype, ptr+'+3', v1, v2, v3, inpv, outpv );
def name(intype, outtype, inpv, outpv, prim):
if intype == GENERATE:
return 'generate_' + prim + '_' + outtype + '_' + inpv + '2' + outpv
else:
return 'translate_' + prim + '_' + intype + '2' + outtype + '_' + inpv + '2' + outpv
def preamble(intype, outtype, inpv, outpv, prim):
print 'static void ' + name( intype, outtype, inpv, outpv, prim ) + '('
if intype != GENERATE:
print ' const void * _in,'
print ' unsigned start,'
print ' unsigned nr,'
print ' void *_out )'
print '{'
if intype != GENERATE:
print ' const ' + intype + '*in = (const ' + intype + '*)_in;'
print ' ' + outtype + ' *out = (' + outtype + '*)_out;'
print ' unsigned i, j;'
print ' (void)j;'
def postamble():
print '}'
def points(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='points')
print ' for (i = start; i < (nr+start); i++) { '
do_point( intype, outtype, 'out+i', 'i' );
print ' }'
postamble()
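# Illustrative C output of points('ubyte', 'ushort', 'first', 'first'),
# reconstructed by hand from the name/preamble/vert helpers above (the
# exact whitespace is approximate; this comment is explanatory only):
#
#   static void translate_points_ubyte2ushort_first2first(
#       const void * _in,
#       unsigned start,
#       unsigned nr,
#       void *_out )
#   {
#     const ubyte*in = (const ubyte*)_in;
#     ushort *out = (ushort*)_out;
#     unsigned i, j;
#     (void)j;
#     for (i = start; i < (nr+start); i++) {
#       (out+i)[0] = (ushort)in[i];
#     }
#   }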
def lines(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='lines')
print ' for (i = start; i < (nr+start); i+=2) { '
do_line( intype, outtype, 'out+i', 'i', 'i+1', inpv, outpv );
print ' }'
postamble()
def linestrip(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='linestrip')
print ' for (i = start, j = 0; j < nr; j+=2, i++) { '
do_line( intype, outtype, 'out+j', 'i', 'i+1', inpv, outpv );
print ' }'
postamble()
def lineloop(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='lineloop')
print ' for (i = start, j = 0; j < nr - 2; j+=2, i++) { '
do_line( intype, outtype, 'out+j', 'i', 'i+1', inpv, outpv );
print ' }'
do_line( intype, outtype, 'out+j', 'i', '0', inpv, outpv );
postamble()
def tris(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='tris')
print ' for (i = start; i < (nr+start); i+=3) { '
do_tri( intype, outtype, 'out+i', 'i', 'i+1', 'i+2', inpv, outpv );
print ' }'
postamble()
def tristrip(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='tristrip')
print ' for (i = start, j = 0; j < nr; j+=3, i++) { '
if inpv == FIRST:
do_tri( intype, outtype, 'out+j', 'i', 'i+1+(i&1)', 'i+2-(i&1)', inpv, outpv );
else:
do_tri( intype, outtype, 'out+j', 'i+(i&1)', 'i+1-(i&1)', 'i+2', inpv, outpv );
print ' }'
postamble()
def trifan(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='trifan')
print ' for (i = start, j = 0; j < nr; j+=3, i++) { '
do_tri( intype, outtype, 'out+j', '0', 'i+1', 'i+2', inpv, outpv );
print ' }'
postamble()
def polygon(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='polygon')
print ' for (i = start, j = 0; j < nr; j+=3, i++) { '
if inpv == FIRST:
do_tri( intype, outtype, 'out+j', '0', 'i+1', 'i+2', inpv, outpv );
else:
do_tri( intype, outtype, 'out+j', 'i+1', 'i+2', '0', inpv, outpv );
print ' }'
postamble()
def quads(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='quads')
print ' for (i = start, j = 0; j < nr; j+=6, i+=4) { '
do_quad( intype, outtype, 'out+j', 'i+0', 'i+1', 'i+2', 'i+3', inpv, outpv );
print ' }'
postamble()
def quadstrip(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='quadstrip')
    print ' for (i
Daksh/tuxmath | activity.py | Python | gpl-2.0 | 6,945 | 0.010369 | # -*- coding: UTF-8 -*-
# Copyright 2007-2008 One Laptop Per Child
# Copyright 2007 Gerard J. Cerchio <www.circlesoft.com>
# Copyright 2008 Andrés Ambrois <andresambrois@gmail.com>
# Copyright 2010 Marcos Orfila <www.marcosorfila.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import sugar.logger
import gtk
import pygtk
from gettext import gettext as _
from sugar.activity.activity import Activity, ActivityToolbox
import pango
import os
import commands
import sys
logger = logging.getLogger('Tuxmath')
class TuxmathStart(Activity):
def __init__(self, handle):
# Initialize the parent
Activity.__init__(self, handle)
logger.debug('Initiating Tuxmath')
# Set the activity toolbox
toolbox = ActivityToolbox(self)
self.set_toolbox(toolbox)
self.ceibaljam_icon_path = os.getenv("SUGAR_BUNDLE_PATH") + "/images/ceibaljam.png"
#
# There's a good explanation of the use of boxes in PyGTK here:
# http://www.pygtk.org/pygtk2tutorial/sec-DetailsOfBoxes.html
#
box_canvas = gtk.VBox(False, 0)
self.set_canvas(box_canvas)
# Title
box_title = gtk.VBox(False, 0)
label_title = gtk.Label(_("Tuxmath"))
label_title.set_justify(gtk.JUSTIFY_CENTER)
label_title.modify_font(pango.FontDescription("Arial 22"))
box_title.add(gtk.Label("\n\n\n"))
box_title.add(label_title)
box_title.add(gtk.Label("\n"))
# Author
box_author = gtk.VBox(False, 0)
box_author.add(gtk.Label(""))
box_author.add(gtk.Label(_("Created by Tux4kids")))
label_author_url = gtk.Label('<b>http://tux4kids.alioth.debian.org</b>')
label_author_url.set_use_markup(True)
box_author.add(label_author_url)
# Options box
box_options = gtk.VBox(False, 0)
label_options = gtk.Label(_("Options:"))
label_options.set_justify(gtk.JUSTIFY_LEFT)
self.checkbtn_sound = gtk.CheckButton(label=_("No sound"))
self.checkbtn_sound.set_active(True)
self.checkbtn_negatives = gtk.CheckButton(label=_("Include negative numbers"))
self.checkbtn_negatives.set_active(False)
# Pack the checkboxes in HBoxes to center them
hbox1 = gtk.HBox(False, 0)
hbox1.add(gtk.Label(""))
hbox1.add(gtk.Label(""))
hbox1.add(gtk.Label(""))
hbox1.add(gtk.Label(""))
hbox1.add(gtk.Label(""))
hbox1.add(gtk.Label(""))
hbox1.add(gtk.Label(""))
hbox1.add(self.checkbtn_sound)
hbox1.add(gtk.Label(""))
hbox1.add(gtk.Label(""))
hbox1.add(gtk.Label(""))
hbox1.add(gtk.Label(""))
hbox1.add(gtk.Label(""))
hbox1.add(gtk.Label(""))
box_options.add(hbox1)
#box_options.add(gtk.Label(""))
#box_options.add(label_options)
#box_options.add(self.checkbtn_sound)
#box_options.add(self.checkbtn_negatives)
# Credits
box_credits = gtk.VBox(False, 0)
box_credits.add(gtk.Label(""))
box_credits.add(gtk.Label(_('Spanish translation and pedagogical evaluation by %(TEACHER)s') % { 'TEACHER': 'Ana Cichero' }))
label_teacher_email= gtk.Label('<b>ana.cichero@gmail.com</b>')
label_teacher_email.set_use_markup(True)
box_credits.add(label_teacher_email)
box_credits.add(gtk.Label(_('Sugarized by %(SUGARIZER)s') % { 'SUGARIZER': 'Marcos Orfila' }))
label_sugarizer_website = gtk.Label('<b>http://www.marcosorfila.com</b>')
label_sugarizer_website.set_use_markup(True)
box_credits.add(label_sugarizer_website)
box_credits.add(gtk.Label(""))
# Footer box (Activities on CeibalJAM! website)
box_footer = gtk.VBox(False, 0)
box_footer.add(gtk.Label(""))
box_footer.add(gtk.Label(_('Find more activities on %(CEIBALJAM)s website:') % { 'CEIBALJAM': 'CeibalJAM!'}))
label_ceibaljam_website = gtk.Label('<b>http://activities.ceibaljam.org</b>')
label_ceibaljam_website.set_use_markup(True)
box_footer.add(label_ceibaljam_website)
box_footer.add(gtk.Label(""))
# CeibalJAM! image
box_ceibaljam_image = gtk.VBox(False, 0)
image_ceibaljam = gtk.Image()
image_ceibaljam.set_from_file(self.ceibaljam_icon_path)
box_ceibaljam_image.pack_end(image_ceibaljam, False, False, 0)
# Buttons box
box_buttons = gtk.HBox(False, 0)
self.button_play = gtk.Button(_("Play"))
self.button_play.connect("clicked", self._button_play_clicked_cb)
self.button_exit = gtk.Button(_("Exit"))
self.button_exit.connect("clicked", self._button_exit_clicked_cb)
box_buttons.add(gtk.VBox())
box_buttons.add(self.button_play)
box_buttons.add(gtk.VBox())
box_buttons.add(self.button_exit)
box_buttons.add(gtk.VBox())
# Get all the boxes together
box_canvas.pack_start(box_title, False, False, 0)
box_canvas.pack_start(box_options, False, False, 0)
box_canvas.pack_end(gtk.Label("\n\n"), False, False, 0)
box_canvas.pack_end(box_buttons, False, False, 0)
box_canvas.pack_end(gtk.Label("\n"), False, False, 0)
box_canvas.pack_end(box_footer, False, False, 0)
box_canvas.pack_end(box_ceibaljam_image, False, False, 0)
box_canvas.pack_end(box_credits, False, False, 0)
box_canvas.pack_end(box_author, False, False, 0)
self.button_play.grab_focus()
self.show_all()
def create_script(self, script_path):
"""Create the script to run the program"""
# In the future, some options to be included in the tuxmath script (like "--nosound")
# could be selected by the user.
script_text = "exec $SUGAR_BUNDLE_PA | TH/bin/tuxmath --homedir $TUX_HOMEDIR --fullscreen"
if (self.checkbtn_sound.get_active()):
script_text += " --nosound "
"""
if (self.checkbtn_negatives.get_active()):
script_text += " --allownegatives | "
"""
f = open(script_path, 'w')
f.write(script_text)
f.close()
os.chmod(script_path, 0755)
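    # Illustrative script body written by create_script() when the
    # "No sound" checkbox is left in its default checked state:
    #
    #   exec $SUGAR_BUNDLE_PATH/bin/tuxmath --homedir $TUX_HOMEDIR --fullscreen --nosound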
def _button_play_clicked_cb(self, widget):
self.create_script(os.getenv("TUXMATH_SCRIPT"))
sys.exit(0)
def _button_exit_clicked_cb(self, widget):
sys.exit(0)
|
lscube/feng | contrib/feng_destroyer.py | Python | lgpl-2.1 | 2,136 | 0.044944 | #!/usr/bin/python
import os, sys, signal, random
from datetime import datetime, timedelta
VLC_POOL_SIZE = 10 #number of vlc client to keep open
SPAWN_TIMER = 5 #number of seconds between vlc spawn
MAX_VLC_LIFE_TIME = 60 #max life time in seconds of a vlc client
VLC_COMMAND = '/usr/bin/vlc'
class Vlc(object):
def __init__(self, uri):
super(Vlc, self).__init__()
self.pid = None
self.uri = uri
self.spawn_time = None
def _close_all_open_fd(self):
for fd in xrange(0, os.sysconf('SC_OPEN_MAX')):
try:
os.close(fd)
except OSError:
pass
def run(self):
if self.pid:
return False
pid = os.fork()
if pid:
self.pid = pid
self.spawn_time = datetime.now()
return True
else:
self._close_all_open_fd()
os.execvp(VLC_COMMAND, ['vlc', self.uri])
return None
def stop(self):
if not self.pid:
return False
try:
os.kill(self.pid, signal.SIGTERM)
os.waitpid(self.pid, 0)
except Exception, e:
print 'Vlc wasn\'t here anymore', e
pass
return True
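# Minimal usage sketch for the wrapper above (the RTSP URL is illustrative):
#
#   vlc = Vlc('rtsp://example.org/stream')
#   if vlc.run():   # parent returns True; child execs the vlc binary
#       ...
#       vlc.stop()  # SIGTERM the child and reap it with waitpid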
def main(url):
random.seed()
last_spawn = datetime.now() - timedelta(0, SPAWN_TIMER)
vlc_pool = []
while True:
to_remove = []
now = datetime.now()
if (now - last_spawn >= timedelta(0, SPAWN_TIMER)) and (len(vlc_pool) < VLC_POOL_SIZE):
last_spawn = now
vlc = Vlc(url)
print 'Running a new vlc'
state = vlc.run()
if state:
vlc_pool.append(vlc)
elif state == None:
print 'Vlc Client exited by itself?'
return
else:
print 'Failed to start Vlc'
for vlc in vlc_pool:
if now - vlc.spawn_time >= timedelta(0, MAX_VLC_LIFE_TIME):
if random.random() >= 0.5:
print 'Stopping an old vlc started at', vlc.spawn_time
vlc.stop()
                    to_remove.append(vlc)
if len(to_remove) and random.random() > 0.95:
for vlc in vlc_pool:
if not vlc in to_remove:
print 'Stopping multiple vlcs', vlc.spawn_time
vlc.stop()
to_remove.append(vlc)
for vlc in to_remove:
vlc_pool.remove(vlc)
if __name__ == '__main__':
if len(sys.argv) != 2:
print '%s requires an rtsp url to request' % sys.argv[0]
else:
main(sys.argv[1])
|
gleitz/automaticdj | pyechonest/playlist.py | Python | mit | 20,190 | 0.008618 | #!/usr/bin/env python
# encoding: utf-8
"""
Copyright (c) 2010 The Echo Nest. All rights reserved.
Created by Tyler Williams on 2010-04-25.
The Playlist module loosely covers http://developer.echonest.com/docs/v4/playlist.html
Refer to the official api documentation if you are unsure about something.
"""
import util
from proxies import PlaylistProxy
from song import Song
import catalog
class Playlist(PlaylistProxy):
"""
A Dynamic Playlist object
Attributes:
session_id: Playlist Session ID
song: The current song
Example:
>>> p = Playlist(type='artist-radio', artist=['ida maria', 'florence + the machine'])
>>> p
<Dynamic Playlist - 9c210205d4784144b4fa90770fa55d0b>
>>> p.song
<song - Later On>
>>> p.get_next_song()
<song - Overall>
>>>
"""
def __init__(self, session_id=None, type='artist', artist_pick='song_hotttnesss-desc', variety=.5, artist_id=None, artist=None, \
song_id=None, description=None, max_tempo=None, min_tempo=None, max_duration=None, \
min_duration=None, max_loudness=None, min_loudness=None, max_danceability=None, min_danceability=None, \
                 max_energy=None, min_energy=None, artist_max_familiarity=None, artist_min_familiarity=None, \
artist_max_hotttnesss=None, artist_min_hotttnesss=None, song_max_hotttnesss=None, song_min_hotttnesss=None, \
                 min_longitude=None, max_longitude=None, min_latitude=None, max_latitude=None, \
mode=None, key=None, buckets=[], sort=None, limit=False, dmca=False, audio=False, chain_xspf=False, \
seed_catalog=None, steer=None, source_catalog=None, steer_description=None):
"""
Args:
Kwargs:
type (str): a string representing the playlist type ('artist', 'artist-radio', ...)
artist_pick (str): How songs should be chosen for each artist
variety (float): A number between 0 and 1 specifying the variety of the playlist
artist_id (str): the artist_id
artist (str): the name of an artist
song_id (str): the song_id
description (str): A string describing the artist and song
results (int): An integer number of results to return
max_tempo (float): The max tempo of song results
min_tempo (float): The min tempo of song results
max_duration (float): The max duration of song results
min_duration (float): The min duration of song results
max_loudness (float): The max loudness of song results
min_loudness (float): The min loudness of song results
artist_max_familiarity (float): A float specifying the max familiarity of artists to search for
artist_min_familiarity (float): A float specifying the min familiarity of artists to search for
artist_max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
artist_min_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
song_max_hotttnesss (float): A float specifying the max hotttnesss of songs to search for
song_min_hotttnesss (float): A float specifying the max hotttnesss of songs to search for
max_energy (float): The max energy of song results
min_energy (float): The min energy of song results
            max_danceability (float): The max danceability of song results
            min_danceability (float): The min danceability of song results
mode (int): 0 or 1 (minor or major)
key (int): 0-11 (c, c-sharp, d, e-flat, e, f, f-sharp, g, a-flat, a, b-flat, b)
max_latitude (float): A float specifying the max latitude of artists to search for
min_latitude (float): A float specifying the min latitude of artists to search for
max_longitude (float): A float specifying the max longitude of artists to search for
min_longitude (float): A float specifying the min longitude of artists to search for
sort (str): A string indicating an attribute and order for sorting the results
buckets (list): A list of strings specifying which buckets to retrieve
limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets
seed_catalog (str or Catalog): A Catalog object or catalog id to use as a seed
source_catalog (str or Catalog): A Catalog object or catalog id
steer (str): A steering value to determine the target song attributes
steer_description (str): A steering value to determine the target song description term attributes
Returns:
A dynamic playlist object
"""
kwargs = {}
if type:
kwargs['type'] = type
if artist_pick:
kwargs['artist_pick'] = artist_pick
if variety is not None:
kwargs['variety'] = variety
if artist:
kwargs['artist'] = artist
if artist_id:
kwargs['artist_id'] = artist_id
if song_id:
kwargs['song_id'] = song_id
if description:
kwargs['description'] = description
if max_tempo is not None:
kwargs['max_tempo'] = max_tempo
if min_tempo is not None:
kwargs['min_tempo'] = min_tempo
if max_duration is not None:
kwargs['max_duration'] = max_duration
if min_duration is not None:
kwargs['min_duration'] = min_duration
if max_loudness is not None:
kwargs['max_loudness'] = max_loudness
if min_loudness is not None:
kwargs['min_loudness'] = min_loudness
if max_danceability is not None:
kwargs['max_danceability'] = max_danceability
if min_danceability is not None:
kwargs['min_danceability'] = min_danceability
if max_energy is not None:
kwargs['max_energy'] = max_energy
if min_energy is not None:
kwargs['min_energy'] = min_energy
if artist_max_familiarity is not None:
kwargs['artist_max_familiarity'] = artist_max_familiarity
if artist_min_familiarity is not None:
kwargs['artist_min_familiarity'] = artist_min_familiarity
if artist_max_hotttnesss is not None:
kwargs['artist_max_hotttnesss'] = artist_max_hotttnesss
if artist_min_hotttnesss is not None:
kwargs['artist_min_hotttnesss'] = artist_min_hotttnesss
if song_max_hotttnesss is not None:
kwargs['song_max_hotttnesss'] = song_max_hotttnesss
if song_min_hotttnesss is not None:
kwargs['song_min_hotttnesss'] = song_min_hotttnesss
if mode is not None:
kwargs['mode'] = mode
if key is not None:
kwargs['key'] = key
if max_latitude is not None:
kwargs['max_latitude'] = max_latitude
if min_latitude is not None:
kwargs['min_latitude'] = min_latitude
if max_longitude is not None:
kwargs['max_longitude'] = max_longitude
if min_longitude is not None:
kwargs['min_longitude'] = min_longitude
if sort:
kwargs['sort'] = sort
if buckets:
kwargs['bucket'] = buckets
if limit:
kwargs['limit'] = 'true'
if dmca:
kwargs['dmca'] = 'true'
if chain_xspf:
kwargs['chain_xspf'] = 'true'
if audio:
kwargs['audio'] = 'true'
if steer:
kwargs['steer'] = steer
if steer_description:
kwargs['steer_description'] = steer_description
if seed_catalog:
if isinstance(seed_catalog, catalog.Catalog):
kwargs['seed_catalog'] = s |
Partysun/corona_lua.snippets | converter/convert.py | Python | mit | 1,847 | 0.016243 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, getopt
import codecs
import json
def load_data(filename):
"""Load Atom Snippets data from json
:filename: Atom Snippets json file
:returns: json atom data
"""
snippets_file = open(filename)
# data = snippets_file.read().decode('utf-8')
atom_snippets_fixture = json.load(snippets_file)
return atom_snippets_fixture
def convert(atom_snippet):
"""Convert Atom snippet to UltiSnippet
:data: Atom snippet
:returns: UtilSnippet
"""
    snippet = ('snippet %(trigger)s "%(description)s"\n'
% {
'trigger': (atom_snippet['displayText']).replace(" ", ""),
                   'description': (atom_snippet['description']).replace("\"", "\'")
} +
'%s\n' % atom_snippet['snippet'] +
'endsnippet')
return snippet
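# Illustrative round trip through convert() (the snippet is a made-up
# example, not taken from a real Atom snippets file):
#
#   {"displayText": "pr", "description": "print a message",
#    "snippet": "print(${1:msg})$0"}
#
# produces:
#
#   snippet pr "print a message"
#   print(${1:msg})$0
#   endsnippet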
def convert_snippets(atom_snippets_file, new_filename):
"""Convert atom snippets
from file to UltiSnippets
:atom_snippets_file: a file path of atom snippets
:new_filename: new UltiSnippets file
"""
atom_snippets = load_data(atom_snippets_file)
result_snippets = ''
for snippet in atom_snippets:
result_snippets += convert(snippet) + '\n\n'
result_file = codecs.open(new_filename, "w", encoding='utf8')
result_file.write(result_snippets)
result_file.close()
def main(argv):
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
convert_snippets(inputfile, outputfile)
if __name__ == "__main__":
main(sys.argv[1:])
|
DanielNeugebauer/adhocracy | src/adhocracy/model/instance_filter.py | Python | agpl-3.0 | 531 | 0 | from beaker.util import T | hreadLocal
thread_instance = ThreadLocal()
def setup_thread(instance):
global thread_instance
thread_instance.put(instance)
def teardown_thread():
'''
    A counterpart for setup_thread(), probably only
    useful in test code
'''
global thread_instance
try:
thread_instance.remove()
except AttributeError:
# no value saved
pass
def has_instance():
return thread_instance.get() is not None
def get_instance():
return thread_instance.get()
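# Minimal usage sketch (`instance` stands for any object to bind to the
# current thread):
#
#   setup_thread(instance)
#   assert has_instance() and get_instance() is instance
#   teardown_thread()   # afterwards get_instance() returns None again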
|
openpolis/open_municipio | open_municipio/people/admin.py | Python | agpl-3.0 | 10,689 | 0.005613 | from django.contrib import | admin
from django.utils.translation import ugettext_lazy as _
from django.utils.datetime_safe import date
from django.db.models import Q
from open_municipio.people.models import *
from open_municipio.votations.admin import VotationsInline
from open_municipio.acts.models import Speech
from open_municipio.widgets import SortWidget
from sorl.thumbnail.admin import AdminImageMixin
from django.contrib.admin.util import unquote
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.functional import update_wrapper
from django.utils.html import strip_spaces_between_tags as short
from django.forms import ModelForm
from open_municipio.people.forms import SittingItemFormSet, SpeechInlineForm
from open_municipio.om.admin import LinkedTabularInline
from .filters import FilterActiveCharge
class PersonResourceInline(admin.TabularInline):
model = PersonResource
extra = 0
class PersonAdminWithResources(AdminImageMixin, admin.ModelAdmin):
list_display = ('id', '__unicode__', 'has_current_charges', 'birth_date', 'birth_location' )
list_display_links = ('__unicode__',)
search_fields = ['^first_name', '^last_name']
prepopulated_fields = {"slug": ("first_name","last_name","birth_date", "birth_location",)}
inlines = [PersonResourceInline, ]
class GroupResourceInline(admin.TabularInline):
model = GroupResource
extra = 0
class GroupChargeInline(admin.TabularInline):
model = GroupCharge
raw_id_fields = ('charge', )
extra = 1
class GroupIsMajorityInline(admin.TabularInline):
model = GroupIsMajority
extra = 0
class InCouncilNow(admin.SimpleListFilter):
title = _("In council now")
parameter_name = "in_council_now"
def lookups(self, request, model_admin):
return(
('1', _('Yes')),
('0', _('No')),
)
def queryset(self, request, queryset):
val = self.value()
today = date.today()
# note: groups with no related item (groupismajority) will be considered
# as not in council
if val == '1':
queryset = queryset.exclude(groupismajority__isnull=True).filter(Q(groupismajority__end_date__gt=today) | Q(groupismajority__end_date__isnull=True))
elif val == '0':
# the check for groups NOT in majority is more complex because
# we have to check that ALL related objects (groupismajority)
# have an end_date previous the current date
groups_in_council = Group.objects.exclude(groupismajority__isnull=True).filter(Q(groupismajority__end_date__gt=today) | Q(groupismajority__end_date__isnull=True))
queryset = queryset.exclude(pk__in=groups_in_council)
return queryset
class GroupAdminWithCharges(AdminImageMixin, admin.ModelAdmin):
prepopulated_fields = {"slug": ("name","start_date")}
list_display = ('name', 'acronym', 'is_majority_now', 'start_date', 'end_date', 'in_council_now')
inlines = [GroupResourceInline, GroupIsMajorityInline, GroupChargeInline]
search_fields = [ 'name', 'acronym', 'slug', 'charge_set__person__first_name', 'charge_set__person__last_name', ]
list_filter = [ InCouncilNow, 'groupismajority__is_majority' ]
ordering = [ 'name', 'acronym' ]
def is_majority_now(self, obj):
return obj.is_majority_now
is_majority_now.short_description = _("Is majority now")
def in_council_now(self, obj):
return obj.in_council_now
in_council_now.short_description = _("In council now")
class ChargeInline(admin.StackedInline):
raw_id_fields = ('person', )
fieldsets = (
(None, {
'fields': (('person', 'start_date', 'end_date'), )
}),
(_('Advanced options'), {
'classes': ('collapse',),
'fields': ('description', 'end_reason')
})
)
extra = 1
class CompanyChargeInline(ChargeInline):
model = CompanyCharge
class AdministrationChargeInline(ChargeInline):
model = AdministrationCharge
class InstitutionResourceInline(admin.TabularInline):
model = InstitutionResource
extra = 0
class InstitutionChargeInline(ChargeInline):
model = InstitutionCharge
raw_id_fields = ('person', 'substitutes', 'substituted_by')
fieldsets = (
(None, {
'fields': (('person', 'op_charge_id', 'start_date', 'end_date'), )
}),
(_('Advanced options'), {
'classes': ('collapse',),
'fields': ('description', 'end_reason', ('substitutes', 'substituted_by'))
})
)
class ResponsabilityInline(admin.TabularInline):
raw_id_fields = ('charge',)
extra = 0
class InstitutionResponsabilityInline(ResponsabilityInline):
model = InstitutionResponsability
fields = ('charge', 'charge_type', 'start_date', 'end_date', 'description')
class GroupResponsabilityInline(admin.TabularInline):
model = GroupResponsability
raw_id_fields = ('charge',)
extra = 0
fields = ('charge', 'charge_type', 'start_date', 'end_date', 'description')
class ChargeAdmin(admin.ModelAdmin):
pass
class CompanyChargeAdmin(ChargeAdmin):
model = CompanyCharge
raw_id_fields = ('person', 'company')
fieldsets = (
(None, {
'fields': (('person', 'company'),
('start_date', 'end_date', 'end_reason'),
'description')
}),
)
class AdministrationChargeAdmin(ChargeAdmin):
model = AdministrationCharge
raw_id_fields = ('person', 'office')
fieldsets = (
(None, {
'fields': (('person', 'office','charge_type',),
('start_date', 'end_date', 'end_reason'),
'description')
}),
)
class InstitutionChargeAdmin(ChargeAdmin):
model = InstitutionCharge
raw_id_fields = ('person', 'substitutes', 'substituted_by', 'original_charge')
search_fields = ['^person__first_name', '^person__last_name']
fieldsets = (
(None, {
'fields': (('person', 'op_charge_id', 'institution', 'original_charge'),
('start_date', 'end_date', 'end_reason'),
'description',
('substitutes', 'substituted_by'),
'can_vote')
}),
(_("Presences"), {
'fields': (('n_present_votations', 'n_absent_votations'), ('n_present_attendances', 'n_absent_attendances'))
}),
)
list_display = ('__unicode__', 'institution', 'start_date', 'end_date')
list_select_related = True
list_filter = ['institution__name', FilterActiveCharge, ]
inlines = [InstitutionResponsabilityInline]
class GroupChargeAdmin(admin.ModelAdmin):
raw_id_fields = ('charge', )
list_display = ('__unicode__', 'start_date', 'end_date')
list_select_related = True
list_filter = ['group']
inlines = [GroupResponsabilityInline]
search_fields = [ 'charge__person__first_name', 'charge__person__last_name', 'group__name', 'group__acronym', ]
class BodyAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
class CompanyAdmin(BodyAdmin):
inlines = [CompanyChargeInline]
class OfficeAdmin(BodyAdmin):
inlines = [AdministrationChargeInline]
class InstitutionAdmin(BodyAdmin):
list_filter = ("institution_type", )
def get_urls(self):
from django.conf.urls.defaults import patterns, url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.module_name
return patterns('',
url(r'^(.+)/move-(up)/$',
wrap(self.move_view),
name='%s_%s_move_up' % info),
url(r'^(.+)/move-(down)/$',
wrap(self.move_view),
name='%s_%s_move_down' % info),
) + super(InstitutionAdmin, self).get_urls()
def move_view( |
rene-dev/stmbl | tools/create_config.py | Python | gpl-3.0 | 784 | 0.002551 | #!/usr/bin/env python
import re
import sys
import os
config = []
for infile in sys.argv[2:]:
with open(infile) as f:
config.append((os.path.splitext(os.path.basename(infile))[0], f.read()))
code = open(sys.argv[1], 'w')
code.write("//generated by " + sys.argv[0] + " DO NOT EDIT\n\n")
code.write("#include \"config.h\"\n\n")
code.write("const uint32_t num_of_config_templates = " + str(len(config)) + ";\n\n")
code.write("config_template_t config_templates[] = {\n")
for index, (file_name, content) in enumerate(config):
code.write("{\n")
code | .write(".name = \"" + file_name + "\",\n")
code.write(".config = \"\\\n")
for line in content.splitli | nes():
code.write(line + "\\n\\\n")
code.write("\"\n},\n\n")
code.write("};\n")
code.close()
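# Illustrative output for
#   ./create_config.py config.c foo.txt
# where foo.txt contains the single line "bar" (file names are made up):
#
#   //generated by ./create_config.py DO NOT EDIT
#
#   #include "config.h"
#
#   const uint32_t num_of_config_templates = 1;
#
#   config_template_t config_templates[] = {
#   {
#   .name = "foo",
#   .config = "\
#   bar\n\
#   "
#   },
#
#   };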
|
danjac/ownblock | ownblock/ownblock/apps/buildings/permissions.py | Python | mit | 284 | 0 | from rest_framework.permissions import BasePermission
class IsBuilding(BasePermission):
"""Checks if a current building (preselected by middleware)
has been assigned for this user"""
def has_permission(self, request, view):
        return request.building is not None
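# Typical DRF wiring sketch (the viewset name is hypothetical); the
# middleware mentioned in the docstring is what sets request.building:
#
#   class AmenityViewSet(viewsets.ModelViewSet):
#       permission_classes = (IsBuilding,)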
|
MAndelkovic/pybinding | tests/utils/fuzzy_equal.py | Python | bsd-2-clause | 5,216 | 0.001725 | import math
from functools import singledispatch, update_wrapper
import numpy as np
from scipy.sparse import csr_matrix, coo_matrix
import pybinding as pb
def _assertdispatch(func):
"""Adapted `@singledispatch` for custom assertions
* Works with methods instead of functions
* Keeps track of the data structure via context stack
* Detects objects which can be used with `pb.save` and `pb.load`
"""
dispatcher = singledispatch(func)
def wrapper(self, actual, expected, context=None):
if context is not None:
self.stack.append(context)
is_pb_savable = any(hasattr(actual, s) for s in ['__getstate__', '__getinitargs__'])
kind = pb.save if is_pb_savable else actual.__class__
dispatcher.dispatch(kind)(self, actual, expected)
if context is not None and self.stack:
self.stack.pop()
wrapper.register = dispatcher.register
update_wrapper(wrapper, func)
return wrapper
def _assert_fuzzy_equal(actual, expected, rtol, atol):
"""Explains failed fuzzy_equal asserts
For example:
actual = array([3, 1, 7, 2, 9])
expected = array([3, 5, 7, 4, 6])
> assert pytest.fuzzy_equal(actual, expected)
E AssertionError:
E
    E Failed on 3 of 5 values: 60%
E actual: [1, 2, 9]
E expected: [5, 4, 6]
E indices: [1, 3, 4]
"""
    actual, expected = map(np.asanyarray, (actual, expected))
if actual.shape != expected.shape:
raise AssertionError("\n".join([
"\nFailed on shape mismatch",
"actual: {}".format(actual.shape),
"expected: {}".format(expected.shape),
]))
isclose = np.isclose(actual, expected, rtol, atol)
if np.all(isclose):
return
notclose = np.logical_not(isclose)
num_failed = np.sum(notclose)
if len(notclose) > 10:
a = actual[notclose]
b = expected[notclose]
else:
a, b = actual, expected
raise AssertionError("\n".join([
"\nFailed on {} of {} values: {:.0%}".format(num_failed, actual.size,
num_failed / actual.size),
" actual: {}".format(a),
" expected: {}".format(b),
" indices: {}".format([idx[0] if idx.size == 1 else list(idx)
for idx in np.argwhere(notclose)]),
" abs diff: {}".format(abs(a - b)),
" rel diff: {}".format(abs(a - b) / abs(b)),
]))
class FuzzyEqual:
"""Recursively compares structures of ndarrays using np.isclose() comparison
The `stack` attribute shows the structure depth at a given assert.
"""
def __init__(self, actual, expected, rtol=1e-05, atol=1e-08):
self.actual = actual
self.expected = expected
self.rtol = rtol
self.atol = atol
self.decimal = -math.frexp(rtol)[1]
self.stack = []
def __bool__(self):
# noinspection PyUnusedLocal
__tracebackhide__ = True # hide traceback for pytest
try:
self._assert(self.actual, self.expected)
except AssertionError as e:
msg = str(e)
else:
return True
if self.stack:
msg = ''.join(self.stack) + "\n" + msg
raise AssertionError(msg.strip())
def __repr__(self):
return ''.join(self.stack)
@_assertdispatch
def _assert(self, actual, expected):
if hasattr(actual, "__dict__"):
return self._assert(actual.__dict__, expected.__dict__)
else:
try:
return np.testing.assert_almost_equal(actual, expected, self.decimal)
except TypeError:
pass
return np.testing.assert_equal(actual, expected)
@_assert.register(csr_matrix)
def _(self, actual, expected):
for s in ['shape', 'data', 'indices', 'indptr']:
self._assert(getattr(actual, s), getattr(expected, s), context=".{}".format(s))
@_assert.register(coo_matrix)
def _(self, actual, expected):
for s in ['shape', 'data', 'row', 'col']:
self._assert(getattr(actual, s), getattr(expected, s), context=".{}".format(s))
@_assert.register(tuple)
@_assert.register(list)
@_assert.register(np.ndarray)
def _(self, actual, expected):
try:
return _assert_fuzzy_equal(actual, expected, rtol=self.rtol, atol=self.atol)
except TypeError:
pass
# Fallback for non-numeric lists and tuples
assert len(actual) == len(expected)
for index, (a, b) in enumerate(zip(actual, expected)):
self._assert(a, b, context="[{}]".format(index))
@_assert.register(dict)
def _(self, actual, expected):
assert sorted(actual) == sorted(expected)
for key in actual:
self._assert(actual[key], expected[key], context="['{}']".format(key))
@_assert.register(pb.save)
def _(self, actual, expected):
specials = [s for s in ['__getstate__', '__getinitargs__'] if hasattr(actual, s)]
for s in specials:
self._assert(getattr(actual, s)(), getattr(expected, s)(), context="{}()".format(s))
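# Minimal usage sketch (values are illustrative); FuzzyEqual raises an
# AssertionError with a diff on failure, so it can sit inside an assert:
#
#   assert FuzzyEqual([1.0, 2.0], [1.0, 2.0 + 1e-9])
#   assert FuzzyEqual({'a': np.eye(2)}, {'a': np.eye(2)}, rtol=1e-6)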
|
joachimmetz/plaso | tests/preprocessors/test_lib.py | Python | apache-2.0 | 5,825 | 0.002232 | # -*- coding: utf-8 -*-
"""Preprocessing related functions and classes for testing."""
from artifacts import reader as artifacts_reader
from artifacts import registry as artifacts_registry
from dfvfs.helpers import fake_file_system_builder
from dfvfs.helpers import file_system_searcher
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from dfwinreg import registry as dfwinreg_registry
from dfwinreg import registry_searcher
from plaso.containers import artifacts
from plaso.containers import sessions
from plaso.engine import knowledge_base
from plaso.preprocessors import manager
from plaso.preprocessors import mediator
from plaso.storage.fake import writer as fake_writer
from tests import test_lib as shared_test_lib
class ArtifactPreprocessorPluginTestCase(shared_test_lib.BaseTestCase):
"""Artifact preprocessor plugin test case."""
@classmethod
def setUpClass(cls):
"""Makes preparations before running any of the tests."""
artifacts_path = shared_test_lib.GetTestFilePath(['artifacts'])
cls._artifacts_registry = artifacts_registry.ArtifactDefinitionsRegistry()
reader = artifacts_reader.YamlArtifactsReader()
cls._artifacts_registry.ReadFromDirectory(reader, artifacts_path)
def _CreateTestStorageWriter(self):
"""Creates a storage writer for testing purposes.
Returns:
StorageWriter: storage writer.
"""
storage_writer = fake_writer.FakeStorageWriter()
storage_writer.Open()
return storage_writer
def _RunPreprocessorPluginOnFileSystem(
self, file_system, mount_point, storage_writer, plugin):
"""Runs a preprocessor plugin on a file system.
Args:
file_system (dfvfs.FileSystem): file system to be preprocessed.
mount_point (dfvfs.PathSpec): mount point path specification that refers
to the base location of the file system.
storage_writer (StorageWriter): storage writer.
plugin (ArtifactPreprocessorPlugin): preprocessor plugin.
Return:
PreprocessMediator: preprocess mediator.
"""
artifact_definition = self._artifacts_registry.GetDefinitionByName(
plugin.ARTIFACT_DEFINITION_NAME)
self.assertIsNotNone(artifact_definition)
session = sessions.Session()
test_knowledge_base = knowledge_base.KnowledgeBase()
test_mediator = mediator.PreprocessMediator(
session, storage_writer, test_knowledge_base)
searcher = file_system_searcher.FileSystemSearcher(file_system, mount_point)
plugin.Collect(test_mediator, artifact_definition, searcher, file_system)
return test_mediator
def _RunPreprocessorPluginOnWindowsRegistryValue(
self, file_system, mount_point, storage_writer, plugin):
"""Runs a preprocessor plugin on a Windows Registry value.
Args:
file_system (dfvfs.FileSystem): file system to be preprocessed.
mount_point (dfvfs.PathSpec): mount point path specification that refers
to the base location of the file system.
storage_writer (StorageWriter): storage writer.
plugin (ArtifactPreprocessorPlugin): preprocessor plugin.
Return:
PreprocessMediator: preprocess mediator.
"""
artifact_definition = self._artifacts_registry.GetDefinitionByName(
plugin.ARTIFACT_DEFINITION_NAME)
self.assertIsNotNone(artifact_definition)
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
registry_file_reader = manager.FileSystemWinRegistryFileReader(
file_system, mount_point, environment_variables=[environment_variable])
win_registry = dfwinreg_registry.WinRegistry(
registry_file_reader=registry_file_reader)
session = sessions.Session()
test_knowledge_base = knowledge_base.KnowledgeBase()
    test_mediator = mediator.PreprocessMediator(
session, storage_writer, test_knowledge_base)
searcher = registry_searcher.WinRegistrySearcher(win_registry)
plugin.Collect(test_mediator, artifact_definition, searcher)
return test_mediator
def _RunPreprocessorPluginOnWindowsRegistryValueSoftware(
self, storage_writer, plugin):
"""Runs a preprocessor plugin on a Windows Registry value in SOFTWARE.
Args:
      storage_writer (StorageWriter): storage writer.
plugin (ArtifactPreprocessorPlugin): preprocessor plugin.
Return:
PreprocessMediator: preprocess mediator.
"""
file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()
test_file_path = self._GetTestFilePath(['SOFTWARE'])
file_system_builder.AddFileReadData(
'/Windows/System32/config/SOFTWARE', test_file_path)
mount_point = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_FAKE, location='/')
return self._RunPreprocessorPluginOnWindowsRegistryValue(
file_system_builder.file_system, mount_point, storage_writer, plugin)
def _RunPreprocessorPluginOnWindowsRegistryValueSystem(
self, storage_writer, plugin):
"""Runs a preprocessor plugin on a Windows Registry value in SYSTEM.
Args:
storage_writer (StorageWriter): storage writer.
plugin (ArtifactPreprocessorPlugin): preprocessor plugin.
Return:
PreprocessMediator: preprocess mediator.
"""
file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()
test_file_path = self._GetTestFilePath(['SYSTEM'])
file_system_builder.AddFileReadData(
'/Windows/System32/config/SYSTEM', test_file_path)
mount_point = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_FAKE, location='/')
return self._RunPreprocessorPluginOnWindowsRegistryValue(
file_system_builder.file_system, mount_point, storage_writer, plugin)
|
adamfeuer/ArtOfGratitude_app | gratitude/migrations/0005_auto__add_action.py | Python | mit | 7,091 | 0.00832 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Action'
db.create_table('gratitude_action', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('gratitude', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gratitude.Gratitude'], null=True)),
('action', self.gf('django.db.models.fields.CharField')(max_length=500)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
db.send_create_signal('gratitude', ['Action'])
def backwards(self, orm):
# Deleting model 'Action'
db.delete_table('gratitude_action')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'gratitude.action': {
'Meta': {'object_name': 'Action'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'gratitude': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gratitude.Gratitude']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'gratitude.gratitude': {
'Meta': {'object_name': 'Gratitude'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'stash_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'stashed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '5000'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'gratitude.setting': {
'Meta': {'object_name': 'Setting'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'gratitude.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'no_messages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['gratitude'] |
openearth/delft3d-gt-server | delft3dworker/management/commands/containersync_sceneupdate.py | Python | gpl-3.0 | 4,813 | 0.000208 | import logging
from time import sleep
from celery.result import AsyncResult
from django.core.management import BaseCommand
from delft3dcontainermanager.tasks import do_docker_remove, get_docker_ps
from delft3dworker.models import Container, Scene
"""
Synchronization command that's called periodically.
- Run docker ps (celery task)
- Loop over container models and compare with the output of docker ps
- Missing container model (orphan) -> Error, stop container
- For the other container run container.update_state(docker_ps)
- Finally loop over the scene models and call update_state()
"""
class Command(BaseCommand):
help = "sync containers with container and scene model"
def handle(self, *args, **options):
# STEP I : Loop over non empty celery_task_ids in containers
# Sets task_uuid to None except for when a task is queued
        # Queued for log, no start? Use expire.
self._update_container_tasks()
# STEP II : Get latest container statuses
if self._get_latest_docker_status():
# STEP III : Update Scenes and their Phases
# Controls container desired states
self._update_scene_phases()
# STEP IV : Synchronise Container Models with docker containers
self._fix_container_state_mismatches_or_log()
def _update_container_tasks(self):
"""
Update Containers with results from finished tasks.
"""
        celery_set = set(Container.objects.exclude(task_uuid__exact=None))
for container in celery_set:
container.update_task_result()
def _get_latest_docker_status(self):
"""
Synchronise local Django Container models with remote Docker containers
"""
ps = get_docker_ps.apply_async(queue="priority")
# Wait until the task finished successfully
        # or return if waiting too long
checked = 0
while not ps.successful():
sleep(1)
# if things take too long, revoke the task and return
checked += 1
if checked >= 30:
ps.revoke()
return False
# task is succesful, so we're getting the result and create a set
containers_docker = ps.result
docker_dict = {x["Id"]: x for x in containers_docker}
docker_set = set(docker_dict.keys())
# retrieve container from database
container_set = set(
Container.objects.exclude(docker_id__exact="").values_list(
"docker_id", flat=True
)
)
        # Work out matching matrix:
        #
        #   docker      yes    no
        #   model x
        #     yes       1_1    1_0
        #     no        0_1    0_0
        #
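        # Illustrative example with hypothetical ids: container_set =
        # {'a', 'b'} and docker_set = {'b', 'c'} gives m_1_1 = {'b'}
        # (updated from its docker snapshot), m_1_0 = {'a'} (updated with
        # snapshot=None) and m_0_1 = {'c'} (checked as a possible orphan
        # below); m_0_0 is always empty, since removing the symmetric
        # difference and the intersection from the union leaves nothing.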
m_1_1 = container_set & docker_set
m_1_0 = container_set - docker_set
m_0_1 = docker_set - container_set
m_0_0 = (
(docker_set | container_set)
- (docker_set ^ container_set)
- (docker_set & container_set)
)
# Update state of all matching containers
container_match = m_1_1 | m_1_0
for con_id in container_match:
snapshot = docker_dict[con_id] if con_id in docker_dict else None
for c in Container.objects.filter(docker_id=con_id):
c.update_from_docker_snapshot(snapshot)
# Call error for mismatch
container_mismatch = m_0_1 | m_0_0
for container in container_mismatch:
info = docker_dict[container]
if (
"Config" in info
and "Labels" in info["Config"]
and "type" in info["Config"]["Labels"]
):
type = info["Config"]["Labels"]["type"]
choices = Container.CONTAINER_TYPE_CHOICES
if type in [choice[0] for choice in choices]:
msg = "Docker container {} not found in database!".format(container)
self.stderr.write(msg)
do_docker_remove.delay(container, force=True)
else:
logging.info("Found non-delft3dgt docker container, ignoring.")
return True # successful
def _update_scene_phases(self):
"""
Update Scenes with latest status of their Containers, and possibly
shift Scene phase
"""
# ordering is done on start date (first, and id second):
# if a simulation slot is available, we want simulations to start
# in order of their date_started
for scene in Scene.objects.all().order_by("date_started", "id"):
scene.update_and_phase_shift()
def _fix_container_state_mismatches_or_log(self):
for container in Container.objects.all():
container.fix_mismatch_or_log()
|
lampwins/netbox | netbox/virtualization/api/views.py | Python | apache-2.0 | 3,151 | 0.001587 | from django.db.models import Count
from dcim.models import Device, Interface
from extras.api.views import CustomFieldModelViewSet
from utilities.api import FieldChoicesViewSet, ModelViewSet
from utilities.utils import get_subquery
from virtualization import filters
from virtualization.models import Cluster, ClusterGroup, ClusterType, VirtualMachine
from . import serializers
#
# Field choices
#
class VirtualizationFieldChoicesViewSet(FieldChoicesViewSet):
fields = (
(VirtualMachine, ['status']),
)
#
# Clusters
#
class ClusterTypeViewSet(ModelViewSet):
queryset = ClusterType.objects.annotate(
cluster_count=Count('clusters')
)
serializer_class = serializers.ClusterTypeSerializer
filterset_class = filters.ClusterTypeFilter
class ClusterGroupViewSet(ModelViewSet):
queryset = ClusterGroup.objects.annotate(
cluster_count=Count('clusters')
)
serializer_class = serializers.ClusterGroupSerializer
filterset_class = filters.ClusterGroupFilter
class ClusterViewSet(CustomFieldModelViewSet):
queryset = Cluster.objects.select_related(
'type', 'group', 'site',
).prefetch_related(
'tags'
).annotate(
device_count=get_subquery(Device, 'cluster'),
virtualmachine_count=get_subquery(VirtualMachine, 'cluster')
)
serializer_class = serializers.ClusterSerializer
filterset_class = filters.ClusterFilter
#
# Virtual machines
#
class VirtualMachineViewSet(CustomFieldModelViewSet):
queryset = VirtualMachine.objects.select_related(
'cluster__site', 'role', 'tenant', 'platform', 'primary_ip4', 'primary_ip6'
).prefetch_related('tags')
filterset_class = filters.VirtualMachineFilter
def get_serializer_class(self):
"""
Select the specific serializer based on the request context.
        If the `brief` query param equates to True, return the NestedVirtualMachineSerializer
If the `exclude` query param includes `config_context` as a value, return the VirtualMachineSerializer
Else, return the VirtualMachineWithConfigContextSerializer
"""
request = self.get_serializer_context()['request']
if request.query_params.get('brief', False):
return serializers.NestedVirtualMachineSerializer
elif 'config_context' in request.query_params.get('exclude', []):
return serializers.VirtualMachineSerializer
return serializers.VirtualMachineWithConfigContextSerializer
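    # Illustrative request behaviour (URLs are examples only):
    #   GET /api/virtualization/virtual-machines/?brief=True
    #       -> NestedVirtualMachineSerializer
    #   GET /api/virtualization/virtual-machines/?exclude=config_context
    #       -> VirtualMachineSerializer
    #   GET /api/virtualization/virtual-machines/
    #       -> VirtualMachineWithConfigContextSerializer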
class InterfaceViewSet(ModelViewSet):
queryset = Interface.objects.filter(
virtual_machine__isnull=False
).select_related('virtual_machine').prefetch_related('tags')
serializer_class = serializers.InterfaceSerializer
filterset_class = filters.InterfaceFilter
def get_serializer_class(self):
request = self.get_serializer_context()['request']
if request.query_params.get('brief', False):
# Override get_serializer_for_model(), which will return the DCIM NestedInterfaceSerializer
return serializers.NestedInterfaceSerializer
return serializers.InterfaceSerializer
|
tshadley/examples | word_language_model_bptt_hsm/generate.py | Python | bsd-3-clause | 2,597 | 0.000385 | ###############################################################################
# Language Modeling on Penn Tree Bank
#
# This file generates new sentences sampled from the language model
#
###############################################################################
import argparse
import torch
from torch.autograd import Variable
import data
parser = argparse.ArgumentParser(description='PyTorch PTB Language Model')
# Model parameters.
parser.add_argument('--data', type=str, default='./data/penn',
help='location of the data corpus')
parser.add_argument('--checkpoint', type=str, default='./model.pt',
help='model checkpoint to use')
parser.add_argument('--outf', type=str, default='generated.txt',
help='output file for generated text')
parser.add_argument('--words', type=int, default='1000',
                    help='number of words to generate')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--temperature', type=float, default=1.0,
help='temperature - higher will increase diversity')
parser.add_argument('--log-interval', type=int, default=100,
help='reporting interval')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
if args.temperature < 1e-3:
parser.error("--temperature has to be greater or equal 1e-3")
with open(args.checkpoint, 'rb') as f:
model = torch.load(f)
model.eval()
if args.cuda:
model.cuda()
else:
model.cpu()
corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(1)
input = Variable(torch.rand(1, 1).mul(ntokens).long(), volatile=True)
if args.cuda:
input.data = input.data.cuda()
with open(args.outf, 'w') as outf:
for i in range(args.words):
output, hidden = model(input, hidden)
word_weights = output.squeeze().data.div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
input.data.fill_(word_idx)
word = corpus.dictionary.idx2word[word_idx]
outf.write(word + ('\n' if i % 20 == 19 else ' '))
if i % args.log_interval == 0:
print('| Generated {}/{} words'.format(i, args.words))
|
timothyb0912/pylogit | src/pylogit/conditional_logit.py | Python | bsd-3-clause | 17,093 | 0 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 07:19:49 2016
@name: MultiNomial Logit
@author: Timothy Brathwaite
@summary: Contains functions necessary for estimating multinomial logit
models (with the help of the "base_multinomial_cm.py" file).
Differs from version one since it works with the shape, intercept,
index coefficient partitioning of estimated parameters as opposed
to the shape, index coefficient partitioning scheme of version 1.
"""
from __future__ import absolute_import
import warnings
import numpy as np
from scipy.sparse import diags
from . import choice_calcs as cc
from . import base_multinomial_cm_v2 as base_mcm
from .estimation import LogitTypeEstimator
from .estimation import estimate
from .display_names import model_type_to_display_name
# Create a variable that will be printed if there is a non-fatal error
# in the MNL class construction
_msg_1 = "The Multinomial Logit Model has no shape parameters. "
_msg_2 = "shape_names and shape_ref_pos will be ignored if passed."
_shape_ignore_msg = _msg_1 + _msg_2
# Create a warning string that will be issued if ridge regression is performed.
_msg_3 = "NOTE: An L2-penalized regression is being performed. The "
_msg_4 = "reported standard errors and robust standard errors "
_msg_5 = "***WILL BE INCORRECT***."
_ridge_warning_msg = _msg_3 + _msg_4 + _msg_5
# Alias necessary functions from the base multinomial choice model module
general_log_likelihood = cc.calc_log_likelihood
general_gradient = cc.calc_gradient
general_calc_probabilities = cc.calc_probabilities
general_hessian = cc.calc_hessian
def split_param_vec(beta,
rows_to_alts=None,
design=None,
return_all_types=False,
*args, **kwargs):
"""
Parameters
----------
beta : 1D ndarray.
All elements should by ints, floats, or longs. Should have 1 element
for each utility coefficient being estimated (i.e. num_features).
rows_to_alts : None,
Not actually used. Included merely for consistency with other models.
design : None.
Not actually used. Included merely for consistency with other models.
return_all_types : bool, optional.
Determines whether or not a tuple of 4 elements will be returned (with
one element for the nest, shape, intercept, and index parameters for
this model). If False, a tuple of 3 elements will be returned, as
described below.
Returns
-------
tuple.
`(None, None, beta)`. This function is merely for compatibility with
the other choice model files.
Note
----
If `return_all_types == True` then the function will return a tuple of four
    objects. In order, these objects will either be None or the arrays
    corresponding to the nest, shape, intercept, and index parameters.
"""
if return_all_types:
return None, None, None, beta
else:
return None, None, beta
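# Editor's sketch (hypothetical coefficients, not from pylogit): the MNL split
# is a pass-through, so only the index coefficients come back non-None.
_shapes, _intercepts, _index = split_param_vec(np.array([0.5, -1.2, 0.3]))
assert _shapes is None and _intercepts is None and _index.shape == (3,)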
def _mnl_utility_transform(systematic_utilities, *args, **kwargs):
"""
Parameters
----------
systematic_utilities : 1D ndarray.
        Should contain the systematic utilities for each available
alternative for each observation.
Returns
-------
`systematic_utilities[:, None]`
"""
# Be sure to return a 2D array since other functions will be expecting this
if len(systematic_utilities.shape) == 1:
systematic_utilities = systematic_utilities[:, np.newaxis]
return systematic_utilities
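# Editor's sketch (hypothetical input): the transform only guarantees a 2D
# column vector, e.g. a shape-(2,) array of utilities becomes shape (2, 1).
assert _mnl_utility_transform(np.array([1.0, 2.0])).shape == (2, 1)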
def _mnl_transform_deriv_c(*args, **kwargs):
"""
Returns None.
    This is a placeholder function since the MNL model has no shape
parameters.
"""
    # This is a placeholder function since the MNL model has no shape
# parameters.
return None
def _mnl_transform_deriv_alpha(*args, **kwargs):
"""
Returns None.
    This is a placeholder function since the MNL model has no intercept
parameters outside of the index.
"""
    # This is a placeholder function since the MNL model has no intercept
# parameters outside the index.
return None
class MNLEstimator(LogitTypeEstimator):
"""
Estimation Object used to enforce uniformity in the estimation process
across the various logit-type models.
Parameters
----------
model_obj : a pylogit.base_multinomial_cm_v2.MNDC_Model instance.
Should contain the following attributes:
- alt_IDs
- choices
- design
- intercept_ref_position
- shape_ref_position
- utility_transform
mapping_res : dict.
Should contain the scipy sparse matrices that map the rows of the long
format dataframe to various other objects such as the available
alternatives, the unique observations, etc. The keys that it must have
are `['rows_to_obs', 'rows_to_alts', 'chosen_row_to_obs']`
ridge : int, float, long, or None.
Determines whether or not ridge regression is performed. If a
scalar is passed, then that scalar determines the ridge penalty for
the optimization. The scalar should be greater than or equal to
        zero.
zero_vector : 1D ndarray.
Determines what is viewed as a "null" set of parameters. It is
explicitly passed because some parameters (e.g. parameters that must be
greater than zero) have their null values at values other than zero.
split_params : callable.
Should take a vector of parameters, `mapping_res['rows_to_alts']`, and
model_obj.design as arguments. Should return a tuple containing
separate arrays for the model's shape, outside intercept, and index
coefficients. For each of these arrays, if this model does not contain
the particular type of parameter, the callable should place a `None` in
its place in the tuple.
constrained_pos : list or None, optional.
Denotes the positions of the array of estimated parameters that are
not to change from their initial values. If a list is passed, the
elements are to be integers where no such integer is greater than
        `num_params`. Default == None.
weights : 1D ndarray or None, optional.
Allows for the calculation of weighted log-likelihoods. The weights can
represent various things. In stratified samples, the weights may be
the proportion of the observations in a given strata for a sample in
relation to the proportion of observations in that strata in the
population. In latent class models, the weights may be the probability
of being a particular class.
"""
def set_derivatives(self):
# Pre-calculate the derivative of the transformation vector with
# respect to the vector of systematic utilities
dh_dv = diags(np.ones(self.design.shape[0]), 0, format='csr')
# Create a function to calculate dh_dv which will return the
# pre-calculated result when called
def calc_dh_dv(*args):
return dh_dv
self.calc_dh_dv = calc_dh_dv
self.calc_dh_d_alpha = _mnl_transform_deriv_alpha
self.calc_dh_d_shape = _mnl_transform_deriv_c
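    # Editor's note (sketch): diags(np.ones(n), 0) above builds a sparse n x n
    # identity matrix, which is exactly dh/dv for the MNL's identity utility
    # transform; e.g. diags(np.ones(3), 0).toarray() is the 3 x 3 identity.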
def check_length_of_initial_values(self, init_values):
"""
Ensures that `init_values` is of the correct length. Raises a helpful
ValueError if otherwise.
Parameters
----------
init_values : 1D ndarray.
The initial values to start the optimization process with. There
should be one value for each index coefficient, outside intercept
parameter, and shape parameter being estimated.
Returns
-------
None.
"""
# Calculate the expected number of index parameters
num_index_coefs = self.design.shape[1]
if init_values.shape[0] != num_index_coefs:
msg_1 = "The initial values are of the wrong dimension."
msg_2 = "It should be of dimension {}"
msg_3 = "But instead it h |
the-zebulan/CodeWars | tests/kyu_8_tests/test_a_needle_in_the_haystack.py | Python | mit | 897 | 0 | import unittest
from katas.kyu_8.a_needle_in_the_haystack import find_needle
class FindNeedleTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(find_needle([
| '3', '123124234', None, 'needle', 'world', 'hay', 2, '3',
True, False
]), 'found the needle at position 3')
def test_equals_2(self):
self.assertEqual(find_needle([
'283497238987234', 'a dog', 'a cat', 'some random junk',
'a piece of hay', 'needle', 'something somebody lost a while ago'
]), 'found the needle at position 5')
def test_equals_3(self):
self.assertEqual(find_needle([
| 1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 5, 4, 3, 4, 5, 6, 67, 5, 5, 3,
3, 4, 2, 34, 234, 23, 4, 234, 324, 324, 'needle', 1, 2, 3, 4,
5, 5, 6, 5, 4, 32, 3, 45, 54
]), 'found the needle at position 30')
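# Editor's sketch of an implementation satisfying the tests above; the real
# katas.kyu_8 module is not shown here, so this is an assumption, not its code.
def _find_needle_sketch(haystack):
    return 'found the needle at position {}'.format(haystack.index('needle'))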
|
plaid/plaid-python | plaid/model/link_token_get_metadata_response.py | Python | mit | 8,788 | 0.000455 | """
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from plaid.model.account_filters_response import AccountFiltersResponse
from plaid.model.country_code import CountryCode
from plaid.model.products import Products
globals()['AccountFiltersResponse'] = AccountFiltersResponse
globals()['CountryCode'] = CountryCode
globals()['Products'] = Products
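# Editor's note: lazy_import defers the model imports above until first use so
# that these generated modules can reference one another without triggering
# circular imports at load time.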
class LinkTokenGetMetadataResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'initial_products': ([Products],), # noqa: E501
'webhook': (str, none_type,), # noqa: E501
'country_codes': ([CountryCode],), # noqa: E501
'language': (str, none_type,), # noqa: E501
'redirect_uri': (str, none_type,), # noqa: E501
'client_name': (str, none_type,), # noqa: E501
'account_filters': (AccountFiltersResponse,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'initial_products': 'initial_products', # noqa: E501
'webhook': 'webhook', # noqa: E501
'country_codes': 'country_codes', # noqa: E501
'language': 'language', # noqa: E501
'redirect_uri': 'redirect_uri', # noqa: E501
'client_name': 'client_name', # noqa: E501
'account_filters': 'account_filters', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, initial_products, webhook, country_codes, language, redirect_uri, client_name, *args, **kwargs): # noqa: E501
"""LinkTokenGetMetadataResponse - a model defined in OpenAPI
Args:
initial_products ([Products]): The `products` specified in the `/link/token/create` call.
webhook (str, none_type): The `webhook` specified in the `/link/token/create` call.
country_codes ([CountryCode]): The `country_codes` specified in the `/link/token/create` call.
language (str, none_type): The `language` specified in the `/link/token/create` call.
redirect_uri (str, none_type): The `redirect_uri` specified in the `/link/token/create` call.
client_name (str, none_type): The `client_name` specified in the `/link/token/create` call.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input d | ata
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
d | eserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
account_filters (AccountFiltersResponse): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.initial_products = initial_p |
idaholab/civet | ci/ajax/tests/test_views.py | Python | apache-2.0 | 14,812 | 0.000743 | # -*- coding: utf-8 -*-
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
from django.urls import reverse
from django.utils.html import escape
from ci.tests import utils
from mock import patch
from ci.github import api
from ci import models, Permissions
from ci.tests import DBTester
from django.test import override_settings
@override_settings(INSTALLED_GITSERVERS=[utils.github_config()])
class Tests(DBTester.DBTester):
@patch.object(api.GitHubAPI, 'is_collaborator')
@override_settings(COLLABORATOR_CACHE_TIMEOUT=0)
def test_get_result_output(self, mock_is_collaborator):
mock_is_collaborator.return_value = False
url = reverse('ci:ajax:get_result_output')
# no parameters
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
result = utils.create_step_result()
result.output = 'output'
result.save()
result.job.recipe.private = False
result.job.recipe.save()
data = {'result_id': result.pk}
# should be ok since recipe isn't private
response = self.client.get(url, data)
| self.assertEqual(response.status_code, 200)
json_data = response.json()
self.assertIn(result.output, json_data["contents"])
result.job.recipe.private = True
result.job.recipe.save()
# recipe is private and not signed in, shouldn't see it
response = self.client.get(url, data)
self.assertEqual(response.status_code, 403)
user = utils.get_test_user()
utils.simulate_login(self.client.session, user)
# recipe is priva | te, not a collaborator
response = self.client.get(url, data)
self.assertEqual(response.status_code, 403)
mock_is_collaborator.return_value = True
# recipe is private, but a collaborator
response = self.client.get(url, data)
self.assertEqual(response.status_code, 200)
json_data = response.json()
self.assertIn(result.output, json_data["contents"])
def test_pr_update(self):
url = reverse('ci:ajax:pr_update', args=[1000])
# bad pr
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
pr = utils.create_pr(title="Foo <type> & bar …")
url = reverse('ci:ajax:pr_update', args=[pr.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
json_data = response.json()
self.assertIn('events', json_data)
def test_event_update(self):
ev = utils.create_event()
url = reverse('ci:ajax:event_update', args=[1000])
# no parameters
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
url = reverse('ci:ajax:event_update', args=[ev.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
json_data = response.json()
self.assertIn('events', json_data)
def test_main_update(self):
url = reverse('ci:ajax:main_update')
# no parameters
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
pr_open = utils.create_pr(title='Foo <type> & bar …', number=1)
ev_open = utils.create_event()
pr_open.closed = False
pr_open.save()
ev_open.pull_request = pr_open
ev_open.save()
pr_closed = utils.create_pr(title='closed_pr', number=2)
pr_closed.closed = True
pr_closed.save()
ev_closed = utils.create_event(commit1='2345')
ev_closed.pull_request = pr_closed
ev_closed.save()
pr_open.repository.active = True
pr_open.repository.save()
ev_branch = utils.create_event(commit1='1', commit2='2', cause=models.Event.PUSH)
ev_branch.base.branch.status = models.JobStatus.RUNNING
ev_branch.base.branch.save()
recipe, depends_on = utils.create_recipe_dependency()
utils.create_job(recipe=recipe)
utils.create_job(recipe=depends_on)
data = {'last_request': 10, 'limit': 30}
response = self.client.get(url, data)
self.assertEqual(response.status_code, 200)
json_data = response.json()
self.assertIn('repo_status', json_data)
self.assertIn('closed', json_data)
self.assertEqual(len(json_data['repo_status']), 1)
self.assertEqual(len(json_data['repo_status'][0]['prs']), 1)
self.assertIn(escape(pr_open.title), json_data['repo_status'][0]['prs'][0]['description'])
self.assertEqual(pr_closed.pk, json_data['closed'][0]['id'])
@patch.object(api.GitHubAPI, 'is_collaborator')
@patch.object(Permissions, 'is_allowed_to_see_clients')
def test_job_results(self, mock_allowed, mock_is_collaborator):
mock_is_collaborator.return_value = False
mock_allowed.return_value = True
url = reverse('ci:ajax:job_results')
# no parameters
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
client = utils.create_client()
step_result = utils.create_step_result()
step_result.complete = True
step_result.save()
step_result.job.save()
data = {'last_request': 10, 'job_id': 0 }
# not signed in, not a collaborator
response = self.client.get(url, data)
self.assertEqual(response.status_code, 404)
data['job_id'] = step_result.job.pk
recipe = step_result.job.recipe
recipe.private = True
recipe.save()
# not signed in, not a collaborator on a private recipe
response = self.client.get(url, data)
self.assertEqual(response.status_code, 403)
recipe.private = False
recipe.save()
# recipe no longer private, should work
response = self.client.get(url, data)
self.assertEqual(response.status_code, 200)
json_data = response.json()
self.assertEqual(json_data['job_info']['client_name'], '')
self.assertEqual(json_data['job_info']['client_url'], '')
user = utils.get_test_user()
utils.simulate_login(self.client.session, user)
mock_is_collaborator.return_value = True
recipe.private = True
recipe.save()
job = step_result.job
job.client = client
job.save()
# should work now
response = self.client.get(url, data)
self.assertEqual(response.status_code, 200)
json_data = response.json()
self.assertIn('job_info', json_data)
self.assertIn('results', json_data)
self.assertEqual(step_result.job.pk, json_data['job_info']['id'])
self.assertEqual(step_result.pk, json_data['results'][0]['id'])
self.assertEqual(json_data['job_info']['client_name'], client.name)
# should work now but return no results since nothing has changed
data['last_request'] = json_data['last_request']+10
response = self.client.get(url, data)
self.assertEqual(response.status_code, 200)
json_data = response.json()
self.assertIn('job_info', json_data)
self.assertIn('results', json_data)
# job_info is always returned
self.assertNotEqual('', json_data['job_info'])
self.assertEqual([], json_data['results'])
self.assertEqual(json_data['job_info']['client_name'], '')
def test_repo_update(self):
url = reverse('ci:ajax:repo_update')
# no parameters
response = self.client.get(url)
|
heldergg/labs | labs_django/manage.py | Python | gpl-3.0 | 254 | 0 | # | !/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "labs_django.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv) | |
ddico/odoo | addons/event_sale/models/sale_order.py | Python | agpl-3.0 | 7,205 | 0.003747 | # -*- coding: utf-8 -*-
from odoo import api, fields, models, _
class SaleOrder(models.Model):
_inherit = "sale.order"
attendee_count = fields.Integer('Attendee Count', compute='_compute_attendee_count')
def write(self, vals):
""" Synchronize partner from SO to registrations. This is done notably
in website_sale controller shop/address that updates customer, but not
only. """
result = super(SaleOrder, self).write(vals)
if vals.get('partner_id'):
registrations_toupdate = self.env['event.registration'].search([('sale_order_id', 'in', self.ids)])
registrations_toupdate.write({'partner_id': vals['partner_id']})
return result
def action_confirm(self):
res = super(SaleOrder, self).action_confirm()
for so in self:
# confirm registration if it was free (otherwise it will be confirmed once invoice fully paid)
so.order_line._update_registrations(confirm=so.amount_total == 0, cancel_to_draft=False)
if any(so.order_line.filtered(lambda line: line.event_id)):
return self.env['ir.actions.act_window'] \
.with_context(default_sale_order_id=so.id) \
.for_xml_id('event_sale', 'action_sale_order_event_registration')
return res
def action_view_attendee_list(self):
action = self.env.ref('event.event_registration_action_tree').read()[0]
action['domain'] = [('sale_order_id', 'in', self.ids)]
return action
def _compute_attendee_count(self):
sale_orders_data = self.env['event.registration'].read_group(
[('sale_order_id', 'in', self.ids)],
['sale_order_id'], ['sale_order_id']
)
attendee_count_data = {
sale_order_data['sale_order_id'][0]:
sale_order_data['sale_order_id_count']
for sale_order_data in sale_orders_data
}
for sale_order in self:
sale_order.attendee_count = attendee_count_data.get(sale_order.id, 0)
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
event_id = fields.Many2one(
'event.event', string='Event',
help="Choose an event and it will automatically create a registration for this event.")
event_ticket_id = fields.Many2one(
'event.event.ticket', string='Event Ticket',
help="Choose an event ticket and it will automatically create a registration for this event ticket.")
event_ok = fields.Boolean(related='product_id.event_ok', readonly=True)
@api.depends('state', 'event_id')
def _compute_product_uom_readonly(self):
event_lines = self.filtered(lambda line: line.event_id)
event_lines.update({'product_uom_readonly': True})
super(SaleOrderLine, self - event_lines)._compute_product_uom_readonly()
def _update_registrations(self, confirm=True, cancel_to_draft=False, registration_data=None, mark_as_paid=False):
""" Create or update registrations linked to a sales order line. A sale
order line has a product_uom_qty attribute that will be the number of
        registrations linked to this line. This method updates existing registrations
        and creates new ones for any that are missing. """
RegistrationSudo = self.env['event.registration'].sudo()
registrations = RegistrationSudo.search([('sale_order_line_id', 'in', self.ids)])
registrations_vals = []
for so_line in self.filtered('event_id'):
existing_registrations = registrations.filtered(lambda self: self.sale_order_line_id.id == so_line.id)
if confirm:
existing_registrations.filtered(lambda self: self.state not in ['open', 'cancel']).action_confirm()
if mark_as_paid:
existing_registrations.filtered(lambda self: not self.is_paid)._action_set_paid()
if cancel_to_draft:
existing_registrations.filtered(lambda self: self.state == 'cancel').action_set_draft()
for count in range(int(so_line.product_uom_qty) - len(existing_registrations)):
values = {
'sale_order_line_id': so_line.id,
'sale_order_id': so_line.order_id.id
}
# TDE CHECK: auto confirmation
if registration_data:
values.update(registration_data.pop())
registrations_vals.append(values)
if registrations_vals:
RegistrationSudo.create(registrations_vals)
return True
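    # Editor's sketch (hypothetical usage, not part of this module):
    #     order.order_line._update_registrations(
    #         confirm=order.amount_total == 0, cancel_to_draft=False)
    # would confirm the registrations of a fully free order, mirroring the
    # call made in SaleOrder.action_confirm above.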
@api.onchange('product_id')
def _onchange_product_id(self):
# We reset the event when keeping it would lead to an inconstitent state.
# We need to do it this way because the only relation between the product and the event is through the corresponding tickets.
if self.event_id and (not self.product_id or self.product_id.id not in self.event_id.mapped('event_ticket_ids.product_id.id')):
self.event_id = None
@api.onchange('event_id')
def _onchange_event_id(self):
        # We reset the ticket when keeping it would lead to an inconsistent state.
if self.event_ticket_id and (not self.event_id or self.event_id != self.event_ticket_id.event_id):
self.event_ticket_id = | None
@api.onchange('product_uom', 'product_uom_qty')
def product_uom | _change(self):
if not self.event_ticket_id:
super(SaleOrderLine, self).product_uom_change()
@api.onchange('event_ticket_id')
def _onchange_event_ticket_id(self):
# we call this to force update the default name
self.product_id_change()
def get_sale_order_line_multiline_description_sale(self, product):
""" We override this method because we decided that:
The default description of a sales order line containing a ticket must be different than the default description when no ticket is present.
So in that case we use the description computed from the ticket, instead of the description computed from the product.
We need this override to be defined here in sales order line (and not in product) because here is the only place where the event_ticket_id is referenced.
"""
if self.event_ticket_id:
ticket = self.event_ticket_id.with_context(
lang=self.order_id.partner_id.lang,
)
return ticket._get_ticket_multiline_description() + self._get_sale_order_line_multiline_description_variants()
else:
return super(SaleOrderLine, self).get_sale_order_line_multiline_description_sale(product)
def _get_display_price(self, product):
if self.event_ticket_id and self.event_id:
company = self.event_id.company_id or self.env.company
currency = company.currency_id
return currency._convert(
self.event_ticket_id.price, self.order_id.currency_id,
self.order_id.company_id or self.env.company.id,
self.order_id.date_order or fields.Date.today())
else:
return super()._get_display_price(product)
|
FlorianNaumann/BA_ePuck_Graph_Formations | communication.py | Python | gpl-2.0 | 680 | 0.022059 | """
Graph ePuck framework:
communication.py
You can put Bluetooth or WLAN communication in here
that sets | up a connection and sends/receives mission goals
"""
# standard imports
import numpy as np
import platform
# project related imports
if platform.machine() == 'armv7l':
from ePuck import *
else:
fr | om fakebot import *
from constants import *
def get_new_msg():
return None
def get_desired_leader():
"""
decides the leader of the formation
idea: can be divided into "server setting" and "election by the swarm",
    perhaps switching between these modes depending on the connection to the server, to increase stability
"""
return []
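# Editor's sketch (assumption, not project code): one simple "election by the
# swarm" rule is to make the reachable robot with the lowest ID the leader.
def _elect_lowest_id(neighbour_ids):
    return min(neighbour_ids) if neighbour_ids else None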
def setup(robot):
return True
|
JarbasAI/jarbas-core | mycroft/skills/intent_service.py | Python | gpl-3.0 | 14,724 | 0.001426 | # Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
from adapt.engine import IntentDeterminationEngine
import time
from time import sleep
from multiprocessing import Process
from mycroft.messagebus.message import Message
from mycroft.skills.core import open_intent_envelope
from mycroft.util.log import getLogger
from mycroft.util.parse import normalize
__author__ = 'seanfitz'
logger = getLogger(__name__)
class IntentService(object):
def __init__(self, emitter):
self.engine = IntentDeterminationEngine()
self.emitter = emitter
self.emitter.on('register_vocab', self.handle_register_vocab)
self.emitter.on('register_intent', self.handle_register_intent)
self.emitter.on('recognizer_loop:utterance', self.handle_utterance)
self.emitter.on('detach_intent', self.handle_detach_intent)
self.emitter.on('detach_skill', self.handle_detach_skill)
self.emitter.on('converse_status_response', self.handle_conversation_response)
self.emitter.on('intent_request', self.handle_intent_request)
self.emitter.on('intent_to_skill_request', self.handle_intent_to_skill_request)
self.emitter.on('active_skill_request', self.handle_active_skill_request)
self.active_skills = [] # [skill_id , timestamp]
self.skill_ids = {} # {skill_id: [intents]}
self.converse_timeout = 5 # minutes to prune active_skills
def do_conversation(self, utterances, skill_id, lang):
self.emitter.emit(Message("converse_status_request", {
"skill_id": skill_id, "utterances": utterances, "lang": lang}))
self.waiting = True
self.result = False
start_time = time.time()
t = 0
        while self.waiting and t < 5:
            t = time.time() - start_time
            sleep(0.01)  # yield briefly so handle_conversation_response can run
        self.waiting = False
return self.result
def handle_intent_to_skill_request(self, message):
intent = message.data["intent_name"]
for id in self.skill_ids:
for name in self.skill_ids[id]:
if name == intent:
self.emitter.emit(Message("intent_to_skil | l_response", {
"skill_id": id, "intent_name": intent}))
return id
self.emitter.emit(Message("intent_to_skill_response", {
"skill_id": 0, "intent_name": intent}))
re | turn 0
def handle_conversation_response(self, message):
# id = message.data["skill_id"]
        # no need to cross-check the id: we block until a response arrives before
        # making a new request, so it is safe to assume no other skill sent this
result = message.data["result"]
self.result = result
self.waiting = False
def remove_active_skill(self, skill_id):
for skill in self.active_skills:
if skill[0] == skill_id:
self.active_skills.remove(skill)
def add_active_skill(self, skill_id):
# you have to search the list for an existing entry that already contains it and remove that reference
self.remove_active_skill(skill_id)
# add skill with timestamp to start of skill_list
self.active_skills.insert(0, [skill_id, time.time()])
def handle_active_skill_request(self, message):
# allow external sources to ensure converse method of this skill is called
skill_id = message.data["skill_id"]
self.add_active_skill(skill_id)
def handle_intent_request(self, message):
utterance = message.data["utterance"]
# Get language of the utterance
lang = message.data.get('lang', None)
if not lang:
lang = "en-us"
best_intent = None
try:
# normalize() changes "it's a boy" to "it is boy", etc.
best_intent = next(self.engine.determine_intent(
normalize(utterance, lang), 100))
# TODO - Should Adapt handle this?
best_intent['utterance'] = utterance
        except StopIteration as e:
logger.exception(e)
if best_intent and best_intent.get('confidence', 0.0) > 0.0:
skill_id = int(best_intent['intent_type'].split(":")[0])
intent_name = best_intent['intent_type'].split(":")[1]
self.emitter.emit(Message("intent_response", {
"skill_id": skill_id, "utterance": utterance, "lang": lang, "intent_name": intent_name}))
return True
self.emitter.emit(Message("intent_response", {
"skill_id": 0, "utterance": utterance, "lang": lang, "intent_name": ""}))
return False
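    # Editor's note: intent_type is assumed to be registered by skills as
    # "<skill_id>:<intent_name>" (e.g. a hypothetical "1421:TimeIntent"),
    # which is why split(":") above recovers the owning skill's id.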
def handle_utterance(self, message):
# Get language of the utterance
lang = message.data.get('lang', None)
if not lang:
lang = "en-us"
utterances = message.data.get('utterances', '')
source = message.data.get("source")
target = message.data.get("target")
mute = message.data.get("mute")
user = message.data.get("user")
if target is None:
target = source
if mute is None:
mute = False
# check for conversation time-out
self.active_skills = [skill for skill in self.active_skills
if time.time() - skill[1] <= self.converse_timeout * 60]
# check if any skill wants to handle utterance
for skill in self.active_skills:
if self.do_conversation(utterances, skill[0], lang):
# update timestamp, or there will be a timeout where
# intent stops conversing whether its being used or not
self.add_active_skill(skill[0])
return
# no skill wants to handle utterance, proceed
best_intent = None
for utterance in utterances:
try:
# normalize() changes "it's a boy" to "it is boy", etc.
best_intent = next(self.engine.determine_intent(
normalize(utterance, lang), 100))
# TODO - Should Adapt handle this?
best_intent['utterance'] = utterance
            except StopIteration as e:
logger.exception(e)
continue
if best_intent and best_intent.get('confidence', 0.0) > 0.0:
best_intent["target"] = target
best_intent["mute"] = mute
best_intent["user"] = user
reply = message.reply(
best_intent.get('intent_type'), best_intent)
self.emitter.emit(reply)
# update active skills
skill_id = int(best_intent['intent_type'].split(":")[0])
self.add_active_skill(skill_id)
elif len(utterances) == 1:
self.emitter.emit(Message("intent_failure", {
"utterance": utterances[0],
"lang": lang
}))
else:
self.emitter.emit(Message("multi_utterance_intent_failure", {
"utterances": utterances,
"lang": lang
}))
def handle_register_vocab(self, message):
start_concept = message.data.get('start')
end_concept = message.data.get('end')
regex_str = message.data.get('regex')
alias_of = message.data.get('alias_of')
if regex_str:
self.engine.register_regex_entity(regex_str)
else:
self.engine.register_entity(
start_concept, end_concept, alias_of=alias_of)
def handle_register_intent(self, message):
intent |
openai/universe | universe/vncdriver/libvnc_session.py | Python | mit | 4,042 | 0.001484 | import logging
import os
from twisted.internet import defer, endpoints
from universe import error, utils
from universe.twisty import reactor
from universe.vncdriver import screen, vnc_client
PYGAME_INSTALLED = None
def load_pygame():
global PYGAME_INSTALLED, pygame
if PYGAME_INSTALLED is not None:
return
try:
import pygame
PYGAME_INSTALLED = True
except ImportError:
PYGAME_INSTALLED = False
logger = logging.getLogger(__name__)
class LibVNCSession(object):
def __init__(self, remotes, error_buffer, encoding=None, compress_level=None, fine_quality_level=None, subsample_level=None):
"""compress_level: 0-9 [9 is highest compression]
fine_quality_level: 0-100 [100 is best quality]
subsample_level: 0-3 [0 is best quality]
Lots of references for this, but
https://github.com/TurboVNC/turbovnc/blob/master/doc/pe | rformance.txt
is decent.
"""
load_pygame()
import libvncdriver
if encoding is None:
encoding = os.environ.get('LIBVNC_ENCODING', 'tight')
if compress_level is None:
compress_level = int(os.environ.get('LIBVNC_COMPRESS_LEVEL', '0'))
if fine | _quality_level is None:
fine_quality_level = int(os.environ.get('LIBVNC_FINE_QUALITY_LEVEL', '100'))
if subsample_level is None:
subsample_level = int(os.environ.get('LIBVNC_SUBSAMPLE_LEVEL', '0'))
if not hasattr(libvncdriver, 'VNCSession'):
raise error.Error('''
*=================================================*
|| libvncdriver is not installed ||
|| Try installing with "pip install libvncdriver" ||
|| or use the go or python driver by setting ||
|| UNIVERSE_VNCDRIVER=go ||
|| UNIVERSE_VNCDRIVER=py ||
*=================================================*''')
logger.info("Using libvncdriver's %s encoding" % encoding)
self.driver = libvncdriver.VNCSession(
remotes=remotes,
error_buffer=error_buffer,
encoding=encoding,
compress_level=compress_level,
fine_quality_level=fine_quality_level,
subsample_level=subsample_level,
)
self.screen = None
self.render_called_once = False
if PYGAME_INSTALLED:
pygame.init()
def flip(self):
return self._guard(self.driver.flip)
def step(self, action):
return self.driver.step(action)
def render(self):
self._guard(self._render)
def _guard(self, fn):
try:
return fn()
except (KeyboardInterrupt, SystemExit):
self.close()
def _render(self):
self.before_render()
if not PYGAME_INSTALLED:
return
# For some reason pygame wants X and Y swapped
aray, n = self.driver.flip()
if self.screen is None:
self.screen = pygame.display.set_mode(aray[0].shape[:2][::-1])
surf = pygame.surfarray.make_surface(aray[0].swapaxes(0, 1))
rect = surf.get_rect()
self.screen.blit(surf, rect)
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.close()
def before_render(self):
if not self.render_called_once:
self.render_called_once = True
if not PYGAME_INSTALLED:
logger.warn('''
*================================================================*
|| ||
|| Rendering disabled when using libvnc without pygame installed. ||
|| Consider viewing over VNC or running "pip install pygame". ||
|| ||
*================================================================*''')
def close(self):
if PYGAME_INSTALLED:
pygame.quit()
self.driver.close()
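# Editor's sketch (hypothetical remote address, not part of this module):
#     error_buffer = utils.ErrorBuffer()
#     session = LibVNCSession(['localhost:5900'], error_buffer,
#                             encoding='tight', compress_level=0)
#     frame, n = session.flip()
#     session.render()
#     session.close()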
|