repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
cuspaceflight/tawhiri | docs/conf.py | Python | gpl-3.0 | 9,512 | 0.006098 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Tawhiri documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 10 23:05:18 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
class Mock(object):
__all__ = []
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
def __iter__(self):
raise StopIteration
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
MOCK_MODULES = ['numpy', 'pygrib', 'pyproj', 'gevent',
'gevent.local', 'gevent.timeout', 'gevent.event',
'gevent.pool', 'gevent.queue', 'gevent.dns', 'gevent.coros',
'gevent.socket', 'tawhiri.interpolate']
for mod in MOCK_MODULES:
sys.modules[mod] = Mock()
sys.path.insert(0, os.path.abspath(os.path.join(__file__, "..", "..")))
import tawhiri
tawhiri.interpolate = Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make | it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or | your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = tawhiri.__name__
copyright = "2014, " + tawhiri.__author__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = tawhiri.__version__
# The full version, including alpha/beta/rc tags.
release = tawhiri.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = 'http://tawhiri.cusf.co.uk/'
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tawhiridoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Tawhiri.tex', 'Tawhiri Documentation',
'Cambridge University Spaceflight', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#l |
lichengshuang/createvhost | others/webvirtmgr/addServer.py | Python | apache-2.0 | 1,380 | 0.013043 | #!/usr/bin/python
#-*-encoding:utf-8-*-
#author: asher
#date: 20160429 on train D909
# this scripts useed for add server ip to webvirtmgr
# if not , each server must add by website,it's too slow, and very not interesting.
# use this , it's make you feel very happy
import sqlite3
try:
conn = sqlite3.connect('../webvirtmgr.sqlite3')
cur = conn.cursor()
print "Input the server ip address like:"
ips = raw_input("Ips 172.23.32:").strip()
ips1 = int(raw_input("Input start last ip num: 1:>").strip())
ips2 = int(raw_input("Input end ip num: 100:>").strip())
jifang = str(raw_input("DataCenter like:jxq:>").strip())
login = str(raw_input("User:admin or others:>").strip())
password = str(raw_input("Password:>").strip())
while True:
if ips1 <= ips2:
ips1 = str(ips1)
newip = ips + "." + ips1
jifang1 = jifang + "_" + newip
print "Add %s into database\n" % jifang1
| cur.execute('''insert into servers_compute (name,hostname,login,password,type) values('%s','%s','%s','%s','%d')''' % (jifang1,newip,login,password,1))
ips1 = int(ips1)
ips1 += 1
conn.commit()
else:
break
finally:
allservers = cur.execute("select id,name,hostname,login,type from servers_compute").fetchall()
for i in allservers:
prin | t i
conn.close()
|
Chibin/gpdb | src/test/tinc/tincrepo/mpp/lib/gpSystem.py | Python | apache-2.0 | 4,502 | 0.004887 | #!/usr/bin/env python
# Line too long - pylint: disable=C0301
# Invalid name - pylint: disable=C0103
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Capture information regarding the machine we run test on
"""
import os
import platform
import socket
import re
#####
class GpSystem:
"""
Capture information regarding the machine we run test on
@class GpSystem
@organization: DCD Partner Engineering
@contact: Kenneth Wong
@modified: Johnny Soedomo
@note: platform.system returns Linux for both Redhat and SuSE, check the release file
@change: Jacqui Taing - added attributes: os_version, os_major_version to provide OS release version
"""
###
def __init__(self):
"""
Constructor for GpSystem regards information we care about on the test system
"""
self.architecture = platform.machine()
myos = platform.system()
if myos == "Darwin":
self.os = 'OSX'
(version, major_version) = self.__getOsVersion()
self.os_version = version
self.os_major_version = major_version
elif myos == "Linux":
if os.path.exists("/etc/SuSE-release"):
self.os = 'SUSE'
(version, major_version) = self.__getOsVersion('/etc/SuSE-release')
self.os_version = version
self.os_major_version = major_version
elif os.path.exists("/etc/redhat-release"):
self.os = 'RHEL'
(version, major_version) = self.__getOsVersion('/etc/redhat-release')
self.os_version = version
self.os_major_version = major_version
elif myos == "SunOS":
self.os = 'SOL'
(version, major_version) = self.__getOsVersion('/etc/release')
self.os_version = version
self.os_major_version = major_version
self.host = socket.gethostname()
###
def __str__(self):
"""
@return: a string consists of host, architecture, os, os version, os major version
"""
return '\nGpSystem:\n host: %s\n architecture: %s\n os: %s\n os versio | n: %s\n os major version: %s' % (self.host, self.architecture, self.os, self.os_version, self.os_major_version)
def __getOsVersion(self, releasefile=None) | :
"""
@summary: Internal function to get the OS full release version (e.g. 5.5.) and major release version (e.g. 5)
@return: list (full_version, major_version), e.g. (5.5, 5)
"""
if self.os == 'OSX':
full_version = platform.mac_ver()[0]
major_version = full_version.split('.')[0]
return (full_version, major_version)
else:
if os.path.exists(releasefile):
f = open(releasefile)
releasetext = f.read()
f.close()
full_version = re.search('[\d\.*\d]+', releasetext).group(0)
major_version = full_version.split('.')[0]
return (full_version, major_version)
else:
return None
###
def GetArchitecture(self):
"""
@return: architecture
"""
return self.architecture
###
def GetOS(self):
"""
@return: os
"""
return self.os
def GetOSMajorVersion(self):
"""
@return: major release version of OS, e.g. 5 for RHEL 5.5
"""
return self.os_major_version
def GetOSVersion(self):
"""
@return: full release version of OS, e.g. 5.5 for RHEL 5.5.
"""
return self.os_version
###
def GetHost(self):
"""
@return: host
"""
return self.host
##########################
if __name__ == '__main__':
gpSystem = GpSystem()
print(gpSystem)
|
Tom-Trumper/selenium | py/selenium/__init__.py | Python | apache-2.0 | 812 | 0 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See th | e NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 ( | the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__version__ = "3.10.0"
|
teampheenix/StarCraft-Casting-Tool | scctool/settings/history.py | Python | gpl-3.0 | 4,381 | 0 | """Provide history manager for SCCTool."""
import json
import logging
import scctool.settings.translation
from scctool.settings import getJsonFile, idx2race, race2idx
module_logger = logging.getLogger(__name__)
_ = scctool.settings.translation.gettext
class HistoryManager:
"""History manager for SCCTool."""
__max_length = 100
def __init__(self):
"""Init the history manager."""
self.loadJson()
self.updateDataStructure()
def loadJson(self):
"""Read json data from file."""
try:
with open(getJsonFile('history'), 'r',
encoding='utf-8-sig') as json_file:
data = json.load(json_file)
except Exception:
data = dict()
self.__player_history = data.get('player', [])
self.__team_history = data.get('team', [])
def dumpJson(self):
"""Write json data to file."""
data = dict()
data['player'] = self.__player_history
data['team'] = self.__team_history
try:
with open(getJsonFile('history'), 'w',
encoding='utf-8-sig') as outfile:
json.dump(data, outfile)
except Exception:
module_logger.exception("message")
def updateDataStructure(self):
"""Update the data structure (from a previous version)."""
for idx, item in enumerate(self.__team_history):
if isinstance(item, str):
self.__team_history[idx] = {'team': item, 'logo': '0'}
def insertPlayer(self, player, race):
"""Insert a player into the history."""
player = player.strip()
if not player or player.lower() == "tbd":
return
if race is str:
race = race2idx(race)
race = idx2race(race)
for item in self.__player_history:
if item.get('player', '').lower() == player.lower():
self.__player_history.remove(item)
if race == "Random":
race = item.get('race', 'Random')
break
self.__player_history.insert(0, {"player": player, "race": race})
# self.enforeMaxLength("player")
def insertTeam(self, team, logo='0'):
"""Insert a team into the history."""
team = team.strip()
if not team or team.lower() == "tbd":
return
for item in self.__team_history:
if item.get('team', '').lower() == team.lower():
self.__team_history.remove(item)
if logo == '0':
logo = item.get('logo', '0')
break
self.__team_history.insert(0, {"team": team, "logo": logo})
# self.enforeMaxLength("team")
def enforeMaxLength(self, scope=None):
"""Delete old history elements."""
if not scope or scope == "player":
while len(self.__player_history) > self.__max_length:
self.__player_history.pop()
if not scope or scope == "team":
while len(self.__team_history) > self.__max_length:
self.__team_history.pop()
def getPlayerList(self):
"""Return a list of all players in history."""
playerList = list()
for item in self.__player_history:
player = item['player']
if player not in playerList:
playerList.append(player)
return playerList
def getTeamList(self):
"""Return a list of all teams in history."""
teamList = list()
for item in self.__team_history:
team = item.get('team')
if team not in teamList:
teamList.append(team)
return teamList
def getRace(self, player):
"""Look up the race of a player in the history."""
player = player.lower() | .strip()
race = "Random"
for item in self.__player_history:
if item.get('player', '').lower() == player:
race = item.get('race', 'Random')
break
return race
def getLogo(self, team):
"""Look up the logo of a team in history."""
team = team.lower().strip()
logo = '0'
for item in self.__team_history:
if item.get('team', '').lower() == team:
logo = item.get('logo | ', '0')
break
return logo
|
domidimi/ddt | test/test_functional.py | Python | mit | 6,480 | 0.000154 | import os
import json
import six
from ddt import ddt, data, file_data, is_hash_randomized
from nose.tools import assert_equal, assert_is_not_none, assert_raises
@ddt
class Dummy(object):
"""
Dummy class to test the data decorator on
"""
@data(1, 2, 3, 4)
def test_something(self, value):
return value
@ddt
class DummyInvalidIdentifier():
"""
Dummy class to test the data decorator receiving values invalid characters
indentifiers
"""
@data('32v2 g #Gmw845h$W b53wi.')
def test_data_with_invalid_identifier(self, value):
return value
@ddt
class FileDataDummy(object):
"""
Dummy class to test the file_data decorator on
"""
@file_data("test_data_dict.json")
def test_something_again(self, value):
return value
@ddt
class FileDataMissingDummy(object):
"""
Dummy class to test the file_data decorator on when
JSON file is missing
"""
@file_data("test_data_dict_missing.json")
def test_something_again(self, value):
return value
def test_data_decorator():
"""
Test the ``data`` method decorator
"""
def hello():
pass
pre_size = len(hello.__dict__)
keys = set(hello.__dict__.keys())
data_hello = data(1, 2)(hello)
dh_keys = set(data_hello.__dict__.keys())
post_size = len(data_hello.__dict__)
assert_equal(post_size, pre_size + 1)
extra_attrs = dh_keys - keys
assert_equal(len(extra_attrs), 1)
extra_attr = extra_attrs.pop()
assert_equal(getattr(data_hello, extra_attr), (1, 2))
def test_file_data_decorator_with_dict():
"""
Test the ``file_data`` method decorator
"""
def hello():
pass
pre_size = len(hello.__dict__)
keys = set(hello.__dict__.keys())
data_hello = data("test_data_dict.json")(hello)
dh_keys = set(data_hello.__dict__.keys())
post_size = len(data_hello.__dict__)
assert_equal(post_size, pre_size + 1)
extra_attrs = dh_keys - keys
assert_equal(len(extra_attrs), 1)
extra_attr = extra_attrs.pop()
assert_equal(getattr(data_hello, extra_attr), ("test_data_dict.json",))
is_test = lambda x: x.startswith('test_')
def test_ddt():
"""
Test the ``ddt`` class decorator
"""
tests = len(list(filter(is_test, Dummy.__dict__)))
assert_equal(tests, 4)
def test_file_data_test_creation():
"""
Test that the ``file_data`` decorator creates two tests
"""
tests = len(list(filter(is_test, FileDataDummy.__dict__)))
assert_equal(tests, 2)
def test_file_data_test_names_dict():
"""
Test that ``file_data`` creates tests with the correct name
Name is the the function name plus the key in the JSON data,
when it is parsed as a dictionary.
"""
tests = set(filter(is_test, FileDataDummy.__dict__))
tests_dir = os.path.dirname(__file__)
test_data_path = os.path.join(tests_dir, 'test_data_dict.json')
test_data = json.loads(open(test_data_path).read())
created_tests = set([
"test_something_again_{0}_{1}".format(index + 1, name)
for index, name in enumerate(test_data.keys())
])
assert_equal(tests, created_tests)
def test_feed_data_data():
"""
Test that data is fed to the decorated tests
"""
tests = filter(is_test, Dummy.__dict__)
values = []
obj = Dummy()
for test in tests:
method = getattr(obj, test)
values.append(method())
assert_equal(set(values), set([1, 2, 3, 4]))
def test_feed_data_file_data():
"""
Test that data is fed to the decorated tests from a file
"""
tests = filter(is_test, FileDataDummy.__dict__)
values = []
obj = FileDataDummy()
for test in tests:
method = getattr(obj, test)
values.extend(method())
assert_equal(set(values), set([10, 12, 15, 15, 12, 50]))
def test_feed_data_file_data_missing_json():
"""
Test that a ValueError is raised
"""
tests = filter(is_test, FileDataMissingDummy.__dict__)
obj = FileDataMissingDummy()
for test in tests:
method = getattr(obj, test)
assert_raises(ValueError, method)
def test_ddt_data_name_attribute():
"""
Test the ``__name__`` attribute handling of ``data`` items with ``ddt``
"""
def hello():
pass
class Myint(int):
pass
class Mytest(object):
pass
d1 = Myint(1)
d1.__name__ = 'data1'
d2 = Myint(2)
data_hello = data(d1, d2)(hello)
setattr(Mytest, 'test_hello', data_hello)
|
ddt_mytest = ddt(Mytest)
assert_is_not_none(getattr(ddt_mytest, 'test_hello_1_ | data1'))
assert_is_not_none(getattr(ddt_mytest, 'test_hello_2_2'))
def test_ddt_data_unicode():
"""
Test that unicode strings are converted to function names correctly
"""
def hello():
pass
# We test unicode support separately for python 2 and 3
if six.PY2:
@ddt
class Mytest(object):
@data(u'ascii', u'non-ascii-\N{SNOWMAN}', {u'\N{SNOWMAN}': 'data'})
def test_hello(self, val):
pass
assert_is_not_none(getattr(Mytest, 'test_hello_1_ascii'))
assert_is_not_none(getattr(Mytest, 'test_hello_2_non_ascii__u2603'))
if is_hash_randomized():
assert_is_not_none(getattr(Mytest, 'test_hello_3'))
else:
assert_is_not_none(getattr(Mytest,
'test_hello_3__u__u2603____data__'))
elif six.PY3:
@ddt
class Mytest(object):
@data('ascii', 'non-ascii-\N{SNOWMAN}', {'\N{SNOWMAN}': 'data'})
def test_hello(self, val):
pass
assert_is_not_none(getattr(Mytest, 'test_hello_1_ascii'))
assert_is_not_none(getattr(Mytest, 'test_hello_2_non_ascii__'))
if is_hash_randomized():
assert_is_not_none(getattr(Mytest, 'test_hello_3'))
else:
assert_is_not_none(getattr(Mytest, 'test_hello_3________data__'))
def test_feed_data_with_invalid_identifier():
"""
Test that data is fed to the decorated tests
"""
tests = list(filter(is_test, DummyInvalidIdentifier.__dict__))
assert_equal(len(tests), 1)
obj = DummyInvalidIdentifier()
method = getattr(obj, tests[0])
assert_equal(
method.__name__,
'test_data_with_invalid_identifier_1_32v2_g__Gmw845h_W_b53wi_'
)
assert_equal(method(), '32v2 g #Gmw845h$W b53wi.')
|
huyphan/pyyawhois | test/record/parser/test_response_whois_domainregistry_ie_property_contacts_multiple.py | Python | mit | 1,945 | 0.004627 |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.domainregistry.ie/property_contacts_multiple
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisDomainregistryIePropertyContactsMultiple(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.domainregistry.ie/property_contacts_multiple.txt"
host = "whois.domainregistry.ie"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_admin_contacts(self):
eq_(self.record.admin_contacts.__class__.__name__, 'list')
eq_(len(self.record.admin_contacts), 2)
eq_(self.record.admin_contacts[0].__class__.__name__, 'Contact')
eq_(self.record.admin_contacts[0].type, yawhois.record.Contact.TYPE_ADMINISTRATIVE)
eq_(self.record.admin_contacts[0].id, "JL241-IEDR")
eq_(self.record.admin_contacts[0].name, "Jonathan Lundberg")
eq_(self.record.admin_contacts[1].__class__.__name__, 'Contact')
eq_(self.record.admin_contacts[1].type, yawhois.record.Contact.TYPE_ADMINISTRATIVE)
eq_(self.record.admin_contacts[1].id, "JM474-IEDR")
eq_(self.record.admin_contacts[1]. | name, "John Moylan")
def test_technical_contacts(self):
eq_(self.record.technical_contacts.__class__.__name__, 'list')
eq_(len(self.record.technical_contacts), 1)
eq_(self.record.technical_contacts[0].__class__.__name__, 'Contact')
eq_(self.record.tec | hnical_contacts[0].type, yawhois.record.Contact.TYPE_TECHNICAL)
eq_(self.record.technical_contacts[0].id, "JM474-IEDR")
eq_(self.record.technical_contacts[0].name, "John Moylan")
|
wgrose/leanto | google/appengine/ext/admin/__init__.py | Python | apache-2.0 | 31,441 | 0.010114 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple datastore view and interactive console, for use in dev_appserver."""
import cgi
import csv
import cStringIO
import datetime
import logging
import math
import mimetypes
import os
import os.path
import pickle
import pprint
import random
import sys
import time
import traceback
import types
import urllib
import urlparse
import wsgiref.handlers
from google.appengine.api import datastore
from google.appengine.api import datastore_admin
from google.appengine.api import datastore_types
from google.appengine.api import datastore_errors
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
_DEBUG = True
class ImageHandler(webapp.RequestHandler):
"""Serves a static image.
This exists because we don't want to burden the user with specifying
a static file handler for the image resources used by the admin tool.
"""
PATH = '/images/.*'
def get(self):
image_name = os.path.basename(self.request.path)
content_type, encoding = mimetypes.guess_type(image_name)
if not content_type or not content_type.startswith('image/'):
logging.debug('image_name=%r, content_type=%r, encoding=%r',
image_name, content_type, encoding)
self.error(404)
return
directory = os.path.dirname(__file__)
path = os.path.join(directory, 'templates', 'images', image_name)
try:
image_stream = open(path, 'rb')
except IOError, e:
logging.error('Cannot open image %s: %s', image_name, e)
self.error(404)
return
try:
image_data = image_stream.read()
finally:
image_stream.close()
self.response.headers['Content-Type'] = content_type
self.response.out.write(image_data)
class BaseRequestHandler(webapp.RequestHandler):
"""Supplies a common template generation function.
When you call generate(), we augment the template variables supplied with
the current user in the 'user' variable and the current webapp request
in the 'request' variable.
"""
def generate(self, template_name, template_values={}):
base_path = self.base_path()
values = {
'application_name': self.request.environ['APPLICATION_ID'],
'user': users.get_current_user(),
'request': self.request,
'home_path': base_path + DefaultPageHandler.PATH,
'datastore_path': base_path + DatastoreQueryHandler.PATH,
'datastore_edit_path': base_path + DatastoreEditHandler.PATH,
'datastore_batch_edit_path': base_path + DatastoreBatchEditHandler.PATH,
'interactive_path': base_path + InteractivePageHandler.PATH,
'interactive_execute_path': base_path + InteractiveExecuteHandler.PATH,
'memcach | e_path': base_path + MemcachePageHandler.PATH,
}
values.update(template_values)
directory = os.path.dirname(__file__)
path = os.path.join(directory, os.path.join('templates', template_name))
self.response.out.write(template.render(path, values, debug=_DEBUG))
def base_path(self):
"""Returns the base path of this admin app, which is chosen by the user.
The user specifies which paths map to this application in their app.cfg.
You can get that base path with this method | . Combine with the constant
paths specified by the classes to construct URLs.
"""
path = self.__class__.PATH
return self.request.path[:-len(path)]
def filter_url(self, args):
"""Filters the current URL to only have the given list of arguments.
For example, if your URL is /search?q=foo&num=100&start=10, then
self.filter_url(['start', 'num']) => /search?num=100&start=10
self.filter_url(['q']) => /search?q=10
self.filter_url(['random']) => /search?
"""
queries = []
for arg in args:
value = self.request.get(arg)
if value:
queries.append(arg + '=' + urllib.quote_plus(self.request.get(arg)))
return self.request.path + '?' + '&'.join(queries)
def in_production(self):
"""Detects if app is running in production.
Returns a boolean.
"""
server_software = os.environ['SERVER_SOFTWARE']
return not server_software.startswith('Development')
class DefaultPageHandler(BaseRequestHandler):
"""Redirects to the Datastore application by default."""
PATH = '/'
def get(self):
if self.request.path.endswith('/'):
base = self.request.path[:-1]
else:
base = self.request.path
self.redirect(base + DatastoreQueryHandler.PATH)
class InteractivePageHandler(BaseRequestHandler):
"""Shows our interactive console HTML."""
PATH = '/interactive'
def get(self):
self.generate('interactive.html')
class InteractiveExecuteHandler(BaseRequestHandler):
"""Executes the Python code submitted in a POST within this context.
For obvious reasons, this should only be available to administrators
of the applications.
"""
PATH = InteractivePageHandler.PATH + '/execute'
def post(self):
save_stdout = sys.stdout
results_io = cStringIO.StringIO()
try:
sys.stdout = results_io
code = self.request.get('code')
code = code.replace("\r\n", "\n")
try:
compiled_code = compile(code, '<string>', 'exec')
exec(compiled_code, globals())
except Exception, e:
traceback.print_exc(file=results_io)
finally:
sys.stdout = save_stdout
results = results_io.getvalue()
self.generate('interactive-output.html', {'output': results})
class MemcachePageHandler(BaseRequestHandler):
"""Shows stats about memcache and query form to get values."""
PATH = '/memcache'
TYPES = ((str, str, 'String'),
(unicode, unicode, 'Unicode String'),
(bool, lambda value: MemcachePageHandler._ToBool(value), 'Boolean'),
(int, int, 'Integer'),
(long, long, 'Long Integer'),
(float, float, 'Float'))
DEFAULT_TYPESTR_FOR_NEW = 'String'
@staticmethod
def _ToBool(string_value):
"""Convert string to boolean value.
Args:
string_value: A string.
Returns:
Boolean. True if string_value is "true", False if string_value is
"false". This is case-insensitive.
Raises:
ValueError: string_value not "true" or "false".
"""
string_value_low = string_value.lower()
if string_value_low not in ('false', 'true'):
raise ValueError('invalid literal for boolean: %s' % string_value)
return string_value_low == 'true'
def _GetValueAndType(self, key):
"""Fetch value from memcache and detect its type.
Args:
key: String
Returns:
(value, type), value is a Python object or None if the key was not set in
the cache, type is a string describing the type of the value.
"""
try:
value = memcache.get(key)
except (pickle.UnpicklingError, AttributeError, EOFError, ImportError,
IndexError), e:
msg = 'Failed to retrieve value from cache: %s' % e
return msg, 'error'
if value is None:
return None, self.DEFAULT_TYPESTR_FOR_NEW
for typeobj, _, typestr in self.TYPES:
if isinstance(value, typeobj):
break
else:
typestr = 'pickled'
value = pprint.pformat(value, indent=2)
return value, typestr
def _SetValue(self, key, type_, value):
"""Convert a string value and store the result in memcache.
Args:
key: String
type_: String, describing what type the value should have in the cache.
value: String, will be converted accord |
vanaf/mirrormanager-rfremix | server/mirrormanager/rfremix.py | Python | gpl-2.0 | 1,543 | 0.022035 | from mirrormanager.model import Product, Category, Arch, Directory
product = Product(name='RFRemix')
categories = {'RFRemix Linux':('releases/RFRemix','rsync://mirror.yandex.ru/fedor | a/russianfedora'),
'RFRemix Repo Fixes':('russianfedora/fixes/fed | ora', 'rsync://mirror.yandex.ru/fedora/russianfedora'),
'RFRemix Repo Free':('russianfedora/free/fedora', 'rsync://mirror.yandex.ru/fedora/russianfedora'),
'RFRemix Repo Nonfree':('russianfedora/nonfree/fedora', 'rsync://mirror.yandex.ru/fedora/russianfedora'),
'RFRemix Build':('build', 'rsync://mirror.yandex.ru/fedora/russianfedora'),
'RFRemix Stage':('stage', 'rsync://mirror.yandex.ru/fedora/russianfedora')}
for name, (dirname, canonicalhost) in categories.iteritems():
d = Directory(name=dirname)
Category(product=product, name=name, canonicalhost=canonicalhost, topdir=d)
product = Product(name='RERemix')
categories = {'RERemix Linux':('releases/RERemix','rsync://mirror.yandex.ru/fedora/russianfedora'),
'RERemix Repo Fixes':('russianfedora/fixes/el', 'rsync://mirror.yandex.ru/fedora/russianfedora'),
'RERemix Repo Free':('russianfedora/free/el', 'rsync://mirror.yandex.ru/fedora/russianfedora'),
'RERemix Repo Nonfree':('russianfedora/nonfree/el', 'rsync://mirror.yandex.ru/fedora/russianfedora')}
for name, (dirname, canonicalhost) in categories.iteritems():
d = Directory(name=dirname)
Category(product=product, name=name, canonicalhost=canonicalhost, topdir=d)
|
tensorflow/tensorflow | tensorflow/python/distribute/multi_worker_util_test.py | Python | apache-2.0 | 9,133 | 0.004051 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multi_worker_util."""
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.eager import test
from tensorflow.python.training import server_lib
class NormalizeClusterSpecTest(test.TestCase):
def assert_same_cluster(self, lhs, rhs):
self.assertEqual(
server_lib.ClusterSpec(lhs).as_dict(),
server_lib.ClusterSpec(rhs).as_dict())
def testDictAsInput(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assert_same_cluster(
cluster_spec, multi_worker_util.normalize_cluster_spec(cluster_spec))
def testClusterDefAsInput(self):
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = "chief"
job.tasks[0] = "127.0.0.1:1234"
job = cluster_def.job.add()
job.name = "worker"
job.tasks[0] = "127.0.0.1:8964"
job.tasks[1] = "127.0.0.1:2333"
job = cluster_def.job.add()
job.name = "ps"
job.tasks[0] = "127.0.0.1:1926"
job.tasks[1] = "127.0.0.1:3141"
self.assert_same_cluster(
cluster_def, multi_worker_util.normalize_cluster_spec(cluster_def))
def testClusterSpecAsInput(self):
cluster_spec = server_lib.ClusterSpec({
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
})
self.assert_same_cluster(
cluster_spec, multi_worker_util.normalize_cluster_spec(cluster_spec))
def testUnexpectedInput(self):
cluster_spec = ["127.0.0.1:8964", "127.0.0.1:2333"]
with self.assertRaisesRegex(
ValueError,
"`cluster_spec' should be dict or a `tf.train.ClusterSpec` or a "
"`tf.train.ClusterDef` object"):
multi_worker_util.normalize_cluster_spec(cluster_spec)
class IsChiefTest(test.TestCase):
def testClusterWithChief(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertTrue(multi_worker_util.is_chief(cluster_spec, "chief", 0))
self.assertFalse(multi_worker_util.is_chief(cluster_spec, "worker", 0))
def testClusterWithoutChief(self):
cluster_spec = {"worker": ["127.0.0.1:8964", "127.0.0.1:2333"]}
self.assertTrue(multi_worker_util.is_chief(cluster_spec, "worker", 0))
self.assertFalse(multi_worker_util.is_chief(cluster_spec, "worker", 1))
with self.assertRaisesRegex(
ValueError, "`task_type` 'chief' not found in cluster_spec."):
multi_worker_util.is_chief(cluster_spec, "chief", 0)
with self.assertRaisesRegex(
ValueError, "The `task_id` 2 exceeds the maximum id of worker."):
multi_worker_util.is_chief(cluster_spec, "worker", 2)
def testEvaluatorIsChief(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"evaluator": ["127.0.0.1:2019"]
}
self.assertTrue(multi_worker_util.is_chief(cluster_spec, "evaluator", 0))
class NumWorkersTest(test.TestCase):
def testCountWorker(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.worker_count(cluster_spec, task_type="chief"), 3)
self.assertEqual(
multi_worker_util.worker_count(cluster_spec, task_type="worker"), 3)
def testCountEvaluator(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"evaluator": ["127.0.0.1:7566"]
}
self.assertEqual(
multi_worker_util.worker_count(cluster_spec, task_type="evaluator"), 1)
def testTaskTypeNotFound(self):
| cluster_spec = {}
with self.assertRaisesRegex(
ValueError, "`task_type` 'worker' not found in cluster_spec."):
multi_worker_util.worker_count(cluster_spec, task_type="worker")
def testCountPs(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"ps": ["127.0.0.1:1926 | ", "127.0.0.1:3141"]
}
# A "ps" job shouldn't call this method.
with self.assertRaisesRegex(ValueError, "Unexpected `task_type` 'ps'"):
multi_worker_util.worker_count(cluster_spec, task_type="ps")
class IdInClusterTest(test.TestCase):
def testChiefId(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "chief", 0), 0)
def testWorkerId(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "worker", 1), 2)
cluster_spec = {
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "worker", 1), 1)
def testEvaluatorId(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"evaluator": ["127.0.0.1:7566"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "evaluator", 0), 0)
def testPsId(self):
cluster_spec = {"chief": ["127.0.0.1:1234"], "ps": ["127.0.0.1:7566"]}
with self.assertRaisesRegex(ValueError,
"There is no id for task_type 'ps'"):
multi_worker_util.id_in_cluster(cluster_spec, "ps", 0)
def testMultipleChiefs(self):
cluster_spec = {
"chief": ["127.0.0.1:8258", "127.0.0.1:7566"],
}
with self.assertRaisesRegex(ValueError,
"There must be at most one 'chief' job."):
multi_worker_util.id_in_cluster(cluster_spec, "chief", 0)
class CollectiveLeaderTest(test.TestCase):
def testChiefAsLeader(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, "worker", 0),
"/job:chief/replica:0/task:0")
def testWorkerAsLeader(self):
cluster_spec = {
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, "worker", 1),
"/job:worker/replica:0/task:0")
def testLeaderForEvaluator(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"],
"evaluator": ["127.0.0.1:2019"]
}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, "evaluator", 0), "")
def testLocalLeader(self):
cluster_spec = {}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, None, 0), "")
# Most of the validation logic is tested by above tests except for some.
class ClusterSpecValidationTest(test.Test |
EndPointCorp/lg_ros_nodes | lg_twister/scripts/mux_twists.py | Python | apache-2.0 | 2,793 | 0 | #!/usr/bin/env python3
from functools import partial
import math
from threading import Lock
import rospy
from geometry_msgs.msg import Twist
from lg_common.helpers import run_with_influx_exception_handler
NODE_NAME = 'mux_twists'
DEFAULT_TICK_RATE = 65.0
DEFAULT_AXIS_LIMIT = math.sqrt(2) / 2
DEFAULT_AGE_LIMIT = 1.0
def clamp(val, lo, hi):
return min(max(val, lo), hi)
def clamp_twist(twist, lo, hi):
twist.linear.x = clamp(twist.linear.x, lo, hi)
twist.linear.y = clamp(twist.linear.y, lo, hi)
twist.linear.z = clamp(twist.linear.z, lo, hi)
twist.angular.x = clamp(twist.angular.x, lo, hi)
twist.angular.y = clamp(twist.angular.y, lo, hi)
twist.angular.z = clamp(twist.angular.z, lo, hi)
class TwistMuxer:
def __init__(self, twist_pub, axis_limit, age_limit):
self._lock = Lock()
self.twist_pub = twist_pub
self.axis_limit = axis_limit
self.age_limit = rospy.Duration(age_limit)
self.samples = {}
self.sample_stamps = {}
def handle_twist(self, topic, twist):
with self._lock:
self._handle_twist(topic, twist)
def _handle_twist(self, topic, twist):
self.samples[topic] = twist
self.sample_stamps[topic] = rospy.Time.now()
def tick(self, tev):
with self._lock:
self._tick(tev)
def _tick(self, tev):
t = rospy.Time.now()
result = Twist()
for topic in list(self.samples.keys()):
stamp = self.sample_stamps[topic]
if t - stamp > self.age_limit:
continue
twist = self.samples[topic]
result.linear.x += twist.linear.x
result.linear.y += twist.linear.y
result.linear.z += twist.linear.z
result.angular.x += twist.angular.x
result.angular.y += twist.angular.y
| result.angular.z += twist.angular.z
clamp_twist(result, -self.axis_limit, self.axis_limit)
self.twist_pub.publish(result)
def main():
rospy.init_node(NODE_NAME)
tick_rate = float(rospy.get_param('~tick_rate', DEFAULT_TICK_RATE))
sources = [
s.strip() for s in rospy.get_param('~sources').split(',')
]
axis_limit = float(rospy.get_param('~axis_limit', DEFAULT_AXIS_LIMIT))
age_limit = float(rospy.get_param('~age_limit', DEFAU | LT_AGE_LIMIT))
twist_pub = rospy.Publisher('/lg_twister/twist', Twist, queue_size=10)
muxer = TwistMuxer(twist_pub, axis_limit, age_limit)
for source in sources:
handler = partial(muxer.handle_twist, source)
rospy.Subscriber(source, Twist, handler)
rospy.Timer(rospy.Duration(1.0 / tick_rate), muxer.tick)
rospy.spin()
if __name__ == '__main__':
run_with_influx_exception_handler(main, NODE_NAME)
|
Azure/azure-sdk-for-python | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2018_05_01_preview/operations/_proactive_detection_configurations_operations.py | Python | mit | 15,441 | 0.004598 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str,
subscription_id: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-05-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
subscription_id: str,
resource_name: str,
configuration_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-05-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs/{ConfigurationId}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"ConfigurationId": _SERIALIZER.url("configuration_id", configuration_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_request(
resource_group_name: str,
subscription_id: str,
resource_name: str,
configuration_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2018-05-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs/{ConfigurationId}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"ConfigurationId": _SERIALIZER.url("configuration_id", configuration_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
class ProactiveDetectionConfigurationsOperations(object):
"""ProactiveDetectionConfigurationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.applicationinsights.v2018_05_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> List["_models.ApplicationInsightsComponentProactiveDetectionConfiguration"]:
"""Gets a list of ProactiveDetection config | urations of an Application Insights component.
:param resour | ce_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ApplicationInsightsComponentProactiveDetectionConfiguration, or the result of
cls(response)
:rtype:
list[~azure.mgmt.applicationinsights.v2018_05_01_preview.models.ApplicationInsightsComponentProactiveDetectionConfiguration]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ApplicationInsightsComponentProactiveDetectionConfiguration"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_request(
|
hbiyik/tribler | src/tribler-gui/tribler_gui/tests/fake_tribler_api/models/torrent.py | Python | lgpl-3.0 | 2,618 | 0.001528 | import time
from binascii import unhexlify
from random import choice, randint, uniform
from tribler_core.modules.metadata_store.serialization import REGULAR_TORRENT
from tribler_core.utilities.unicode import hexlify
from tribler_gui.tests.fake_tribler_api.constants import COMMITTED
from tribler_gui.tests.fake_tribler_api.utils import get_random_filename, get_random_hex_string
class Torrent(object):
def __init__(self, infohash, name, length, category, status=COMMITTED):
self.id_ = randint(10000, 100000000)
self.infohash = infohash
self.name = name
self.length = length
self.category = category
self.files = []
self.time_added = randint(1200000000, 1460000000)
self.relevance_score = uniform(0, 20)
self.status = status
self.trackers = []
self.last_tracker_check = 0
self.num_seeders = 0
self.num_leechers = 0
self.updated = int(time.time())
if randint(0, 1) == 0:
# Give this torrent some health
self.update_health()
for ind in range(randint(0, 10)):
self.trackers.append("https://tracker%d.org" % ind)
def update_health(self):
self.last_tracker_check = randint(int(time.time()) - 3600 * 24 * 30, int(time.time()))
self.num_seeders = randint(0, 500) if randint(0, 1) == 0 else 0
self.num_leechers = randint(0, 500) if randint(0, 1) == 0 else 0
def get_json(self, include_status=False, include_trackers=False):
result = {
"name": self.name,
"id": self.id_,
"infohash": hexlify(self.infohash),
| "size": self.length,
"category": self.category,
"relevance_score": self.relevance_score,
"num_seeders": self.num_seeders,
"num_leechers": self.num_leechers,
"last_tracker_check": self.last_tracker_check,
"type": REGULAR_TORRENT,
}
if include_status:
result["status"] = self.status
if include_trackers:
| result["trackers"] = self.trackers
return result
@staticmethod
def random():
infohash = unhexlify(get_random_hex_string(40))
name = get_random_filename()
categories = ['document', 'audio', 'video', 'xxx']
torrent = Torrent(infohash, name, randint(1024, 1024 * 3000), choice(categories))
# Create the files
for _ in range(randint(1, 20)):
torrent.files.append({"path": get_random_filename(), "length": randint(1024, 1024 * 3000)})
return torrent
|
Ecotrust/COMPASS | mp/visualize/forms.py | Python | apache-2.0 | 197 | 0.005076 | from madrona.features.forms import FeatureForm
from django import forms
from visualize.models import *
|
class BookmarkForm(FeatureForm):
class Meta(Fe | atureForm.Meta):
model = Bookmark
|
OmnesRes/pan_cancer | paper/cox_regression/BLCA/patient_info.py | Python | mit | 7,247 | 0.024424 | ## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirna | me(os.path.abspath(__file__))))
## Read the follow up data
## It was found that the v4.0 file co | ntained more recent follow up data than v2.0, but the files contained nonredundant patients.
## So both files are loaded with the v4.0 getting preference.
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_blca.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical1=[['','','']]
for i in data:
try:
if clinical1[-1][0]==i[0]:
if i[8]=='Alive':
clinical1[-1]=[i[0],int(i[9]),'Alive']
elif i[8]=='Dead':
clinical1[-1]=[i[0],int(i[10]),'Dead']
else:
pass
else:
if i[8]=='Alive':
clinical1.append([i[0],int(i[9]),'Alive'])
elif i[8]=='Dead':
clinical1.append([i[0],int(i[10]),'Dead'])
else:
pass
except:
pass
## Removing the empty value.
clinical=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_blca.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
if i[0] not in [j[0] for j in clinical]:
try:
if clinical2[-1][0]==i[0]:
if i[6]=='Alive':
clinical2[-1]=[i[0],int(i[7]),'Alive']
elif i[6]=='Dead':
clinical2[-1]=[i[0],int(i[8]),'Dead']
else:
pass
else:
if i[6]=='Alive':
clinical2.append([i[0],int(i[7]),'Alive'])
elif i[6]=='Dead':
clinical2.append([i[0],int(i[8]),'Dead'])
else:
pass
except:
pass
## Removing the empty value and combining the lists.
clinical+=clinical2[1:]
## Grade, sex and age information were taken from the "clinical_patient" file. A dictionary was created for grade and sex.
more_clinical={}
grade_dict={}
grade_dict['High Grade']=1
grade_dict['Low Grade']=0
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_patient_blca.txt'))
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
try:
more_clinical[i[0]]=[grade_dict[i[-5]],sex_dict[i[6]],int(i[42])]
if i[21]=='Alive':
clinical4.append([i[0],int(i[22]),'Alive'])
elif i[21]=='Dead':
clinical4.append([i[0],int(i[23]),'Dead'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
##normalized files were used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
if TCGA_to_mrna.has_key(i[0]):
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
##print average age at diagnosis
age=np.mean([i[5] for i in clinical_and_files])
##print number of males
males=len([i for i in clinical_and_files if i[4]==0])
##print number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
#the number of events(deaths) is the fourth column of the output
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the output
median=str(res).split('\n')[-2].strip().split()[4]
##write data to a file
f=open('patient_info.txt','w')
f.write('Average Age')
f.write('\t')
f.write('Males')
f.write('\t')
f.write('Females')
f.write('\t')
f.write('Deaths')
f.write('\t')
f.write('Median Survival')
f.write('\n')
f.write(str(age))
f.write('\t')
f.write(str(males))
f.write('\t')
f.write(str(females))
f.write('\t')
f.write(deaths)
f.write('\t')
f.write(median)
|
ddico/odoo | addons/account_edi_facturx/models/account_edi_format.py | Python | agpl-3.0 | 12,584 | 0.004371 | # -*- coding: utf-8 -*-
from odoo import api, models, fields, tools, _
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT, float_repr
from odoo.tests.common import Form
from odoo.exceptions import UserError
from datetime import datetime
from lxml import etree
from PyPDF2 import PdfFileReader
import io
import logging
_logger = logging.getLogger(__name__)
DEFAULT_FACTURX_DATE_FORMAT = '%Y%m%d'
class AccountEdiFormat(models.Model):
_inherit = 'account.edi.format'
def _export_invoice_to_attachment(self, invoice):
self.ensure_one()
if self.code != 'facturx_1_0_05':
return super()._export_invoice_to_attachment(invoice)
def format_date(dt):
# Format the date in the Factur-x standard.
dt = dt or datetime.now()
return dt.strftime(DEFAULT_FACTURX_DATE_FORMAT)
def format_monetary(number, currency):
# Format the monetary values to avoid trailing decimals (e.g. 90.85000000000001).
return float_repr(number, currency.decimal_places)
# Create file conten | t.
template_values = {
'record': invoice,
'format_date': format_date,
'format_monetary': form | at_monetary,
}
xml_content = b"<?xml version='1.0' encoding='UTF-8'?>"
xml_content += self.env.ref('account_edi_facturx.account_invoice_facturx_export')._render(template_values)
xml_name = '%s_facturx.xml' % (invoice.name.replace('/', '_'))
return {'name': xml_name,
'datas': xml_content,
'res_model': 'account.move',
'res_id': invoice._origin.id,
'mimetype': 'application/xml'
}
def _is_facturx(self, filename, tree):
return self.code == 'facturx_1_0_05' and tree.tag == '{urn:un:unece:uncefact:data:standard:CrossIndustryInvoice:100}CrossIndustryInvoice'
def _create_invoice_from_xml_tree(self, filename, tree):
self.ensure_one()
if self._is_facturx(filename, tree):
return self._import_facturx(tree, self.env['account_move'])
return super()._create_invoice_from_xml_tree(filename, tree)
def _update_invoice_from_xml_tree(self, filename, tree, invoice):
self.ensure_one()
if self._is_facturx(filename, tree):
return self._import_facturx(tree, invoice)
return super()._update_invoice_from_xml_tree(filename, tree, invoice)
def _import_facturx(self, tree, invoice):
""" Decodes a factur-x invoice into an invoice.
:param tree: the factur-x tree to decode.
:param invoice: the invoice to update or an empty recordset.
:returns: the invoice where the factur-x data was imported.
"""
amount_total_import = None
default_move_type = False
if invoice._context.get('default_journal_id'):
journal = self.env['account.journal'].browse(self.env.context['default_journal_id'])
default_move_type = 'out_invoice' if journal.type == 'sale' else 'in_invoice'
elif invoice._context.get('default_move_type'):
default_move_type = self._context['default_move_type']
elif invoice.move_type in self.env['account.move'].get_invoice_types(include_receipts=True):
# in case an attachment is saved on a draft invoice previously created, we might
# have lost the default value in context but the type was already set
default_move_type = invoice.move_type
if not default_move_type:
raise UserError(_("No information about the journal or the type of invoice is passed"))
if default_move_type == 'entry':
return
# Total amount.
elements = tree.xpath('//ram:GrandTotalAmount', namespaces=tree.nsmap)
total_amount = elements and float(elements[0].text) or 0.0
# Refund type.
# There is two modes to handle refund in Factur-X:
# a) type_code == 380 for invoice, type_code == 381 for refund, all positive amounts.
# b) type_code == 380, negative amounts in case of refund.
# To handle both, we consider the 'a' mode and switch to 'b' if a negative amount is encountered.
elements = tree.xpath('//rsm:ExchangedDocument/ram:TypeCode', namespaces=tree.nsmap)
type_code = elements[0].text
default_move_type.replace('_refund', '_invoice')
if type_code == '381':
default_move_type = 'out_refund' if default_move_type == 'out_invoice' else 'in_refund'
refund_sign = -1
else:
# Handle 'b' refund mode.
if total_amount < 0:
default_move_type = 'out_refund' if default_move_type == 'out_invoice' else 'in_refund'
refund_sign = -1 if 'refund' in default_move_type else 1
# Write the type as the journal entry is already created.
invoice.move_type = default_move_type
# self could be a single record (editing) or be empty (new).
with Form(invoice.with_context(default_move_type=default_move_type)) as invoice_form:
# Partner (first step to avoid warning 'Warning! You must first select a partner.').
partner_type = invoice_form.journal_id.type == 'purchase' and 'SellerTradeParty' or 'BuyerTradeParty'
elements = tree.xpath('//ram:' + partner_type + '/ram:SpecifiedTaxRegistration/ram:ID', namespaces=tree.nsmap)
partner = elements and self.env['res.partner'].search([('vat', '=', elements[0].text)], limit=1)
if not partner:
elements = tree.xpath('//ram:' + partner_type + '/ram:Name', namespaces=tree.nsmap)
partner_name = elements and elements[0].text
partner = elements and self.env['res.partner'].search([('name', 'ilike', partner_name)], limit=1)
if not partner:
elements = tree.xpath('//ram:' + partner_type + '//ram:URIID[@schemeID=\'SMTP\']', namespaces=tree.nsmap)
partner = elements and self.env['res.partner'].search([('email', '=', elements[0].text)], limit=1)
if partner:
invoice_form.partner_id = partner
# Reference.
elements = tree.xpath('//rsm:ExchangedDocument/ram:ID', namespaces=tree.nsmap)
if elements:
invoice_form.ref = elements[0].text
# Name.
elements = tree.xpath('//ram:BuyerOrderReferencedDocument/ram:IssuerAssignedID', namespaces=tree.nsmap)
if elements:
invoice_form.payment_reference = elements[0].text
# Comment.
elements = tree.xpath('//ram:IncludedNote/ram:Content', namespaces=tree.nsmap)
if elements:
invoice_form.narration = elements[0].text
# Total amount.
elements = tree.xpath('//ram:GrandTotalAmount', namespaces=tree.nsmap)
if elements:
# Currency.
if elements[0].attrib.get('currencyID'):
currency_str = elements[0].attrib['currencyID']
currency = self.env.ref('base.%s' % currency_str.upper(), raise_if_not_found=False)
if currency != self.env.company.currency_id and currency.active:
invoice_form.currency_id = currency
# Store xml total amount.
amount_total_import = total_amount * refund_sign
# Date.
elements = tree.xpath('//rsm:ExchangedDocument/ram:IssueDateTime/udt:DateTimeString', namespaces=tree.nsmap)
if elements:
date_str = elements[0].text
date_obj = datetime.strptime(date_str, DEFAULT_FACTURX_DATE_FORMAT)
invoice_form.invoice_date = date_obj.strftime(DEFAULT_SERVER_DATE_FORMAT)
# Due date.
elements = tree.xpath('//ram:SpecifiedTradePaymentTerms/ram:DueDateDateTime/udt:DateTimeString', namespaces=tree.nsmap)
if elements:
date_str = elements[0].text
date_obj = datetime.strptime(date_str, DEFAULT_FACTURX_DATE |
wangjiezhe/FetchNovels | novel/sources/myshuge.py | Python | gpl-3.0 | 1,042 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyquery import PyQuery
from novel import serial, utils, config
BASE_URL = 'http://www.myshuge.com/read/{}/'
class Myshuge(serial.SerialNovel):
    """Serial-novel scraper for www.myshuge.com."""

    def __init__(self, tid):
        super().__init__(utils.base_to_url(BASE_URL, tid), '#content',
                         chap_type=serial.ChapterType.path,
                         chap_sel='dd',
                         tid=tid)
        # Fixed extraction corruption ('se | lf.encoding').  Pages are
        # GB-encoded (see novel.config).
        self.encoding = config.GB

    def get_intro(self):
        """Return the cleaned book introduction from the og:description meta tag."""
        intro = self.doc('meta').filter(
            lambda i, e: PyQuery(e).attr('property') == 'og:description'
        ).attr('content')
        intro = self.refine(intro)
        return intro

    def get_title_and_author(self):
        """Return (title, author) read from the og:novel:* meta tags."""
        name = self.doc('meta').filter(
            lambda i, e: PyQuery(e).attr('property') == 'og:novel:book_name'
        ).attr('content')
        author = self.doc('meta').filter(
            lambda i, e: PyQuery(e).attr('property') == 'og:novel:author'
        ).attr('content')
        return name, author
|
xbmc/atv2 | xbmc/lib/libPython/Python/Lib/tempfile.py | Python | gpl-2.0 | 14,377 | 0.002295 | """Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. The interfaces listed
as "safe" just below can be used without fear of race conditions.
Those listed as "unsafe" cannot, and are provided for backward
compatibility only.
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
template - the default prefix for all temporary names.
You may change this to control the default prefix.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir"
]
# Imports.
import os as _os
import errno as _errno
from random import Random as _Random
if _os.name == 'mac':
import Carbon.Folder as _Folder
import Carbon.Folders as _Folders
# fcntl is POSIX-only; provide a no-op fallback elsewhere (e.g. Windows).
try:
    import fcntl as _fcntl
except ImportError:
    def _set_cloexec(fd):
        # No fcntl available: nothing to do.
        pass
else:
    def _set_cloexec(fd):
        # Mark *fd* close-on-exec so spawned children do not inherit it.
        try:
            flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
        except IOError:
            # Best effort only: leave the descriptor unchanged on failure.
            pass
        else:
            # flags read successfully, modify
            flags |= _fcntl.FD_CLOEXEC
            _fcntl.fcntl(fd, _fcntl.F_SETFD, flags)
try:
import thread as _thread
except ImportError:
import dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOINHERIT'):
_text_openflags |= _os.O_NOINHERIT
if hasattr(_os, 'O_NOFOLLOW'):
_text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
_bin_openflags |= _os.O_BINARY
if hasattr(_os, 'TMP_MAX'):
TMP_MAX = _os.TMP_MAX
else:
TMP_MAX = 10000
template = "tmp"
tempdir = None
# Internal routines.
_once_lock = _allocate_lock()
# Prefer lstat so a dangling symlink still counts as an existing name.
if hasattr(_os, "lstat"):
    _stat = _os.lstat
elif hasattr(_os, "stat"):
    _stat = _os.stat
else:
    # Fallback.  All we need is something that raises os.error if the
    # file doesn't exist.
    def _stat(fn):
        # Probe *fn* by opening it; translate IOError into os.error.
        try:
            f = open(fn)
        except IOError:
            raise _os.error
        f.close()

def _exists(fn):
    # Return True if *fn* exists according to _stat above.
    try:
        _stat(fn)
    except _os.error:
        return False
    else:
        return True
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
into file names. Each string is six characters long. Multiple
threads can safely use the same instance at the same time.
_RandomNameSequence is an iterator."""
characters = ("abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
"0123456789-_")
def __init__(self):
self.mutex = _allocate_lock()
self.rng = _Random()
self.normcase = _os.path.normcase
def __iter__(self):
return self
def next(self):
m = self.mutex
c = self.characters
choose = self.rng.choice
m.acquire()
try:
letters = [choose(c) for dummy in "123456"]
finally:
m.release()
return self.normcase(''.join(letters))
def _candidate_tempdir_list():
    """Generate a list of candidate temporary directories which
    _get_default_tempdir will try."""

    candidates = []

    # Environment variables take precedence, in this order.
    for var in ('TMPDIR', 'TEMP', 'TMP'):
        value = _os.getenv(var)
        if value:
            candidates.append(value)

    # Then OS-specific well-known locations.
    if _os.name == 'mac':
        try:
            fsr = _Folder.FSFindFolder(_Folders.kOnSystemDisk,
                                       _Folders.kTemporaryFolderType, 1)
            candidates.append(fsr.as_pathname())
        except _Folder.error:
            pass
    elif _os.name == 'riscos':
        value = _os.getenv('Wimp$ScrapDir')
        if value:
            candidates.append(value)
    elif _os.name == 'nt':
        candidates.extend([r'c:\temp', r'c:\tmp', r'\temp', r'\tmp'])
    else:
        candidates.extend(['/tmp', '/var/tmp', '/usr/tmp'])

    # As a last resort, the current directory.
    try:
        candidates.append(_os.getcwd())
    except (AttributeError, _os.error):
        candidates.append(_os.curdir)

    return candidates
def _get_default_tempdir():
    """Calculate the default directory to use for temporary files.
    This routine should be called exactly once.

    We determine whether or not a candidate temp dir is usable by
    trying to create and write to a file in that directory.  If this
    is successful, the test file is deleted.  To prevent denial of
    service, the name of the test file must be randomized."""

    namer = _RandomNameSequence()
    dirlist = _candidate_tempdir_list()
    flags = _text_openflags

    for dir in dirlist:
        if dir != _os.curdir:
            dir = _os.path.normcase(_os.path.abspath(dir))
        # Try only a few names per directory.
        for seq in xrange(100):
            name = namer.next()
            filename = _os.path.join(dir, name)
            try:
                # Exclusive create + write proves the directory is writable.
                fd = _os.open(filename, flags, 0600)
                fp = _os.fdopen(fd, 'w')
                fp.write('blat')
                fp.close()
                _os.unlink(filename)
                del fp, fd
                return dir
            except (OSError, IOError), e:
                # Anything other than a name collision means this
                # directory is unusable; move on to the next candidate.
                if e[0] != _errno.EEXIST:
                    break # no point trying more names in this directory
                pass
    raise IOError, (_errno.ENOENT,
                    ("No usable temporary directory found in %s" % dirlist))
_name_sequence = None


def _get_candidate_names():
    """Common setup sequence for all user-callable interfaces."""
    global _name_sequence
    # Double-checked locking: cheap unlocked test first, then confirm
    # under the module lock before creating the shared name sequence.
    if _name_sequence is None:
        with _once_lock:
            if _name_sequence is None:
                _name_sequence = _RandomNameSequence()
    return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags):
    """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""

    names = _get_candidate_names()

    # Try up to TMP_MAX candidate names; only EEXIST collisions retry.
    for seq in xrange(TMP_MAX):
        name = names.next()
        file = _os.path.join(dir, pre + name + suf)
        try:
            # O_EXCL in *flags* guarantees we never open an existing file.
            fd = _os.open(file, flags, 0600)
            _set_cloexec(fd)
            return (fd, _os.path.abspath(file))
        except OSError, e:
            if e.errno == _errno.EEXIST:
                continue # try again
            raise

    raise IOError, (_errno.EEXIST, "No usable temporary file name found")
# User visible interfaces.

def gettempprefix():
    """Accessor for tempfile.template: the default prefix ("tmp")
    used when generating new temporary names."""
    return template
tempdir = None  # NOTE(review): redundant -- tempdir is already initialized
                # to None near the top of the module.

def gettempdir():
    """Accessor for tempfile.tempdir: return the default temporary
    directory, computing and caching it on first use."""
    global tempdir
    # Double-checked locking so the (potentially slow) filesystem probe
    # in _get_default_tempdir runs at most once across all threads.
    if tempdir is None:
        _once_lock.acquire()
        try:
            if tempdir is None:
                tempdir = _get_default_tempdir()
        finally:
            _once_lock.release()
    return tempdir
def mkstemp(suffix="", prefix=template, dir=None, text=False):
"""mkstemp([suffix, [prefix, [dir, [text]]]])
User-callable function to create and return a unique temporary
file. The return value is a pair (fd, name) where fd is the
file descriptor returned by os.open, and name is the filename.
If 'suffix' is specified, the file name will end with that suffix,
otherwise there will be no suffix.
If 'prefix' is specified, the file name will begin with that prefix,
otherwise a default prefix is used.
If 'dir' is specified, the file will be created in that directory,
otherwise a default directory is used.
If 'text' |
justanr/py3traits | src/pytraits/core/binders.py | Python | apache-2.0 | 6,145 | 0 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
'''
Copyright 2014-2015 Teppo Perä
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
def bind_method_to_class(clazz, method, name=None):
    """
    Binds a single method into class.

    Useful when trait objects are constructed dynamically: once the trait
    instance is ready, its methods can be transferred onto a class whose
    creation you do not control.

    @param clazz: Class to be extended
    @param method: Method that is added as a trait into class
    @param name: New name for the method. When omitted, original is used.

    >>> class MyClass(object):
    ...     def __init__(self):
    ...         self._value = 42
    ...
    >>> class MyTrait(object):
    ...     def __init__(self):
    ...         self._value = 0
    ...
    ...     def trait_method(self):
    ...         return self.__class__.__name__, self._value
    ...
    >>> my_trait = MyTrait()
    >>> bind_method_to_class(MyClass, my_trait.trait_method)
    >>> MyClass().trait_method()
    ('MyClass', 42)
    >>> my_trait.trait_method()
    ('MyTrait', 0)
    """
    # Pull the plain function out of the owning class so that, once set on
    # the target class, it re-binds to whichever instance accesses it.
    owner = method.__self__.__class__
    plain_function = vars(owner)[method.__name__]
    setattr(clazz, name or method.__name__, plain_function)
def bind_function_to_class(clazz, function, name=None):
    """
    Binds a single function into class.

    In practice this turns any plain function into an (unbound) method of
    the class.

    @param clazz: Class to be extended
    @param function: Function that is added as a trait into class
    @param name: New name for the method. When omitted, original is used.

    >>> class MyClass(object):
    ...     def __init__(self):
    ...         self._value = 42
    ...
    >>> def trait_function(self):
    ...     return self.__class__.__name__, self._value
    ...
    >>> bind_function_to_class(MyClass, trait_function)
    >>> MyClass().trait_function()
    ('MyClass', 42)
    """
    # A plain function stored on a class becomes a regular method.
    attribute = name or function.__name__
    setattr(clazz, attribute, function)
def bind_property_to_class(clazz, prop, name):
    """
    Binds a single property into class.

    @param clazz: Class to be extended
    @param prop: Property that is added as a trait into class
    @param name: New name for the property. Mandatory for properties.

    >>> class MyClass(object):
    ...     def __init__(self):
    ...         self._value = 42
    ...
    >>> class MyTrait(object):
    ...     def __init__(self):
    ...         self._value = 0
    ...
    ...     @property
    ...     def value(self):
    ...         return self.__class__.__name__, self._value
    ...
    >>> bind_property_to_class(MyClass, MyTrait.value, 'value')
    >>> MyClass().value
    ('MyClass', 42)
    """
    # Properties carry no usable __name__ of their own, which is why the
    # target attribute name is a mandatory argument here.
    setattr(clazz, name, prop)
def bind_method_to_instance(instance, method, name=None):
    """
    Binds a single method into an instance (only that instance is affected).

    @param instance: Instance to be extended.
    @param name: New name for the method. When omitted, original is used.

    >>> class MyClass(object):
    ...     def __init__(self):
    ...         self._value = 327
    ...
    >>> class MyTrait(object):
    ...     def __init__(self):
    ...         self._value = 331
    ...
    ...     def method(self):
    ...         return self.__class__.__name__, self._value
    ...
    >>> instance = MyClass()
    >>> trait = MyTrait()
    >>> bind_method_to_instance(instance, trait.method)
    >>> instance.method()
    ('MyClass', 327)
    >>> trait.method()
    ('MyTrait', 331)
    """
    # Extract the plain function from the trait's class, re-bind it to the
    # new instance, and store it in the instance dict, shadowing any
    # class-level attribute of the same name.
    owner = method.__self__.__class__
    plain_function = vars(owner)[method.__name__]
    rebound = plain_function.__get__(instance, type(instance))
    instance.__dict__[name or method.__name__] = rebound
def bind_function_to_instance(instance, function, name=None):
    """
    Binds a single function into an instance; the class is left untouched.

    @param instance: Instance to be extended.
    @param name: New name for the method. When omitted, original is used.

    >>> class MyClass(object):
    ...     def __init__(self):
    ...         self._value = 42
    ...
    >>> def trait_function(self):
    ...     return self.__class__.__name__, self._value
    ...
    >>> my_instance = MyClass()
    >>> bind_function_to_instance(my_instance, trait_function)
    >>> my_instance.trait_function()
    ('MyClass', 42)
    >>> 'trait_function' in vars(MyClass)
    False
    """
    # Functions are descriptors: invoking __get__ yields a method bound to
    # *instance* without ever touching the class.
    #
    # See more: http://users.rcn.com/python/download/Descriptor.htm
    bound = function.__get__(instance, type(instance))
    instance.__dict__[name or function.__name__] = bound
def bind_property_to_instance(instance, trait, name=None):
    """
    Binds a single property "into an instance".

    NOTE: properties are class-level descriptors, so this necessarily sets
    the property on ``instance.__class__`` and therefore affects *all*
    instances of that class, not just ``instance``.

    @param instance: Instance to be extended.
    @param trait: Property object to bind.
    @param name: New name for the property. When omitted, the name of the
                 property's getter function is used.

    >>> class MyClass:
    ...     def __init__(self):
    ...         self._value = 42
    ...
    >>> class MyTrait:
    ...     @property
    ...     def value(self):
    ...         return self._value
    ...
    >>> my_instance = MyClass()
    >>> bind_property_to_instance(my_instance, MyTrait.value, 'value')
    >>> my_instance.value
    42
    """
    # Bug fix: the old default of None was passed straight to setattr(),
    # raising "TypeError: attribute name must be string".  Fall back to
    # the getter's name, mirroring the other binder functions.
    setattr(instance.__class__, name or trait.fget.__name__, trait)
if __name__ == '__main__':
    # Run the doctests embedded in the binder functions above.
    import doctest
    doctest.testmod()
|
sjl/d | d/base.py | Python | mit | 6,812 | 0.003083 | import sys, os, shutil
import markdown
from pyquery import PyQuery as pq
j = os.path.join
md = markdown.Markdown(extensions=['toc', 'codehilite'])
up = lambda p: j(*os.path.split(p)[:-1])
dirname = lambda p: os.path.basename(os.path.abspath(p))
extensions = ['md', 'mdown', 'markdown']
INDEX_PRE = u'''\
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<title>{title_tag}</title>
<link rel="stylesheet" href="./_dmedia/tango.css"/>
<link rel="stylesheet/less" type="text/css" href="./_dmedia/style.less">
<script src="./_dmedia/less.js" type="text/javascript">
</script>
</head>
<body class="index">
<div class="wrap">
<header><h1><a href="">{project_title}</a></h1></header>
<div class="markdown">
'''
CONTENT_PRE = u'''\
<!DOCTYPE html>
<html>
<hea | d>
<meta charset="utf-8" />
<title>{title_tag}</title>
<link rel="stylesheet" href="../_dmedia/tango.css"/>
<link rel="stylesheet/less" type="text/css" href="../_dmedia/style.less">
<script src="../_dmedia/less.js" type="text/javascript">
</script>
</head>
<body class="content">
<div class="wrap">
| <header><h1><a href="..">{project_title}</a></h1></header>
<div class="markdown">
'''
POST = u'''
</div>
<footer>{footer}</footer>
</div>
</body>
</html>
'''
def _read(f):
    # Read the whole file and decode from UTF-8 (Python 2: bytes -> unicode).
    return f.read().decode('utf-8')

def _write(f, content):
    # Encode to UTF-8 before writing (Python 2: unicode -> bytes).
    return f.write(content.encode('utf-8'))
def _get_target_url(path, destination):
    """Return just the final path component of the rendered target for *path*."""
    target = _get_target(path, destination)
    return os.path.split(target)[-1]
def _get_target(filename, destination):
    """Map a chapter *filename* to its output directory under *destination*.

    A purely-numeric ordering prefix ("03-intro.md" -> "intro") and the
    file extension are both stripped.
    """
    prefix, dash, remainder = filename.partition('-')
    if dash and all(c in '0123456789' for c in prefix):
        filename = remainder
    stem = filename.rsplit('.', 1)[0]
    return j(destination, stem)
def _get_project_title(source):
    # Prefer an explicit 'title' file in the source directory.
    if os.path.isfile(j(source, 'title')):
        with open(j(source, 'title')) as f:
            return _read(f).strip()
    else:
        # Otherwise derive the title from the directory name, skipping a
        # generic docs-folder name in favour of its parent directory.
        current = dirname(source).lower()
        if current not in ['doc', 'docs', 'documentation']:
            return current
        else:
            return dirname(j(source, '..')).lower()
def _find_chapters(source):
    """Yield markdown chapter filenames from *source*, in sorted order.

    'footer' and 'index' files are reserved and never treated as chapters.
    """
    for entry in sorted(os.listdir(source)):
        stem, ext = os.path.splitext(entry)
        if ext[1:] in extensions and stem not in ('footer', 'index'):
            yield entry
def _get_footer(source):
    # Render footer.md / footer.mdown / footer.markdown if one exists;
    # fall back to an empty string otherwise.
    for ext in extensions:
        filename = 'footer.' + ext
        target = j(source, filename);
        if os.path.isfile(target):
            with open(target) as f:
                return md.convert(_read(f))
    return ''
def _get_toc(chapters, destination):
    """Build the HTML table-of-contents list for the index page.

    *chapters* is a sequence of (filename, title) pairs.
    """
    items = ''.join(
        '<li><a href="%s/">%s</a></li>' % (_get_target_url(fn, destination), title)
        for fn, title in chapters)
    return '<h2>Table of Contents</h2><ol class="toc">' + items + '</ol>'
def _fix_md_toc(content):
    """Remove the first heading level from the Markdown-generated TOC.

    Only do so if it's on its own, though.
    """
    e = pq(content)
    if not e('.toc'):
        # No TOC in this page: nothing to rewrite.
        return content
    lis = e('.toc > ul > li')
    if len(lis) > 1:
        # Multiple top-level entries: the first level carries information.
        return content
    # Single top-level item: hoist its sub-list up one level.
    subtoc = e('.toc > ul > li > ul').html()
    e('.toc > ul').html(subtoc)
    return unicode(e)
def _linkify_title(content, fallback_title):
    # Wrap the page's <h1> in a self-link; insert one if the page has none.
    e = pq(content)
    title = e('.markdown h1').text()
    if title:
        e('.markdown h1').html('<a href="">' + title + '</a>')
    else:
        e('.markdown').prepend('<h1><a href="">' + fallback_title + '</a></h1>')
    # pyquery drops the doctype on re-serialization, so re-add it by hand.
    return u'<!DOCTYPE html>\n' + unicode(e)
def _ensure_dir(path):
    """Create *path* (including parents) unless it already is a directory."""
    if os.path.isdir(path):
        return
    os.makedirs(path)
def _get_fallback_title(path):
    """Derive a human-readable title from a chapter *path*.

    Strips the extension and any numeric ordering prefix, turns dashes and
    underscores into spaces, and capitalizes fully-lowercase results.
    """
    stem = path.split('.', 1)[0]
    prefix, dash, remainder = stem.partition('-')
    if dash and all(c in '0123456789' for c in prefix):
        stem = remainder
    stem = stem.replace('-', ' ').replace('_', ' ')
    return stem.capitalize() if stem.lower() == stem else stem
def _find_title(content):
    """Extract the document title from *content*, or return None.

    Recognizes an ATX heading ("# Title") on the first line, or a setext
    heading (a first line underlined entirely with '=' characters).
    """
    lines = content.splitlines()
    if not lines:
        return None
    first_line = lines[0].strip()
    if first_line.startswith('#'):
        # Bug fix: also strip the whitespace left between '#' and the
        # text, so "# Title" yields "Title" rather than " Title".
        return first_line.lstrip('#').strip()
    if len(lines) >= 2:
        underline = lines[1].strip()
        if underline and all(c == '=' for c in underline):
            return first_line
    return None
def _render(title, header, footer, source, destination, page_type, toc=None):
    # Render one markdown *source* file to destination/index.html.
    # *page_type* is 'index' or 'content'; *toc* is extra HTML appended
    # to index pages.  Returns the page title for TOC building.
    with open(source) as f:
        data = _read(f)
    fallback_title = _get_fallback_title(source)
    if page_type == 'content':
        # Content pages get "Page / Project" title tags.
        page_title = _find_title(data) or fallback_title
        title_tag = page_title + ' / ' + title
    else:
        page_title = title_tag = title
    content = header.format(title_tag=title_tag, project_title=title)
    content += md.convert(data)
    content += toc or ''
    content += POST.format(footer=footer)
    if page_type == 'content':
        # Post-process: flatten the markdown TOC and self-link the <h1>.
        content = _linkify_title(_fix_md_toc(content), fallback_title)
    if not os.path.isdir(destination):
        os.makedirs(destination)
    with open(j(destination, 'index.html'), 'w') as f:
        _write(f, content)
    return page_title
def render_index(title, footer, chapters, source, destination):
    # Render the project's index page, if an index.* markdown file exists.
    index_file = None
    for ext in extensions:
        filename = 'index.' + ext
        if os.path.isfile(j(source, filename)):
            index_file = j(source, filename)
    if index_file is None:
        # No index source: silently skip the index page.
        return
    toc = _get_toc(chapters, destination)
    return _render(title, INDEX_PRE, footer, index_file, destination, 'index', toc)
def render_files(source, destination):
    # Top-level entry point: render a whole documentation directory.
    _ensure_dir(destination)
    _ensure_dir(j(destination, '_dmedia'))

    title = _get_project_title(source)
    footer = _get_footer(source)

    # Copy the bundled static assets (CSS/JS) into the output tree.
    resources = j(up(__file__), 'resources')
    for filename in os.listdir(resources):
        shutil.copyfile(j(resources, filename), j(destination, '_dmedia', filename))

    # Mirror the project's own static/ directory, replacing any old copy.
    static_source = j(source, 'static')
    if os.path.isdir(static_source):
        static_destination = j(destination, 'static')
        if os.path.isdir(static_destination):
            shutil.rmtree(static_destination)
        shutil.copytree(static_source, static_destination)

    # Render every chapter, collecting (filename, title) pairs for the TOC.
    chapters = []
    for filename in _find_chapters(source):
        chapter_title = _render(title, CONTENT_PRE, footer,
            j(source, filename), _get_target(filename, destination), 'content')
        chapters.append((filename, chapter_title))

    render_index(title, footer, chapters, source, destination)
|
Erikun/elogy | elogy/patch.py | Python | gpl-3.0 | 4,904 | 0.00306 | #!/usr/bin/env python
# coding=utf-8
# License: Public domain (CC0)
# Isaac Turner 2016/12/05
# Port to Python 3 and cleanup by Johan Forsberg
from __future__ import print_function
import difflib
import re
_no_eol = "\ No newline at end of file"
_hdr_pat = re.compile("^@@ -(\d+),?(\d+)? \+(\d+),?(\d+)? @@$")
def make_patch(a, b):
    """
    Get unified string diff between two strings. Trims top two lines.
    Returns empty string if strings are identical.
    """
    diff_lines = difflib.unified_diff(a.splitlines(True), b.splitlines(True),
                                      n=0)
    # Drop the '---'/'+++' file-header pair; identical inputs produce an
    # empty diff, in which case there is nothing to skip.
    try:
        next(diff_lines)
        next(diff_lines)
    except StopIteration:
        pass
    # Lines without a trailing newline get the standard diff marker.
    return ''.join(line if line.endswith('\n')
                   else line + '\n' + _no_eol + '\n'
                   for line in diff_lines)
def apply_patch(s, patch, revert=False):
    """
    Apply patch to string s to recover newer string.
    If revert is True, treat s as the newer string, recover older string.
    """
    s = s.splitlines(True)
    p = patch.splitlines(True)
    t = ''
    i = sl = 0
    # Forward patching keeps '+' lines and reads hunk offsets from the
    # "-<start>,<len>" side (groups 1/2); reverting keeps '-' lines and
    # reads offsets from the "+" side (groups 3/4).
    (midx, sign) = (1, '+') if not revert else (3, '-')
    while i < len(p) and p[i].startswith(("---", "+++")):
        i += 1  # skip header lines
    while i < len(p):
        m = _hdr_pat.match(p[i])
        if not m:
            raise Exception(
                "Bad patch -- regex mismatch [line " + str(i) + "]")
        # 0-based hunk start; a length of '0' means "insert after line".
        l = int(m.group(midx)) - 1 + (m.group(midx + 1) == '0')
        if sl > l or l > len(s):
            raise Exception("Bad patch -- bad line num [line " + str(i) + "]")
        # Copy the unchanged region preceding this hunk.
        t += ''.join(s[sl:l])
        sl = l
        i += 1
        while i < len(p) and p[i][0] != '@':
            if i + 1 < len(p) and p[i + 1][0] == '\\':
                # Next line is the "\ No newline at end of file" marker:
                # drop the artificial trailing newline and skip the marker.
                line = p[i][:-1]
                i += 2
            else:
                line = p[i]
                i += 1
            if len(line) > 0:
                if line[0] == sign or line[0] == ' ':
                    t += line[1:]
                # Context and removed-side lines consume a source line.
                sl += (line[0] != sign)
    # Copy the remainder after the last hunk.
    t += ''.join(s[sl:])
    return t
#
# Testing
#
import random
import string
import traceback
import sys
import codecs
def test_diff(a, b):
    # Round-trip check: patching a forward must give b, and reverting
    # from b must give a.  On failure, dump all intermediates and abort.
    mp = make_patch(a, b)
    try:
        assert apply_patch(a, mp) == b
        assert apply_patch(b, mp, True) == a
    except Exception:
        print("=== a ===")
        print([a])
        print("=== b ===")
        print([b])
        print("=== mp ===")
        print([mp])
        print("=== a->b ===")
        print(apply_patch(a, mp))
        print("=== a<-b ===")
        print(apply_patch(b, mp, True))
        traceback.print_exc()
        sys.exit(-1)
def randomly_interleave(*iterables):
    """Randomly interleave multiple lists/iterators.

    The relative order of items coming from the same source is preserved.
    """
    pending = [iter(src) for src in iterables]
    while pending:
        idx = random.randrange(len(pending))
        try:
            yield next(pending[idx])
        except StopIteration:
            # Drop the exhausted iterator; swap-with-last keeps removal O(1).
            pending[idx] = pending[-1]
            pending.pop()
def rand_ascii():
    # One uniformly random printable ASCII character (may be whitespace).
    return random.choice(string.printable)
def rand_unicode():
    """Return one random character from the Basic Multilingual Plane.

    Bug fix: the Python-3 port previously returned
    ``str(codecs.encode(u"\\u%04x" % n, 'utf-8'))`` -- i.e. the repr of a
    bytes object like ``b'\\u0041'`` -- rather than an actual character,
    so the "unicode" fuzz tests never exercised non-ASCII input.
    """
    return chr(random.randrange(0x10000))
def generate_test(nlines=10, linelen=10, randchar=rand_ascii):
    """
    Generate two strings with approx `nlines` lines which share approx
    half their lines, then run the diff/patch round-trip test on them.
    Lines are random characters and may include newlines / linefeeds.
    """
    n_a, n_b, n_shared = (random.randrange(nlines) for _ in range(3))

    def make_line():
        return ''.join(randchar() for _ in range(linelen))

    only_a = [make_line() for _ in range(n_a)]
    only_b = [make_line() for _ in range(n_b)]
    shared = [make_line() for _ in range(n_shared)]

    a_lines = randomly_interleave(only_a, shared)
    b_lines = randomly_interleave(only_b, shared)
    test_diff(''.join(a_lines), ''.join(b_lines))
def std_tests():
    # Hand-picked deterministic cases: mid-string edits, edits at both
    # ends, empty inputs, and missing trailing newlines.
    test_diff("asdf\nhamster\nmole\nwolf\ndog\ngiraffe",
              "asdf\nhampster\nmole\nwolf\ndooog\ngiraffe\n")
    test_diff("asdf\nhamster\nmole\nwolf\ndog\ngiraffe",
              "hampster\nmole\nwolf\ndooog\ngiraffe\n")
    test_diff("hamster\nmole\nwolf\ndog",
              "asdf\nhampster\nmole\nwolf\ndooog\ngiraffe\n")
    test_diff("", "")
    test_diff("", "asdf\nasf")
    test_diff("asdf\nasf", "xxx")
    # Things can get nasty, we need to be able to handle any input:
    # control characters exercise splitlines() corner cases
    # see https://docs.python.org/3/library/stdtypes.html
    test_diff("\x0c", "\n\r\n")
    test_diff("\x1c\v", "\f\r\n")
def main():
    # Deterministic cases first, then randomized fuzzing in both ASCII
    # and "unicode" flavours (50 rounds each).
    print("Testing...")
    std_tests()
    print("Testing random ASCII...")
    for _ in range(50):
        generate_test(50, 50, rand_ascii)
    print("Testing random unicode...")
    for _ in range(50):
        generate_test(50, 50, rand_unicode)
    print("Passed ✓")

if __name__ == '__main__':
    main()
|
deepmind/enn | enn/experiments/neurips_2021/distillation/train_lib.py | Python | apache-2.0 | 5,289 | 0.009265 | # python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Library for helper functions developing distillation ENN."""
import dataclasses
from typing import Dict, Sequence, Tuple
from enn import base as enn_base
from enn import losses
from enn import utils
import haiku as hk
import jax
import jax.numpy as jnp
@dataclasses.dataclass
class DistillationIndexer(enn_base.EpistemicIndexer):
  """Wraps another indexer and reserves a sentinel index for the distilled head."""
  indexer: enn_base.EpistemicIndexer

  @property
  def mean_index(self) -> enn_base.Index:
    # Sentinel index used by losses to request the distilled (mean) output.
    return -1

  def __call__(self, key: enn_base.RngKey) -> enn_base.Index:
    # Delegate normal index sampling to the wrapped indexer.
    return self.indexer(key)
def _merge_params(params_seq: Sequence[hk.Params]) -> hk.Params:
  """Unsafe way to combine parameters in Haiku.

  NOTE: later entries silently overwrite earlier ones on module-name
  collisions; callers must ensure the parameter trees are disjoint.
  """
  holding_dict = {}
  # TODO(author2): Look for safe/upstream version in Haiku.
  for params in params_seq:
    holding_dict.update(hk.data_structures.to_mutable_dict(params))
  return hk.data_structures.to_immutable_dict(holding_dict)
class DistillRegressionMLP(enn_base.EpistemicNetwork):
  """Add an extra MLP predicting (mean, log_var) to ENN extra output."""

  def __init__(self,
               enn: enn_base.EpistemicNetwork,
               hidden_sizes: Sequence[int] = (50, 50)):
    """Add an extra MLP predicting (mean, log_var) to ENN extra output."""

    def net_fn(x: enn_base.Array) -> Dict[str, enn_base.Array]:
      # Two separate scalar heads: distilled mean and log-variance.
      mean = hk.nets.MLP(list(hidden_sizes) + [1], name='distill_mean')
      var = hk.nets.MLP(list(hidden_sizes) + [1], name='distill_var')
      return {'mean': mean(x), 'log_var': var(x)}
    transformed = hk.without_apply_rng(hk.transform(net_fn))

    def apply(params: hk.Params,
              x: enn_base.Array,
              z: enn_base.Index) -> enn_base.OutputWithPrior:
      # Forward the wrapped ENN, wrap plain outputs, then attach the
      # distillation head's predictions to the .extra dict.
      net_out = enn.apply(params, x, z)
      if not isinstance(net_out, enn_base.OutputWithPrior):
        net_out = enn_base.OutputWithPrior(net_out)
      net_out: enn_base.OutputWithPrior = net_out
      net_out.extra.update(transformed.apply(params, x))
      return net_out

    def init(key: enn_base.RngKey,
             x: enn_base.Array,
             z: enn_base.Index) -> hk.Params:
      # Initialize both parameter trees and merge them into one.
      base_params = enn.init(key, x, z)
      distill_params = transformed.init(key, x)
      return _merge_params([base_params, distill_params])

    indexer = DistillationIndexer(enn.indexer)
    super().__init__(apply, init, indexer)
@dataclasses.dataclass
class DistillRegressionLoss(enn_base.LossFn):
  """Distills mean and variance targets to extra components."""
  num_fake_batch: int  # number of synthetic inputs drawn per step
  num_index_sample: int  # ensemble samples used to form the targets
  only_real_data: bool = False  # if True, distill on batch.x instead of noise

  def __call__(
      self,
      enn: enn_base.EpistemicNetwork,
      params: hk.Params,
      batch: enn_base.Batch,
      key: enn_base.RngKey,
  ) -> Tuple[enn_base.Array, enn_base.LossMetrics]:
    """Distills mean and variance targets to extra components."""
    if self.only_real_data:
      x = batch.x
    else:
      # NOTE(review): assumes batch.x is 2-D (batch, features) -- confirm.
      x = jax.random.normal(key, [self.num_fake_batch, batch.x.shape[1]])
    batched_out = losses.generate_batched_forward_at_data(
        self.num_index_sample, x, enn, params, key)
    # Targets must not propagate gradients back into the ensemble.
    batched_out: enn_base.OutputWithPrior = jax.lax.stop_gradient(batched_out)
    if hasattr(enn.indexer, 'mean_index'):
      distill_out = enn.apply(params, x, enn.indexer.mean_index)  # pytype:disable=attribute-error
      loss = kl_gauss(batched_out, distill_out)
      return jnp.mean(loss), {}
    else:
      raise ValueError(f'Indexer {enn.indexer} has no mean_index.')
def kl_gauss(batched_out: enn_base.OutputWithPrior,
             distill_out: enn_base.OutputWithPrior) -> enn_base.Array:
  """Elementwise KL(empirical Gaussian || distilled Gaussian).

  The empirical mean/variance are taken across axis 0 (the ensemble
  samples) of batched_out; the distilled Gaussian is parameterized by the
  'mean' and 'log_var' entries of distill_out.extra.
  """
  batched_out = jax.lax.stop_gradient(batched_out)
  observed_mean = jnp.mean(utils.parse_net_output(batched_out), axis=0)
  observed_var = jnp.var(utils.parse_net_output(batched_out), axis=0)
  mean = distill_out.extra['mean']
  log_var = distill_out.extra['log_var']
  # Closed form: 0.5 * (log(v1/v0) + (v0 + (m0 - m1)^2) / v1 - 1).
  log_term = log_var - jnp.log(observed_var)
  mean_term = (observed_var + (mean - observed_mean) ** 2) / jnp.exp(log_var)
  return 0.5 * (log_term + mean_term - 1)
def combine_losses(loss_seq: Sequence[enn_base.LossFn]) -> enn_base.LossFn:
  """Combines a sequence of losses as a sum.

  The returned loss evaluates every loss in order, sums the loss values and
  merges the metrics dicts (later losses overwrite clashing metric keys).
  """
  def combined_loss_fn(
      enn: enn_base.EpistemicNetwork,
      params: hk.Params,
      batch: enn_base.Batch,
      key: enn_base.RngKey,
  ) -> Tuple[enn_base.Array, enn_base.LossMetrics]:
    evaluations = [single_loss(enn, params, batch, key)
                   for single_loss in loss_seq]
    merged_metrics = {}
    for _, metrics in evaluations:
      merged_metrics.update(metrics)
    total = sum((value for value, _ in evaluations), 0.)
    return total, merged_metrics
  return combined_loss_fn
|
FireballDWF/cloud-custodian | c7n/output.py | Python | apache-2.0 | 13,380 | 0.000299 | # Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Outputs metrics, logs, stats, traces, and structured records across
a variety of sinks.
See docs/usage/outputs.rst
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import contextlib
from datetime import datetime
import json
import gzip
import logging
import os
import shutil
import time
import uuid
from c7n.exceptions import InvalidOutputConfig
from c7n.registry import PluginRegistry
from c7n.utils import parse_url_config
try:
import psutil
HAVE_PSUTIL = True
except ImportError:
HAVE_PSUTIL = False
# Module-level logger shared by all output sinks in this module.
log = logging.getLogger('custodian.output')
# TODO remove
# Legacy default metrics namespace.
DEFAULT_NAMESPACE = "CloudMaid"
class OutputRegistry(PluginRegistry):
    """Registry that resolves an output selector string to a sink factory.

    A selector such as ``aws://...`` is matched against registered protocol
    prefixes; an empty selector falls back to the ``default`` entry.  When a
    subclass sets ``default_protocol``, it is prepended to bare selectors
    that carry no ``://`` scheme.
    """
    default_protocol = None

    def select(self, selector, ctx):
        """Instantiate the output matching *selector*, bound to *ctx*."""
        if not selector:
            return self['default'](ctx, {'url': selector})
        needs_scheme = self.default_protocol and '://' not in selector
        if needs_scheme:
            selector = '%s://%s' % (self.default_protocol, selector)
        for prefix in self.keys():
            if not selector.startswith(prefix):
                continue
            return self[prefix](ctx, parse_url_config(selector))
        raise InvalidOutputConfig("Invalid %s: %s" % (
            self.plugin_type,
            selector))
class BlobOutputRegistry(OutputRegistry):
    """Registry for blob outputs; bare selectors default to file:// paths."""
    default_protocol = "file"
class LogOutputRegistry(OutputRegistry):
    """Registry for log outputs; bare selectors default to the aws provider."""
    default_protocol = "aws"
class MetricsRegistry(OutputRegistry):
    """Registry for metrics sinks, with legacy boolean config support."""
    def select(self, selector, ctx):
        """Resolve *selector*, mapping a bare ``True`` to the aws sink."""
        # Older policy configs used `metrics: true` instead of a selector
        # string; keep accepting that form.
        if selector is True:
            selector = 'aws'
        return super(MetricsRegistry, self).select(selector, ctx)
# One registry per output concern; cloud providers register concrete sinks
# into these at import time.
api_stats_outputs = OutputRegistry('c7n.output.api_stats')
blob_outputs = BlobOutputRegistry('c7n.output.blob')
log_outputs = LogOutputRegistry('c7n.output.logs')
metrics_outputs = MetricsRegistry('c7n.output.metrics')
tracer_outputs = OutputRegistry('c7n.output.tracer')
sys_stats_outputs = OutputRegistry('c7n.output.sys_stats')
@tracer_outputs.register('default')
class NullTracer(object):
    """Tracing provides for detailed analytics of a policy execution.
    Uses native cloud provider integration (xray, stack driver trace).
    """
    # Null-object implementation: records nothing, so tracing is a no-op
    # unless a provider-specific tracer is selected.
    def __init__(self, ctx, config=None):
        self.ctx = ctx
        self.config = config or {}
    @contextlib.contextmanager
    def subsegment(self, name):
        """Create a named subsegment as a context manager
        """
        # Yields itself without recording anything.
        yield self
    def __enter__(self):
        """Enter main segment for policy execution.
        """
    def __exit__(self, exc_type=None, exc_value=None, exc_traceback=None):
        """Exit main segment for policy execution.
        """
class DeltaStats(object):
    """Stack-based capture of integer-valued stat snapshots.

    Subclasses provide get_snapshot(); popping the stack returns the
    difference between the popped snapshot and the current stats.
    """
    def __init__(self, ctx, config=None):
        self.ctx = ctx
        self.config = config or {}
        self.snapshot_stack = []

    def push_snapshot(self):
        """Record the current stats on top of the stack."""
        current = self.get_snapshot()
        self.snapshot_stack.append(current)

    def pop_snapshot(self):
        """Pop the most recent snapshot and return its delta to now."""
        previous = self.snapshot_stack.pop()
        return self.delta(previous, self.get_snapshot())

    def get_snapshot(self):
        """Subclass hook; the base implementation tracks nothing."""
        return {}

    def delta(self, before, after):
        """Return only the non-zero differences between two snapshots."""
        changed = {}
        for key in before:
            difference = after[key] - before[key]
            if difference:
                changed[key] = difference
        return changed
@sys_stats_outputs.register('default')
@api_stats_outputs.register('def | ault')
class NullStats(object):
"""Execution statistics/metrics collection.
Encompasses concrete implementations over system stats (memory, cpu, cache size)
and api calls.
The api supports stack nested snapshots, with delta consumption to support
tracing metadata annotation across nested subsegments.
"""
def __init__(self, ctx, config=None):
self.ctx | = ctx
self.config = config or {}
def push_snapshot(self):
"""Take a snapshot of the system stats and append to the stack."""
def pop_snapshot(self):
"""Remove a snapshot from the snack and return a delta of the current stats to it.
"""
return {}
def get_metadata(self):
"""Return default of current to last snapshot, without popping.
"""
return {}
def __enter__(self):
"""Push a snapshot
"""
def __exit__(self, exc_type=None, exc_value=None, exc_traceback=None):
"""Pop a snapshot
"""
@sys_stats_outputs.register('psutil', condition=HAVE_PSUTIL)
class SystemStats(DeltaStats):
    """Collect process statistics via psutil as deltas over policy execution.
    """
    def __init__(self, ctx, config=None):
        super(SystemStats, self).__init__(ctx, config)
        self.process = psutil.Process(os.getpid())

    def __enter__(self):
        self.push_snapshot()

    # Bug fix: the context-manager protocol calls __exit__ with
    # (exc_type, exc_value, traceback); the previous zero-argument
    # signature raised TypeError whenever a `with` block exited.
    def __exit__(self, exc_type=None, exc_value=None, exc_traceback=None):
        self.pop_snapshot()

    def get_metadata(self):
        """Delta of current stats against the top snapshot, without popping."""
        if self.snapshot_stack:
            return self.delta(self.snapshot_stack[-1], self.get_snapshot())
        return self.get_snapshot()

    def get_snapshot(self):
        """Sample process counters: threads, fds, cpu, ctx switches, io, memory."""
        snapshot = {
            'num_threads': self.process.num_threads(),
            'snapshot_time': time.time(),
            'cache_size': self.ctx.policy.get_cache().size()
        }
        # no num_fds on Windows, but likely num_handles
        if hasattr(self.process, "num_fds"):
            snapshot['num_fds'] = self.process.num_fds()
        elif hasattr(self.process, "num_handles"):
            snapshot['num_handles'] = self.process.num_handles()
        with self.process.oneshot():
            # simpler would be json.dumps(self.process.as_dict()), but
            # that complicates delta diffing between snapshots.
            cpu_time = self.process.cpu_times()
            snapshot['cpu_user'] = cpu_time.user
            snapshot['cpu_system'] = cpu_time.system
            (snapshot['num_ctx_switches_voluntary'],
             snapshot['num_ctx_switches_involuntary']) = self.process.num_ctx_switches()
            # io counters ( not available on osx)
            if getattr(self.process, 'io_counters', None):
                try:
                    io = self.process.io_counters()
                    for counter in (
                            'read_count', 'write_count',
                            'write_bytes', 'read_bytes'):
                        snapshot[counter] = getattr(io, counter)
                except NotImplementedError:
                    # some old kernels and Windows Linux Subsystem throw this
                    pass
            # memory counters
            mem = self.process.memory_info()
            for counter in (
                    'rss', 'vms', 'shared', 'text', 'data', 'lib',
                    'pfaults', 'pageins'):
                v = getattr(mem, counter, None)
                if v is not None:
                    snapshot[counter] = v
        return snapshot
class Metrics(object):
    """Base class for metrics sinks; buffers data points and flushes in batches.

    Subclasses implement _format_metric (shape a single point) and
    _put_metrics (deliver a batch to the backing store).
    """
    permissions = ()
    namespace = DEFAULT_NAMESPACE
    BUFFER_SIZE = 20
    def __init__(self, ctx, config=None):
        self.ctx = ctx
        # Bug fix: normalize config to a dict (`config or {}`) for
        # consistency with every other output class in this module.
        self.config = config or {}
        self.buf = []
    def _format_metric(self, key, value, unit, dimensions):
        # Typo fix in the error message ("responsiblity").
        raise NotImplementedError("subclass responsibility")
    def _put_metrics(self, ns, metrics):
        raise NotImplementedError("subclass responsibility")
    def flush(self):
        """Send any buffered metrics to the backing store and clear the buffer."""
        if self.buf:
            self._put_metrics(self.namespace, self.buf)
            self.buf = []
|
JWilson0/dt211-cloud-repo | Euler/sovle1.py | Python | mit | 120 | 0.016667 | sum | = 0
# Project Euler #1: sum the natural numbers below 1000 that are multiples
# of 3 or 5.
# Bug fix: the accumulator was initialised as `sum` (shadowing the builtin)
# but incremented and printed as the undefined name `totalSum`, raising a
# NameError at runtime.
total = 0
for i in range(1000):
    if i % 3 == 0 or i % 5 == 0:
        total += i
        print(i)
print(total)
| |
mitchsmith/qd_screener | questionnaire/admin.py | Python | mit | 7,839 | 0.007782 | from django.contrib import admin
from django.forms import ModelChoiceField, ModelForm, Textarea
from questionnaire.models import *
from django.core.exceptions import ValidationError
from django.forms.models import BaseInlineFormSet
from questionnaire import admin_helper
############################################### HELPER METHODS ###########################################
def is_strictly_monotonically_increasing(sequence):
    """
    Return True when every element of ``sequence`` is strictly greater than
    the one before it. Used to validate the display order specified for
    Questions and Answers.
    """
    for previous, current in zip(sequence, sequence[1:]):
        if previous >= current:
            return False
    return True
############################################### Questionnaire Manager ##############################################
class CustomQuestionnaireModelField(ModelChoiceField):
    """Questionnaire chooser whose dropdown entries show the questionnaire title."""
    def label_from_instance(self, obj):
        # Display the title instead of the default str(obj).
        return "%s" % obj.title
class QuestionnaireManagerAdminForm(ModelForm):
    """Admin form exposing the active questionnaire as a titled dropdown."""
    current_questionnaire = CustomQuestionnaireModelField(queryset=Questionnaire.objects.all())
    class Meta:
        model = QuestionnaireManager
        # NOTE(review): Django >= 1.8 requires an explicit `fields`/`exclude`
        # on ModelForm Meta; confirm the Django version before upgrading.
class QuestionnaireManagerAdmin(admin.ModelAdmin):
    """Admin page for choosing the currently active questionnaire."""
    form = QuestionnaireManagerAdminForm
admin.site.register(QuestionnaireManager, QuestionnaireManagerAdmin)
################################################ Answer ###################################################
class CustomQuestionField(ModelChoiceField):
    """Question chooser whose dropdown entries show the question text."""
    def label_from_instance(self, obj):
        # Display the question text instead of the default str(obj).
        return "%s" % obj.question
class AnswerAdminForm(ModelForm):
    """Answer form that presents the parent question by its text."""
    question = CustomQuestionField(queryset=Question.objects.all())
def Question_Text(obj):
    """Admin list column: the text of this answer's parent question."""
    parent_question = obj.question
    return parent_question.question
class AnswerFormSet(BaseInlineFormSet):
    """Inline formset validation for the answers attached to a question."""
    def clean(self):
        """
        Check that:
        1. There's only one correct answer (this must be checked to be eligible)
        2. A valid order in which to display answers has been specified
        3. There are at least 2 answers
        """
        super(AnswerFormSet, self).clean()
        # Check #1
        specified_sequence = []
        num_correct_answers = 0
        num_answers = 0
        for form in self.forms:
            # Forms with their own field errors have no cleaned_data.
            if not hasattr(form, 'cleaned_data'):
                continue
            data = form.cleaned_data
            if data.get('is_correct', False):
                num_correct_answers += 1
            # NOTE: `data` is rebound here from the dict to the sequence value.
            data = data.get('sequence_order', -1)
            if data > -1:
                specified_sequence.append(data)
            num_answers += 1
        if num_correct_answers != 1:
            raise ValidationError('Need to choose one "correct" answer')
        # Check #2
        # NOTE(review): sorting + strict monotonicity only guarantees the
        # values are unique; unlike what the message claims, a sequence that
        # skips numbers or doesn't start at 1 (e.g. 2, 5, 9) passes -- confirm
        # whether gaps should be rejected.
        specified_sequence = sorted(specified_sequence)
        if not is_strictly_monotonically_increasing(specified_sequence):
            message = """ The order you've specified in which to display answers doens't make sense.
            Please enter a sequence starting with 1, without skipping or repeating numbers. """
            raise ValidationError(message)
        # Check #3
        if num_answers < 2:
            message = 'There should be at least 2 answers'
            raise ValidationError(message)
class AnswerInline(admin.TabularInline):
    """Inline answer rows on the Question admin page."""
    model = Answer
    formset = AnswerFormSet
    ordering = ('-created_on',)
################################################# Question ##############################################
def Questionnaire_Title(obj):
    """Admin list column: title of the questionnaire that owns this question."""
    owning_questionnaire = obj.questionnaire
    return owning_questionnaire.title
class QuestionAdminForm(ModelForm):
    """Question admin form validating the optional related-content pair."""
    # questionnaire = CustomQuestionnaireModelField(queryset=Questionnaire.objects.all())
    def check_url(self, url):
        """Loosely validate *url*: empty is allowed, otherwise it must start with 'http'."""
        if len(url) > 0:
            return url.startswith('http')
        return True
    def clean(self):
        cleaned_data = super(QuestionAdminForm, self).clean()
        # Bug fix: when a field failed its own validation, cleaned_data has no
        # entry for it and .get() returns None; default to '' so the .strip()
        # calls below cannot raise AttributeError.
        related_content_link = cleaned_data.get('related_content_link') or ''
        related_content_text = cleaned_data.get('related_content_text') or ''
        has_link = bool(related_content_link.strip())
        has_text = bool(related_content_text.strip())
        # The link and its description must be provided together or not at all
        # (equivalent to the original "at least one and not both" check).
        if has_link != has_text:
            raise ValidationError('Both related_content_link and related_content_text need to be either set or empty')
        if not self.check_url(related_content_link):
            raise ValidationError('%s does not seem to be a valid url' % related_content_link)
        return cleaned_data
    class Meta:
        model = Question
        widgets = {
            'question': Textarea
        }
class QuestionAdmin(admin.ModelAdmin):
    """Admin for questions with inline answers and questionnaire filtering."""
    inlines = [ AnswerInline, ]
    search_fields = ['questionnaire__title', 'question' ]
    list_display = ['question', Questionnaire_Title, 'sequence_order', 'created_on']
    list_filter = ['created_on', admin_helper.QuestionListFilter]
    ordering = ('-created_on',)
    form = QuestionAdminForm
admin.site.register(Question, QuestionAdmin)
class QuestionFormSet(BaseInlineFormSet):
def clean(self):
"""
1. Check that all answers have been assigned a sequence (1..k)
in order, without skipping indices, and unique!
2. Check that related_content_link and related_content_text are both either
specified or blank
"""
super(QuestionFormSet, self).clean()
# Check #1
specified_sequence = []
for form in self.forms:
if not hasattr(form, 'cleaned_data'):
continue
data = form.cleaned_data
data = data.get('sequence_order', -1)
if data > -1:
specified_sequence.append(data)
specified_sequence = sorted(specified_sequence)
if not is_strictly_monotonically_increasing(specified_sequence):
message = """ The order you've specified in which to display questions doens't make sense.
Please enter a sequence starting with 1, without skipping or repeating numbers. """
raise ValidationError(message)
# Check #2
for form in self.forms:
if not hasattr(form, 'cleaned_data'):
continue
data = form.cleaned_data
related_content_link = data.get('related_content_link', '').strip()
related_content_text = data.get('related_content_text', '').strip()
at_least_one_field_has_text = (len(related_content_link.s | trip()) + len(related_content_text.strip())) > 0
both_fields_have_text = (len(related_content_link.strip()) * len(related_content_text.strip())) > 0
i | f at_least_one_field_has_text and not both_fields_have_text:
raise ValidationError('Both related_content_link and related_content_text need to be either set or empty')
class QuestionInline(admin.TabularInline):
    """Inline question rows on the Questionnaire admin page."""
    model = Question
    formset = QuestionFormSet
    ordering = ('-created_on',)
    def formfield_for_dbfield(self, db_field, **kwargs):
        # Use a multi-line textarea for the question text instead of the
        # default single-line input.
        if db_field.name == 'question':
            kwargs['widget'] = Textarea()
        return super(QuestionInline,self).formfield_for_dbfield(db_field,**kwargs)
############################################## Questionnaire ###############################################
class QuestionnaireAdmin(admin.ModelAdmin):
    """Admin for questionnaires with their questions edited inline."""
    inlines = [ QuestionInline ]
    list_display = ['title', 'description', 'created_on']
    list_filter = ['created_on']
    search_fields = ['title', 'sub_title']
    ordering = ('-created_on',)
admin.site.register(Questionnaire, QuestionnaireAdmin)
class StudyAdmin(admin.ModelAdmin):
    """Admin for studies, searchable by protocol number."""
    list_display = ['protocol_number', 'created_on']
    list_filter = ['created_on']
    search_fields = ['protocol_number']
    ordering = ('-created_on',)
admin.site.register(Study, StudyAdmin)
|
pouyana/teireader | webui/applications/admin/models/access.py | Python | mit | 5,924 | 0.000675 | import base64
import os
import time
from gluon import portalocker
from gluon.admin import apath
from gluon.fileutils import read_file
# ###########################################################
# ## make sure administrator is on localhost or https
# ###########################################################
http_host = request.env.http_host.split(':')[0]
if request.env.web2py_runtime_gae:
session_db = DAL('gae')
session.connect(request, response, db=session_db)
hosts = (htt | p_host, )
is_gae = True
else:
is_gae = False
if request.env.http_ | x_forwarded_for or request.is_https:
session.secure()
elif not request.is_local and not DEMO_MODE:
raise HTTP(200, T('Admin is disabled because insecure channel'))
# Load the admin password hash from parameters_<port>.py, executed inside
# web2py's restricted sandbox.  Missing/unreadable file or empty password
# disables the admin application (or, on GAE, falls back to GAE credentials).
try:
    _config = {}
    port = int(request.env.server_port or 0)
    restricted(
        read_file(apath('../parameters_%i.py' % port, request)), _config)
    if not 'password' in _config or not _config['password']:
        raise HTTP(200, T('admin disabled because no admin password'))
except IOError:
    import gluon.fileutils
    if is_gae:
        if gluon.fileutils.check_credentials(request):
            session.authorized = True
            session.last_time = time.time()
        else:
            raise HTTP(200,
                       T('admin disabled because not supported on google app engine'))
    else:
        raise HTTP(
            200, T('admin disabled because unable to access password file'))
def verify_password(password):
    """Check *password* against the configured admin credential.

    DEMO_MODE accepts anything; a 'pam_user:<name>' entry in the parameters
    file delegates authentication to PAM for that account; otherwise the
    stored hash is compared.
    """
    session.pam_user = None
    if DEMO_MODE:
        return True
    elif not _config.get('password'):
        # No password configured: admin login is effectively disabled.
        return False
    elif _config['password'].startswith('pam_user:'):
        session.pam_user = _config['password'][9:].strip()
        import gluon.contrib.pam
        return gluon.contrib.pam.authenticate(session.pam_user, password)
    else:
        # CRYPT()(password)[0] is the hashed form of the candidate password.
        return _config['password'] == CRYPT()(password)[0]
# ###########################################################
# ## handle brute-force login attacks
# ###########################################################
# Brute-force lockout state file: one "<ip> <attempts> <timestamp>" per line.
deny_file = os.path.join(request.folder, 'private', 'hosts.deny')
# Failed attempts allowed before a client is locked out.
allowed_number_of_attempts = 5
# Lockout/forgiveness window for failed logins, in seconds.
expiration_failed_logins = 3600
def read_hosts_deny():
    """Parse private/hosts.deny into {ip: (failed_attempts, last_attempt_ts)}.

    Lines are whitespace separated ``<ip> <attempts> <timestamp>``; blank
    lines, ``#`` comments and malformed short lines are ignored.
    """
    # Fixes: removed the unused `import datetime` and the duplicate
    # `hosts = {}` initialisation; `with` + try/finally guarantee the file is
    # closed and the advisory lock released even if parsing raises.
    hosts = {}
    if os.path.exists(deny_file):
        with open(deny_file, 'r') as f:
            # Shared lock: concurrent admin processes may read simultaneously.
            portalocker.lock(f, portalocker.LOCK_SH)
            try:
                for line in f.readlines():
                    if not line.strip() or line.startswith('#'):
                        continue
                    fields = line.strip().split()
                    if len(fields) > 2:
                        hosts[fields[0].strip()] = (  # ip
                            int(fields[1].strip()),  # n attemps
                            int(fields[2].strip())   # last attempts
                        )
            finally:
                portalocker.unlock(f)
    return hosts
def write_hosts_deny(denied_hosts):
    """Rewrite private/hosts.deny, dropping entries older than the expiry window."""
    # Fix: the original leaked the file handle and never released the lock if
    # a write raised; `with` + try/finally make the cleanup unconditional.
    with open(deny_file, 'w') as f:
        # Exclusive lock: only one process may rewrite the file at a time.
        portalocker.lock(f, portalocker.LOCK_EX)
        try:
            for key, val in denied_hosts.items():
                if time.time() - val[1] < expiration_failed_logins:
                    line = '%s %s %s\n' % (key, val[0], val[1])
                    f.write(line)
        finally:
            portalocker.unlock(f)
def login_record(success=True):
    """Record a login attempt for request.client; return its failure count.

    Successful logins clear the client's entry.  Failed remote logins sleep
    exponentially (2**attempts seconds) as a brute-force throttle, and once
    allowed_number_of_attempts is reached within the expiry window the client
    is reported as locked out without updating the file.
    """
    denied_hosts = read_hosts_deny()
    val = (0, 0)
    if success and request.client in denied_hosts:
        del denied_hosts[request.client]
    elif not success and not request.is_local:
        val = denied_hosts.get(request.client, (0, 0))
        if time.time() - val[1] < expiration_failed_logins \
                and val[0] >= allowed_number_of_attempts:
            return val[0]  # locked out
        time.sleep(2 ** val[0])
        val = (val[0] + 1, int(time.time()))
        denied_hosts[request.client] = val
    write_hosts_deny(denied_hosts)
    return val[0]
# ###########################################################
# ## session expiration
# ###########################################################
t0 = time.time()
if session.authorized:
    # Expire idle admin sessions; otherwise refresh the activity timestamp.
    if session.last_time and session.last_time < t0 - EXPIRATION:
        session.flash = T('session expired')
        session.authorized = False
    else:
        session.last_time = t0
# Track the mobile-UI preference ('true'/'false'/'auto') in the session.
if request.vars.is_mobile in ('true', 'false', 'auto'):
    session.is_mobile = request.vars.is_mobile or 'auto'
if request.controller == 'default' and request.function == 'index':
    if not request.vars.is_mobile:
        session.is_mobile = 'auto'
if not session.is_mobile:
    session.is_mobile = 'auto'
# Resolve the preference to a boolean, sniffing the user agent for 'auto'.
if session.is_mobile == 'true':
    is_mobile = True
elif session.is_mobile == 'false':
    is_mobile = False
else:
    is_mobile = request.user_agent().is_mobile
if DEMO_MODE:
    session.authorized = True
    session.forget()
# Web services authenticate per-request via HTTP Basic; everything else is
# session based and unauthorized requests are redirected to the login page.
if request.controller == "webservices":
    basic = request.env.http_authorization
    if not basic or not basic[:6].lower() == 'basic ':
        raise HTTP(401, "Wrong credentials")
    (username, password) = base64.b64decode(basic[6:]).split(':')
    if not verify_password(password) or MULTI_USER_MODE:
        time.sleep(10)
        raise HTTP(403, "Not authorized")
elif not session.authorized and not \
    (request.controller + '/' + request.function in
     ('default/index', 'default/user', 'plugin_jqmobile/index', 'plugin_jqmobile/about')):
    if request.env.query_string:
        query_string = '?' + request.env.query_string
    else:
        query_string = ''
    if request.env.web2py_original_uri:
        url = request.env.web2py_original_uri
    else:
        url = request.env.path_info + query_string
    redirect(URL(request.application, 'default', 'index', vars=dict(send=url)))
elif session.authorized and \
        request.controller == 'default' and \
        request.function == 'index':
    redirect(URL(request.application, 'default', 'site'))
if request.controller == 'appadmin' and DEMO_MODE:
    session.flash = 'Appadmin disabled in demo mode'
    redirect(URL('default', 'sites'))
|
hos7ein/firewalld | src/firewall/core/io/service.py | Python | gpl-2.0 | 11,872 | 0.00598 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2016 Red Hat, Inc.
#
# Authors:
# Thomas Woerner <twoerner@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__all__ = [ "Service", "service_reader", "service_writer" ]
import xml.sax as sax
import os
import io
import shutil
from firewall import config
from firewall.functions import u2b_if_py2
from firewall.core.io.io_object import PY2, IO_Object, \
IO_Object_ContentHandler, IO_Object_XMLGenerator, check_port, \
check_tcpudp, check_protocol, check_address
from firewall.core.logger import log
from firewall import errors
from firewall.errors import FirewallError
class Service(IO_Object):
    """In-memory representation of a firewalld service definition.

    Holds the data parsed from a service XML file: ports, protocols,
    kernel helper modules, per-family destination addresses and source
    ports.  The trailing comments on IMPORT_EXPORT_STRUCTURE give the
    matching D-Bus type codes.
    """
    IMPORT_EXPORT_STRUCTURE = (
        ( "version",  "" ),                   # s
        ( "short", "" ),                      # s
        ( "description", "" ),                # s
        ( "ports", [ ( "", "" ), ], ),        # a(ss)
        ( "modules", [ "", ], ),              # as
        ( "destination", { "": "", }, ),      # a{ss}
        ( "protocols", [ "", ], ),            # as
        ( "source_ports", [ ( "", "" ), ], ), # a(ss)
        )
    DBUS_SIGNATURE = '(sssa(ss)asa{ss}asa(ss))'
    ADDITIONAL_ALNUM_CHARS = [ "_", "-" ]
    PARSER_REQUIRED_ELEMENT_ATTRS = {
        "short": None,
        "description": None,
        "service": None,
    }
    PARSER_OPTIONAL_ELEMENT_ATTRS = {
        "service": [ "name", "version" ],
        "port": [ "port", "protocol" ],
        "protocol": [ "value" ],
        "module": [ "name" ],
        "destination": [ "ipv4", "ipv6" ],
        "source-port": [ "port", "protocol" ],
    }
    def __init__(self):
        super(Service, self).__init__()
        self.version = ""
        self.short = ""
        self.description = ""
        self.ports = [ ]
        self.protocols = [ ]
        self.modules = [ ]
        self.destination = { }
        self.source_ports = [ ]
    def cleanup(self):
        # Reset all fields in place (lists/dict are cleared, not rebound).
        self.version = ""
        self.short = ""
        self.description = ""
        del self.ports[:]
        del self.protocols[:]
        del self.modules[:]
        self.destination.clear()
        del self.source_ports[:]
    def encode_strings(self):
        """ HACK. I haven't been able to make sax parser return
        strings encoded (because of python 2) instead of in unicode.
        Get rid of it once we throw out python 2 support."""
        self.version = u2b_if_py2(self.version)
        self.short = u2b_if_py2(self.short)
        self.description = u2b_if_py2(self.description)
        self.ports = [(u2b_if_py2(po),u2b_if_py2(pr)) for (po,pr) in self.ports]
        self.modules = [u2b_if_py2(m) for m in self.modules]
        self.destination = {u2b_if_py2(k):u2b_if_py2(v) for k,v in self.destination.items()}
        self.protocols = [u2b_if_py2(pr) for pr in self.protocols]
        self.source_ports = [(u2b_if_py2(po),u2b_if_py2(pr)) for (po,pr)
                             in self.source_ports]
    def _check_config(self, config, item):
        # Validate one imported field; the check_* helpers raise FirewallError
        # on invalid values.
        if item == "ports":
            for port in config:
                if port[0] != "":
                    check_port(port[0])
                    check_tcpudp(port[1])
                else:
                    # only protocol
                    check_protocol(port[1])
        elif item == "protocols":
            for proto in config:
                check_protocol(proto)
        elif item == "source_ports":
            for port in config:
                check_port(port[0])
                check_tcpudp(port[1])
        elif item == "destination":
            for destination in config:
                if destination not in [ "ipv4", "ipv6" ]:
                    raise FirewallError(errors.INVALID_DESTINATION,
                                        "'%s' not in {'ipv4'|'ipv6'}" % \
                                        destination)
                check_address(destination, config[destination])
        elif item == "modules":
            # Normalize nf_conntrack_* helper names (underscores to dashes).
            for module in config:
                if module.startswith("nf_conntrack_"):
                    module = module.replace("nf_conntrack_", "")
                    if "_" in module:
                        module = module.replace("_", "-")
                if len(module) < 2:
                    raise FirewallError(errors.INVALID_MODULE, module)
# PARSER
class service_ContentHandler(IO_Object_ContentHandler):
def startElement(self, name, attrs):
IO_Object_ContentHandler.startElement(self, name, attrs)
self.item.parser_check_element_attrs(name, attrs)
if name == "service":
if "name" in attrs:
log.warning("Ignoring deprecated attribute name='%s'",
attrs["name"])
if "version" in attrs:
self.item.version = attrs["version"]
elif name == "short":
pass
elif name == "description":
pass
elif name == "port":
if attrs["port"] != "":
check_port(attrs["port"])
check_tcpudp(attrs["protocol"])
entry = (attrs["port"], attrs["protocol"])
if entry not in self.item.ports:
self.item.ports.append(entry)
else:
log.warning("Port '%s/%s' already set, ignoring.",
attrs["port"], attrs["protocol"])
else:
check_protocol(attrs["protocol"])
if attrs["protocol"] not in self.item.protocols:
self.item.protocols.append(attrs["protocol"])
else:
log.warning("Protocol '%s' already set, ignoring.",
attrs["protocol"])
elif name == "protocol":
check_protocol(attrs["value"])
if attrs["value"] not in self.item.protocols:
self.item.protocols.append(attrs["value"])
else:
log.warning("Protocol '%s' already set, ignoring.",
attrs["value"])
elif name == "source-port":
check_port(attrs["port"])
check_tcpudp(attrs["protocol"])
entry = (attrs["port"], attrs["protocol"])
if entry not in self.item.source_ports:
self.item.source_ports.append(entry)
else:
log.warning("SourcePort '%s/%s' already set, ignoring.",
attrs["port"], attrs["protocol"])
elif name == "destination":
for x in [ "ipv4", "ipv6" ]:
if x in attrs:
check_address(x, attrs[x])
| if x in self.item.destination:
| log.warning("Destination address for '%s' already set, ignoring",
x)
else:
self.item.destination[x] = attrs[x]
elif name == "module":
module = attrs["name"]
if module.startswith("nf_conntrack_"):
module = module.replace("nf_conntrack_", "")
if "_" in module:
module = module.replace("_", "-")
if module not in self.item.modules:
self.item.modules.append(module)
else:
log.warning("Module '%s' already set, ignoring.",
module)
def service_reader(filename, path):
service = Service()
if not filename.endswith(".xml"):
raise FirewallError(errors.INVALID_NAME,
"'%s' is missing .xml s |
ucsd-ccbb/jupyter-genomics | src/microbiome/exec_fastqc.py | Python | mit | 3,451 | 0.002028 | """ Module to call fastqc in pipeline"""
import os
import subprocess
import argparse
import logging
__author__ = "YiDing Fang"
__maintainer__ = "YiDing Fang"
__email__ = "yif017@eng.ucsd.edu"
__status__ = "prototype"
def form_fastqc_cmd_list(fastqc_fp, fastq_fp, outdir):
"""Generate | argument list to be given as input to the fastqc function call.
Args:
fastqc_fp(str): the string representing path to fastqc program
fastq_fp(str): the string representing path to the fastq file to be eval | uated
outdir(str): the string representing the path to the output directory
Return value:
call_args(list): the list of call_args representing the options for the fastqc subprocess call
Raises:
ValueError is raised when either the fastqc path or the fastqc input files are empty
"""
# throw exceptions to prevent user from accidentally using interactive fastqc
if fastqc_fp is '':
raise ValueError('fastqc_fp name is empty')
if fastq_fp is '':
raise ValueError('fastq_fp file name is empty')
# required arguments
call_args_list = [fastqc_fp, fastq_fp]
# direct output
if outdir is not None:
call_args_list.extend(["--outdir", outdir])
return call_args_list
def run_fastqc(fastqc_fp, fastq_fp, outdir):
    """Invoke the fastqc software as a subprocess on one fastq file.

    Args:
        fastqc_fp(str): path to the fastqc program
        fastq_fp(str): path to the fastq file to evaluate
        outdir(str): path to the output directory

    Return value:
        output(str): characters sent to stdout by fastqc
        err(str): characters sent to stderr by fastqc
    """
    logging.debug('beginning run_fastqc function call')
    # Build the command line via the shared helper.
    command = form_fastqc_cmd_list(fastqc_fp, fastq_fp, outdir)
    logging.info("calling popen with arguments '{0}'".format(" ".join(command)))
    fastqc_proc = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_data, stderr_data = fastqc_proc.communicate()
    return stdout_data, stderr_data
def main():
    """Parse command-line arguments and pass them to the fastqc software."""
    parser = argparse.ArgumentParser()
    parser.add_argument("fastqc", type=str, help="path/to/fastqc_executable/")
    parser.add_argument("fastq_fp", type=str, help="path/to/fastq")
    parser.add_argument("-o", "--outdir", type=str, help="/path/to/output_directory")
    parser.add_argument("-l", "--log", type=str, help="/path/to/log_file")
    args = parser.parse_args()
    # Fall back to the input file's directory when no outdir was given.
    if args.outdir is not None:
        output_dir = args.outdir
    else:
        # use the directory of the input file for output
        output_dir, _ = os.path.split(args.fastq_fp)
    # if a log file is specified, set up the logger
    if args.log is not None:
        log_file = args.log
        logging.basicConfig(filename=log_file, level=logging.INFO)
    logging.debug('begin main')
    logging.info('The fastqc file path is: %s' % args.fastqc)
    logging.info('The fastq file processed by fastqc is: %s' % args.fastq_fp)
    logging.info('The fastqc output directory is: %s' % args.outdir)
    # call the fastqc subprocess
    print(run_fastqc(args.fastqc, args.fastq_fp, output_dir))
# Script entry point.
if __name__ == "__main__":
    main()
|
seanballais/SAElections | SAElections/voting/admin.py | Python | mit | 1,715 | 0.005831 | from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.contrib.auth.admin import UserAdmin
from django.contrib import admin
from django import forms
class VoterCreationForm(UserCreationForm):
section = forms.CharField()
def save(self, commit=True):
user = super(VoterCreationForm, self).save(commit=False)
user.section = self.cleaned_data['section']
if commit:
user.save()
return user
class Meta:
model = User
fields = ('username', 'password1', 'password2', 'section', 'first_name', 'last_n | ame', 'is_active', 'is_staff', 'is_superuser')
class VoterChangeForm(UserChangeForm):
    """User change form extended with a free-text section field."""
    section = forms.CharField()
    def save(self, commit=True):
        user = super(VoterChangeForm, self).save(commit=False)
        # NOTE(review): django.contrib.auth's User model has no `section`
        # field, so this attribute is not persisted by user.save(); confirm
        # that a profile/custom user model actually stores it.
        user.section = self.cleaned_data['section']
        if commit:
            user.save()
        return user
    class Meta:
        model = User
        exclude = ('',)
class VoterAdmin(UserAdmin) | :
form = VoterChangeForm
add_form = VoterCreationForm
list_filter = UserAdmin.list_filter + ('section',)
fieldsets = (
(None, {'fields': ('username', 'password')}),
(('Personal info'), {'fields': ('first_name', 'last_name', 'section')}),
(('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'password1', 'password2', 'section', 'first_name', 'last_name', 'is_active', 'is_staff', 'is_superuser')}
),
)
admin.site.unregister(User)
admin.site.register(User, VoterAdmin) |
renebentes/Python4Zumbis | Materiais/Ultima Semana/euler 02.py | Python | mit | 285 | 0.042105 | def fib():
a, b = 1, 1
while True:
yield b
a, b = b, a + b
def pares(seq):
    """Generate only the even values from *seq*."""
    for valor in seq:
        if valor % 2 != 0:
            continue
        yield valor
def menores_4M(seq):
    """Yield values from *seq* until one exceeds 4,000,000 (cutoff excluded).

    Bug fix: a stray ``|`` token after ``yield n`` made the original body a
    syntax error.
    """
    for n in seq:
        if n > 4000000:
            break
        yield n
# Project Euler #2: sum the even Fibonacci numbers not exceeding four million.
print (sum(pares(menores_4M(fib()))))
| |
lnls-sirius/dev-packages | siriuspy/siriuspy/injctrl/__init__.py | Python | gpl-3.0 | 36 | 0 | """Injection | control subpackag | e."""
|
NickDaly/GemRB-FixConfig-Branch | gemrb/GUIScripts/GUIClasses.py | Python | gpl-2.0 | 6,912 | 0.011719 | #-*-python-*-
#GemRB - Infinity Engine Emulator
#Copyright (C) 2009 The GemRB Project
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import _GemRB
from MetaClasses import metaIDWrapper, metaControl
class GTable:
    """ID-based wrapper around an engine 2DA table (methods proxied to _GemRB)."""
    __metaclass__ = metaIDWrapper
    methods = {
      'GetValue': _GemRB.Table_GetValue,
      'FindValue': _GemRB.Table_FindValue,
      'GetRowIndex': _GemRB.Table_GetRowIndex,
      'GetRowName': _GemRB.Table_GetRowName,
      'GetColumnIndex': _GemRB.Table_GetColumnIndex,
      'GetColumnName': _GemRB.Table_GetColumnName,
      'GetRowCount': _GemRB.Table_GetRowCount,
      'GetColumnCount': _GemRB.Table_GetColumnCount
    }

    def __del__(self):
        # don't unload tables if the _GemRB module is already unloaded at exit
        if self.ID != -1 and _GemRB:
            pass  #_GemRB.Table_Unload(self.ID)

    def __nonzero__(self):
        # Python 2 truthiness: a table handle is valid unless its ID is -1.
        return self.ID != -1
class GSymbol:
    """ID-based wrapper around an engine symbol (IDS) file."""
    __metaclass__ = metaIDWrapper
    methods = {
      'GetValue': _GemRB.Symbol_GetValue,
      'Unload': _GemRB.Symbol_Unload
    }
class GWindow:
    """Wrapper for an engine Window, identified by self.ID."""
    __metaclass__ = metaIDWrapper
    methods = {
      'SetSize': _GemRB.Window_SetSize,
      'SetFrame': _GemRB.Window_SetFrame,
      'SetPicture': _GemRB.Window_SetPicture,
      'SetPos': _GemRB.Window_SetPos,
      'HasControl': _GemRB.Window_HasControl,
      'DeleteControl': _GemRB.Window_DeleteControl,
      'Unload': _GemRB.Window_Unload,
      'SetupEquipmentIcons': _GemRB.Window_SetupEquipmentIcons,
      'SetupSpellIcons': _GemRB.Window_SetupSpellIcons,
      'SetupControls': _GemRB.Window_SetupControls,
      'SetVisible': _GemRB.Window_SetVisible,
      'ShowModal': _GemRB.Window_ShowModal,
      'Invalidate': _GemRB.Window_Invalidate
    }

    def GetControl(self, control):
        # Return a (typed) wrapper for the control with the given index.
        return _GemRB.Window_GetControl(self.ID, control)

    # Each Create* helper builds the control engine-side, then returns its
    # wrapper via Window_GetControl.
    def CreateWorldMapControl(self, control, *args):
        _GemRB.Window_CreateWorldMapControl(self.ID, control, *args)
        return _GemRB.Window_GetControl(self.ID, control)

    def CreateMapControl(self, control, *args):
        _GemRB.Window_CreateMapControl(self.ID, control, *args)
        return _GemRB.Window_GetControl(self.ID, control)

    def CreateLabel(self, control, *args):
        _GemRB.Window_CreateLabel(self.ID, control, *args)
        return _GemRB.Window_GetControl(self.ID, control)

    def CreateButton(self, control, *args):
        _GemRB.Window_CreateButton(self.ID, control, *args)
        return _GemRB.Window_GetControl(self.ID, control)

    def CreateScrollBar(self, control, *args):
        _GemRB.Window_CreateScrollBar(self.ID, control, *args)
        return _GemRB.Window_GetControl(self.ID, control)

    def CreateTextEdit(self, control, *args):
        _GemRB.Window_CreateTextEdit(self.ID, control, *args)
        return _GemRB.Window_GetControl(self.ID, control)
class GControl:
    """Base wrapper for a window control; identified by (self.WinID, self.ID)."""
    __metaclass__ = metaControl
    methods = {
      'SetVarAssoc': _GemRB.Control_SetVarAssoc,
      'SetPos': _GemRB.Control_SetPos,
      'SetSize': _GemRB.Control_SetSize,
      'SetAnimationPalette': _GemRB.Control_SetAnimationPalette,
      'SetAnimation': _GemRB.Control_SetAnimation,
      'QueryText': _GemRB.Control_QueryText,
      'SetText': _GemRB.Control_SetText,
      'SetTooltip': _GemRB.Control_SetTooltip,
      'SetEvent': _GemRB.Control_SetEvent,
      'SetStatus': _GemRB.Control_SetStatus,
    }

    def AttachScrollBar(self, scrollbar):
        # Both controls must live in the same engine window.
        if self.WinID != scrollbar.WinID:
            raise RuntimeError, "Scrollbar must be in same Window as Control"
        return _GemRB.Control_AttachScrollBar(self.WinID, self.ID, scrollbar.ID)
class GLabel(GControl):
    """Wrapper for a GemRB Label control."""
    __metaclass__ = metaControl
    methods = {
      'SetTextColor': _GemRB.Label_SetTextColor,
      'SetUseRGB': _GemRB.Label_SetUseRGB
    }
class GTextArea(GControl):
    """Wrapper for a GemRB TextArea control."""
    __metaclass__ = metaControl
    methods = {
      'Rewind': _GemRB.TextArea_Rewind,
      'SetHistory': _GemRB.TextArea_SetHistory,
      'Append': _GemRB.TextArea_Append,
      'Clear': _GemRB.TextArea_Clear,
      'Scroll': _GemRB.TextArea_Scroll,
      'SetFlags': _GemRB.Control_TextArea_SetFlags,
      'GetCharSounds': _GemRB.TextArea_GetCharSounds,
      'GetCharacters': _GemRB.TextArea_GetCharacters,
      'GetPortraits': _GemRB.TextArea_GetPortraits
    }

    def MoveText(self, other):
        # Move this area's text into another TextArea (engine-side).
        _GemRB.TextArea_MoveText(self.WinID, self.ID, other.WinID, other.ID)
class GTextEdit(GControl):
    """Wrapper for a GemRB TextEdit control."""
    __metaclass__ = metaControl
    methods = {
      'SetBufferLength': _GemRB.TextEdit_SetBufferLength
    }

    def ConvertEdit(self, ScrollBarID):
        """Convert this edit control into a TextArea and return its wrapper.

        Bug fix: the original computed newID but wrapped the old self.ID;
        wrap the ID returned by the engine instead.
        """
        newID = _GemRB.TextEdit_ConvertEdit(self.WinID, self.ID, ScrollBarID)
        return GTextArea(self.WinID, newID)
class GScrollBar(GControl):
    """Wrapper for a GemRB ScrollBar control."""
    __metaclass__ = metaControl
    methods = {
      'SetDefaultScrollBar': _GemRB.ScrollBar_SetDefaultScrollBar,
      'SetSprites': _GemRB.ScrollBar_SetSprites
    }
class GButton(GControl):
    """Wrapper for a GemRB Button control."""
    __metaclass__ = metaControl
    methods = {
      'SetSprites': _GemRB.Button_SetSprites,
      'SetOverlay': _GemRB.Button_SetOverlay,
      'SetBorder': _GemRB.Button_SetBorder,
      'EnableBorder': _GemRB.Button_EnableBorder,
      'SetFont': _GemRB.Button_SetFont,
      'SetTextColor': _GemRB.Button_SetTextColor,
      'SetFlags': _GemRB.Button_SetFlags,
      'SetState': _GemRB.Button_SetState,
      'SetPictureClipping': _GemRB.Button_SetPictureClipping,
      'SetPicture': _GemRB.Button_SetPicture,
      'SetSprite2D': _GemRB.Button_SetSprite2D,
      'SetMOS': _GemRB.Button_SetMOS,
      'SetPLT': _GemRB.Button_SetPLT,
      'SetBAM': _GemRB.Button_SetBAM,
      'SetSpellIcon': _GemRB.Button_SetSpellIcon,
      'SetItemIcon': _GemRB.Button_SetItemIcon,
      'SetActionIcon': _GemRB.Button_SetActionIcon
    }

    def CreateLabelOnButton(self, control, *args):
        # Create a label overlaying this button and return its wrapper.
        _GemRB.Button_CreateLabelOnButton(self.WinID, self.ID, control, *args)
        return _GemRB.Window_GetControl(self.WinID, control)
class GWorldMap(GControl):
    """Wrapper for a GemRB WorldMap control."""
    __metaclass__ = metaControl
    methods = {
      'AdjustScrolling': _GemRB.WorldMap_AdjustScrolling,
      'GetDestinationArea': _GemRB.WorldMap_GetDestinationArea,
      'SetTextColor': _GemRB.WorldMap_SetTextColor
    }
class GSaveGame:
    """Wrapper for a saved-game slot handle."""
    __metaclass__ = metaIDWrapper
    methods = {
      'GetDate': _GemRB.SaveGame_GetDate,
      'GetGameDate': _GemRB.SaveGame_GetGameDate,
      'GetName': _GemRB.SaveGame_GetName,
      'GetPortrait': _GemRB.SaveGame_GetPortrait,
      'GetPreview': _GemRB.SaveGame_GetPreview,
      'GetSaveID': _GemRB.SaveGame_GetSaveID,
    }
class GSprite2D:
    """Wrapper for a 2D sprite handle; no extra methods are exposed."""
    __metaclass__ = metaIDWrapper
    methods = {}
|
sdgathman/pymilter | testsample.py | Python | gpl-2.0 | 5,060 | 0.014427 | import unittest
import Milter
import sample
import template
import mime
import zipfile
from Milter.test import TestBase
from Milter.testctx import TestCtx
class TestMilter(TestBase,sample.sampleMilter):
    # Combine the test harness with the sample milter so its callbacks can
    # be driven without a real MTA.
    def __init__(self):
        TestBase.__init__(self)
        sample.sampleMilter.__init__(self)
class BMSMilterTestCase(unittest.TestCase):
    """Exercise sample.sampleMilter / template.myMilter against canned messages.

    Virus-carrying payloads live in an encrypted zip (password 'denatured')
    so that local AV scanners do not quarantine the test data.
    Uses assertEqual/assertNotEqual instead of the deprecated assertEquals
    and assertTrue(a == b) forms.
    """

    def setUp(self):
        self.zf = zipfile.ZipFile('test/virus.zip', 'r')
        self.zf.setpassword(b'denatured')

    def tearDown(self):
        self.zf.close()
        self.zf = None

    def testTemplate(self, fname='test2'):
        """Repeatedly connect/feed/close to check per-connection state resets."""
        ctx = TestCtx()
        Milter.factory = template.myMilter
        ctx._setsymval('{auth_authen}', 'batman')
        ctx._setsymval('{auth_type}', 'batcomputer')
        ctx._setsymval('j', 'mailhost')
        count = 10
        while count > 0:
            rc = ctx._connect(helo='milter-template.example.org')
            self.assertEqual(rc, Milter.CONTINUE)
            with open('test/' + fname, 'rb') as fp:
                rc = ctx._feedFile(fp)
            milter = ctx.getpriv()
            self.assertFalse(ctx._bodyreplaced, "Message body replaced")
            ctx._close()
            count -= 1

    def testHeader(self, fname='utf8'):
        """A benign message must pass through with its body untouched."""
        ctx = TestCtx()
        Milter.factory = sample.sampleMilter
        ctx._setsymval('{auth_authen}', 'batman')
        ctx._setsymval('{auth_type}', 'batcomputer')
        ctx._setsymval('j', 'mailhost')
        rc = ctx._connect()
        self.assertEqual(rc, Milter.CONTINUE)
        with open('test/' + fname, 'rb') as fp:
            rc = ctx._feedFile(fp)
        milter = ctx.getpriv()
        self.assertFalse(ctx._bodyreplaced, "Message body replaced")
        fp = ctx._body
        with open('test/' + fname + ".tstout", "wb") as ofp:
            ofp.write(fp.getvalue())
        ctx._close()

    def testCtx(self, fname='virus1'):
        """A virus message fed through TestCtx must get its body defanged."""
        ctx = TestCtx()
        Milter.factory = sample.sampleMilter
        ctx._setsymval('{auth_authen}', 'batman')
        ctx._setsymval('{auth_type}', 'batcomputer')
        ctx._setsymval('j', 'mailhost')
        rc = ctx._connect()
        self.assertEqual(rc, Milter.CONTINUE)
        with self.zf.open(fname) as fp:
            rc = ctx._feedFile(fp)
        milter = ctx.getpriv()
        self.assertEqual(milter.user, 'batman')
        self.assertNotEqual(milter.auth_type, 'batcomputer', "setsymlist failed")
        self.assertEqual(rc, Milter.ACCEPT)
        self.assertTrue(ctx._bodyreplaced, "Message body not replaced")
        fp = ctx._body
        with open('test/' + fname + ".tstout", "wb") as f:
            f.write(fp.getvalue())
        #self.assertTrue(fp.getvalue() == open("test/virus1.out","r").read())
        fp.seek(0)
        msg = mime.message_from_file(fp)
        s = msg.get_payload(1).get_payload()
        milter.log(s)
        ctx._close()

    def testDefang(self, fname='virus1'):
        """Same as testCtx but driven through the TestBase harness."""
        milter = TestMilter()
        milter.setsymval('{auth_authen}', 'batman')
        milter.setsymval('{auth_type}', 'batcomputer')
        milter.setsymval('j', 'mailhost')
        rc = milter.connect()
        self.assertEqual(rc, Milter.CONTINUE)
        with self.zf.open(fname) as fp:
            rc = milter.feedFile(fp)
        self.assertEqual(milter.user, 'batman', "getsymval failed")
        # setsymlist not working in TestBase
        #self.assertTrue(milter.auth_type != 'batcomputer',"setsymlist failed")
        self.assertEqual(rc, Milter.ACCEPT)
        self.assertTrue(milter._bodyreplaced, "Message body not replaced")
        fp = milter._body
        with open('test/' + fname + ".tstout", "wb") as f:
            f.write(fp.getvalue())
        #self.assertTrue(fp.getvalue() == open("test/virus1.out","r").read())
        fp.seek(0)
        msg = mime.message_from_file(fp)
        s = msg.get_payload(1).get_payload()
        milter.log(s)
        milter.close()

    def testParse(self, fname='spam7'):
        """A plain spam message must be accepted without body replacement."""
        milter = TestMilter()
        milter.connect('somehost')
        rc = milter.feedMsg(fname)
        self.assertEqual(rc, Milter.ACCEPT)
        self.assertFalse(milter._bodyreplaced, "Milter needlessly replaced body.")
        fp = milter._body
        with open('test/' + fname + ".tstout", "wb") as f:
            f.write(fp.getvalue())
        milter.close()

    def testDefang2(self):
        """One connection handling a clean message then two virus messages."""
        milter = TestMilter()
        milter.connect('somehost')
        rc = milter.feedMsg('samp1')
        self.assertEqual(rc, Milter.ACCEPT)
        self.assertFalse(milter._bodyreplaced, "Milter needlessly replaced body.")
        with self.zf.open("virus3") as fp:
            rc = milter.feedFile(fp)
        self.assertEqual(rc, Milter.ACCEPT)
        self.assertTrue(milter._bodyreplaced, "Message body not replaced")
        fp = milter._body
        with open("test/virus3.tstout", "wb") as f:
            f.write(fp.getvalue())
        #self.assertTrue(fp.getvalue() == open("test/virus3.out","r").read())
        with self.zf.open("virus6") as fp:
            rc = milter.feedFile(fp)
        self.assertEqual(rc, Milter.ACCEPT)
        self.assertTrue(milter._bodyreplaced, "Message body not replaced")
        self.assertTrue(milter._headerschanged, "Message headers not adjusted")
        fp = milter._body
        with open("test/virus6.tstout", "wb") as f:
            f.write(fp.getvalue())
        milter.close()
# Collect all test* methods into a suite for external runners.
def suite(): return unittest.makeSuite(BMSMilterTestCase,'test')

if __name__ == '__main__':
    unittest.main()
|
DocDir/docdir | plans/migrations/0002_auto_20150722_0003.py | Python | bsd-3-clause | 3,284 | 0.003959 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds the DataSource model and provenance metadata (created timestamp,
    # reliability score, source FK) to contract/doctorcontact/doctorspecialty,
    # and relaxes their 'start' fields to be nullable.
    # Only change vs. the generated file: garbled '|' artifacts removed.

    dependencies = [
        ('plans', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='DataSource',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, verbose_name='organization name')),
                ('source_type', models.CharField(max_length=1, choices=[('C', 'Crawled'), ('V', 'Official Release csv'), ('P', 'Official Release pdf')])),
                ('notes', models.CharField(help_text='assorted notes re: data source', max_length=200)),
            ],
        ),
        migrations.AddField(
            model_name='contract',
            name='created',
            field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='timestamp record created'),
        ),
        migrations.AddField(
            model_name='contract',
            name='score',
            field=models.FloatField(default=1.0, verbose_name='data reliability score'),
        ),
        migrations.AddField(
            model_name='doctorcontact',
            name='created',
            field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='timestamp record created'),
        ),
        migrations.AddField(
            model_name='doctorcontact',
            name='score',
            field=models.FloatField(default=1.0, verbose_name='data reliability score'),
        ),
        migrations.AddField(
            model_name='doctorspecialty',
            name='created',
            field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='timestamp record created'),
        ),
        migrations.AddField(
            model_name='doctorspecialty',
            name='score',
            field=models.FloatField(default=1.0, verbose_name='data reliability score'),
        ),
        migrations.AlterField(
            model_name='contract',
            name='start',
            field=models.DateTimeField(null=True, default=None),
        ),
        migrations.AlterField(
            model_name='doctorcontact',
            name='start',
            field=models.DateTimeField(null=True, default=None),
        ),
        migrations.AlterField(
            model_name='doctorspecialty',
            name='start',
            field=models.DateTimeField(null=True, default=None),
        ),
        migrations.AddField(
            model_name='contract',
            name='source',
            field=models.ForeignKey(help_text='where relationship data comes from', to='plans.DataSource', default=None, null=True),
        ),
        migrations.AddField(
            model_name='doctorcontact',
            name='source',
            field=models.ForeignKey(help_text='where relationship data comes from', to='plans.DataSource', default=None, null=True),
        ),
        migrations.AddField(
            model_name='doctorspecialty',
            name='source',
            field=models.ForeignKey(help_text='where relationship data comes from', to='plans.DataSource', default=None, null=True),
        ),
    ]
|
grlee77/nipype | nipype/interfaces/afni/tests/test_auto_To3D.py | Python | bsd-3-clause | 1,337 | 0.023186 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.afni.preprocess import To3D
def test_To3D_inputs():
    """Verify the trait metadata of To3D's auto-generated input spec."""
    input_map = dict(
        args=dict(argstr='%s'),
        assumemosaic=dict(argstr='-assume_dicom_mosaic'),
        datatype=dict(argstr='-datum %s'),
        environ=dict(nohash=True, usedefault=True),
        filetype=dict(argstr='-%s'),
        funcparams=dict(argstr='-time:zt %s alt+z2'),
        ignore_exception=dict(nohash=True, usedefault=True),
        in_folder=dict(argstr='%s/*.dcm', mandatory=True, position=-1),
        out_file=dict(argstr='-prefix %s', name_source=['in_folder'],
                      name_template='%s'),
        outputtype=dict(),
        skipoutliers=dict(argstr='-skip_outliers'),
        terminal_output=dict(nohash=True),
    )
    inputs = To3D.input_spec()

    # Nose-style generative test: yield one assertion per trait/metadata pair.
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_To3D_outputs():
    """Verify the trait metadata of To3D's auto-generated output spec."""
    output_map = dict(out_file=dict())
    outputs = To3D.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
setokinto/slack-shogi | app/shogi.py | Python | mit | 4,557 | 0.002766 |
import re
import functools
from slackbot.bot import respond_to
from app.modules.shogi_input import ShogiInput, UserDifferentException, KomaCannotMoveException
from app.modules.shogi_output import ShogiOutput
from app.slack_utils.user import User
from app.helper import channel_info, should_exist_shogi
@respond_to('start with <?@?([\d\w_-]+)>?')
@channel_info
def start_shogi(channel, message, opponent_name):
    # Start a new game in this channel between the sender and the named
    # opponent; refuses if the opponent is not present or a game exists.
    slacker = message._client.webapi
    user = User(slacker)
    opponent_id = user.username_to_id(opponent_name)
    if opponent_id is None:
        # In case of mention. In mention, slack transform username to userid
        # like @username to <@UOIFJ83F>
        opponent_id = opponent_name
    if not user.user_in_channel(opponent_id, channel.channel_id):
        message.reply("Error, sorry. Opponent is not found in this channel")
        return
    shogi = ShogiInput.init(channel_id=channel.channel_id, users=[{
        "id": channel.own_id,
        "name": user.id_to_username(channel.own_id),
    }, {
        "id": opponent_id,
        "name": user.id_to_username(opponent_id),
    }])
    if shogi is None:
        # init() returns None when a game is already running in this channel.
        message.reply("Shogi started already by a user. Sorry.\nIf you want to quit shogi which already exists, please say this command `resign`")
    else:
        message.reply("Shogi started: " + shogi.id)
        board = ShogiInput.get_shogi_board(channel.channel_id)
        board_str = ShogiOutput.make_board_emoji(board)
        message.send(board_str)
# Shogi piece names accepted in move messages; '?' marks an optional second
# kanji (e.g. 歩 or 歩兵), and 成 prefixes promoted forms.
koma_names = [
    "歩兵?",
    "と金?",
    "成?香車?",
    "成?桂馬?",
    "成?銀将?",
    "金将?",
    "角行?",
    "馬",
    "飛車?",
    "龍",
    "王将?",
    "玉将?",
]
# Alternation pattern matching any single piece name.
koma_names_string_regex = "|".join(koma_names)
@respond_to("^([一二三四五六七八九123456789123456789]{2})?(同)?(" + koma_names_string_regex + ")([上右下左引寄直打]{1,2})?つ?(成)?")
@channel_info
@should_exist_shogi
def koma_move(channel, message, position, dou, koma, sub_position=None, promote=None):
movement_str = "".join(
[x for x in [position, dou, koma, sub_position, promote] if x is not None])
try:
ShogiInput.move(movement_str, channel.channel_id, channel.own_id)
except UserDifferentException:
message.reply("You cannot move this because *it's not your turn*")
except KomaCannotMoveException:
message.reply("You cannot move this with your message *{}*".format(movement_str))
finally:
board = ShogiInput.get_shogi_board(channel.channel_id)
board_str = ShogiOutput.make_board_emoji(board)
message.send(board_str)
@respond_to("set (all) mode")
@channel_info
@should_exist_shogi
def set_mode(channel, message, arg):
if arg == "all":
ShogiInput.setAllMode(channel.channel_id)
message.reply("Done! All member can move now!")
@respond_to("今?.*の?.*状態.*を?教.*え?て?")
@respond_to("now")
@respond_to("局面.*")
@respond_to("board")
@channel_info
@should_exist_shogi
def board_info(channel, message):
board = ShogiInput.get_shogi_board(channel.channel_id)
board_str = ShogiOutput.make_board_emoji(board)
message.send(board_str)
@respond_to(".*降参.*")
@respond_to(".*resign.*")
@respond_to(".*負けました.*")
@respond_to(".*まけました.*")
@respond_to(".*まいりました.*")
@respond_to(".*参りました.*")
@respond_to(".*ありません.*")
@channel_info
@should_exist_shogi
def resign(channel, message):
message.send("最終局面")
board = ShogiInput.get_shogi_board(channel.channel_id)
board_str = ShogiOutput.make_board_emoji(board)
message.send(board_str)
ShogiInput.clear(channel.channel_id)
@respond_to("待った")
@channel_info
@should_exist_sh | ogi
def matta(channel, message):
try:
ShogiInput.matta(channel.channel_id, channel.own_id)
| message.send("mattaed")
except UserDifferentException:
message.reply("You cannot matta because *it's not your turn*")
except KomaCannotMoveException:
message.reply("You cannot matta because koma not moved")
finally:
board = ShogiInput.get_shogi_board(channel.channel_id)
board_str = ShogiOutput.make_board_emoji(board)
message.send(board_str)
@respond_to(".*ひふみん[eye, アイ, あい]?")
@respond_to(".*反転.*")
@channel_info
@should_exist_shogi
def hifumin(channel, message):
board = ShogiInput.get_shogi_board(channel.channel_id)
board_str = ShogiOutput.make_board_emoji_reverse(board)
message.send(board_str)
|
j-po/django-brambling | brambling/management/commands/update_tokens.py | Python | bsd-3-clause | 931 | 0 | from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from brambling.utils.payment import dwolla_update_tokens
class Command(BaseCommand):
    """Refresh Dwolla OAuth tokens that will expire within --days days."""
    option_list = BaseCommand.option_list + (
        make_option(
            '--days',
            action='store',
            dest='days',
            default=15,
            help='Number of days ahead of time to update refresh tokens.'),
    )

    def handle(self, *args, **options):
        try:
            days = int(options['days'])
        except ValueError:
            raise CommandError("Days must be an integer value.")
        self.stdout.write("Updating dwolla tokens...")
        self.stdout.flush()
        count, test_count = dwolla_update_tokens(days)
        # Bug fix: the two report lines were swapped relative to the unpacked
        # names (count reported as "Test", test_count as "Live").
        # NOTE(review): confirm dwolla_update_tokens' return order is
        # (live_count, test_count).
        self.stdout.write("Live tokens updated: {}".format(count))
        self.stdout.write("Test tokens updated: {}".format(test_count))
        self.stdout.flush()
|
kcompher/pygraphistry | graphistry/plotter.py | Python | bsd-3-clause | 18,526 | 0.002375 | from __future__ import print_function
from __future__ import absolute_import
from builtins import str
from builtins import range
from builtins import object
import random
import string
import copy
import types
import pandas
from . import pygraphistry
from . import util
class Plotter(object):
"""Graph plotting class.
Created using ``Graphistry.bind()``.
Chained calls successively add data and visual encodings, and end with a plot call.
To streamline reuse and replayable notebooks, Plotter manipulations are immutable. Each chained call returns a new instance that derives from the previous one. The old plotter or the new one can then be used to create different graphs.
The class supports convenience methods for mixing calls across Pandas, NetworkX, and IGraph.
"""
_defaultNodeId = '__nodeid__'
def __init__(self):
# Bindings
self._edges = None
| self._nodes = None
self._source = None
self._destination = None
self._node = None
self._edge_title = None
self._edge_label = None
self._edge_color = None
self._edge_weight = None
self._point_title = None
self._point_label = None
self._point_color = None
s | elf._point_size = None
# Settings
self._height = 500
self._url_params = {'info': 'true'}
    def __repr__(self):
        # Compact dump of all current bindings and settings for debugging;
        # pretty-printed when running inside IPython/Jupyter.
        bnds = ['edges', 'nodes', 'source', 'destination', 'node', 'edge_title',
                'edge_label', 'edge_color', 'edge_weight', 'point_title',
                'point_label', 'point_color', 'point_size']
        stgs = ['height', 'url_params']
        rep = {'bindings': dict([(f, getattr(self, '_' + f)) for f in bnds]),
               'settings': dict([(f, getattr(self, '_' + f)) for f in stgs])}
        if util.in_ipython():
            from IPython.lib.pretty import pretty
            return pretty(rep)
        else:
            return str(rep)
    def bind(self, source=None, destination=None, node=None,
             edge_title=None, edge_label=None, edge_color=None, edge_weight=None,
             point_title=None, point_label=None, point_color=None, point_size=None):
        """Relate data attributes to graph structure and visual representation.

        To facilitate reuse and replayable notebooks, the binding call is chainable. Invocation does not effect the old binding: it instead returns a new Plotter instance with the new bindings added to the existing ones. Both the old and new bindings can then be used for different graphs.

        :param source: Attribute containing an edge's source ID
        :type source: String.

        :param destination: Attribute containing an edge's destination ID
        :type destination: String.

        :param node: Attribute containing a node's ID
        :type node: String.

        :param edge_title: Attribute overriding edge's minimized label text. By default, the edge source and destination is used.
        :type edge_title: HtmlString.

        :param edge_label: Attribute overriding edge's expanded label text. By default, scrollable list of attribute/value mappings.
        :type edge_label: HtmlString.

        :param edge_color: Attribute overriding edge's color. `See palette definitions <https://github.com/graphistry/pygraphistry/blob/master/graphistry.com/palette.html>`_ for values. Based on Color Brewer.
        :type edge_color: String.

        :param edge_weight: Attribute overriding edge weight. Default is 1. Advanced layout controls will relayout edges based on this value.
        :type edge_weight: String.

        :param point_title: Attribute overriding node's minimized label text. By default, the node ID is used.
        :type point_title: HtmlString.

        :param point_label: Attribute overriding node's expanded label text. By default, scrollable list of attribute/value mappings.
        :type point_label: HtmlString.

        :param point_color: Attribute overriding node's color. `See palette definitions <https://github.com/graphistry/pygraphistry/blob/master/graphistry.com/palette.html>`_ for values. Based on Color Brewer.
        :type point_color: Integer.

        :param point_size: Attribute overriding node's size. By default, uses the node degree. The visualization will normalize point sizes and adjust dynamically using semantic zoom.
        :type point_size: HtmlString.

        :returns: Plotter.
        :rtype: Plotter.

        **Example: Minimal**
            ::

                import graphistry
                g = graphistry.bind()
                g = g.bind(source='src', destination='dst')

        **Example: Node colors**
            ::

                import graphistry
                g = graphistry.bind()
                g = g.bind(source='src', destination='dst',
                           node='id', point_color='color')

        **Example: Chaining**
            ::

                import graphistry
                g = graphistry.bind(source='src', destination='dst', node='id')

                g1 = g.bind(point_color='color1', point_size='size1')

                g.bind(point_color='color1b')

                g2a = g1.bind(point_color='color2a')
                g2b = g1.bind(point_color='color2b', point_size='size2b')

                g3a = g2a.bind(point_size='size3a')
                g3b = g2b.bind(point_size='size3b')

        In the above **Chaining** example, all bindings use src/dst/id. Colors and sizes bind to:
            ::

                g: default/default
                g1: color1/size1
                g2a: color2a/size1
                g2b: color2b/size2b
                g3a: color2a/size3a
                g3b: color2b/size3b

        """
        res = copy.copy(self)
        # NOTE: 'new or old' keeps the previous binding when the new value is
        # falsy, so an existing binding cannot be cleared by passing '' / None.
        res._source = source or self._source
        res._destination = destination or self._destination
        res._node = node or self._node

        res._edge_title = edge_title or self._edge_title
        res._edge_label = edge_label or self._edge_label
        res._edge_color = edge_color or self._edge_color
        res._edge_weight = edge_weight or self._edge_weight

        res._point_title = point_title or self._point_title
        res._point_label = point_label or self._point_label
        res._point_color = point_color or self._point_color
        res._point_size = point_size or self._point_size
        return res
def nodes(self, nodes):
"""Specify the set of nodes and associated data.
Must include any nodes referenced in the edge list.
:param nodes: Nodes and their attributes.
:type point_size: Pandas dataframe
:returns: Plotter.
:rtype: Plotter.
**Example**
::
import graphistry
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
g = graphistry
.bind(source='src', destination='dst')
.edges(es)
vs = pandas.DataFrame({'v': [0,1,2], 'lbl': ['a', 'b', 'c']})
g = g.bind(node='v').nodes(vs)
g.plot()
"""
res = copy.copy(self)
res._nodes = nodes
return res
def edges(self, edges):
"""Specify edge list data and associated edge attribute values.
:param edges: Edges and their attributes.
:type point_size: Pandas dataframe, NetworkX graph, or IGraph graph.
:returns: Plotter.
:rtype: Plotter.
**Example**
::
import graphistry
df = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
graphistry
.bind(source='src', destination='dst')
.edges(df)
.plot()
"""
res = copy.copy(self)
res._edges = edges
return res
def graph(self, ig):
"""Specify the node and edge data.
:param ig: Graph with node and edge attributes.
:type ig: NetworkX graph or an IGraph graph.
:returns: Plotter.
:rtype: Plotter.
"""
res = copy.copy(self)
res._edges = ig
res._nodes = |
VPAC/patchman | patchman/domains/models.py | Python | gpl-3.0 | 813 | 0 | # Copyright 2012 VPAC, http://www.vpac.org
#
# This file is part of Patchman.
#
# Patchman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 only.
#
# Patchman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchman. If not, see <http://www.gnu.org/licenses/>
from django.db import models
class Domain(models.Model):
    """A DNS domain to which monitored hosts belong."""

    # Domain names must be unique across the table.
    name = models.CharField(max_length=255, unique=True)

    def __unicode__(self):
        # Python 2 display name (this codebase predates __str__ usage).
        return self.name
|
nd1/women_in_tech_summit_DC2017 | api/seeclickfix_api.py | Python | mit | 2,074 | 0.000964 | '''
Pull one page of 100 results from seeclickfix using the global PARAMS
value if the parameters are not supplied. If there are more than 100
results, make another pull passing paramters that include the next page to
be pulled.
Nicole Donnelly 30May2016, updated 21Oct2016
'''
import requests
import json
def get_seeclickfix(page=1, pulled=0, search_params=None):
    """Fetch one page (100 results) of SeeClickFix issues, writing each page
    to its own dataN.json file and recursing until all pages are pulled.

    :param page: 1-based page number of this request.
    :param pulled: running count of results fetched so far.
    :param search_params: query parameters for the issues endpoint; defaults
        to DC issues after 2016-10-01, 100 per page.
    """
    # Bug fix: the default used to be a mutable dict that this function
    # mutates (search_params.update(...)), so a second top-level call would
    # resume from the previous run's last page. Build a fresh dict instead.
    if search_params is None:
        search_params = {'place_url': 'district-of-columbia',
                         'after': '2016-10-01',
                         'per_page': 100}

    # base_url for the SeeClickFix API to build the request url
    base_url = 'https://seeclickfix.com/api/v2/issues'

    # send a get request with the url and parameters
    myResponse = requests.get(url=base_url, params=search_params)

    # For successful API call, response code will be 200 (OK)
    if myResponse.ok:
        # Loading the response data into a dict variable
        data = json.loads(myResponse.content.decode('utf-8'))

        # get the total search result count; the API only allows 100
        # results per page
        count_all = data['metadata']['pagination']['entries']

        # track the number of items we have pulled with our requests
        pulled = pulled + 100

        # write this page of results to its own file
        file_name = 'data%d.json' % page
        with open(file_name, 'w') as outfile:
            json.dump(data, outfile)

        # if there are more results, request the next page
        if pulled < count_all:
            page += 1
            search_params.update({'page': page})
            print(search_params)
            get_seeclickfix(page, pulled, search_params)
    else:
        # Raise the resulting http error code with description
        myResponse.raise_for_status()


if __name__ == '__main__':
    get_seeclickfix()
|
kristerw/spirv-tools | spirv_tools/ext_inst/glsl_std_450.py | Python | mit | 11,873 | 0.033521 | """Instruction descriptions for the "SPIR-V Extended Instructions for GLSL"
version 1.00, revision 2.
"""
INST_FORMAT = {
1 : {
'name' : 'Round',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
}, |
2 : {
'name' : 'RoundEven',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
3 : {
'name' : 'Trunc',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
4 : {
'name': 'FAbs',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
5 : {
'name' : 'SAbs',
| 'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
6 : {
'name' : 'FSign',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
7 : {
'name' : 'SSign',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
8 : {
'name' : 'Floor',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
9 : {
'name' : 'Ceil',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
10 : {
'name' : 'Fract',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
11 : {
'name' : 'Radians',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
12 : {
'name' : 'Degrees',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
13 : {
'name' : 'Sin',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
14 : {
'name' : 'Cos',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
15 : {
'name' : 'Tan',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
16 : {
'name' : 'Asin',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
17 : {
'name' : 'Acos',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
18 : {
'name' : 'Atan',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
19 : {
'name' : 'Sinh',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
20 : {
'name' : 'Cosh',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
21 : {
'name' : 'Tanh',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
22 : {
'name' : 'Asinh',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
23 : {
'name' : 'Acosh',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
24 : {
'name' : 'Atanh',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
25 : {
'name' : 'Atan2',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
26 : {
'name' : 'Pow',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
27 : {
'name' : 'Exp',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
28 : {
'name' : 'Log',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
29 : {
'name' : 'Exp2',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
30: {
'name' : 'Log2',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
31 : {
'name' : 'Sqrt',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
32 : {
'name' : 'Inversesqrt',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
33 : {
'name' : 'Determinant',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
34 : {
'name' : 'MatrixInverse',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
35 : {
'name' : 'Modf',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
36 : {
'name' : 'ModfStruct',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
37 : {
'name' : 'FMin',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : True
},
38 : {
'name' : 'UMin',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : True
},
39 : {
'name' : 'SMin',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : True
},
40 : {
'name' : 'FMax',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : True
},
41 : {
'name' : 'UMax',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : True
},
42 : {
'name' : 'SMax',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : True
},
43 : {
'name' : 'FClamp',
'operands' : ['Id', 'Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
44 : {
'name' : 'UClamp',
'operands' : ['Id', 'Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
45 : {
'name' : 'SClamp',
'operands' : ['Id', 'Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
46 : {
'name' : 'FMix',
'operands' : ['Id', 'Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
48 : {
'name' : 'Step',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
49 : {
'name' : 'Smoothstep',
'operands' : ['Id', 'Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
50 : {
'name' : 'Fma',
'operands' : ['Id', 'Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
51 : {
'name' : 'Frexp',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
52 : {
'name' : 'FrexpStruct',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
53 : {
'name' : 'Ldexp',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
54 : {
'name' : 'PackSnorm4x8',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
55 : {
'name' : 'PackUnorm4x8',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
56 : {
'name' : 'PackSnorm2x16',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
57 : {
'name' : 'PackUnrom2x16',
'operands' : ['Id'],
'has |
anthgur/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/_pytest/runner.py | Python | mpl-2.0 | 16,098 | 0.000186 | """ basic collect and runtest protocol implementations """
from __future__ import absolute_import, division, print_function
import bdb
import os
import sys
from time import time
import py
from _pytest._code.code import TerminalRepr, ExceptionInfo
from _pytest.outcomes import skip, Skipped, TEST_OUTCOME
#
# pytest plugin hooks
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group.addoption('--durations',
action="store", type=int, default=None, metavar="N",
help="show N slowest setup/test | durations (N=0 for all)."),
def pytest_terminal_summary(terminalreporter):
durations = terminalreporter.config.option.durations
if durations is None:
return
tr = terminalreporter
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, 'duration'):
dlist.append(rep)
if not dlist:
return
dlist.sort(key=lambda x: x.d | uration)
dlist.reverse()
if not durations:
tr.write_sep("=", "slowest test durations")
else:
tr.write_sep("=", "slowest %s test durations" % durations)
dlist = dlist[:durations]
for rep in dlist:
nodeid = rep.nodeid.replace("::()::", "::")
tr.write_line("%02.2fs %-8s %s" %
(rep.duration, rep.when, nodeid))
def pytest_sessionstart(session):
session._setupstate = SetupState()
def pytest_sessionfinish(session):
session._setupstate.teardown_all()
def pytest_runtest_protocol(item, nextitem):
item.ihook.pytest_runtest_logstart(
nodeid=item.nodeid, location=item.location,
)
runtestprotocol(item, nextitem=nextitem)
return True
def runtestprotocol(item, log=True, nextitem=None):
hasrequest = hasattr(item, "_request")
if hasrequest and not item._request:
item._initrequest()
rep = call_and_report(item, "setup", log)
reports = [rep]
if rep.passed:
if item.config.option.setupshow:
show_test_item(item)
if not item.config.option.setuponly:
reports.append(call_and_report(item, "call", log))
reports.append(call_and_report(item, "teardown", log,
nextitem=nextitem))
# after all teardown hooks have been called
# want funcargs and request info to go away
if hasrequest:
item._request = False
item.funcargs = None
return reports
def show_test_item(item):
"""Show test function, parameters and the fixtures of the test item."""
tw = item.config.get_terminal_writer()
tw.line()
tw.write(' ' * 8)
tw.write(item._nodeid)
used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys())
if used_fixtures:
tw.write(' (fixtures used: {0})'.format(', '.join(used_fixtures)))
def pytest_runtest_setup(item):
_update_current_test_var(item, 'setup')
item.session._setupstate.prepare(item)
def pytest_runtest_call(item):
_update_current_test_var(item, 'call')
try:
item.runtest()
except Exception:
# Store trace info to allow postmortem debugging
type, value, tb = sys.exc_info()
tb = tb.tb_next # Skip *this* frame
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
del tb # Get rid of it in this namespace
raise
def pytest_runtest_teardown(item, nextitem):
_update_current_test_var(item, 'teardown')
item.session._setupstate.teardown_exact(item, nextitem)
_update_current_test_var(item, None)
def _update_current_test_var(item, when):
"""
Update PYTEST_CURRENT_TEST to reflect the current item and stage.
If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment.
"""
var_name = 'PYTEST_CURRENT_TEST'
if when:
value = '{0} ({1})'.format(item.nodeid, when)
# don't allow null bytes on environment variables (see #2644, #2957)
value = value.replace('\x00', '(null)')
os.environ[var_name] = value
else:
os.environ.pop(var_name)
def pytest_report_teststatus(report):
if report.when in ("setup", "teardown"):
if report.failed:
# category, shortletter, verbose-word
return "error", "E", "ERROR"
elif report.skipped:
return "skipped", "s", "SKIPPED"
else:
return "", "", ""
#
# Implementation
def call_and_report(item, when, log=True, **kwds):
call = call_runtest_hook(item, when, **kwds)
hook = item.ihook
report = hook.pytest_runtest_makereport(item=item, call=call)
if log:
hook.pytest_runtest_logreport(report=report)
if check_interactive_exception(call, report):
hook.pytest_exception_interact(node=item, call=call, report=report)
return report
def check_interactive_exception(call, report):
return call.excinfo and not (
hasattr(report, "wasxfail") or
call.excinfo.errisinstance(skip.Exception) or
call.excinfo.errisinstance(bdb.BdbQuit))
def call_runtest_hook(item, when, **kwds):
hookname = "pytest_runtest_" + when
ihook = getattr(item.ihook, hookname)
return CallInfo(lambda: ihook(item=item, **kwds), when=when)
class CallInfo:
""" Result/Exception info a function invocation. """
#: None or ExceptionInfo object.
excinfo = None
def __init__(self, func, when):
#: context of invocation: one of "setup", "call",
#: "teardown", "memocollect"
self.when = when
self.start = time()
try:
self.result = func()
except KeyboardInterrupt:
self.stop = time()
raise
except: # noqa
self.excinfo = ExceptionInfo()
self.stop = time()
def __repr__(self):
if self.excinfo:
status = "exception: %s" % str(self.excinfo.value)
else:
status = "result: %r" % (self.result,)
return "<CallInfo when=%r %s>" % (self.when, status)
def getslaveinfoline(node):
try:
return node._slaveinfocache
except AttributeError:
d = node.slaveinfo
ver = "%s.%s.%s" % d['version_info'][:3]
node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
d['id'], d['sysplatform'], ver, d['executable'])
return s
class BaseReport(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def toterminal(self, out):
if hasattr(self, 'node'):
out.line(getslaveinfoline(self.node))
longrepr = self.longrepr
if longrepr is None:
return
if hasattr(longrepr, 'toterminal'):
longrepr.toterminal(out)
else:
try:
out.line(longrepr)
except UnicodeEncodeError:
out.line("<unprintable longrepr>")
def get_sections(self, prefix):
for name, content in self.sections:
if name.startswith(prefix):
yield prefix, content
@property
def longreprtext(self):
"""
Read-only property that returns the full string representation
of ``longrepr``.
.. versionadded:: 3.0
"""
tw = py.io.TerminalWriter(stringio=True)
tw.hasmarkup = False
self.toterminal(tw)
exc = tw.stringio.getvalue()
return exc.strip()
@property
def capstdout(self):
"""Return captured text from stdout, if capturing is enabled
.. versionadded:: 3.0
"""
return ''.join(content for (prefix, content) in self.get_sections('Captured stdout'))
@property
def capstderr(self):
"""Return captured text from stderr, if capturing is enabled
.. versionadded:: 3.0
"""
return ''.join(content for (prefix, content) in self.get_sections('Captured stderr'))
passed = property(lambda x: x.outcome == "passed")
failed = property(lambda x: x.outcome == "failed")
skipped = property(lambda x: x.outcome == "skipped")
@property
def fspath(self):
return self.nodeid.s |
h-mayorquin/camp_india_2016 | tutorials/LTPinnetworks2/Step2aCorr_Gilson_etal_2012.py | Python | mit | 8,226 | 0.016168 | #!/usr/bin/env python
'''
Based on:
Gilson, Matthieu, Tomoki Fukai, and Anthony N. Burkitt.
Spectral Analysis of Input Spike Trains by Spike-Timing-Dependent Plasticity.
PLoS Comput Biol 8, no. 7 (July 5, 2012): e1002584. doi:10.1371/journal.pcbi.1002584.
Author: Aditya Gilra, Jun 2016. (with inputs from Matthieu Gilson)
in Brian2rc3 for CAMP 2016.
'''
#import modules and functions to be used
from brian2 import * # importing brian also does:
# 'from pylab import *' which imports:
# matplot like commands into the namespace, further
# also can use np. for numpy and mpl. for matplotlib
stand_alone = True
if stand_alone: set_device('cpp_standalone', build_on_run=False)
else:
#prefs.codegen.target = 'numpy'
prefs.codegen.target = 'weave'
#prefs.codegen.target = 'cython'
import random
import time
# http://stackoverflow.com/questions/31057197/should-i-use-random-seed-or-numpy-random-seed-to-control-random-number-gener
np.random.seed(0) # set seed for reproducibility of simulations
random.seed(0) # set seed for reproducibility of simulations
# ###########################################
# Simulation parameters
# ###########################################
simdt = 0.1*ms
simtime = 100.0*second
defaultclock.dt = simdt # set Brian's sim time step
simdtraw = simdt/second # convert to value in seconds
# ###########################################
# Neuron model
# ###########################################
taudelay = 0.75*ms # synaptic delay
tauA = 1*ms # synaptic epsp tauA
tauB = 5*ms | # synaptic epsp tauB
eqs | _neurons='''
dA/dt=-A/tauA : 1
dB/dt=-B/tauB : 1
rho_out = (A-B)/(tauA-tauB) : Hz
'''
# ###########################################
# Network parameters: numbers
# ###########################################
Npools = 4 # Number of correlated pools
Ninp = 500 # Number of neurons per pool
nu0 = 10*Hz # spiking rate of inputs
# ###########################################
# Network parameters: synapses
# ###########################################
Q = array([[sqrt(0.4),sqrt(0.1),0.,0.],\
[0.,sqrt(0.2),sqrt(0.2),0.],\
[0.,0.,sqrt(0.1),sqrt(0.1)]])
corr = dot(transpose(Q),Q)
print "Correlation matrix between pools is\n",corr
# ###########################################
# Network parameters: synaptic plasticity
# ###########################################
eta = 2e-4 # learning rate (as in paper)
Apre_tau = 17*ms # STDP Apre (LTP) time constant
Apost_tau = 34*ms # STDP Apost (LTD) time constant
stdp_eqns = ''' w : 1
dApre/dt=-Apre/Apre_tau : 1 (event-driven)
dApost/dt=-Apost/Apost_tau : 1 (event-driven)
'''
Apre0 = 1.0 # incr in Apre (LTP), on pre-spikes;
# at spike coincidence, delta w = -Apre0*eta
Apost0 = 0.55 # incr in Apost (LTD) on post spike
wmax = 0.04 # max weight (hard bound)
winit = wmax/2. # initial weights are from 0 to winit
w0 = wmax/2.
pre_eqns = 'Apre+=Apre0; w+=-eta*Apost;'\
' w=clip(w,0,wmax)'
post_eqns = 'Apost+=Apost0; w += eta*Apre;'\
' w=clip(w,0,wmax)'
# ###########################################
# Initialize neuron (sub)groups
# ###########################################
# post-synaptic neuron
P=NeuronGroup(1,model=eqs_neurons,\
threshold='rand()<rho_out*dt',method='euler')
# ###########################################
# Stimuli
# ###########################################
#inputs rates for absence of correlated events such that all neurons have same firing rate F
baseline_input_rates = np.zeros(Npools*Ninp)*Hz
for i_gp in range(Npools):
baseline_input_rates[i_gp*Ninp:(i_gp+1)*Ninp] = nu0*(1.-Q[:,i_gp].sum())
print baseline_input_rates[i_gp*Ninp]
# 3 driving spike trains
Pinp0=NeuronGroup(3,model='spikerate : Hz',\
threshold='rand()<spikerate*dt')
Pinp0.spikerate = nu0
Pinps = [Pinp0[:1],Pinp0[1:2],Pinp0[2:]]
# Npools number of Ninp spike trains
Pinp1=NeuronGroup(Npools*Ninp,model='spikerate : Hz',\
threshold='rand()<spikerate*dt')
Pinp1.spikerate = baseline_input_rates
Ppools = []
for k in range(Npools):
Ppools.append(Pinp1[k*Ninp:(k+1)*Ninp])
inpconns = []
def correlate_spike_trains(PR,P,l,csqrt):
con = Synapses(PR,P,'',on_pre='spikerate+='+str(csqrt)+'/dt')
con.connect(True)
con.delay = 0.*ms
con1 = Synapses(PR,P,'',on_pre='spikerate='+str(baseline_input_rates[l*Ninp]/Hz)+'*Hz')
con1.connect(True)
con1.delay = simdt
inpconns.append((con,con1))
for k in range(3):
for l in range(Npools):
if Q[k,l]!=0.:
correlate_spike_trains(Pinps[k],Ppools[l],l,Q[k,l])
# ###########################################
# Connecting the network
# ###########################################
con = Synapses(Pinp1,P,stdp_eqns,\
on_pre='A+=w*0.05;B+=w*0.05;'+pre_eqns,on_post=post_eqns,
method='euler')
con.connect(True)
con.delay = uniform(size=(Npools*Ninp,))*1.*ms + 4.*ms
con.w = uniform(size=(Npools*Ninp,))*2*winit
# ###########################################
# Setting up monitors
# ###########################################
sm = SpikeMonitor(P)
sminp1 = SpikeMonitor(Pinp1)
# Population monitor
popm = PopulationRateMonitor(P)
popminp1 = PopulationRateMonitor(Pinp1)
# voltage monitor
sm_rho = StateMonitor(P,'rho_out',record=[0])
# weights monitor
wm = StateMonitor(con,'w',record=range(Npools*Ninp), dt=1*second)
# ###########################################
# Simulate
# ###########################################
# a simple run would not include the monitors
net = Network(collect()) # collects Brian2 objects in current context
net.add(inpconns)
print "Setup complete, running for",simtime,"at dt =",simdtraw,"s."
t1 = time.time()
net.run(simtime,report='text')
device.build(directory='output', compile=True, run=True, debug=False)
# ###########################################
# Make plots
# ###########################################
# always convert spikemon.t and spikemon.i to array-s before indexing
# spikemon.i[] indexing is extremely slow!
spiket = array(sm.t/second) # take spiketimes of all neurons
spikei = array(sm.i)
fig = figure()
cols = ['r','b','g','c']
# raster plot
subplot(231)
plot(sminp1.t/second,sminp1.i,',')
xlim([0,1])
xlabel("time (s)")
# weight evolution
subplot(232)
meanpoolw = []
for k in range(Npools):
meanpoolw.append(mean(wm.w[k*Ninp:(k+1)*Ninp,:],axis=0)/w0)
plot(wm.t/second,meanpoolw[-1],'-'+cols[k],lw=(4-k))
xlabel("time (s)")
ylabel("PCA-weight")
title('weight evolution')
# plot output firing rate sm_rho.rho_out[nrn_idx,time_idx]
subplot(234)
plot(sm_rho.t/second,sm_rho.rho_out[0]/Hz,'-')
xlim([0,simtime/second])
xlabel("")
# plot final weights wm.w[syn_idx,time_idx]
subplot(233)
plot(range(Npools*Ninp),wm.w[:,-1],'.')
for k in range(Npools):
meanpoolw_end = mean(wm.w[k*Ninp:(k+1)*Ninp,-1])
plot([k*Ninp,(k+1)*Ninp],[meanpoolw_end,meanpoolw_end],'-'+cols[k],lw=3)
xlabel("pre-neuron #")
ylabel("weight (/w0)")
title("end wts")
# plot averaged weights over last 50s (weights are sampled per second)
subplot(236)
plot(range(Npools*Ninp),mean(wm.w[:,-50:],axis=1),'.')
for k in range(Npools):
meanpoolw_end = mean(wm.w[k*Ninp:(k+1)*Ninp,-50:])
plot([k*Ninp,(k+1)*Ninp],[meanpoolw_end,meanpoolw_end],'-'+cols[k],lw=3)
xlabel("pre-neuron #")
ylabel("weight (/w0)")
title("mean (50s) end wts")
fig = figure()
# plot eigenvectors of corr = Q^T Q matrix
w,v = np.linalg.eig(corr)
subplot(131)
#plot(v)
for k in range(Npools):
plot(v[:,k],'.-'+cols[k],lw=4-k)
xlabel("pre-neuron #")
ylabel("weight (/w0)")
title('eigenvectors of corr matrix')
# weight evolution along eigenvectors of corr matrix
subplot(132)
for k in range(Npools):
plot(wm.t/second,dot(v[:,k],meanpoolw),'-'+cols[k],lw=(4-k))
xlabel('time (s)')
ylabel("weight (/w0)")
title('weights along PCs')
subplot(133)
hist(wm.w[:,-1],bins=200)
#fig.tight_layout()
fig.subplots_adjust(left=None, bottom=None, right=No |
buddyzhangrenze/i2c | python/project/buddy/statusbar.py | Python | gpl-2.0 | 403 | 0.012407 | #!/usr/bin/python
# statusbar.py
import sys
from P | yQt4 import QtGui
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent = None):
QtGui.QMainWindow.__init__(self)
self.resize(250, 150)
self.setWindowTitle('statusbar')
self.statusBar().showMessage('Ready')
app = QtGui.QApplication(sys.argv)
main | = MainWindow()
main.show()
sys.exit(app.exec_())
|
karan259/GrovePi | Software/Python/grove_electricity_sensor.py | Python | mit | 2,530 | 0.003162 | #!/usr/bin/env python
#
# GrovePi Example for using the Grove Electricity Sensor (http://www.seeedstudio.com/wiki/Gro | ve_-_Electricity_Sensor)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://forum.dexterindustries.com/ | c/grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import grovepi
# Connect the Grove Electricity Sensor to analog port A0
# SIG,NC,NC,GND
sensor = 0
grovepi.pinMode(sensor,"INPUT")
# Vcc of the grove interface is normally 5v
grove_vcc = 5
while True:
try:
# Get sensor value
sensor_value = grovepi.analogRead(sensor)
# Calculate amplitude current (mA)
amplitude_current = (float)(sensor_value / 1024 * grove_vcc / 800 * 2000000)
# Calculate effective value (mA)
effective_value = amplitude_current / 1.414
# minimum_current = 1 / 1024 * grove_vcc / 800 * 2000000 / 1.414 = 8.6(mA)
# Only for sinusoidal alternating current
print("sensor_value", sensor_value)
print("The amplitude of the current is", amplitude_current, "mA")
print("The effective value of the current is", effective_value, "mA")
time.sleep(1)
except IOError:
print ("Error")
|
laurent-george/weboob | weboob/browser/filters/__init__.py | Python | agpl-3.0 | 729 | 0 | # -*- c | oding: utf-8 -*-
# Copyright(C) 2014 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERC | HANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
|
PoseidonAW/Py_RPG | playerdata.py | Python | apache-2.0 | 4,639 | 0.004311 | __author__ = 'Poseidon'
# Import the AW SDK Information
from ctypes import *
aw = CDLL("aw")
AWEVENT = CFUNCTYPE(None)
AWCALLBACK = CFUNCTYPE(None)
import sqlite3
import genericMessaging
db = sqlite3.connect('awrpg.db')
cursor = db.cursor()
from random import randint
session_dict = {}
def player_check(player_session):
player = session_dict[player_session]
# Access the DB and determine if the user has a player account
cursor.execute('''SELECT name FROM players WHERE name = ?''', (player,))
player_exists = cursor.fetchone()
if player_exists:
player_status = True
else:
player_status = False
return player_status
class Character:
def __init__(self):
self.name = ""
self.health = 1
self.mana = 1
self.race = "Human"
def player_registration(self, player_session):
player = session_dict[player_session]
# print "Player Session {}".format(player_session)
cursor.execute('''SELECT name FROM players WHERE name = ?''', (player,))
player_exists = cursor.fetchone()
if not player_exists:
hud_origin = 0
location_x = 0
location_y = 0
health = self.health
mana = self.mana
race = self.race
cursor.execute('''INSERT INTO players(name, health, mana, race)
VALUES(?,?,?,?)''', (player, health, mana, race))
db.commit()
print 'Player added: ' + player
aw.aw_say("Welcome to the club, " + player)
genericMessaging.py_hud_generic(player_session, "Health: {}, Mana: {}, Race: {}".format(health, mana, race),
1000,
hud_origin, location_x, location_y)
class Immortal(Character):
def __init__(self):
Character.__init__(self)
self.state = 'normal'
self.health = 10000
self.race = "Immortal"
def add_to_inventory(item, player_session):
player_name = session_dict[player_session]
genericMessaging.py_console(player_session, "Added {} to your inventory, {}!" .format(item, player_name))
cursor.execute('''SELECT inventory FROM players WHERE name = ?''', (player_name,))
inventory_current_row = cursor.fetchone()
if inventory_current_row:
cursor.execute('''UPDATE players SET inventory = ? WHERE name = ?''', (item, player_name,))
db.commit()
def stat_change(session, stat, amount):
target_session = session
player_name = session_dict[target_session]
# Select the appropriate stat for the player
cursor.execute("SELECT %s FROM players where name=?" % (stat), (player_name,))
stat_current_row = cursor.fetchone()
if stat_current_row:
stat_current = stat_current_row[0]
stat_new = stat_current + amount
# Update the player's appropriate stat by the given amount
cursor.execute('''UPDATE players SET %s = ? WHERE name = ?''' % (stat), (stat_new, player_name,))
db.commit()
update_stats(session)
# Update the Stats HUD on demand
def update_stats(player_session):
hud_id = 1000
player_name = session_dict[player_session]
hud_origin | = 0
location_x = 0
location_y = 0
cursor.execute('''SELECT name, health, mana, race FROM players WH | ERE name = ?''', (player_name,))
results = cursor.fetchone()
if results is not None:
aw.aw_hud_destroy(player_session, hud_id)
health = results[1]
mana = results[2]
race = results[3]
genericMessaging.py_hud_generic(player_session, "Health: {}, Mana: {}, Race: {}".format(health, mana, race),
1000,
hud_origin, location_x, location_y)
# Handle player damage
def damage_player(clicked_session):
health_reduction = 10
player_name = session_dict[clicked_session]
cursor.execute('''SELECT name FROM players WHERE name = ?''', (player_name,))
player_exists = cursor.fetchone()
# Check the DB to verify the clicked player exists in the DB
if player_exists:
genericMessaging.py_console(clicked_session, "{} you just got hit!".format(player_name))
cursor.execute('''SELECT name, health, mana, race FROM players WHERE name = ?''', (player_name,))
results = cursor.fetchone()
if results is not None:
stat = "health"
amount = health_reduction
# print "{} was attacked!" .format(player_name)
stat_change(clicked_session, stat, amount)
|
wwwslinger/airflow | airflow/operators/mysql_operator.py | Python | apache-2.0 | 1,063 | 0 | import logging
from airflow.hooks import MySqlHook
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
class MySqlOperator(BaseOperator):
"""
Executes sql code in a specific MySQL database
:param mysql_conn_id: reference to a specific mysql database
:type mysql_conn_id: string
:param sql: the sql code to be executed
:type sql: Can receive a str representing a sql statement,
a list of str (sql statements), or reference to a template file.
Template reference are recognized by str ending in '.sql'
"""
template_fields = ('sql',)
template_ext = ('.sql',)
ui_color = '#ededed'
@apply_defaults
def __init__(self, sql, mysql_conn_id='mysql_default', *args, **kwargs):
super(MySqlOperator, self).__init__(*args, **kwargs)
self.mysql_conn_id = mysql_conn_id
self.sql = sql
def execute(self, context):
| logging.info('Executing: ' + str(self.sql))
hook = MySqlHook(mysql_conn | _id=self.mysql_conn_id)
hook.run(self.sql)
|
osgcc/ryzom | nel/tools/build_gamedata/processes/displace/1_export.py | Python | agpl-3.0 | 1,885 | 0.007958 | #!/usr/bin/python
#
# \file 1_export.py
# \brief Export displace
# \date 2009-03-10-21-45-GMT
# \author Jan Boon (Kaetemi)
# Python port of game data build pipeline.
# Export displace
#
# NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/>
# Copyright (C) 2010 Winch Gate Property Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This prog | ram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this p | rogram. If not, see <http://www.gnu.org/licenses/>.
#
import time, sys, os, shutil, subprocess, distutils.dir_util
sys.path.append("../../configuration")
if os.path.isfile("log.log"):
os.remove("log.log")
log = open("log.log", "w")
from scripts import *
from buildsite import *
from process import *
from tools import *
from directories import *
printLog(log, "")
printLog(log, "-------")
printLog(log, "--- Export displace")
printLog(log, "-------")
printLog(log, time.strftime("%Y-%m-%d %H:%MGMT", time.gmtime(time.time())))
printLog(log, "")
mkPath(log, ExportBuildDirectory + "/" + DisplaceExportDirectory)
for dir in DisplaceSourceDirectories:
mkPath(log, DatabaseDirectory + "/" + dir)
copyFilesExtNoTreeIfNeeded(log, DatabaseDirectory + "/" + dir, ExportBuildDirectory + "/" + DisplaceExportDirectory, ".tga")
copyFilesExtNoTreeIfNeeded(log, DatabaseDirectory + "/" + dir, ExportBuildDirectory + "/" + DisplaceExportDirectory, ".png")
log.close()
# end of file
|
jyr/japos | goods/forms.py | Python | gpl-2.0 | 1,027 | 0.027264 | from django import forms
from japos.goods.models import Product, Group
class ProductForm(forms.ModelForm):
sku = forms.CharField(widget = forms.TextInput(attrs={'class': 'text-input small-input'}))
barcode = forms.CharField(widget = forms.TextInput(attrs={'class': 'text-in | put small-input'}))
name = forms.CharField(widget = forms.TextInput(attrs={'class': 'text-input small-input'}))
description = forms.CharField(widget = forms.Textarea(attrs={'class': 'textarea', 'rows': '5', 'cols': '50'}))
stock = forms.IntegerField(widget = forms.TextInput(attrs={'class': 'text-input small-input'}))
purchase_price = forms.DecimalField(widget = form | s.TextInput(attrs={'class': 'text-input small-input'}))
class Meta:
model = Product
class GroupForm(forms.ModelForm):
sku = forms.CharField(widget = forms.TextInput(attrs={'class': 'text-input small-input'}))
name = forms.CharField(widget = forms.TextInput(attrs={'class': 'text-input small-input'}))
class Meta:
model = Group |
TTSpaxxy/SpaghettiPy | src/mangle.py | Python | gpl-2.0 | 7,428 | 0.011443 | #
# Copyright (C) 2014 Aaron Cohen
#
# This file is part of SpaghettiPy.
#
# SpaghettiPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SpaghettiPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SpaghettiPy. If not, see <http://www.gnu.org/licenses/>.
#
#Begin imports
import random
import parser
#End imports
class Subdivision(object):
statements = []
parentBlock = None
subID = -1
nextSub = None
#End class Subdivision
class Block(object):
declaration = ""
statements = []
subdivisions = []
children = []
parent = None
level = -1
blockID = -1
beginning = 0
end = 0
#End class Block
def findBlocks(statements, currentStatement, currentLevel):
"""
Creates a heirarchy of Blocks out of a list of Statements
"""
block = Block()
block.level = currentLevel
block.beginning = currentStatement
if currentLevel > 0:
block.declaration = statements[currentStatement | - 2].plaintext
#End if
else:
block.declaration = "GLOBAL SCOPE"
#End else
while True:
if statements[currentStatement],kind == "Begin Code Block":
block.children.append(findBlocks(statements, currentStatement + 1, currentLevel + 1))
block.children[-1].parent = b | lock
statements.append(Statement("BREAK", "Break at statement " + str(currentStatement), statements[currentStatement].line))
currentStatement = block.children[-1].end + 1
#End if
elif statements[currentStatement].kind == "End Code Block" or currentStatement == len(statements):
block.end = currentStatement
return block
#End elif
block.statements.append(statements[currentStatement++])
#End while
#End findBlocks(statements, currentStatement, currentLevel, blocksSoFar):
def subdivide(parent):
"""
Creates Subdivisions out of a heirarchy of Blocks
"""
if len(parent.children) > 0:
for child in parent.children:
subdivide(child)
#End for
#End if
for i in range(0, len(parent.statements), 2):
subdivision = Subdivision()
subdivision.parentBlock = parent
subdivision.statements.append(parent.statements[i])
if i != len(parent.statements) - 1:
subdivision.statements.append(parent.statements[i + 1])
#End if
parent.subvisions.append(subdivision)
if len(parent.subdivisions) > 1:
parent.subdivisions[-2].nextSub = parent.subdivisions[-1]
#End if
#End for
#End subdivide(parent)
def link(parent):
"""
Links subdivided Blocks together
"""
currentChild = 0
for i in xrange(0, len(parent.subdivisions)):
if parent.subdivisions[i].statements[0].kind is "BREAK":
parent.subdivisions[i-1].nextSub = parent.children[currentChild].subdivsions[0]
parent.children[currentChild].subdivisions[-1].nextSub = parent.subdivisions[i]
parent.subdivisions[i].statements.pop(0)
link(parent.children[currentChild++])
#End if
elif parent.subdivisions[i].statements[1].kind is "BREAK":
parent.subdivisions[i].nextSub = parent.children[currentChild].subdivisions[0]
parent.children[currentChild].subdivisions[-1].nextSub = parent.subdivisions[i + 1]
parent.subdivisions[i].statements.pop(1)
link(parent.children[currentChild++])
#End elif
#End for
#End link(parent)
def randomize(parent):
"""
Generates random IDs for blocks and subdivisions
"""
for subdivision in parent.subdivisions:
subdivision.subID = random.randint(0x0, 0xFFFFFFFF)
#End for
for child in parent.children:
randomize(child)
#End for
#End randomize(parent)
def deriveSource(parent):
"""
Takes the linked block heirarchy and outputs it into a string (Which it returns)
"""
newSource = ""
for subdivision in parent.subdivisions:
#End for
for child in parent.children:
newSource += deriveSource(child)
#End for
return newSource
#End deriveSource(parent)
"""
Overview:
1) Make the key the random seed
2) Put all variable declarations and function pointers into a new list, and remove prototypes
3) Turn the statements into blocks of code
4) Turn the code inside the blocks into subdivisions of two statements each
5) Link the subdivisions between blocks
6) Give each subdivision and block a pseudorandom ID
7) Rename all the variables after their original scope, then make them global
8) Print the new heirarchy into a string, and return it
9) Do a regex search and replace previous instances of variables with their new versions
"""
def mangle(statements, key):
"""
Mangles a list of Statements according to the above algorithm
"""
random.seed(key)
variables = []
functionPointers = []
for i in range(0, len(statements)):
if statements[i].kind == "Variable Declaration":
variables.append(statements.pop(i))
#End if
elif statements[i].kind == "Function Pointer Declaration":
statements.remove(i)
#End elif
elif statements[i].kind == "Function Prototype":
statements.remove(i)
#End elif
#End for
globalBlock = findBlocks(statements, 0, 0)
subdivide(globalBlock)
link(globalBlock)
randomize(globalBlock)
newSource = ""
for variable in variables:
#End for
newSource += deriveSource(globalBlock)
#End mangle(statements, key):
|
arcturusannamalai/open-tamil | examples/savaal.py | Python | mit | 325 | 0.01278 | # -*- coding: utf-8 -*-
# (C | ) 2015 Muthiah Annamalai
#
# This file is part of 'open-tamil' package tests
import tamil
import solthiruthi
from solthiruthi.dictionary import *

# Build a Tamil dictionary (TamilVU lexicon), then list every dictionary word
# that can be formed from the letters of the target word.
TVU_dict, _ = DictionaryBuilder.create(TamilVU)
word = 'சவால்’'
# NOTE(review): function name reconstructed from corrupted "combinagr | ams".
q = list(tamil.wordutils.combinagrams(word, TVU_dict))
print(('|'.join(q)))
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/effective_network_security_group.py | Python | mit | 1,777 | 0.002251 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveNetworkSecurityGroup(Model):
    """Effective network security group.

    :param network_security_group: The ID of network security group that is
     applied.
    :type network_security_group:
     ~azure.mgmt.network.v2016_12_01.models.SubResource
    :param association:
    :type association:
     ~azure.mgmt.network.v2016_12_01.models.EffectiveNetworkSecurityGroupAssociation
    :param effective_security_rules: A collection of effective security rules.
    :type effective_security_rules:
     list[~azure.mgmt.network.v2016_12_01.models.EffectiveNetworkSecurityRule]
    """

    # msrest serialization map: attribute name -> wire key and wire type.
    # NOTE(review): the 'association' type string was corrupted by a stray
    # " | " in the source and is reconstructed here.
    _attribute_map = {
        'network_security_group': {'key': 'networkSecurityGroup', 'type': 'SubResource'},
        'association': {'key': 'association', 'type': 'EffectiveNetworkSecurityGroupAssociation'},
        'effective_security_rules': {'key': 'effectiveSecurityRules', 'type': '[EffectiveNetworkSecurityRule]'},
    }

    def __init__(self, network_security_group=None, association=None, effective_security_rules=None):
        super(EffectiveNetworkSecurityGroup, self).__init__()
        self.network_security_group = network_security_group
        self.association = association
        self.effective_security_rules = effective_security_rules
|
cspang1/4534-08 | src/supervisory/test_protocol/test_serial_com.py | Python | gpl-3.0 | 1,683 | 0.005348 | import serial
import sys
import time
import matplotlib.pyplot as plt
import numpy
def update_line(h1, x, y):
    # Append the point (x, y) to the matplotlib line h1's data and redraw.
    # Assumes h1 is a Line2D created by plt.plot() — TODO confirm with caller.
    h1.set_xdata(numpy.append(h1.get_xdata(), x))
    h1.set_ydata(numpy.append(h1.get_ydata(), y))
    plt.draw()
'''
__author__ = 'tjd08a'
'''
# Main script: open the serial port given on the command line, reboot the
# attached device, then count and report incoming messages once per second.
# NOTE(review): this section was corrupted by dataset residue and its
# indentation was lost; the structure below is a best-effort reconstruction.
port = None
for arg in sys.argv:
    port = arg  # last CLI argument wins; presumably the serial device path
ser = serial.Serial(port, baudrate=57600, timeout=10)

readNum = False
counter = 0  # reconstructed from corrupted "cou | nter"
seconds = 0
# h1, = plt.plot([], [])

# Reboot sequence below
ser.write('$$$')
time.sleep(1)
ser.write('reboot\r')
time.sleep(3)

start = None
stop = None
initial = True
# plt.show(h1)

while 1:
    # ser.write("hello world")
    bytesWaiting = ser.inWaiting()
    if bytesWaiting:
        # print bytesWaiting
        letter = ser.read(1)
        val = ord(letter)
        if not readNum:
            if val >= 128:
                # print "Ready To Receive"
                # NOTE(review): command byte reconstructed from corrupted
                # source (" | r") — verify against device protocol.
                ser.write("r")
                readNum = True
                if initial:
                    start = time.time()
                    initial = False
        else:
            end = time.time()
            # print "Received %i" % val
            if (end - start >= 1):
                seconds += 1
                print("%d: Total Messages Received - %d" % (seconds, counter))
                start = time.time()
            if (val > 100):
                if (val == 255):
                    # print "Stop byte received"
                    ser.write('e')
                    readNum = False
                else:
                    print("Error: Incorrect value received")
                    print(val)
            # print val
            # update_line(h1, counter, val)
            counter += 1
    ser.flush()
myselfHimanshu/Udacity-DataML | Intro-To-Data-Science/Lesson3/PS3_6.py | Python | gpl-2.0 | 743 | 0.005384 | import numpy as np
import scipy
import matplotlib.pyplot as plt
def plot_residuals(turnstile_weather, predictions):
    '''
    Using the same methods that we used to plot a histogram of entries
    per hour for our data, why don't you make a histogram of the residuals
    (that is, the difference between the original hourly entry data and
    the predicted values).

    Try different binwidths for your histogram.

    Based on this residual histogram, do you have any insight into how our
    model performed?  Reading a bit on this webpage might be useful:
    http://www.itl.nist.gov/div898/handbook/pri/section2/pri24.htm
    '''
    # Assumes turnstile_weather is a pandas DataFrame with an
    # 'ENTRIESn_hourly' column aligned with predictions — TODO confirm.
    plt.figure()
    (turnstile_weather['ENTRIESn_hourly'] - predictions).hist(bins=50)
    return plt
|
bhipple/brobot | currency.py | Python | gpl-3.0 | 336 | 0 | # | Lookup Bitcoin value from exchanges
from exchanges.bitfinex import Bitfinex
import re
def bitcoinValue(msg):
    """Return the current Bitcoin price (from Bitfinex) as a chat reply.

    If *msg* contains the word "moon" (case-insensitive) the reply is
    prefixed with "To the moon!", otherwise with "Bitcoin:".
    NOTE(review): the final identifier was corrupted ("formattedVa | l")
    in the source and is repaired here.
    """
    val = Bitfinex().get_current_price()
    formattedVal = "$" + "{:,.2f}".format(val)
    if re.search(r"(?i)moon", msg):
        return "To the moon! " + formattedVal
    else:
        return "Bitcoin: " + formattedVal
|
hofmannedv/training-python | usecases/patterns-in-strings/pattern.py | Python | gpl-2.0 | 1,056 | 0.012311 | # -----------------------------------------------------------
# find number patterns in strings
#
# (C) 2016 Frank Hofmann, Berlin, Germany
# Released under GNU Public License (GPL)
# email frank.hofmann@efho.de
# -----------------------------------------------------------
#

# use the regex module
import re

# sample data sets
text1 = "this is contract 14556-658, London, postcode SE30AF"
text2 = "computer 2345-56, value EUR 255.18"
text3 = "processor,pr1566,45.00,15"

# digits with a hyphen
# NOTE(review): raw strings (r'\d+-\d+') would avoid invalid-escape
# warnings on Python 3.6+.
pattern1 = re.compile('\d+-\d+')

# single pattern
m = pattern1.match('123-456')
print (m.group())

# detect all patterns
m = pattern1.findall('123-456 789-012')
print (m)

# detect pattern in text
m = pattern1.findall(text1)
print (m)

# postcode pattern (e.g. SE30AF: letters, digits, letters)
pattern2 = re.compile('[A-Z]+\d+[A-Z]+')
print (pattern2.findall(text1))

# float pattern
pattern3 = re.compile('\d+\.\d+')
print (pattern3.findall(text2))

# split dataset by delimiter
pattern4 = re.compile(',')
description, productId, price, amount = pattern4.split(text3)
print (description, productId, price, amount)
|
# Teaching example: iterate a tuple by index with a while loop.
# NOTE(review): reconstructed from dataset-corrupted source.
t1 = ("apple", "banana", "cherry", "durian", "orange")
i = 0
while i < len(t1):
    print(t1[i])
    i += 1
pmghalvorsen/gramps_branch | gramps/gen/test/user_test.py | Python | gpl-2.0 | 1,410 | 0.002837 | # -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2013 Vassilii Khachaturov <vassilii@tarunz.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
""" Unittest for user.py """
from __future__ import print_function
import unittest
from .. import user
class TestUser(object):
    """Shared fixture constants for the prompt tests below."""
    TITLE = "Testing prompt"
    MSG = "Choices are hard. Nevertheless, please choose!"
    ACCEPT = "To be"
    REJECT = "Not to be"
class TestUser_prompt(unittest.TestCase):
    """Tests for the base gramps User.prompt() implementation.

    NOTE(review): setUp's assignment was corrupted ("self.user = | user.User()")
    in the source and is repaired here.
    """

    def setUp(self):
        self.user = user.User()

    def test_returns_False(self):
        # The base User class has no UI, so prompt() is expected to decline.
        assert not self.user.prompt(
            TestUser.TITLE, TestUser.MSG, TestUser.ACCEPT, TestUser.REJECT)

if __name__ == "__main__":
    unittest.main()
|
obi-two/Rebelion | data/scripts/templates/object/mobile/shared_dressed_spice_collective_sentry_twk_male_01.py | Python | mit | 469 | 0.046908 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Creature template for the Spice Collective sentry NPC.

    NOTE(review): the .iff path was corrupted ("objec | t/...") in the
    source; reconstructed to match this module's own file name.
    """
    result = Creature()

    result.template = "object/mobile/shared_dressed_spice_collective_sentry_twk_male_01.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","twilek_base_male")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
gammalib/gammalib | inst/cta/test/test_CTA.py | Python | gpl-3.0 | 25,438 | 0.007155 | # ==========================================================================
# This module performs unit tests for the GammaLib CTA module.
#
# Copyright (C) 2012-2021 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
import os
import gammalib
import math
import test_support
# ================================== #
# Test class for GammaLib CTA module #
# ================================== #
class Test(gammalib.GPythonTestSuite):
"""
Test class for GammaLib CTA module
"""
# Constructor
def __init__(self):
    """
    Constructor.

    Reads the TEST_CTA_DATA environment variable (raises KeyError if it
    is unset) and derives the calibration database path from it.
    """
    # Call base class constructor
    gammalib.GPythonTestSuite.__init__(self)

    # Set test directories
    self._data = os.environ['TEST_CTA_DATA']
    self._caldb = self._data + '/../caldb'

    # Return
    return
# Setup GCTAEventList container
def _setup_eventlist(self):
    """
    Setup GCTAEventList container with 10 synthetic event atoms.

    Returns
    -------
    events : `~gammalib.GCTAEventList`
        GCTAEventList container
    """
    # Locals renamed so they no longer shadow the builtins list() and dir().
    events = gammalib.GCTAEventList()
    for i in range(10):
        inst_dir = gammalib.GCTAInstDir(gammalib.GSkyDir(), float(i), float(i))
        energy   = gammalib.GEnergy(float(i), 'MeV')
        time     = gammalib.GTime(float(i), 'sec')
        atom     = gammalib.GCTAEventAtom(inst_dir, energy, time)
        events.append(atom)

    # Return event list container
    return events
# Setup GCTAEventCube
def _setup_eventcube(self):
    """
    Setup GCTAEventCube (5x5x2 sky map, 2 energy bins, 1 second GTI)
    with every bin filled with one count.

    Returns
    -------
    cube : `~gammalib.GCTAEventCube`
        GCTAEventCube
    """
    # Locals renamed so they no longer shadow the builtins map() and bin();
    # the original's unused "counts" accumulator has been removed.
    skymap = gammalib.GSkyMap('CAR', 'CEL', 0.0, 0.0, 1.0, 1.0, 5, 5, 2)
    ebds   = gammalib.GEbounds(2, gammalib.GEnergy(1.0, 'TeV'),
                               gammalib.GEnergy(10.0, 'TeV'))
    gti    = gammalib.GGti(gammalib.GTime(0.0, 'sec'),
                           gammalib.GTime(1.0, 'sec'))
    cube   = gammalib.GCTAEventCube(skymap, ebds, gti)
    for cube_bin in cube:
        cube_bin.counts(1.0)

    # Return event cube
    return cube
# Test GCTAEventList class access operators
def _test_eventlist_access(self):
    """
    Test GCTAEventList class observation access
    """
    # Renamed local so it no longer shadows the builtin list().
    events = self._setup_eventlist()

    # Perform event list access tests
    test_support.energy_container_access_index(self, events)

    # Return
    return
# Test GCTAEventList class slicing
def _test_eventlist_slicing(self):
    """
    Test GCTAEventList class slicing
    """
    # Renamed local so it no longer shadows the builtin list().
    events = self._setup_eventlist()

    # Perform slicing tests
    test_support.energy_container_slicing(self, events)

    # Return
    return
# Test effective area response
def _test_aeff(self):
    """
    Test GCTAAeff classes (2D, performance-table and ARF responses).

    NOTE(review): two message string fragments were corrupted by stray
    " | " residue in the source and are repaired here.
    """
    # Test GCTAAeff2D file constructor
    filename = self._caldb + '/prod1_gauss.fits'
    aeff = gammalib.GCTAAeff2D(filename)

    # Test Aeff values
    self.test_value(aeff(0.0, 0.0), 5535774176.75, 0.1,
                    'Test reference effective area value')
    self.test_value(aeff(1.0, 0.0), 20732069462.7, 0.1,
                    'Test reference effective area value')
    self.test_value(aeff(0.0, 0.01745), 5682897797.76, 0.1,
                    'Test reference effective area value')
    self.test_value(aeff(1.0, 0.01745), 18446656815.1, 0.1,
                    'Test reference effective area value')

    # Test that Aeff values outside boundaries are zero
    self.test_value(aeff(-1.80001, 0.0), 0.0, 1.0e-6,
                    'Test that effective area is zero for energy below'
                    ' minimum energy')
    self.test_value(aeff(+2.20001, 0.0), 0.0, 1.0e-6,
                    'Test that effective area is zero for energy above'
                    ' maximum energy')
    self.test_value(aeff(0.0, -0.00001), 0.0, 1.0e-6,
                    'Test that effective area is zero for offset angle'
                    ' below minimum offset angle')
    self.test_value(aeff(0.0, 0.13963), 0.0, 1.0e-6,
                    'Test that effective area is zero for offset angle'
                    ' above maximum offset angle')

    # Test GCTAAeffPerfTable file constructor
    filename = self._caldb + '/cta_dummy_irf.dat'
    aeff = gammalib.GCTAAeffPerfTable(filename)

    # Test Aeff values
    self.test_value(aeff(0.0, 0.0), 2738898000.0, 0.1)
    self.test_value(aeff(1.0, 0.0), 16742420500.0, 0.1)
    self.test_value(aeff(0.0, 0.01745), 2590995083.29, 0.1)
    self.test_value(aeff(1.0, 0.01745), 15838314971.2, 0.1)

    # Test GCTAAeffArf file constructor
    filename = self._caldb + '/dc1/arf.fits'
    aeff = gammalib.GCTAAeffArf(filename)

    # Test Aeff values
    self.test_value(aeff(0.0, 0.0), 1607246236.98, 0.1)
    self.test_value(aeff(1.0, 0.0), 4582282342.98, 0.1)
    self.test_value(aeff(0.0, 0.01745), 1607246236.98, 0.1)
    self.test_value(aeff(1.0, 0.01745), 4582282342.98, 0.1)

    # Return
    return
# Test point spread function response
def _test_psf(self):
"""
Test GCTAPsf classes
"""
# Test GCTAPsf2D file constructor
filename = self._caldb + '/prod1_gauss.fits'
psf = gammalib.GCTAPsf2D(filename)
# Test PSF values
self.test_value(psf(0.0, 0.0, 0.0), 163782.469465, 1.0e-6)
self.test_value(psf(0.001, 0.0, 0.0), 97904.9307797, 1.0e-6)
self.test_value(psf(0.0, 1.0, 0.0), 616076.98558, 1.0e-6)
self.test_value(psf(0.001, 1.0, 0.0), 88932.681708, 1.0e-6)
self.test_value(psf(0.0, 1.0, 0.01745), 433247.309504, 1.0e-6)
self.test_value(psf(0.001, 1.0, 0.01745), 111075.0692681, 1.0e-6)
# Test GCTAPsfKing file constructor
filename = self._caldb + '/prod1_king.fits'
psf = gammalib.GCTAPsfKing(filename)
# Test PSF values
self.test_value(psf(0.0, 0.0, 0.0), 213616.312600672, 1.0e-6)
self.test_value(psf(0.001, 0.0, 0.0), 90918.3030269623, 1.0e-6)
self.test_value(psf(0.0, 1.0, 0.0), 1126804.99931516, 1.0e-5)
self.test_value(psf(0.001, 1.0, 0.0), 54873.6646449112, 1.0e-6)
self.test_value(psf(0.0, 1.0, 0.01745), 660972.636049452, 1.0e-6)
self.test_value(psf(0.001, 1.0, 0.01745), 80272.4048345619, 1.0e-6)
# Test GCTAPsfPerfTable file constructor
filename = self._caldb + '/cta_dummy_irf.dat'
psf = gammalib.GCTAPsfPerfTable(filename)
# Test PSF values
self.test_value(psf(0.0, 0.0, 0.0), 537853.354917, 1.0e-6)
self.test_value(psf(0.001, 0.0, 0.0), 99270.360144, 1.0e-6)
self.test_value(psf(0.0, 1.0, 0.0), 1292604.7473727, 1.0e-6)
self.test_value(psf(0.001, 1.0, 0.0), 22272.4258111, 1.0e-6)
self.test_value(psf(0.0, 1.0, 0.01745), 1292604.7473727, 1.0e-6)
self.test_value(psf(0.001, 1.0, 0.01745), 22272.4258111, 1.0e-6)
# Test GCTAPsfVector file constructor
filename = self._caldb + '/dc1/psf_magic.fits'
|
ltiao/networkx | networkx/algorithms/link_analysis/hits_alg.py | Python | bsd-3-clause | 9,424 | 0.009975 | """Hubs and authorities analysis of graph structure.
"""
# Copyright (C) 2008-2012 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
# NetworkX:http://networkx.github.io/
import networkx as nx
from networkx.exception import NetworkXError
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__all__ = ['hits','hits_numpy','hits_scipy','authority_matrix','hub_matrix']
def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True):
    """Return HITS hubs and authorities values for nodes.

    The HITS algorithm computes two numbers for a node.
    Authorities estimates the node value based on the incoming links.
    Hubs estimates the node value based on outgoing links.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    max_iter : integer, optional
      Maximum number of iterations in power method.

    tol : float, optional
      Error tolerance used to check convergence in power method iteration.

    nstart : dictionary, optional
      Starting value of each node for power method iteration.

    normalized : bool (default=True)
       Normalize results by the sum of all of the values.

    Returns
    -------
    (hubs,authorities) : two-tuple of dictionaries
       Two dictionaries keyed by node containing the hub and authority
       values.

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> h,a=nx.hits(G)

    Notes
    -----
    The eigenvector calculation is done by the power iteration method
    and has no guarantee of convergence.  The iteration will stop
    after max_iter iterations or an error tolerance of
    number_of_nodes(G)*tol has been reached.

    The HITS algorithm was designed for directed graphs but this
    algorithm does not check if the input graph is directed and will
    execute on undirected graphs.

    References
    ----------
    .. [1] A. Langville and C. Meyer,
       "A survey of eigenvector methods of web information retrieval."
       http://citeseer.ist.psu.edu/713792.html
    .. [2] Jon Kleinberg,
       Authoritative sources in a hyperlinked environment
       Journal of the ACM 46 (5): 604-32, 1999.
       doi:10.1145/324133.324140.
       http://www.cs.cornell.edu/home/kleinber/auth.pdf.
    """
    # isinstance replaces the original type(G)== checks (PEP 8 idiom; also
    # rejects MultiGraph/MultiDiGraph subclasses, which equally carry
    # multiedges).
    if isinstance(G, (nx.MultiGraph, nx.MultiDiGraph)):
        raise Exception("hits() not defined for graphs with multiedges.")
    if len(G) == 0:
        return {}, {}
    # choose fixed starting vector if not given
    if nstart is None:
        h = dict.fromkeys(G, 1.0 / G.number_of_nodes())
    else:
        h = nstart
        # normalize starting vector
        s = 1.0 / sum(h.values())
        for k in h:
            h[k] *= s
    i = 0
    while True:  # power iteration: make up to max_iter iterations
        hlast = h
        h = dict.fromkeys(hlast.keys(), 0)
        a = dict.fromkeys(hlast.keys(), 0)
        # this "matrix multiply" looks odd because it is
        # doing a left multiply a^T=hlast^T*G
        for n in h:
            for nbr in G[n]:
                a[nbr] += hlast[n] * G[n][nbr].get('weight', 1)
        # now multiply h=Ga
        for n in h:
            for nbr in G[n]:
                h[n] += a[nbr] * G[n][nbr].get('weight', 1)
        # normalize vectors by their maximum entry
        s = 1.0 / max(h.values())
        for n in h:
            h[n] *= s
        s = 1.0 / max(a.values())
        for n in a:
            a[n] *= s
        # check convergence, l1 norm
        err = sum(abs(h[n] - hlast[n]) for n in h)
        if err < tol:
            break
        if i > max_iter:
            raise NetworkXError(
                "HITS: power iteration failed to converge in %d iterations." % (i + 1))
        i += 1
    if normalized:
        s = 1.0 / sum(a.values())
        for n in a:
            a[n] *= s
        s = 1.0 / sum(h.values())
        for n in h:
            h[n] *= s
    return h, a
def authority_matrix(G, nodelist=None):
    """Return the HITS authority matrix (A^T A of the adjacency matrix)."""
    adjacency = nx.to_numpy_matrix(G, nodelist=nodelist)
    return adjacency.T * adjacency
def hub_matrix(G, nodelist=None):
    """Return the HITS hub matrix (A A^T of the adjacency matrix)."""
    adjacency = nx.to_numpy_matrix(G, nodelist=nodelist)
    return adjacency * adjacency.T
def hits_numpy(G, normalized=True):
    """Return HITS hubs and authorities values for nodes.

    The HITS algorithm computes two numbers for a node: authorities
    estimates the node value based on incoming links, hubs based on
    outgoing links.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    normalized : bool (default=True)
       Normalize results by the sum of all of the values.

    Returns
    -------
    (hubs,authorities) : two-tuple of dictionaries
       Two dictionaries keyed by node containing the hub and authority
       values.

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> h,a=nx.hits(G)

    Notes
    -----
    The eigenvector calculation uses NumPy's interface to LAPACK.

    The HITS algorithm was designed for directed graphs but this
    algorithm does not check if the input graph is directed and will
    execute on undirected graphs.

    References
    ----------
    .. [1] A. Langville and C. Meyer,
       "A survey of eigenvector methods of web information retrieval."
       http://citeseer.ist.psu.edu/713792.html
    .. [2] Jon Kleinberg,
       Authoritative sources in a hyperlinked environment
       Journal of the ACM 46 (5): 604-32, 1999.
       doi:10.1145/324133.324140.
       http://www.cs.cornell.edu/home/kleinber/auth.pdf.
    """
    try:
        import numpy as np
    except ImportError:
        raise ImportError(
            "hits_numpy() requires NumPy: http://scipy.org/")
    if len(G) == 0:
        return {}, {}
    nodes = list(G)
    # Hub scores: dominant eigenvector of the hub matrix A A^T.
    evals, evecs = np.linalg.eig(nx.hub_matrix(G, nodes))
    dominant = evals.argsort()[-1]
    h = np.array(evecs[:, dominant]).flatten()
    # Authority scores: dominant eigenvector of the authority matrix A^T A.
    evals, evecs = np.linalg.eig(nx.authority_matrix(G, nodes))
    dominant = evals.argsort()[-1]
    a = np.array(evecs[:, dominant]).flatten()
    if normalized:
        h = h / h.sum()
        a = a / a.sum()
    else:
        h = h / h.max()
        a = a / a.max()
    hubs = dict(zip(nodes, map(float, h)))
    authorities = dict(zip(nodes, map(float, a)))
    return hubs, authorities
def hits_scipy(G,max_iter=100,tol=1.0e-6,normalized=True):
"""Return HITS hubs and authorities values for nodes.
The HITS algorithm computes two numbers for a node.
Authorities estimates the node value based on the incoming links.
Hubs estimates the node value based on outgoing links.
Parameters
-----------
G : graph
A NetworkX graph
max_iter | : interger, optional
Maximum number of iterations in power method.
tol : float, optional
Error tolerance used to check convergence in power method iteration.
nstart : dictionary, optional
Starting value of each node for power method iteration.
normalized : bool (default=True)
Normalize results by the sum of all of the values.
Returns
-------
(hubs,authorit | ies) : two-tuple of dictionaries
Two dictionaries keyed by node containing the hub and authority
values.
Examples
--------
>>> G=nx.path_graph(4)
>>> h,a=nx.hits(G)
Notes
-----
This implementation uses SciPy sparse matrices.
The eigenvector calculation is done by the power iteration method
and has no guarantee of convergence. The iteration will stop
after max_iter iterations or an error tolerance of
number_of_nodes(G)*tol has been reached.
The HITS algorithm was designed for directed graphs but this
algorithm does not check if the input graph is directed and will
execute on undirected graphs.
References
----------
.. [1] A. Langville and C. Meyer,
"A survey of eigenvector methods of web information retrieval."
http://citeseer.ist.psu.edu/713792.html
.. [2] Jon Kleinberg,
Authoritative sources in a hyperlinked environment
Journal of the ACM 46 (5): 604-632, 1999.
doi:10.1145/324133.324140.
http://www.cs.cornell.edu/home/kleinber/auth.pdf.
"""
try:
import scipy.spars |
AMLab-Amsterdam/lie_learn | lie_learn/groups/SO3_tests.py | Python | mit | 3,912 | 0.00409 |
import numpy as np
from lie_learn.groups.SO3 import *
def test_change_parameterization():
    """Round-trip two random rotations through every pair of SO(3)
    parameterizations and check both directions agree.

    NOTE(review): two tokens were corrupted by stray " | " residue in the
    source ("is_e | qual" and "str | (R2_p2)") and are repaired here.
    """

    def is_equal(R1, R2, p):
        if p == 'Q':
            # Quaternions are only defined up to a sign, so check each row, what sign we need
            for i in range(R1.shape[0]):
                if not (np.allclose(R1[i, ...], R2[i, ...]) or np.allclose(R1[i, ...], -R2[i, ...])):
                    return False
            return True
        elif p == 'EV':
            # Euler vector (x,y,z,theta) == (-x,-y,-z,-theta mod 2pi)
            for i in range(R1.shape[0]):
                R2i = np.array([-R2[i, 0], -R2[i, 1], -R2[i, 2], (-R2[i, 3]) % (2 * np.pi)])
                if not (np.allclose(R1[i, ...], R2[i, ...]) or np.allclose(R1[i, ], R2i)):
                    return False
            return True
        else:
            return np.allclose(R1, R2)

    for p1 in parameterizations:
        for p2 in parameterizations:
            # Create two random rotations in 313 Euler angles
            R1_EA313 = (np.random.rand(3) * np.array([2 * np.pi, np.pi, 2 * np.pi]))[np.newaxis, :]
            R2_EA313 = (np.random.rand(3) * np.array([2 * np.pi, np.pi, 2 * np.pi]))[np.newaxis, :]
            R_EA313 = np.r_[R1_EA313, R2_EA313]

            R1_p1 = change_coordinates(p_from='EA313', p_to=p1, g=R1_EA313)
            R1_p2 = change_coordinates(p_from='EA313', p_to=p2, g=R1_EA313)
            R2_p1 = change_coordinates(p_from='EA313', p_to=p1, g=R2_EA313)
            R2_p2 = change_coordinates(p_from='EA313', p_to=p2, g=R2_EA313)
            R_p1 = change_coordinates(p_from='EA313', p_to=p1, g=R_EA313)
            R_p2 = change_coordinates(p_from='EA313', p_to=p2, g=R_EA313)

            R1_p2_from_R1_p1 = change_coordinates(p_from=p1, p_to=p2, g=R1_p1)
            R1_p1_from_R1_p2 = change_coordinates(p_from=p2, p_to=p1, g=R1_p2)
            R2_p2_from_R2_p1 = change_coordinates(p_from=p1, p_to=p2, g=R2_p1)
            R2_p1_from_R2_p2 = change_coordinates(p_from=p2, p_to=p1, g=R2_p2)
            R_p2_from_R_p1 = change_coordinates(p_from=p1, p_to=p2, g=R_p1)
            R_p1_from_R_p2 = change_coordinates(p_from=p2, p_to=p1, g=R_p2)

            assert is_equal(R1_p1_from_R1_p2, R1_p1, p1), (
                p1 + ' to ' + p2 + ' | R1_p1: ' + str(R1_p1) + ' | R1_p2: ' + str(R1_p2) + ' | R1_p1_from_R1_p2: ' +
                str(R1_p1_from_R1_p2))
            assert is_equal(R2_p1_from_R2_p2, R2_p1, p1), (
                p1 + ' to ' + p2 + ' | R2_p1: ' + str(R2_p1) + ' | R2_p2: ' + str(R2_p2) + ' | R2_p1_from_R2_p2: ' +
                str(R2_p1_from_R2_p2))
            assert is_equal(R_p1_from_R_p2, R_p1, p1), (
                p1 + ' to ' + p2 + ' | R_p1: ' + str(R_p1) + ' | R_p2: ' + str(R_p2) + ' | R_p1_from_R_p2: ' +
                str(R_p1_from_R_p2))
            assert is_equal(R1_p2_from_R1_p1, R1_p2, p2), (
                p1 + ' to ' + p2 + ' | R1_p1: ' + str(R1_p1) + ' | R1_p2: ' + str(R1_p2) + ' | R1_p2_from_R1_p1: ' +
                str(R1_p2_from_R1_p1))
            assert is_equal(R2_p2_from_R2_p1, R2_p2, p2), (
                p1 + ' to ' + p2 + ' | R2_p1: ' + str(R2_p1) + ' | R2_p2: ' + str(R2_p2) + ' | R2_p2_from_R2_p1: ' +
                str(R2_p2_from_R2_p1))
            assert is_equal(R_p2_from_R_p1, R_p2, p2), (
                p1 + ' to ' + p2 + ' | R_p1: ' + str(R_p1) + ' | R_p2: ' + str(R_p2) + ' | R_p2_from_R_p1: ' +
                str(R_p2_from_R_p1))
def test_invert():
    """Check that composing a rotation with its inverse yields the identity
    in every SO(3) parameterization.

    Fix: the original assertion message concatenated a str with a NumPy
    array ('not the identity: ' + eM), which would itself raise TypeError
    on failure; str(eM) is used instead.
    """
    for p in parameterizations:
        R_EA = np.random.rand(4, 5, 6, 3) * np.array([2 * np.pi, np.pi, 2 * np.pi])[None, None, None, :]
        R_p = change_coordinates(R_EA, p_from='EA313', p_to=p)
        R_p_inv = invert(R_p, parameterization=p)
        e = compose(R_p, R_p_inv, parameterization=p)
        eM = change_coordinates(e, p_from=p, p_to='MAT')
        assert np.isclose(np.sum(eM - np.eye(3)), 0.0), 'not the identity: ' + str(eM)
mk-pmb/heidisql-ubuntu-util | cfg.unpack.py | Python | gpl-2.0 | 3,798 | 0.001316 | #!/usr/bin/python
# -*- coding: UTF-8, tab-width: 4 -*-
# Python Coding Style: http://docs.python.org/tutorial/controlflow.html#intermezzo-coding-style
# Command Line Arguments Parser: http://docs.python.org/library/argparse.html
from __future__ import division
from sys import | argv, stdout, stderr
from codecs import open as cfopen
def main(invocation, *cli_args):
    """Unpack an exported HeidiSQL config dump into per-realm .ini files.

    cli_args[0] is the dump file: one option per line in the form
    key<|||>format<|||>value.  Application settings go to heidisql.ini,
    each server section to its own <section>.ini; server passwords are
    de-obfuscated on the way.
    """
    HEIDI_CHARSET = 'UTF-8'
    INI_CHARSET = 'UTF-8-sig'
    if len(cli_args) < 1:
        raise ValueError('not enough parameters. required: ConfigFileName')
    cfg_fn = cli_args[0]
    # Two realms: application-wide settings vs. per-server settings.
    cfg_realms = {
        'app': {},
        'srv': {},
    }
    for cfg_ln in cfopen(cfg_fn, 'r', HEIDI_CHARSET):
        # Each dump line is key<|||>format<|||>value (value may contain <|||>).
        cfg_key, cfg_fmt, cfg_value = cfg_ln.rstrip().split('<|||>', 2)
        cfg_realm, cfg_sect, cfg_key = split_cfg_key(cfg_key)
        if (cfg_realm, cfg_key) == ('srv', 'Password'):
            cfg_value = decode_heidi_password(cfg_value)
        cfg_realm = cfg_realms[cfg_realm]
        sect_dict = cfg_realm.get(cfg_sect)
        if sect_dict is None:
            sect_dict = cfg_realm[cfg_sect] = {}
        # Stored as "format|value" so the type tag survives the round trip.
        sect_dict[cfg_key] = cfg_fmt + '|' + cfg_value
    ini_fn = 'heidisql.ini'
    write_ini(cfopen(ini_fn, 'w', INI_CHARSET), cfg_realms['app'])
    # One .ini per server, named after the sanitized section name.
    for cfg_sect, sect_dict in cfg_realms['srv'].items():
        ini_fn = sanitize_file_name(cfg_sect).lower() + '.ini'
        write_ini(cfopen(ini_fn, 'w', INI_CHARSET), { cfg_sect: sect_dict })
def write_ini(dest, ini):
    """Serialize the nested dict *ini* ({section: {option: value}}) to the
    open file-like *dest* in .ini format, sections and options sorted
    alphabetically, with a blank line after each section."""
    for section in sorted(ini):
        options = ini[section]
        dest.write('[' + section + ']\n')
        for option in sorted(options):
            dest.write(option + '=' + options[option] + '\n')
        dest.write('\n')
def split_at_first_nonalpha(idstr, defaultPrefix=None):
    """Split *idstr* just AFTER its first non-alphabetic character.

    Returns (prefix_including_that_char, rest).  If every character is
    alphabetic, returns (defaultPrefix, idstr) unchanged.
    Fix: the loop variable no longer shadows the builtin chr().
    """
    for pos, ch in enumerate(idstr):
        if not ch.isalpha():
            pos += 1
            return idstr[0:pos], idstr[pos:]
    return defaultPrefix, idstr
def split_cfg_key(key):
    """Classify a raw HeidiSQL config key.

    Returns (realm, section, option):
    - 'Servers\\<name>\\<opt>'          -> ('srv', name, opt)
    - '<Prop>_<form>.<opt>' (form keys) -> ('app', form, '<opt>.<Prop>')
    - anything else                     -> ('app', 'HeidiSQL', key)
    """
    if key.startswith('Servers\\'):
        section, option = key.split('\\', 2)[1:]
        return 'srv', section, option
    parts = key.split('.', 1)
    if len(parts) == 2 and parts[0].lower().endswith('form'):
        # e.g. 'ColPositions_connform.ListSessions'
        prop_and_form = parts[0].split('_')
        if len(prop_and_form) == 2:
            prop, form_name = prop_and_form
            return 'app', form_name, parts[1] + '.' + prop
    return 'app', 'HeidiSQL', key
def decode_heidi_password(obfus):
    """Decode HeidiSQL's obfuscated password format.

    The last character of *obfus* is a hex digit giving a Caesar shift;
    the rest is a sequence of two-digit hex codes.  Printable ASCII comes
    through verbatim; backslash, single and double quotes and everything
    non-printable are emitted as a literal \\u00XX escape.  The result is
    wrapped in double quotes for the .ini file.
    """
    shift = -int(obfus[-1:], 16)
    encoded = obfus[:-1]
    decoded_chars = []
    for pos in range(0, len(encoded), 2):
        code = int(encoded[pos:pos + 2], 16) + shift
        decoded = None
        if (31 < code) and (code < 127):
            decoded = chr(code)
            if decoded in ('\\', '"', "'"):
                decoded = None
        if decoded is None:
            decoded = '\\u00' + hex(code).replace('0x', '00')[-2:]
        decoded_chars.append(decoded)
    return '"' + ''.join(decoded_chars) + '"'
def sanitize_file_name(wild):
    """Reduce *wild* to a conservative file-system-safe name.

    ASCII letters/digits plus '@' and '-' are kept verbatim; non-ASCII
    alphanumerics are dropped; every other character collapses runs into
    a single '_' separator.

    Fix: the original probed "fancy" characters with
    repr(char)[2:-1] != char, which only works for Python 2 unicode
    strings (repr is u'x'); under Python 3 it dropped EVERY alphanumeric.
    A plain ord() check expresses the intended ASCII-only filter on both.
    """
    sane = ''
    for char in wild:
        if char.isalnum() or (char in '@-'):
            if ord(char) > 127:
                # this alnum might be too fancy for some file systems.
                continue
            sane += char
            continue
        char = '_'
        if not sane.endswith(char):
            sane += char
    return sane
if __name__ == '__main__':
main(*argv)
|
softappeal/yass | py3/tutorial/generated/contract/__init__.py | Python | bsd-3-clause | 2,223 | 0 | from enum import Enum
from typing import List, Any, cast
import yass
from tutorial.base_types_external import Integer
# shows how to use contract internal base types
class ExpirationHandler(yass.BaseTypeHandler):
    """(De)serializes Expiration as a single zig-zag encoded int (the year)."""
    def readBase(self, reader: yass.Reader) -> 'Expiration':
        return Expiration(
            reader.readZigZagInt()
        )
    def writeBase(self, value: 'Expiration', writer: yass.Writer) -> None:
        writer.writeZigZagInt(value.year)
class Expiration:
    """Contract base type carrying only a year; wired to ExpirationHandler."""
    TYPE_DESC = yass.TypeDesc(yass.FIRST_DESC_ID + 1, ExpirationHandler())
    def __init__(self, year: int) -> None:
        self.year = year
    def __str__(self) -> str:
        return f"{self.year}"
class PriceKind(Enum):
    """Side of a quoted price."""
    BID = 0
    ASK = 1
class Price:
    """Quoted price for an instrument; fields are filled by the serializer."""
    def __init__(self) -> None:
        self.instrumentId: Integer = cast(Integer, None)
        self.kind: PriceKind = cast(PriceKind, None)
        self.value: Integer = cast(Integer, None)
@yass.abstract
class Instrument:
    """Abstract base for tradable instruments in the contract."""
    def __init__(self) -> None:
        self.id: Integer = cast(Integer, None)
        self.name: str = cast(str, None)
class SystemException(Exception):
    """Infrastructure-level failure transported across the wire."""
    def __init__(self) -> None:
        self.details: str = cast(str, None)
@yass.abstract
class ApplicationException(Exception):
    """Abstract base for business-level exceptions in this contract."""
    def __init__(self) -> None:
        pass
class UnknownInstrumentsException(ApplicationException):
    """Raised for instrument ids the service does not know.

    The onlyNeededForTests* attributes exist purely to exercise the
    serializer.  NOTE(review): the second attribute's assignment was
    corrupted ("self. | onlyNeededForTests2") in the source; repaired here.
    """
    def __init__(self) -> None:
        ApplicationException.__init__(self)
        self.instrumentIds: List[Integer] = cast(List[Integer], None)
        self.onlyNeededForTests1: Any = cast(Any, None)
        self.onlyNeededForTests2: bytes = cast(bytes, None)
        self.onlyNeededForTests3: Exception = cast(Exception, None)
class Node:
    """Graph node used to exercise cyclic/recursive serialization.

    NOTE(review): the 'next' assignment was corrupted ("| self.next") in
    the source; repaired here.
    """
    def __init__(self) -> None:
        self.id: float = cast(float, None)
        self.links: List[Node] = cast(List[Node], None)
        self.next: Node = cast(Node, None)
class EchoService:
    """Service stub: implementations return the given value unchanged."""
    def echo(self, value: Any) -> Any:
        raise NotImplementedError()
class PriceEngine:
    """Service stub: subscribe to price updates for the given instruments."""
    def subscribe(self, instrumentIds: List[Integer]) -> None:
        raise NotImplementedError()
class PriceListener:
    """Callback stub: invoked with each batch of new prices."""
    def newPrices(self, prices: List[Price]) -> None:
        raise NotImplementedError()
|
Userperson321/DigitalGold | share/qt/clean_mac_info_plist.py | Python | mit | 903 | 0.016611 | #!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Digitalgold-Qt.app contains the right plist (including the right version)
# fix made because of serval bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Digitalgold-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.repl | ace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().y | ear)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
|
openstack/ironic-lib | ironic_lib/disk_utils.py | Python | apache-2.0 | 30,540 | 0.000065 | # Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import re
import stat
import time
import warnings
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import imageutils
from oslo_utils import units
import tenacity
from ironic_lib.common.i18n import _
from ironic_lib import disk_partitioner
from ironic_lib import exception
from ironic_lib import utils
opts = [
cfg.IntOpt('efi_system_partition_size',
default=200,
help='Size of EFI system partition in MiB when configuring '
'UEFI systems for local boot.'),
cfg.IntOpt('bios_boot_partition_size',
default=1,
help='Size of BIOS Boot partition in MiB when configuring '
'GPT partitioned systems for local boot in BIOS.'),
cfg.StrOpt('dd_block_size',
default='1M',
help='Block size to use when writing to the nodes disk.'),
cfg.IntOpt('partition_detection_attempts',
default=3,
min=1,
help='Maximum attempts to detect a newly created partition.'),
cfg.IntOpt('partprobe_attempts',
default=10,
help='Maximum number of attempts to try to read the '
'partition.'),
cfg.IntOpt('image_convert_memory_limit',
default=2048,
help='Memory limit for "qemu-img convert" in MiB. Implemented '
'via the address space resource limit.'),
cfg.IntOpt('image_convert_attempts',
default=3,
help='Number of attempts to convert an image.'),
]
CONF = cfg.CONF
CONF.register_opts(opts, group='disk_utils')
LOG = logging.getLogger(__name__)
_PARTED_PRINT_RE = re.compile(r"^(\d+):([\d\.]+)MiB:"
r"([\d\.]+)MiB:([\d\.]+)MiB:(\w*):(.*):(.*);")
_PARTED_TABLE_TYPE_RE = re.compile(r'^.*partition\s+table\s*:\s*(gpt|msdos)',
re.IGNORECASE | re.MULTILINE)
CONFIGDRIVE_LABEL = "config-2"
MAX_CONFIG_DRIVE_SIZE_MB = 64
GPT_SIZE_SECTORS = 33
# Maximum disk size supported by MBR is 2TB (2 * 1024 * 1024 MB)
MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR = 2097152
# Limit the memory address space to 1 GiB when running qemu-img
QEMU_IMG_LIMITS = None
def _qemu_img_limits():
    """Return the shared process limits for qemu-img, creating them lazily.

    The limit object caps qemu-img's address space at the configured
    ``image_convert_memory_limit`` (MiB).
    """
    global QEMU_IMG_LIMITS
    if QEMU_IMG_LIMITS is None:
        limit_bytes = CONF.disk_utils.image_convert_memory_limit * units.Mi
        QEMU_IMG_LIMITS = processutils.ProcessLimits(
            address_space=limit_bytes)
    return QEMU_IMG_LIMITS
def list_partitions(device):
    """Get partitions information from given device.

    :param device: The device path.
    :returns: list of dictionaries (one per partition) with keys:
        number, start, end, size (in MiB), filesystem, partition_name,
        flags, path.
    """
    raw = utils.execute(
        'parted', '-s', '-m', device, 'unit', 'MiB', 'print',
        use_standard_locale=True, run_as_root=True)[0]
    if isinstance(raw, bytes):
        raw = raw.decode("utf-8")
    # Skip the two header lines of parted's machine-readable output.
    data_lines = [ln for ln in raw.split('\n') if ln.strip()][2:]
    # Example of line: 1:1.00MiB:501MiB:500MiB:ext4::boot
    keys = ('number', 'start', 'end', 'size', 'filesystem',
            'partition_name', 'flags')
    partitions = []
    for ln in data_lines:
        match = _PARTED_PRINT_RE.match(ln)
        if match is None:
            LOG.warning("Partition information from parted for device "
                        "%(device)s does not match "
                        "expected format: %(line)s",
                        dict(device=device, line=ln))
            continue
        # The first four fields are numeric; floats are truncated to ints.
        values = [int(float(grp)) if pos < 4 else grp
                  for pos, grp in enumerate(match.groups())]
        entry = dict(zip(keys, values))
        entry['path'] = partition_index_to_path(device, entry['number'])
        partitions.append(entry)
    return partitions
def count_mbr_partitions(device):
    """Count the number of primary and logical partitions on a MBR.

    :param device: The device path.
    :returns: A tuple with the number of primary partitions and logical
        partitions.
    :raise: ValueError if the device does not have a valid MBR partition
        table.
    """
    # -d do not update the kernel table
    # -s print a summary of the partition table
    output, err = utils.execute('partprobe', '-d', '-s', device,
                                run_as_root=True, use_standard_locale=True)
    if 'msdos' not in output:
        raise ValueError('The device %s does not have a valid MBR '
                         'partition table' % device)
    # Sample output: /dev/vdb: msdos partitions 1 2 3 <5 6 7>
    # The partitions with number > 4 (and inside <>) are logical partitions
    output = output.replace('<', '').replace('>', '')
    partitions = [int(s) for s in output.split() if s.isdigit()]

    return (sum(i < 5 for i in partitions), sum(i > 4 for i in partitions))
def get_disk_identifier(dev):
    """Read the 4-byte MBR disk identifier from a populated disk.

    The identifier (bytes 440-443 of the device) is appended to the PXE
    config so chain.c32 can pick the correct disk to chainload on nodes
    with multiple disks.
    http://www.syslinux.org/wiki/index.php/Comboot/chain.c32#mbr:

    :param dev: Path for the already populated disk device.
    :raises OSError: When the hexdump binary is unavailable.
    :returns: The Disk Identifier.
    """
    hexdump_output = utils.execute('hexdump', '-s', '440', '-n', '4',
                                   '-e', '''\"0x%08x\"''',
                                   dev, run_as_root=True,
                                   attempts=5, delay_on_retry=True)
    # execute() returns (stdout, stderr); only stdout carries the id.
    return hexdump_output[0]
def get_partition_table_type(device):
    """Get partition table type, msdos or gpt.

    :param device: the name of the device
    :return: 'gpt', 'msdos', or the string 'unknown' when the table type
        cannot be parsed from parted's output
    """
    out = utils.execute('parted', '--script', device, '--', 'print',
                        run_as_root=True, use_standard_locale=True)[0]
    m = _PARTED_TABLE_TYPE_RE.search(out)
    if m:
        return m.group(1)

    LOG.warning("Unable to get partition table type for device %s", device)
    return 'unknown'
def _blkid(device, probe=False, fields=None):
    """Run blkid on a device and return its value portion.

    :param device: block device path.
    :param probe: when True, pass ``-p`` (low-level superblock probe).
    :param fields: optional list of tags to restrict output to
        (each becomes a ``-s <tag>`` argument).
    :returns: everything after the leading "<device>: " prefix, or an
        empty string when blkid produced no output.
    """
    cmd_args = []
    if probe:
        cmd_args.append('-p')
    if fields:
        for field in fields:
            cmd_args.extend(['-s', field])
    output, err = utils.execute('blkid', device, *cmd_args,
                                use_standard_locale=True, run_as_root=True)
    if not output.strip():
        return ""
    return output.split(': ', 1)[1]
def _lsblk(device, deps=True, fields=None):
    """Run lsblk on a device and return its raw key="value" output.

    :param device: block device path.
    :param deps: include dependent devices (partitions/holders); when
        False, ``--nodeps`` restricts output to the device itself.
    :param fields: optional list of output columns; all columns are
        requested when omitted.
    :returns: stripped lsblk stdout.
    """
    cmd_args = ['--pairs', '--bytes', '--ascii']
    if not deps:
        cmd_args.append('--nodeps')
    if fields:
        cmd_args.extend(['--output', ','.join(fields)])
    else:
        cmd_args.append('--output-all')
    stdout, _err = utils.execute('lsblk', device, *cmd_args,
                                 use_standard_locale=True, run_as_root=True)
    return stdout.strip()
def get_device_information(device, probe=False, fields=None):
"""Get information about a device using blkid.
Can be applied to all block devices: disks, RAID, partitions.
:param device: Device name.
:param probe: DEPRECATED, do not use.
:param fields: A list of fields to request (all by default).
:return: A dictionary with req |
AsherBond/MondocosmOS | grass_trunk/lib/python/array.py | Python | agpl-3.0 | 3,001 | 0.070976 | """!@package grass.script.array
@brief GRASS Python scripting module (rasters with numpy)
Functions to use GRASS rasters with NumPy.
Usage:
@code
from grass.script import array as garray
...
@endcode
(C) 2010-2011 by Glynn Clements and the GRASS Development Team
This program is free software under the GNU General Public
License (>=v2). Read the file COPYING that comes with GRASS
for details.
@author Glynn Clements
"""
import os
import numpy
import core as grass
class array(numpy.memmap):
    """!A file-backed numpy array sized to the current GRASS region.

    The backing file is a GRASS tempfile which is removed when the
    array is closed.
    """

    def __new__(cls, dtype = numpy.double):
        """!Define new numpy array

        @param cls
        @param dtype data type (default: numpy.double)
        """
        reg = grass.region()
        r = reg['rows']
        c = reg['cols']
        shape = (r, c)

        filename = grass.tempfile()

        self = numpy.memmap.__new__(
            cls,
            filename = filename,
            dtype = dtype,
            mode = 'w+',
            shape = shape)

        self.filename = filename
        return self

    def _close(self):
        # NOTE(review): numpy.memmap._close is an internal numpy API;
        # verify it still exists on the targeted numpy version.
        numpy.memmap._close(self)
        if isinstance(self, array):
            grass.try_remove(self.filename)

    def read(self, mapname, null = None):
        """!Read raster map into array

        @param mapname name of raster map to be read
        @param null null value

        @return 0 on success
        @return non-zero code on failure
        """
        kind = self.dtype.kind
        size = self.dtype.itemsize

        # r.out.bin distinguishes floating point ('f') from integer ('i')
        # cells; 'biu' covers boolean, signed and unsigned integers.
        if kind == 'f':
            flags = 'f'
        elif kind in 'biu':
            flags = 'i'
        else:
            raise ValueError(_('Invalid kind <%s>') % kind)

        if size not in [1, 2, 4, 8]:
            raise ValueError(_('Invalid size <%d>') % size)

        return grass.run_command(
            'r.out.bin',
            flags = flags,
            input = mapname,
            output = self.filename,
            bytes = size,
            null = null,
            quiet = True)

    def write(self, mapname, title = None, null = None, overwrite = None):
        """!Write array into raster map

        @param mapname name for raster map
        @param title title for raster map
        @param null null value
        @param overwrite True for overwriting existing raster maps

        @return 0 on success
        @return non-zero code on failure
        """
        kind = self.dtype.kind
        size = self.dtype.itemsize

        # r.in.bin uses 'f'/'d' for single/double floats and infers
        # integer width from the bytes argument.
        if kind == 'f':
            if size == 4:
                flags = 'f'
            elif size == 8:
                flags = 'd'
            else:
                raise ValueError(_('Invalid FP size <%d>') % size)
            size = None
        elif kind in 'biu':
            if size not in [1, 2, 4]:
                raise ValueError(_('Invalid integer size <%d>') % size)
            flags = None
        else:
            raise ValueError(_('Invalid kind <%s>') % kind)

        reg = grass.region()

        return grass.run_command(
            'r.in.bin',
            flags = flags,
            input = self.filename,
            output = mapname,
            title = title,
            bytes = size,
            anull = null,
            overwrite = overwrite,
            verbose = True,
            north = reg['n'],
            south = reg['s'],
            east = reg['e'],
            west = reg['w'],
            rows = reg['rows'],
            cols = reg['cols'])
|
DESHRAJ/fjord | vendor/packages/nose/unit_tests/test_core.py | Python | bsd-3-clause | 2,069 | 0.002417 | import os
import sys
import unittest
from cStringIO import StringIO
from optparse import OptionParser
import nose.core
from nose.config import Config
from nose.tools import set_trace
from mock import Bucket, MockOptParser
class NullLoader:
    """Test loader stub that always yields an empty suite."""

    def loadTestsFromNames(self, names):
        # The requested names are deliberately ignored; callers only
        # need a valid (empty) TestSuite back.
        empty_suite = unittest.TestSuite()
        return empty_suite
class TestAPI_run(unittest.TestCase):
def test_restore_stdout(self):
print "AHOY"
s = StringIO()
print s
stdout = sys.s | tdout
conf = Config(stream=s)
# set_trace()
print "About to run"
res = nose.core.run(
testLoader=NullLoader(), argv=['test_run'], env={}, config=conf)
print "Done running"
stdout_after = sys.stdout
self.assertEqual(stdout, stdout_after)
class Undefined(object):
    """Sentinel type used to distinguish a missing ``nose.__loader__``
    attribute from one that is set to None."""
    pass
class TestUsage(unittest.TestCase):

    def test_from_directory(self):
        # Usage text is normally loaded from nose's install directory.
        usage_txt = nose.core.TestProgram.usage()
        assert usage_txt.startswith('nose collects tests automatically'), (
            # Escape newlines so the failure message stays on one line.
            "Unexpected usage: '%s...'" % usage_txt[0:50].replace("\n", '\\n'))

    def test_from_zip(self):
        requested_data = []

        # simulates importing nose from a zip archive
        # with a zipimport.zipimporter instance
        class fake_zipimporter(object):

            def get_data(self, path):
                requested_data.append(path)
                # Return as str in Python 2, bytes in Python 3.
                return '<usage>'.encode('utf-8')

        existing_loader = getattr(nose, '__loader__', Undefined)
        try:
            nose.__loader__ = fake_zipimporter()
            usage_txt = nose.core.TestProgram.usage()
            self.assertEqual(usage_txt, '<usage>')
            self.assertEqual(requested_data, [os.path.join(
                os.path.dirname(nose.__file__), 'usage.txt')])
        finally:
            # Restore (or remove) the patched loader even on failure.
            if existing_loader is not Undefined:
                nose.__loader__ = existing_loader
            else:
                del nose.__loader__
if __name__ == '__main__':
unittest.main()
|
eonpatapon/rally | rally/plugins/openstack/types.py | Python | apache-2.0 | 5,962 | 0 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and | limitations
# under the License.
from rally.common.plugin import plugin
from rally import exceptions
from rally.task import types
@plugin.configure(name="nova_flavor")
class Flavor(types.ResourceType):
    """Resolves a nova flavor reference to its id."""

    @classmethod
    def transform(cls, clients, resource_config):
        """Transform the resource config to id.

        :param clients: openstack admin client handles
        :param resource_config: scenario config with `id`, `name` or `regex`

        :returns: id matching resource
        """
        flavor_id = resource_config.get("id")
        if flavor_id:
            return flavor_id
        # No explicit id: resolve by name/regex against nova's listing.
        return types._id_from_name(
            resource_config=resource_config,
            resources=clients.nova().flavors.list(),
            typename="flavor")
@plugin.configure(name="ec2_flavor")
class EC2Flavor(types.ResourceType):

    @classmethod
    def transform(cls, clients, resource_config):
        """Transform the resource config to name.

        In the case of using EC2 API, flavor name is used for launching
        servers (EC2 has no notion of the OpenStack flavor id).

        :param clients: openstack admin client handles
        :param resource_config: scenario config with `id`, `name` or `regex`

        :returns: name matching resource
        """
        resource_name = resource_config.get("name")
        if not resource_name:
            # NOTE(wtakase): gets resource name from OpenStack id
            novaclient = clients.nova()
            resource_name = types._name_from_id(
                resource_config=resource_config,
                resources=novaclient.flavors.list(),
                typename="flavor")
        return resource_name
@plugin.configure(name="glance_image")
class GlanceImage(types.ResourceType):
    """Resolves a glance image reference to its id."""

    @classmethod
    def transform(cls, clients, resource_config):
        """Transform the resource config to id.

        :param clients: openstack admin client handles
        :param resource_config: scenario config with `id`, `name` or `regex`

        :returns: id matching resource
        """
        image_id = resource_config.get("id")
        if image_id:
            return image_id
        # Resolve by name/regex against the full glance image listing.
        return types._id_from_name(
            resource_config=resource_config,
            resources=list(clients.glance().images.list()),
            typename="image")
@plugin.configure(name="ec2_image")
class EC2Image(types.ResourceType):

    @classmethod
    def transform(cls, clients, resource_config):
        """Transform the resource config to EC2 id.

        If OpenStack resource id is given, this function gets resource name
        from the id and then gets EC2 resource id from the name.

        :param clients: openstack admin client handles
        :param resource_config: scenario config with `id`, `name` or `regex`

        :returns: EC2 id matching resource
        """
        if "name" not in resource_config and "regex" not in resource_config:
            # NOTE(wtakase): gets resource name from OpenStack id
            glanceclient = clients.glance()
            resource_name = types._name_from_id(
                resource_config=resource_config,
                resources=list(glanceclient.images.list()),
                typename="image")
            # NOTE(review): this mutates the caller's resource_config by
            # caching the resolved name -- confirm that is intended.
            resource_config["name"] = resource_name

        # NOTE(wtakase): gets EC2 resource id from name or regex
        ec2client = clients.ec2()
        resource_ec2_id = types._id_from_name(
            resource_config=resource_config,
            resources=list(ec2client.get_all_images()),
            typename="ec2_image")
        return resource_ec2_id
@plugin.configure(name="cinder_volume_type")
class VolumeType(types.ResourceType):
    """Resolves a cinder volume type reference to its id."""

    @classmethod
    def transform(cls, clients, resource_config):
        """Transform the resource config to id.

        :param clients: openstack admin client handles
        :param resource_config: scenario config with `id`, `name` or `regex`

        :returns: id matching resource
        """
        type_id = resource_config.get("id")
        if type_id:
            return type_id
        # Resolve by name/regex against cinder's volume type listing.
        cinderclient = clients.cinder()
        return types._id_from_name(
            resource_config=resource_config,
            resources=cinderclient.volume_types.list(),
            typename="volume_type")
@plugin.configure(name="neutron_network")
class NeutronNetwork(types.ResourceType):
    """Resolves a neutron network reference to its id."""

    @classmethod
    def transform(cls, clients, resource_config):
        """Transform the resource config to id.

        :param clients: openstack admin client handles
        :param resource_config: scenario config with `id`, `name` or `regex`

        :returns: id matching resource
        """
        network_id = resource_config.get("id")
        if network_id:
            return network_id

        wanted_name = resource_config.get("name")
        # Linear scan for an exact name match; neutron has no
        # name-based lookup in this code path.
        for network in clients.neutron().list_networks()["networks"]:
            if network["name"] == wanted_name:
                return network["id"]

        raise exceptions.InvalidScenarioArgument(
            "Neutron network with name '{name}' not found".format(
                name=resource_config.get("name")))
|
bcl/pykickstart | tests/commands/monitor.py | Python | gpl-2.0 | 2,924 | 0.002394 | #
# James Laska <jlaska@redhat.com>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import CommandTest
from pykickstart.base import DeprecatedCommand
from pykickstart.commands.monitor import FC6_Monitor
class FC3_TestCase(CommandTest):
    command = "monitor"

    def runTest(self):
        # pass
        self.assert_parse("monitor", "")
        self.assert_parse("monitor --hsync=HSYNC", "monitor --hsync=HSYNC\n")
        self.assert_parse("monitor --vsync=VSYNC", "monitor --vsync=VSYNC\n")
        self.assert_parse("monitor --monitor=MONITOR", "monitor --monitor=\"MONITOR\"\n")
        self.assert_parse("monitor --hsync=HSYNC --monitor=MONITOR",
                          "monitor --hsync=HSYNC --monitor=\"MONITOR\"\n")
        self.assert_parse("monitor --monitor=MONITOR --vsync=VSYNC",
                          "monitor --monitor=\"MONITOR\" --vsync=VSYNC\n")
        self.assert_parse("monitor --hsync=HSYNC --monitor=MONITOR --vsync=VSYNC",
                          "monitor --hsync=HSYNC --monitor=\"MONITOR\" --vsync=VSYNC\n")

        # fail
        self.assert_parse_error("monitor BOGUS")
        self.assert_parse_error("monitor --monitor=SOMETHING GREAT")

        # --noprobe only parses on versions whose grammar includes it.
        if "--noprobe" not in self.optionList:
            self.assert_parse_error("monitor --noprobe")
class FC6_TestCase(FC3_TestCase):
    def runTest(self):
        """Run the FC3 checks, then verify FC6's --noprobe handling."""
        FC3_TestCase.runTest(self)
        # pass
        self.assert_parse("monitor --noprobe", "monitor --noprobe\n")
        # fail: --noprobe takes no argument
        self.assert_parse_error("monitor --noprobe 1")
        # assert default values
        self.assertTrue(FC6_Monitor().probe)
class F10_TestCase(FC6_TestCase):
    def runTest(self):
        """Verify the monitor command is marked deprecated in Fedora 10."""
        # make sure we've been deprecated
        parser = self.getParser("monitor")
        self.assertEqual(issubclass(parser.__class__, DeprecatedCommand), True)
        parser = parser._getParser()
        self.assertIsNotNone(parser)
        self.assertTrue(parser.description.find('deprecated:: Fedora10') > -1)
if __name__ == "__main__":
unittest.main()
|
SpyDeX/BeepMiBot | bot/event_handler.py | Python | mit | 3,806 | 0.011823 | import json
import logging
import re
logger = logging.getLogger(__name__)
csRegStr4Hash = '\B(?P<Type>[\@\#\$\%\&])(?P<Text>\S+)'
def FindHashes(srcStr, RegStr):
    """Extract sigil-prefixed tokens (e.g. ``@user``, ``#tag``) from a string.

    :param srcStr: text to scan.
    :param RegStr: regex with two named groups -- the sigil character
        (``Type``) and the token text (``Text``); see ``csRegStr4Hash``.
    :returns: list of single-entry dicts ``{sigil: token}`` in match
        order, or ``None`` when nothing matched.
    """
    # Original code built "r'' + RegStr" (a no-op) and carried an unused
    # enumerate counter; both removed.
    tokens = []
    for match in re.finditer(RegStr, srcStr, re.IGNORECASE | re.UNICODE):
        sigil, text = match.groups()
        tokens.append({sigil: text})
    return tokens if tokens else None
def Split_CMD(SrcMessage):
    """Parse a chat message into a command descriptor.

    Recognizes ``mcc?``/``mcc+``/``mcc-`` management commands and
    ``sms:`` messages (whose @/#/$/%/& tokens are extracted).

    :param SrcMessage: raw message text.
    :returns: dict describing the command, or None when unrecognized.
    """
    if SrcMessage.startswith('mcc'):
        # Map the mcc suffix character to a sub-command.
        if SrcMessage.startswith('mcc?'):
            return {"cmd": "lst"}
        if SrcMessage.startswith('mcc+'):
            return {"cmd": "add"}
        if SrcMessage.startswith('mcc-'):
            return {"cmd": "del"}
    if SrcMessage.startswith('sms:'):
        # SMS commands carry optional sigil tokens (may be None).
        hashes = FindHashes(SrcMessage, csRegStr4Hash)
        if hashes:
            return {"cmd": "sms", 'hashes': hashes}
        else:
            return {"cmd": "sms", 'hashes': None}
    return None
class RtmEventHandler(object):
    """Dispatches Slack RTM events to the appropriate writer actions."""

    def __init__(self, slack_clients, msg_writer):
        self.clients = slack_clients
        self.msg_writer = msg_writer
        self.log = {}

    def handle(self, event):
        """Entry point: route an RTM event by its 'type' field."""
        if 'type' in event:
            self._handle_by_type(event['type'], event)

    def _handle_by_type(self, event_type, event):
        # See https://api.slack.com/rtm for a full list of events
        if event_type == 'error':
            # error
            self.msg_writer.write_error(event['channel'], json.dumps(event))
        elif event_type == 'message':
            # message was sent to channel
            self._handle_message(event)
        elif event_type == 'channel_joined':
            # you joined a channel
            self.msg_writer.write_help_message(event['channel'])
        elif event_type == 'group_joined':
            # you joined a private group
            self.msg_writer.write_help_message(event['channel'])
        else:
            # All other event types are deliberately ignored.
            pass

    def _handle_message(self, event):
        # Filter out messages from the bot itself, and from non-users (eg. webhooks)
        if ('user' in event) and (not self.clients.is_message_from_me(event['user'])):
            msg_txt = event['text']
            if self.clients.is_bot_mention(msg_txt) or self._is_direct_message(event['channel']):
                # e.g. user typed: "@pybot tell me a joke!"
                MCC = Split_CMD(msg_txt)
                if MCC:
                    self.msg_writer.send_message(event['channel'], 'This is my message! ' + json.dumps(MCC))
                elif '?' == msg_txt:
                    self.msg_writer.send_message(event['channel'], json.dumps(event) + '\n' + json.dumps(self.log))
                elif 'help' in msg_txt:
                    self.msg_writer.write_help_message(event['channel'])
                elif re.search('hi|hey|hello|howdy', msg_txt):
                    self.msg_writer.write_greeting(event['channel'], event['user'])
                elif 'joke' in msg_txt:
                    self.msg_writer.write_joke(event['channel'])
                elif 'attachment' in msg_txt:
                    self.msg_writer.demo_attachment(event['channel'])
                elif 'echo' in msg_txt:
                    self.msg_writer.send_message(event['channel'], msg_txt)
                else:
                    self.msg_writer.write_prompt(event['channel'])

    def _is_direct_message(self, channel):
        """Check if channel is a direct message channel

        Args:
            channel (str): Channel in which a message was received
        """
        # Slack DM channel ids start with 'D'.
        return channel.startswith('D')
|
OpenNingia/l5r-character-manager-3 | l5r/dialogs/managedatapack.py | Python | gpl-3.0 | 5,427 | 0.000369 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2022 Daniele Simonetti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from PyQt5 import QtCore, QtGui, QtWidgets
from l5r.util.settings import L5RCMSettings
class DataPackModel(QtCore.QAbstractTableModel):
    """Table model listing available data packs (name, language, version,
    authors) with a checkbox in column 0 to toggle each pack."""

    def __init__(self, parent=None):
        super(DataPackModel, self).__init__(parent)
        self.items = []
        self.headers = [self.tr('Name'),
                        self.tr('Language'),
                        self.tr('Version'),
                        self.tr('Authors')]
        self.settings = L5RCMSettings()

    def rowCount(self, parent=QtCore.QModelIndex()):
        return len(self.items)

    def columnCount(self, parent=QtCore.QModelIndex()):
        return len(self.headers)

    def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
        # Only horizontal (column) headers carry text.
        if orientation != QtCore.Qt.Horizontal:
            return None
        if role == QtCore.Qt.DisplayRole:
            return self.headers[section]
        return None

    def data(self, index, role=QtCore.Qt.UserRole):
        if not index.isValid() or index.row() >= len(self.items):
            return None
        item = self.items[index.row()]
        if role == QtCore.Qt.DisplayRole:
            if index.column() == 0:
                return item.display_name
            if index.column() == 1:
                return item.language or self.tr("All")
            if index.column() == 2:
                return item.version or self.tr("N/A")
            if index.column() == 3:
                return ", ".join(item.authors) if (item.authors is not None) else ""
        elif role == QtCore.Qt.ForegroundRole:
            # Alternate row colors come from the user settings.
            if index.row() % 2:
                return self.settings.ui.table_row_color_alt_fg
            return self.settings.ui.table_row_color_fg
        elif role == QtCore.Qt.BackgroundRole:
            if index.row() % 2:
                return self.settings.ui.table_row_color_alt_bg
            return self.settings.ui.table_row_color_bg
        elif role == QtCore.Qt.SizeHintRole:
            return self.settings.ui.table_row_size
        elif role == QtCore.Qt.UserRole:
            # Raw pack object for programmatic access.
            return item
        elif role == QtCore.Qt.CheckStateRole:
            return self.__checkstate_role(item, index.column())
        return None

    def setData(self, index, value, role):
        if not index.isValid():
            return False
        ret = False
        item = self.items[index.row()]
        self.dirty = True
        # Column 0 hosts the "active" checkbox.
        if index.column() == 0 and role == QtCore.Qt.CheckStateRole:
            item.active = (value == QtCore.Qt.Checked)
            ret = True
        else:
            ret = super(DataPackModel, self).setData(index, value, role)
        return ret

    def flags(self, index):
        if not index.isValid():
            return QtCore.Qt.ItemIsDropEnabled
        flags = QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
        if index.column() == 0:
            flags |= QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEditable
        return flags

    def __checkstate_role(self, item, column):
        if column == 0:
            return QtCore.Qt.Checked if item.active else QtCore.Qt.Unchecked
        return None

    def add_item(self, item):
        row = self.rowCount()
        self.beginInsertRows(QtCore.QModelIndex(), row, row)
        self.items.append(item)
        self.endInsertRows()

    def clean(self):
        self.beginResetModel()
        self.items = []
        self.endResetModel()
class ManageDataPackDlg(QtWidgets.QDialog):
    """Modal dialog that lets the user enable/disable data packs."""

    def __init__(self, dstore, parent=None):
        super(ManageDataPackDlg, self).__init__(parent)
        self.dstore = dstore
        self.build_ui()
        self.load_data()

    def build_ui(self):
        # Static widget construction only; data binding happens in load_data.
        self.setWindowTitle(self.tr("Data Pack Manager"))
        vbox = QtWidgets.QVBoxLayout(self)
        grp = QtWidgets.QGroupBox(self.tr("Available data packs"))
        self.view = QtWidgets.QTableView(self)
        vbox2 = QtWidgets.QVBoxLayout(grp)
        vbox2.addWidget(self.view)
        bts = QtWidgets.QDialogButtonBox()
        bts.addButton(self.tr("Discard"), QtWidgets.QDialogButtonBox.RejectRole)
        bts.addButton(self.tr("Save"), QtWidgets.QDialogButtonBox.AcceptRole)
        vbox.addWidget(grp)
        vbox.addWidget(bts)
        bts.accepted.connect(self.on_accept)
        bts.rejected.connect(self.reject)
        self.setMinimumSize(QtCore.QSize(440, 330))

    def load_data(self):
        from copy import deepcopy
        # Work on a deep copy so "Discard" leaves the store untouched.
        self.packs = deepcopy(self.dstore.packs)
        model = DataPackModel(self)
        for pack in self.packs:
            model.add_item(pack)
        self.view.setModel(model)

    def on_accept(self):
        # Commit the (possibly modified) pack list back to the store.
        self.dstore.packs = self.packs
        self.accept()
|
alxgu/ansible-modules-extras | system/firewalld.py | Python | gpl-3.0 | 20,455 | 0.006698 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Adam Miller (maxamillion@fedoraproject.org)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: firewalld
short_description: Manage arbitrary ports/services with firewalld
description:
- This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules.
version_added: "1.4"
options:
service:
description:
- "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services."
required: false
default: null
port:
description:
- "Name of a port or port range to add/remove to/from firewalld. Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges."
required: false
default: null
rich_rule:
description:
- "Rich rule to add/remove to/from firewalld."
required: false
default: null
source:
description:
- 'The source/network you would like to add/remove to/from firewalld'
required: false
default: null
version_added: "2.0"
interface:
description:
- 'The interface you would like to add/remove to/from a zone in firewalld'
required: false
default: null
version_added: "2.1"
zone:
description:
- 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).'
required: false
default: system-default(public)
choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block" ]
permanent:
description:
- "Should this configuration be in the running firewalld configuration or persist across reboots."
required: false
default: null
immediate:
description:
- "Should this configuration be applied immediately, if set as permanent"
required: false
default: false
version_added: "1.9"
state:
description:
- "Should this port accept(enabled) or reject(disabled) connections."
required: true
choices: [ "enabled", "disabled" ]
timeout:
description:
- "The amount of time the rule should be in effect for when non-permanent."
required: false
default: 0
masquerade:
description:
- 'The masquerade setting you would like to enable/disable to/from zones within firewalld'
required: false
default: null
version_added: "2.1"
notes:
- Not tested on any Debian based system.
- Requires the python2 bindings of firewalld, who may not be installed by default if the distribution switched to python 3
requirements: [ 'firewalld >= 0.2.11' ]
author: "Adam Miller (@maxamillion)"
'''
EXAMPLES = '''
- firewalld: service=https permanent=true state=enabled
- firewalld: port=8081/tcp permanent=true state=disabled
- firewalld: port=161-162/udp permanent=true state=enabled
- firewalld: zone=dmz service=http permanent=true state=enabled
- firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled
- firewalld: source='192.168.1.0/24' zone=internal state=enabled
- firewalld: zone=trusted interface=eth2 permanent=true state=enabled
- firewalld: masquerade=yes state=enabled permanent=true zone=dmz
'''
import os
import re
try:
import firewall.config
FW_VERSION = firewall.config.VERSION
from firewall.client import Rich_Rule
from firewall.client import FirewallClient
fw = FirewallClient()
if not fw.connected:
HAS_FIREWALLD = False
else:
HAS_FIREWALLD = True
except ImportError:
HAS_FIREWALLD = False
#####################
# masquerade handling
#
def get_masquerade_enabled(zone):
    """Return True if masquerading is enabled in the running config."""
    # The comparison itself yields the boolean; the original
    # if/else-True/False branches were redundant.
    return fw.queryMasquerade(zone) == True
def get_masquerade_enabled_permanent(zone):
    """Return True if masquerading is enabled in the permanent config."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    # The comparison itself yields the boolean; the original
    # if/else-True/False branches were redundant.
    return fw_settings.getMasquerade() == True
def set_masquerade_enabled(zone):
fw.addMasquerade(zone)
def set_masquerade_disabled(zone):
fw.removeMasquerade(zone)
def set_masquerade_permanent(zone, masquerade):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
fw_settings.setMasquerade(masquerade)
fw_zone.update(fw_settings)
################
# port handling
#
def get_port_enabled(zone, port_proto):
    """Return True if the (port, protocol) pair is active in the zone."""
    # Membership test already yields a bool; the original
    # if/else-True/False branches were redundant.
    return port_proto in fw.getPorts(zone)
def set_port_enabled(zone, port, protocol, timeout):
fw.addPort(zone, port, protocol, timeout)
def set_port_disabled(zone, port, protocol):
fw.removePort(zone, port, protocol)
def get_port_enabled_permanent(zone, port_proto):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
if tuple(port_proto) in fw_settings.getPorts():
return True
else:
return False
def set_port_enabled_permanent(zone, port, protocol):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
fw_settings.addPort(port, protocol)
fw_zone.update(fw_settings)
def set_port_disabled_permanent(zone, port, protocol):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
fw_settings.removePort(port, protocol)
fw_zone.update(fw_settings)
####################
# source handling
#
def get_source(zone, source):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
if source in fw_settings.getSources():
return True
else:
return False
def add_source(zone, source):
    """Permanently add a source network/address to the zone."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    fw_settings.addSource(source)
    fw_zone.update(fw_settings)
def remove_source(zone, source):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
fw_settings.removeSource(source)
fw_zone.update(fw_settings)
####################
# interface handling
#
def get_interface(zone, interface):
    """Return True if the interface is bound to the zone (permanent config)."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    # Membership test already yields a bool; the original
    # if/else-True/False branches were redundant.
    return interface in fw_settings.getInterfaces()
def add_interface(zone, interface):
    # Bind *interface* to *zone* in the permanent configuration.
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    fw_settings.addInterface(interface)
    fw_zone.update(fw_settings)
def remove_interface(zone, interface):
    # Unbind *interface* from *zone* in the permanent configuration.
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    fw_settings.removeInterface(interface)
    fw_zone.update(fw_settings)
####################
# service handling
#
def get_service_enabled(zone, service):
    """Return True if *service* is enabled on *zone* in the runtime config."""
    # The membership test is already a boolean; no if/else needed.
    return service in fw.getServices(zone)
def set_service_enabled(zone, service, timeout):
    # Enable *service* on *zone* at runtime.
    # NOTE(review): *timeout* presumably expires the rule after that many
    # seconds (0 = no expiry) — confirm against the firewalld client API.
    fw.addService(zone, service, timeout)
def set_service_disabled(zone, service):
    # Disable *service* on *zone* in the runtime configuration.
    fw.removeService(zone, service)
def get_service_enabled_permanent(zone, service):
    """Return True if *service* is enabled in *zone*'s permanent config."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    # Direct boolean return instead of if/return True/else/return False.
    return service in fw_settings.getServices()
def set_service_enabled_permanent(zone, service):
    # Enable *service* on *zone* in the permanent (on-disk) configuration.
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    fw_settings.addService(service)
    fw_zone.update(fw_settings)
def set_service_disabled |
Battleroid/yanker | yanker/yanker.py | Python | mit | 2,508 | 0.000797 | """
Yanker
Usage:
yanker [--threads=<tnum>]
"""
__version__ = '1.0.1'
import Queue
import threading
import youtube_dl as ydl
import pyperclip as clip
import time
from docopt import docopt
class ErrLogger(object):
    """youtube-dl logger that suppresses debug/warning output, printing only errors."""
    def debug(self, msg):
        pass
    def warning(self, msg):
        pass
    def error(self, msg):
        print msg
class Worker(threading.Thread):
    """Daemon thread that pulls Video tasks off a queue and downloads them."""
    def __init__(self, tasks):
        threading.Thread.__init__(self)
        self.tasks = tasks
        # Daemonize so in-flight workers never block interpreter shutdown.
        self.daemon = True
        # Start immediately; the thread lives for the whole process.
        self.start()
    def run(self):
        # Block on the queue forever; task_done() lets queue.join() callers sync.
        while True:
            vid = self.tasks.get()
            vid.download()
            self.tasks.task_done()
class Video:
    """A single URL to be downloaded via youtube-dl."""
    def progress(self, s):
        # youtube-dl progress hook: only announce completed downloads.
        if s['status'] == 'finished':
            print 'Finished {}'.format(s['filename'])
    def __init__(self, url, opts={}):
        # NOTE(review): mutable default ``opts={}`` is shared across calls;
        # harmless here because it is only read, but worth fixing eventually.
        self.url = url
        self.ydl_opts = {
            'progress_hooks': [self.progress],
            'logger': ErrLogger()
        }
        # Caller-supplied options override the defaults above.
        self.ydl_opts.update(opts)
    def download(self):
        print 'Downloading: {}'.format(self.url)
        with ydl.YoutubeDL(self.ydl_opts) as y:
            try:
                y.download([self.url])
            except ydl.DownloadError:
                # Non-video URL (or unsupported site): skip quietly.
                print 'Unsupported URL, skipping'
class Watcher:
def __init__(self, urls=[], threads=2):
self.queue = Queue.Queue(0)
self.threads = threads
self.stopped = False
self.grabbed_urls = set([])
for _ in range(threads): Worker(self.queue)
def run(self):
recent = ''
while not self.stopped:
current = clip.paste()
if recent != current:
recent = current
if current.startswith(('http://', 'https://',)) and current not in self.grabbed_urls:
print 'Added: {}'.format(current)
self.grabbed_urls.add(current)
self.queue.put(Video(current))
elif current in self.grabbed_urls:
print 'Already grabbed {}'.format(current)
time.sleep(0.25)
def run():
    """Console entry point: parse CLI args and watch the clipboard forever."""
    args = docopt(__doc__, version='Yanker {}'.format(__version__))
    threads = args['--threads']
    if not threads:
        threads = 2
    else:
        threads = int(threads)
    print 'Starting Yanker with {} threads...'.format(threads)
    watch = Watcher(threads=threads)
    try:
        watch.run()
    except KeyboardInterrupt:
        # Ctrl-C: ask the polling loop to exit; daemon workers die with us.
        print 'Stopping...'
        watch.stopped = True
|
snapcore/snapcraft | tools/collect_ppa_autopkgtests_results.py | Python | gpl-3.0 | 2,694 | 0.001485 | #!/usr/bin/env python3
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017 Canonical Ltd
#
# This program is free softwar | e: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General | Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import os
import subprocess
import tempfile
ACTIVE_DISTROS = ("xenial", "artful", "bionic")
def main():
    """Collect one day of PPA autopkgtest results and publish them to GitHub."""
    parser = argparse.ArgumentParser()
    parser.add_argument("day", help="The day of the results, with format yyyymmdd")
    args = parser.parse_args()
    install_autopkgtest_results_formatter()
    # Work in a throwaway checkout under $HOME; it is removed on exit.
    with tempfile.TemporaryDirectory(dir=os.environ.get("HOME")) as temp_dir:
        clone_results_repo(temp_dir)
        format_results(temp_dir, ACTIVE_DISTROS, args.day)
        commit_and_push(temp_dir, args.day)
def install_autopkgtest_results_formatter():
    # Install the formatter snap from the edge channel; requires sudo rights.
    subprocess.check_call(
        ["sudo", "snap", "install", "autopkgtest-results-formatter", "--edge"]
    )
def clone_results_repo(dest_dir):
    # Clone the published results repository into *dest_dir*.
    subprocess.check_call(
        ["git", "clone", "https://github.com/elopio/autopkgtest-results.git", dest_dir]
    )
def format_results(dest_dir, distros, day):
    # Run the formatter snap for every distro in *distros* on *day*
    # (yyyymmdd), writing formatted results into *dest_dir*.
    subprocess.check_call(
        [
            "/snap/bin/autopkgtest-results-formatter",
            "--destination",
            dest_dir,
            "--distros",
            *distros,
            "--day",
            day,
        ]
    )
def commit_and_push(repo_dir, day):
    """Commit every change in *repo_dir* and push it to the results repo.

    The push URL embeds the GH_TOKEN_PPA_AUTOPKGTEST_RESULTS token from the
    environment for authentication.
    """
    def git(*arguments):
        # Thin wrapper so each git invocation below reads as a single line.
        subprocess.check_call(["git"] + list(arguments))

    git("config", "--global", "user.email", "u1test+m-o@canonical.com")
    git("config", "--global", "user.name", "snappy-m-o")
    git("-C", repo_dir, "add", "--all")
    git("-C", repo_dir, "commit", "--message",
        "Add the results for {}".format(day))
    git(
        "-C",
        repo_dir,
        "push",
        "https://{GH_TOKEN}@github.com/elopio/autopkgtest-results.git".format(
            GH_TOKEN=os.environ.get("GH_TOKEN_PPA_AUTOPKGTEST_RESULTS")
        ),
    )
if __name__ == "__main__":
main()
|
dunkhong/grr | grr/core/grr_response_core/lib/type_info_test.py | Python | apache-2.0 | 5,306 | 0.002271 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
"""Tests for grr.lib.type_info."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from grr_response_core.lib import type_info
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr.test_lib import test_lib
class TypeInfoTest(test_lib.GRRBaseTest):
  """Unit tests for the basic type_info descriptor classes."""
  def testTypeInfoBoolObjects(self):
    """Test the type info objects behave as expected."""
    a = type_info.Bool()
    self.assertRaises(type_info.TypeValueError, a.Validate, 2)
    self.assertRaises(type_info.TypeValueError, a.Validate, None)
    a.Validate(True)
    # 1 is a valid substitute for True.
    a.Validate(1)
  def testTypeInfoStringObjects(self):
    """Test the type info objects behave as expected."""
    a = type_info.String()
    self.assertRaises(type_info.TypeValueError, a.Validate, 1)
    self.assertRaises(type_info.TypeValueError, a.Validate, None)
    a.Validate("test")
    a.Validate(u"test")
    # Non-latin unicode must be accepted as well.
    a.Validate(u"/test-Îñ铁网åţî[öñåļ(îžåţîờñ")
  def testTypeInfoNumberObjects(self):
    """Test the type info objects behave as expected."""
    a = type_info.Integer()
    # Numeric-looking strings are still rejected.
    self.assertRaises(type_info.TypeValueError, a.Validate, "1")
    self.assertRaises(type_info.TypeValueError, a.Validate, "hello")
    a.Validate(1231232)
    a.Validate(-2)
  def testTypeInfoListObjects(self):
    """Test List objects."""
    a = type_info.List(type_info.Integer())
    self.assertRaises(type_info.TypeValueError, a.Validate, 1)
    self.assertRaises(type_info.TypeValueError, a.Validate, "test")
    self.assertRaises(type_info.TypeValueError, a.Validate, None)
    self.assertRaises(type_info.TypeValueError, a.Validate, ["test"])
    self.assertRaises(type_info.TypeValueError, a.Validate,
                      [rdf_paths.PathSpec()])
    a.Validate([1, 2, 3])
  def testTypeInfoListConvertsObjectsOnValidation(self):
    """Test List objects return validated objects."""
    class TypeInfoFoo(type_info.Integer):
      # Doubling makes it observable that Validate()'s return value is used.
      def Validate(self, value):
        return value * 2
    a = type_info.List(TypeInfoFoo())
    self.assertEqual(a.Validate([1, 2, 3]), [2, 4, 6])
  def testTypeInfoMultiChoiceObjects(self):
    """Test MultiChoice objects."""
    a = type_info.MultiChoice(choices=["a", "b"])
    # A bare string (not a list) is invalid.
    self.assertRaises(type_info.TypeValueError, a.Validate, "a")
    self.assertRaises(type_info.TypeValueError, a.Validate, ["test"])
    self.assertRaises(type_info.TypeValueError, a.Validate, ["a", "test"])
    # Duplicate selections are invalid.
    self.assertRaises(type_info.TypeValueError, a.Validate, ["a", "a"])
    self.assertRaises(type_info.TypeValueError, a.Validate, None)
    self.assertRaises(type_info.TypeValueError, a.Validate, [1])
    self.assertRaises(type_info.TypeValueError, a.Validate, 1)
    a.Validate(["a"])
    a.Validate(["a", "b"])
    with self.assertRaises(type_info.TypeValueError):
      type_info.MultiChoice(choices=[1, 2])
    # Non-string choices require an explicit validator.
    a = type_info.MultiChoice(choices=[1, 2], validator=type_info.Integer())
    self.assertRaises(type_info.TypeValueError, a.Validate, "a")
    self.assertRaises(type_info.TypeValueError, a.Validate, ["test"])
    a.Validate([2])
    a.Validate([1, 2])
  def testStructDictType(self):
    """Test RDFStructDictType type."""
    a = type_info.RDFStructDictType(rdfclass=rdf_paths.PathSpec)
    self.assertRaises(type_info.TypeValueError, a.Validate, 2)
    self.assertRaises(type_info.TypeValueError, a.Validate, "ab")
    self.assertRaises(type_info.TypeValueError, a.Validate, [1, 2])
    # None values are left as is.
    self.assertEqual(None, a.Validate(None))
    # Check that validation raises when unknown attributes are passed.
    self.assertRaises(type_info.TypeValueError, a.Validate, dict(foo="bar"))
    # A valid dict is converted to an instance of the rdfclass.
    v = a.Validate(dict(path="blah", pathtype="TSK"))
    self.assertIsInstance(v, rdf_paths.PathSpec)
    self.assertEqual(v.path, "blah")
    self.assertEqual(v.pathtype, "TSK")
  def testTypeDescriptorSet(self):
    """Descriptor sets support +, += and a non-mutating Remove()."""
    type_infos = [
        type_info.String(name="output", default="analysis/{p}/{u}-{t}"),
        type_info.String(
            description="Profile to use.", name="profile", default=""),
        type_info.String(
            description="A comma separated list of plugins.",
            name="plugins",
            default=""),
    ]
    info = type_info.TypeDescriptorSet(
        type_infos[0],
        type_infos[1],
        type_infos[2],
    )
    new_info = type_info.TypeDescriptorSet(type_infos[0],)
    updated_info = new_info + type_info.TypeDescriptorSet(type_infos[1],)
    updated_info += type_info.TypeDescriptorSet(type_infos[2],)
    self.assertEqual(info.descriptor_map, updated_info.descriptor_map)
    self.assertCountEqual(info.descriptors, updated_info.descriptors)
    self.assertIn(type_infos[1], updated_info.descriptors)
    self.assertIn("plugins", updated_info)
    removed_info = updated_info.Remove("plugins")
    # Remove() must not mutate the set it was called on.
    self.assertIn(type_infos[1], updated_info.descriptors)
    self.assertIn("plugins", updated_info)
    self.assertNotIn(type_infos[2], removed_info.descriptors)
    self.assertNotIn("plugins", removed_info)
def main(args):
  """Entry point: delegate to the GRR test runner."""
  test_lib.main(args)
if __name__ == "__main__":
  # absl parses flags before invoking main().
  app.run(main)
|
kongsally/Deep-Learning-for-Automated-Discourse | FirstChatbot/seq2seq_model.py | Python | mit | 14,119 | 0.003966 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ================================= | =============================================
"""Sequence-to-sequence model with an attention mechanism."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.rnn.translate import data_utils
class Seq2SeqModel(object):
"""Sequen | ce-to-sequence model with attention and for multiple buckets.
This class implements a multi-layer recurrent neural network as encoder,
and an attention-based decoder. This is the same as the model described in
this paper: http://arxiv.org/abs/1412.7449 - please look there for details,
or into the seq2seq library for complete model implementation.
This class also allows to use GRU cells in addition to LSTM cells, and
sampled softmax to handle large output vocabulary size. A single-layer
version of this model, but with bi-directional encoder, was presented in
http://arxiv.org/abs/1409.0473
and sampled softmax is described in Section 3 of the following paper.
http://arxiv.org/abs/1412.2007
"""
def __init__(self,
source_vocab_size,
target_vocab_size,
buckets,
size,
num_layers,
max_gradient_norm,
batch_size,
learning_rate,
learning_rate_decay_factor,
use_lstm=True,
num_samples=512,
forward_only=False,
dtype=tf.float32):
"""Create the model.
Args:
source_vocab_size: size of the source vocabulary.
target_vocab_size: size of the target vocabulary.
buckets: a list of pairs (I, O), where I specifies maximum input length
that will be processed in that bucket, and O specifies maximum output
length. Training instances that have inputs longer than I or outputs
longer than O will be pushed to the next bucket and padded accordingly.
We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
size: number of units in each layer of the model.
num_layers: number of layers in the model.
max_gradient_norm: gradients will be clipped to maximally this norm.
batch_size: the size of the batches used during training;
the model construction is independent of batch_size, so it can be
changed after initialization if this is convenient, e.g., for decoding.
learning_rate: learning rate to start with.
learning_rate_decay_factor: decay learning rate by this much when needed.
use_lstm: if true, we use LSTM cells instead of GRU cells.
num_samples: number of samples for sampled softmax.
forward_only: if set, we do not construct the backward pass in the model.
dtype: the data type to use to store internal variables.
"""
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.buckets = buckets
self.batch_size = batch_size
self.learning_rate = tf.Variable(
float(learning_rate), trainable=False, dtype=dtype)
self.learning_rate_decay_op = self.learning_rate.assign(
self.learning_rate * learning_rate_decay_factor)
self.global_step = tf.Variable(0, trainable=False)
# If we use sampled softmax, we need an output projection.
output_projection = None
softmax_loss_function = None
# Sampled softmax only makes sense if we sample less than vocabulary size.
if num_samples > 0 and num_samples < self.target_vocab_size:
w_t = tf.get_variable("proj_w", [self.target_vocab_size, size], dtype=dtype)
w = tf.transpose(w_t)
b = tf.get_variable("proj_b", [self.target_vocab_size], dtype=dtype)
output_projection = (w, b)
def sampled_loss(inputs, labels):
labels = tf.reshape(labels, [-1, 1])
# We need to compute the sampled_softmax_loss using 32bit floats to
# avoid numerical instabilities.
local_w_t = tf.cast(w_t, tf.float32)
local_b = tf.cast(b, tf.float32)
local_inputs = tf.cast(inputs, tf.float32)
return tf.cast(
tf.nn.sampled_softmax_loss(local_w_t, local_b, local_inputs, labels,
num_samples, self.target_vocab_size),
dtype)
softmax_loss_function = sampled_loss
# Create the internal multi-layer cell for our RNN.
single_cell = tf.nn.rnn_cell.GRUCell(size)
if use_lstm:
single_cell = tf.nn.rnn_cell.BasicLSTMCell(size)
cell = single_cell
if num_layers > 1:
cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers)
# The seq2seq function: we use embedding for the input and attention.
def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
return tf.nn.seq2seq.embedding_attention_seq2seq(
encoder_inputs,
decoder_inputs,
cell,
num_encoder_symbols=source_vocab_size,
num_decoder_symbols=target_vocab_size,
embedding_size=size,
output_projection=output_projection,
feed_previous=do_decode,
dtype=dtype)
# Feeds for inputs.
self.encoder_inputs = []
self.decoder_inputs = []
self.target_weights = []
for i in xrange(buckets[-1][0]): # Last bucket is the biggest one.
self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="encoder{0}".format(i)))
for i in xrange(buckets[-1][1] + 1):
self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="decoder{0}".format(i)))
self.target_weights.append(tf.placeholder(dtype, shape=[None],
name="weight{0}".format(i)))
# Our targets are decoder inputs shifted by one.
targets = [self.decoder_inputs[i + 1]
for i in xrange(len(self.decoder_inputs) - 1)]
# Training outputs and losses.
if forward_only:
self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True),
softmax_loss_function=softmax_loss_function)
# If we use output projection, we need to project outputs for decoding.
if output_projection is not None:
for b in xrange(len(buckets)):
self.outputs[b] = [
tf.matmul(output, output_projection[0]) + output_projection[1]
for output in self.outputs[b]
]
else:
self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets,
lambda x, y: seq2seq_f(x, y, False),
softmax_loss_function=softmax_loss_function)
# Gradients and SGD update operation for training the model.
params = tf.trainable_variables()
if not forward_only:
self.gradient_norms = []
self.updates = []
opt = tf.train.GradientDescentOptimizer(self.learning_rate)
for b in xrange(len(buckets)):
gradients = tf.gradients(self.losses[b], params)
clipped_gradients, norm = tf.clip_by_global_norm(gradients,
max_gradient_norm)
self.gradient_norms.append(norm)
self.updates.append(opt.apply_gradients(
|
django-bmf/django-bmf | djangobmf/core/module.py | Python | bsd-3-clause | 17,355 | 0.00023 | #!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django.conf.urls import patterns
from django.conf.urls import url
from django.contrib.admin.sites import AlreadyRegistered
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured
from django.db.models import signals
from django.http import Http404
from django.utils import six
from django.utils.text import slugify
from rest_framework.reverse import reverse
from djangobmf.core.relationship import DocumentRelationship
from djangobmf.core.serializers.document import DocumentSerializer
from djangobmf.core.workflow import Workflow
from djangobmf.models import Document
from djangobmf.permissions import ModulePermission
from djangobmf.views import ModuleCreateView
from djangobmf.views import ModuleDeleteView
from djangobmf.views import ModuleDetail
from djangobmf.views import ModuleFormAPI
from djangobmf.views import ModuleUpdateView
from djangobmf.views import ModuleWorkflowView
from collections import OrderedDict
import logging
logger = logging.getLogger(__name__)
class Module(object):
"""
Under the ``Module`-class the framework stores every informations
needed to display and manage views and API's. It also provid | es
many functions used in the whole framework.
"""
open_relation = None
workflow_class = None
workflow_field_name = "state"
detail_view = ModuleDetail
def __init__(self, bmfconfig):
# validation
if not hasattr(self, 'model'):
raise ImproperlyConfigur | ed(
'No model defined in %s.' % self.__class__
)
self.bmfconfig = bmfconfig
self._class_reports = {}
self._object_reports = {}
self._relations = []
self.signals_setup()
self.validate_workflow()
# auto add document relationship
if hasattr(self.model, '_bmfmeta') and self.model._bmfmeta.has_files:
class FileDownload(DocumentRelationship):
model_to = self.model
serializer = DocumentSerializer
self.add_relation(FileDownload, Document)
# TODO: OLD OLD OLD
self.create_view = self.create
self.delete_view = self.delete
self.update_view = self.update
# --- misc ----------------------------------------------------------------
def get_contenttype(self): # pragma: no cover
"""
returns the models contenttype
"""
return ContentType.objects.get_for_model(self.model)
# --- single views --------------------------------------------------------
# TODO
def get_update_view(self):
"""
"""
pass
# TODO
def get_delete_view(self):
"""
"""
pass
def get_detail_view(self, request, *args, **kwargs):
"""
generates a detail-view response
"""
if hasattr(self, '_detail_view'):
return self._detail_view(request, *args, **kwargs)
self._detail_view = self.detail_view.as_view(
module=self,
model=self.model
)
return self._detail_view(request, *args, **kwargs)
# --- serialization -------------------------------------------------------
# TODO
def serialize_class(self, request=None):
"""
"""
return OrderedDict([
('app', self.model._meta.app_label),
('creates', self.get_create_views()),
('ct', self.get_contenttype().pk),
('model', self.model._meta.model_name),
('name', self.model._meta.verbose_name_plural),
('open_relation', self.open_relation),
('relations', self.get_relations(request)),
])
# TODO
def serialize_object(self, obj):
"""
"""
return {}
# --- workflow ------------------------------------------------------------
# TODO
def validate_workflow(self):
"""
"""
if self.workflow_class:
if not issubclass(self.workflow_class, Workflow):
raise ImproperlyConfigured(
"%s is not a Workflow in %s" % (
self.workflow_class.__name__,
self.__name__
)
)
# self.workflow = self.workflow_class()
def has_workflow(self):
"""
"""
return bool(self.workflow_class)
# TODO
def get_workflow_states(self, obj):
"""
"""
pass
# TODO
def get_workflow_transitions(self, obj, state_name):
"""
"""
pass
# --- permissions ---------------------------------------------------------
# TODO
def get_permissions(self, obj):
"""
"""
pass
# --- Create views --------------------------------------------------------
def has_create_views(self):
"""
return True if the module has one or more create views
"""
return getattr(self, '_has_create_views', False)
# TODO
def get_create_views(self):
"""
"""
if self.bmfconfig:
namespace_api = '%s:moduleapi_%s_%s' % (
self.bmfconfig.label,
self.model._meta.app_label,
self.model._meta.model_name,
)
return [{
'name': i[1],
'url': reverse(namespace_api + ':create', kwargs={"key": i[0]}),
} for i in self.list_creates()]
return []
# TODO
def get_create_view(self, name):
"""
"""
pass
# TODO
def add_create_view(self, name, view):
"""
"""
pass
self._has_create_views = True
# --- Clone views ---------------------------------------------------------
def has_clone_views(self):
"""
return True if the module has one or more clone views
"""
return getattr(self, '_has_clone_views', False)
# TODO
def get_clone_views(self):
"""
"""
pass
# TODO
def get_clone_view(self, name):
"""
"""
pass
# TODO
def add_clone_view(self, name, view):
"""
"""
pass
self._has_clone_views = True
# --- Functions for both report types -------------------------------------
def add_report(self, report):
"""
"""
if not getattr(report, "renderer_class", None):
raise ImproperlyConfigured(
'%s needs a renderer_class attribute',
report,
)
if report.has_object:
return self.add_object_report(report)
else:
return self.add_class_report(report)
# --- Class specific reports ----------------------------------------------
# TODO
def get_class_reports(self):
"""
"""
pass
# TODO
def get_class_report(self, name):
"""
"""
pass
# TODO
def add_class_report(self, report):
"""
"""
self._class_reports[report.__name__] = {
'class': report,
}
# --- Object specific reports ---------------------------------------------
def get_object_reports(self):
"""
Returns all available reports
"""
qs = self.bmfconfig.get_model("Report").objects.filter(
contenttype=self.get_contenttype(),
enabled=True
).values('pk', 'name', 'slug', 'renderer_view')
items = []
for data in qs:
cls = self._object_reports[data['renderer_view']]
if data['renderer_view'] in self._object_reports:
items.append({
'name': data['name'],
'slug': data['slug'],
'verbose_name': cls['class'].verbose_name,
'has_form': bool(cls['class'].form_class),
})
else:
self.bmfconfig.get_model("Report").objects.filter(pk=data['pk']).update(ena |
Lajnold/adventofcode2015 | day12.py | Python | mit | 1,136 | 0.001761 | #!/usr/bin/env p | ython3
import json
with open("day12-input.txt") as f:
day12_input = f.read()
day12_input_json = json.loads(day12_input)
def is_int(token):
    # True for JSON numbers parsed as int. NOTE: bool is a subclass of int,
    # so True/False would also match; the puzzle input contains no booleans.
    return isinstance(token, int)
def is_list(token):
    # True for JSON arrays (parsed as Python lists).
    return isinstance(token, list)
def is_dict(token):
    # True for JSON objects (parsed as Python dicts).
    return isinstance(token, dict)
def part1():
    """Part 1: sum every number in the document, regardless of context."""
    def total(node):
        # Recursively add up ints; containers recurse, everything else is 0.
        if is_int(node):
            return node
        if is_list(node):
            return sum(map(total, node))
        if is_dict(node):
            return sum(map(total, node.values()))
        return 0
    print("Sum of numbers: {}".format(total(day12_input_json)))
def part2():
    """Part 2: same sum, but skip any object that has the value "red"."""
    def total(node):
        # Like part1, but dicts containing the value "red" contribute 0.
        if is_int(node):
            return node
        if is_list(node):
            return sum(map(total, node))
        if is_dict(node) and "red" not in node.values():
            return sum(map(total, node.values()))
        return 0
    print("Sum of numbers: {}".format(total(day12_input_json)))
# Solve and print both puzzle parts when the script runs.
part1()
part2()
|
zentralopensource/zentral | tests/utils/http_server.py | Python | apache-2.0 | 1,158 | 0 | import http.server
import sys
class ServerHandler(http.server.BaseHTTPRequestHandler):
    """Debug handler: log each request's headers and body, then reply 200 "OK"."""

    def _process_request(self):
        """Log headers/body of the current request and send a plain-text OK."""
        self.log_message("<<<HEADERS")
        self.log_message(self.headers.as_string().strip())
        self.log_message("HEADERS>>>")
        self.log_message("<<<BODY")
        try:
            content_length = int(self.headers['Content-Length'])
        except (TypeError, ValueError):
            # TypeError: header absent (indexing returns None).
            # ValueError: header present but not an integer — previously this
            # escaped and produced a 500 instead of being logged.
            self.log_message("ERROR: missing or invalid Content-Length")
        else:
            self.log_message(self.rfile.read(content_length).decode("utf-8"))
        self.log_message("BODY>>>")
        self.send_response(200)
        self.send_header('Content-Type', 'text/plain;charset=utf-8')
        self.end_headers()
        self.wfile.write("OK\n".encode("utf-8"))

    def do_GET(self):
        self._process_request()

    def do_POST(self):
        self._process_request()
if __name__ == "__main__":
    # Port comes from argv[1]; fall back to 8000 on any parse failure.
    try:
        port = int(sys.argv[1])
    except Exception:
        port = 8000
    server_address = ('', port)
    print("Starting server:", server_address)
    # Serve on all interfaces until interrupted.
    httpd = http.server.HTTPServer(server_address, ServerHandler)
    httpd.serve_forever()
|
mikesun/xen-cow-checkpointing | tools/python/xen/xend/XendBootloader.py | Python | gpl-2.0 | 5,656 | 0.006188 | #
# XendBootloader.py - Framework to run a boot loader for picking the kernel
#
# Copyright 2005-2006 Red Hat, Inc.
# Jeremy Katz <katzj@redhat.com>
#
# This software may be freely redistributed under the terms of the GNU
# general public license.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import os, select, errno, stat, signal
import random
import shlex
from xen.xend import sxp
from xen.util import mkdir
from XendLogging import log
from XendError import VmError
import pty, termios, fcntl
from xen.lowlevel import ptsname
def bootloader(blexec, disk, dom, quiet = False, blargs = '', kernel = '',
               ramdisk = '', kernel_args = ''):
    """Run the boot loader executable on the given disk and return a
    config image.
    @param blexec Binary to use as the boot loader
    @param disk Disk to run the boot loader on.
    @param dom DomainInfo representing the domain being booted.
    @param quiet Run in non-interactive mode, just booting the default.
    @param blargs Arguments to pass to the bootloader.
    @param kernel Kernel image to suggest to the bootloader.
    @param ramdisk Ramdisk image to suggest to the bootloader.
    @param kernel_args Kernel command line to suggest to the bootloader."""
    if not os.access(blexec, os.X_OK):
        msg = "Bootloader isn't executable"
        log.error(msg)
        raise VmError(msg)
    if not os.access(disk, os.R_OK):
        msg = "Disk isn't accessible"
        log.error(msg)
        raise VmError(msg)
    mkdir.parents("/var/run/xend/boot/", stat.S_IRWXU)
    # Pick a random fifo name for the bootloader to write its result into.
    # NOTE(review): on EEXIST the loop still break-s and reuses the existing
    # fifo rather than retrying with a new name — confirm this is intended.
    while True:
        fifo = "/var/run/xend/boot/xenbl.%s" %(random.randint(0, 32000),)
        try:
            os.mkfifo(fifo, 0600)
        except OSError, e:
            if (e.errno != errno.EEXIST):
                raise
        break
    # We need to present the bootloader's tty as a pty slave that xenconsole
    # can access. Since the bootloader itself needs a pty slave,
    # we end up with a connection like this:
    #
    # xenconsole -- (slave pty1 master) <-> (master pty2 slave) -- bootloader
    #
    # where we copy characters between the two master fds, as well as
    # listening on the bootloader's fifo for the results.
    # Termios runes for very raw access to the pty master fds.
    attr = [ 0, 0, termios.CS8 | termios.CREAD | termios.CLOCAL,
             0, 0, 0, [0] * 32 ]
    (m1, s1) = pty.openpty()
    termios.tcsetattr(m1, termios.TCSANOW, attr)
    fcntl.fcntl(m1, fcntl.F_SETFL, os.O_NDELAY);
    os.close(s1)
    slavename = ptsname.ptsname(m1)
    dom.storeDom("console/tty", slavename)
    # Release the domain lock here, because we definitely don't want
    # a stuck bootloader to deny service to other xend clients.
    from xen.xend import XendDomain
    domains = XendDomain.instance()
    domains.domains_lock.release()
    (child, m2) = pty.fork()
    if (not child):
        # Child process: build the bootloader command line and exec it.
        args = [ blexec ]
        if kernel:
            args.append("--kernel=%s" % kernel)
        if ramdisk:
            args.append("--ramdisk=%s" % ramdisk)
        if kernel_args:
            args.append("--args=%s" % kernel_args)
        if quiet:
            args.append("-q")
        args.append("--output=%s" % fifo)
        if blargs:
            args.extend(shlex.split(blargs))
        args.append(disk)
        try:
            log.debug("Launching bootloader as %s." % str(args))
            env = os.environ.copy()
            env['TERM'] = 'vt100'
            os.execvpe(args[0], args, env)
        except OSError, e:
            print e
            pass
        os._exit(1)
    # record that this domain is bootloading
    dom.bootloader_pid = child
    termios.tcsetattr(m2, termios.TCSANOW, attr)
    fcntl.fcntl(m2, fcntl.F_SETFL, os.O_NDELAY);
    # Opening the fifo blocks until the bootloader opens the write end;
    # retry on EINTR (signal delivery).
    while True:
        try:
            r = os.open(fifo, os.O_RDONLY)
        except OSError, e:
            if e.errno == errno.EINTR:
                continue
        break
    ret = ""
    inbuf=""; outbuf="";
    # Shuttle bytes between the two pty masters one at a time, while also
    # accumulating the bootloader's result from the fifo into ``ret``.
    while True:
        sel = select.select([r, m1, m2], [m1, m2], [])
        try:
            if m1 in sel[0]:
                s = os.read(m1, 1)
                inbuf += s
            if m2 in sel[1] and len(inbuf) != 0:
                os.write(m2, inbuf[0])
                inbuf = inbuf[1:]
        except OSError, e:
            # EIO is raised when the peer side of the pty goes away; ignore.
            if e.errno == errno.EIO:
                pass
        try:
            if m2 in sel[0]:
                s = os.read(m2, 1)
                outbuf += s
            if m1 in sel[1] and len(outbuf) != 0:
                os.write(m1, outbuf[0])
                outbuf = outbuf[1:]
        except OSError, e:
            if e.errno == errno.EIO:
                pass
        if r in sel[0]:
            s = os.read(r, 1)
            ret = ret + s
            # A zero-length read means the bootloader closed the fifo: done.
            if len(s) == 0:
                break
    del inbuf
    del outbuf
    os.waitpid(child, 0)
    os.close(r)
    os.close(m2)
    os.close(m1)
    os.unlink(fifo)
    # Re-acquire the lock to cover the changes we're about to make
    # when we return to domain creation.
    domains.domains_lock.acquire()
    if dom.bootloader_pid is None:
        # bootloader_tidy() cleared the pid: the domain died under us.
        msg = "Domain was died while the bootloader was running."
        log.error(msg)
        raise VmError, msg
    dom.bootloader_pid = None
    if len(ret) == 0:
        msg = "Boot loader didn't return any data!"
        log.error(msg)
        raise VmError, msg
    # Parse the s-expression the bootloader wrote and return it.
    pin = sxp.Parser()
    pin.input(ret)
    pin.input_eof()
    blcfg = pin.val
    return blcfg
def bootloader_tidy(dom):
    # Kill a bootloader process left running for *dom*, if any.
    if hasattr(dom, "bootloader_pid") and dom.bootloader_pid is not None:
        pid = dom.bootloader_pid
        # Clear the pid first so bootloader() can detect the domain died.
        dom.bootloader_pid = None
        # SIGKILL: the bootloader may be wedged waiting on its pty.
        os.kill(pid, signal.SIGKILL)
|
subkar/rasmodel | src/simulate_chen_2009.py | Python | mit | 755 | 0 | from REM.chen_2009 import model
from pysb.integrate import Solver
from pysb.bng import generate_equations
import numpy as np
import matplotlib.pyplot as plt
import sympy
# Replicate the reference MATLAB simulation of the Chen et al. 2009 model.
# Implement "fixed" species (they are never consumed) by zeroing their ODEs.
generate_equations(model)
for i in (5, 6, 7):
    # NOTE(review): indices 5-7 are assumed to be the fixed species in the
    # generated ODE ordering — confirm against the model's species list.
    model.odes[i] = sympy.numbers.Zero()
tspan = np.linspace(0, 9000, 9001)
solver = Solver(model, tspan, atol=1e-6, rtol=1e-8)
solver.run()
plt.figure()
# Plot the three phospho-readouts in stacked subplots.
for i, (arr, obs, color) in enumerate([(solver.yexpr, 'pErbB1', 'b'),
                                       (solver.yobs, 'pERK', 'g'),
                                       (solver.yobs, 'pAKT', 'r')]):
    plt.subplot(3, 1, i + 1)
    plt.plot(tspan, arr[obs], c=color, label=obs)
plt.show()
|
wandb/client | wandb/vendor/watchdog/observers/inotify.py | Python | mit | 8,528 | 0.001993 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.inotify
:synopsis: ``inotify(7)`` based emitter implementation.
:author: Sebastien Martini <seb@dbzteam.org>
:author: Luke McCarthy <luke@iogopro.co.uk>
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: Tim Cuthbertson <tim+github@gfxmonk.net>
:platforms: Linux 2.6.13+.
.. ADMONITION:: About system requirements
Recommended minimum kernel version: 2.6.25.
Quote from the inotify(7) man page:
"Inotify was merged into the 2.6.13 Linux kernel. The required library
interfaces were added to glibc in version 2.4. (IN_DONT_FOLLOW,
IN_MASK_ADD, and IN_ONLYDIR were only added in version 2.5.)"
Therefore, you must ensure the system is running at least these versions
appropriate libraries and the kernel.
.. ADMONITION:: About recursiveness, event order, and event coalescing
Quote from the inotify(7) man page:
If successive output inotify events produced on the inotify file
descriptor are identical (same wd, mask, cookie, and name) then they
are coalesced into a single event if the older event has not yet been
read (but see BUGS).
The events returned by reading from an inotify file descriptor form
an ordered queue. Thus, for example, it is guaranteed that when
renaming from one directory to another, events will be produced in
the correct order on the inotify file descriptor.
...
Inotify monitoring of directories is not recursive: to monitor
subdirectories under a directory, additional watches must be created.
This emitter implementation therefore automatically adds watches for
sub-directories if running in recursive mode.
Some extremely useful articles and documentation:
.. _inotify FAQ: http://inotify.aiken.cz/?section=inotify&page=faq&lang=en
.. _intro to inotify: http://www.linuxjournal.com/article/8478
"""
from __future__ import with_statement
import os
import threading
from .inotify_buffer import InotifyBuffer
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_EMITTER_TIMEOUT,
DEFAULT_OBSERVER_TIMEOUT
)
from watchdog.events import (
DirDeletedEvent,
DirModifiedEvent,
DirMovedEvent,
DirCreatedEvent,
FileDeletedEvent,
FileModifiedEvent,
FileMovedEvent,
FileCreatedEvent,
generate_sub_moved_events,
generate_sub_created_events,
)
from watchdog.utils import unicode_paths
class InotifyEmitter(EventEmitter):
    """
    inotify(7)-based event emitter.
    :param event_queue:
        The event queue to fill with events.
    :param watch:
        A watch object representing the directory to monitor.
    :type watch:
        :class:`watchdog.observers.api.ObservedWatch`
    :param timeout:
        Read events blocking timeout (in seconds).
    :type timeout:
        ``float``
    """
    def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
        EventEmitter.__init__(self, event_queue, watch, timeout)
        # Serializes queue_events() calls; the buffer is created on thread start.
        self._lock = threading.Lock()
        self._inotify = None
    def on_thread_start(self):
        # InotifyBuffer expects a byte path.
        path = unicode_paths.encode(self.watch.path)
        self._inotify = InotifyBuffer(path, self.watch.is_recursive)
    def on_thread_stop(self):
        if self._inotify:
            self._inotify.close()
    def queue_events(self, timeout, full_events=False):
        # If `full_events` is True, unmatched move events are reported as
        # separate events (with None for the missing half).  This flag is by
        # default only set by an InotifyFullEmitter.
        with self._lock:
            event = self._inotify.read_event()
            if event is None:
                return
            # A tuple is a matched IN_MOVED_FROM / IN_MOVED_TO pair.
            if isinstance(event, tuple):
                move_from, move_to = event
                src_path = self._decode_path(move_from.src_path)
                dest_path = self._decode_path(move_to.src_path)
                cls = DirMovedEvent if move_from.is_directory else FileMovedEvent
                self.queue_event(cls(src_path, dest_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
                self.queue_event(DirModifiedEvent(os.path.dirname(dest_path)))
                if move_from.is_directory and self.watch.is_recursive:
                    for sub_event in generate_sub_moved_events(src_path, dest_path):
                        self.queue_event(sub_event)
                return
            src_path = self._decode_path(event.src_path)
            if event.is_moved_to:
                if full_events:
                    cls = DirMovedEvent if event.is_directory else FileMovedEvent
                    self.queue_event(cls(None, src_path))
                else:
                    # Unmatched move-in is reported as a creation.
                    cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
                    self.queue_event(cls(src_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
                if event.is_directory and self.watch.is_recursive:
                    for sub_event in generate_sub_created_events(src_path):
                        self.queue_event(sub_event)
            elif event.is_attrib:
                cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
                self.queue_event(cls(src_path))
            elif event.is_modify:
                cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
                self.queue_event(cls(src_path))
            elif event.is_delete or (event.is_moved_from and not full_events):
                # Unmatched move-out is reported as a deletion.
                cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
                self.queue_event(cls(src_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
            elif event.is_moved_from and full_events:
                cls = DirMovedEvent if event.is_directory else FileMovedEvent
                self.queue_event(cls(src_path, None))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
            elif event.is_create:
                cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
                self.queue_event(cls(src_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
    def _decode_path(self, path):
        """ Decode path only if unicode string was passed to this emitter. """
        if isinstance(self.watch.path, bytes):
            return path
        return unicode_paths.decode(path)
class InotifyFullEmitter(InotifyEmitter):
    """
    inotify(7)-based event emitter that also reports unmatched move events.
    A move whose counterpart was not observed is still emitted as a move
    event, with ``None`` standing in for the missing path.
    :param event_queue:
        The event queue to fill with events.
    :param watch:
        A watch object representing the directory to monitor.
    :type watch:
        :class:`watchdog.observers.api.ObservedWatch`
    :param timeout:
        Read events blocking timeout (in seconds).
    :type timeout:
        ``float``
    """
    def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
        super(InotifyFullEmitter, self).__init__(event_queue, watch, timeout)
    def queue_events(self, timeout, events=True):
        # Delegate with full_events enabled so unmatched moves are reported.
        super(InotifyFullEmitter, self).queue_events(timeout, full_events=events)
class InotifyObserver(BaseObserver):
"""
Observer thread that schedules watching directories and dispatches
calls to event handlers.
|
derkling/trappy | tests/test_sched.py | Python | apache-2.0 | 5,299 | 0.002076 | # Copyright 2015-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import utils_tests
import trappy
sys.path.append(os.path.join(utils_tests.TESTS_DIRECTORY, "..", "trappy"))
class BaseTestSched(utils_tests.SetupDirectory):
    # Common base for the sched trace tests: copies the trace_sched.txt
    # fixture into the per-test working directory as "trace.txt", which is
    # the file name trappy.Run() reads by default.
    def __init__(self, *args, **kwargs):
        super(BaseTestSched, self).__init__(
            [("trace_sched.txt", "trace.txt")],
            *args,
            **kwargs)
class TestSchedLoadAvgSchedGroup(BaseTestSched):
    def test_get_dataframe(self):
        """Test that SchedLoadAvgSchedGroup creates a proper data_frame"""
        dfr = trappy.Run().sched_load_avg_sched_group.data_frame
        # assertEqual (not the deprecated assertEquals alias, nor
        # assertTrue on a comparison) gives informative failure output.
        self.assertEqual(len(dfr), 1)
        self.assertEqual(dfr["cpus"].iloc[0], "00000002")
        self.assertEqual(dfr["load"].iloc[0], 0)
        self.assertEqual(dfr["utilization"].iloc[0], 0)
class TestSchedLoadAvgTask(BaseTestSched):
    def test_get_dataframe(self):
        """Test that SchedLoadAvgTask creates a proper data_frame"""
        dfr = trappy.Run().sched_load_avg_task.data_frame
        # assertEqual over deprecated assertEquals / assertTrue-on-comparison.
        self.assertEqual(len(dfr), 1)
        self.assertEqual(dfr["comm"].iloc[0], "sshd")
        self.assertEqual(dfr["pid"].iloc[0], 2962)
        self.assertEqual(dfr["load"].iloc[0], 0)
        self.assertEqual(dfr["utilization"].iloc[0], 0)
        self.assertEqual(dfr["runnable_avg_sum"].iloc[0], 0)
        self.assertEqual(dfr["running_avg_sum"].iloc[0], 0)
        self.assertEqual(dfr["avg_period"].iloc[0], 48595)
class TestSchedLoadAvgCpu(BaseTestSched):
    def test_get_dataframe(self):
        """Test that SchedLoadAvgCpu creates a proper data_frame"""
        dfr = trappy.Run().sched_load_avg_cpu.data_frame
        # assertEqual over deprecated assertEquals / assertTrue-on-comparison.
        self.assertEqual(len(dfr), 1)
        self.assertEqual(dfr["cpu"].iloc[0], 0)
        self.assertEqual(dfr["load"].iloc[0], 13)
        self.assertEqual(dfr["utilization"].iloc[0], 18)
class TestSchedContribScaleFactor(BaseTestSched):
    def test_get_dataframe(self):
        """Test that SchedContribScaleFactor creates a proper data_frame"""
        dfr = trappy.Run().sched_contrib_scale_factor.data_frame
        # assertEqual over deprecated assertEquals / assertTrue-on-comparison.
        self.assertEqual(len(dfr), 1)
        self.assertEqual(dfr["cpu"].iloc[0], 0)
        self.assertEqual(dfr["freq_scale_factor"].iloc[0], 426)
        self.assertEqual(dfr["cpu_scale_factor"].iloc[0], 1024)
class TestSchedCpuCapacity(BaseTestSched):
    def test_get_dataframe(self):
        """Test that SchedCpuCapacity creates a proper data_frame"""
        dfr = trappy.Run().sched_cpu_capacity.data_frame
        # Repaired: the capacity/rt_capacity assertions had stray '|'
        # characters mangling the statements.  Also modernized to assertEqual.
        self.assertEqual(len(dfr), 1)
        self.assertEqual(dfr["cpu"].iloc[0], 3)
        self.assertEqual(dfr["capacity"].iloc[0], 430)
        self.assertEqual(dfr["rt_capacity"].iloc[0], 1024)
class TestSchedCpuFrequency(BaseTestSched):
    def test_get_dataframe(self):
        """Test that CpuFrequency creates a proper data_frame"""
        dfr = trappy.Run().sched_cpu_frequency.data_frame
        # assertEqual/assertNotIn give informative failure output compared to
        # the deprecated assertEquals and assertFalse-on-membership.
        self.assertEqual(len(dfr), 1)
        self.assertEqual(dfr["cpu"].iloc[0], 0)
        self.assertEqual(dfr["state"].iloc[0], 600000)
        self.assertNotIn("cpu_id", dfr.columns)
class TestGetFilters(BaseTestSched):
    def test_get_filters(self):
        """Test that Run::get_filters returns correct list of filters"""
        run = trappy.Run()
        classes = run.class_definitions
        filters = run.get_filters()
        # assertEqual shows both operands on failure, unlike assertTrue(a == b).
        self.assertEqual(len(classes), len(filters))
        self.assertEqual(sorted(classes), sorted(filters))
        sched_classes = run.sched_classes
        sched_filters = run.get_filters("sched")
        self.assertEqual(len(sched_classes), len(sched_filters))
        self.assertEqual(sorted(sched_classes), sorted(sched_filters))
class TestSpacedValueAttributes(BaseTestSched):
    def test_spaced_value_attr(self):
        """Test that Run object parses spaced value attributes correctly"""
        # Append a record whose comm field contains a space ("AsyncTask #2");
        # the parser must not split the value at that space.
        with open("trace.txt", "a") as fout:
            fout.write(" <...>-2971 [004] 6550.056871: sched_load_avg_task: comm=AsyncTask #2 pid=6163 ")
        dfr = trappy.Run().sched_load_avg_task.data_frame
        # assertEqual over deprecated assertEquals / assertTrue-on-comparison.
        self.assertEqual(len(dfr), 2)
        self.assertEqual(dfr["comm"].iloc[1], "AsyncTask #2")
        self.assertEqual(dfr["pid"].iloc[1], 6163)
class TestNoSchedTraces(utils_tests.SetupDirectory):
    # Uses an empty trace fixture instead of BaseTestSched's populated one.
    def __init__(self, *args, **kwargs):
        super(TestNoSchedTraces, self).__init__(
            [("trace_empty.txt", "trace.txt")],
            *args,
            **kwargs)
    def test_empty_trace_txt(self):
        """Test that empty objects are created with empty trace file"""
        run = trappy.Run()
        for attr in run.sched_classes.iterkeys():
            # assertEqual reports the offending length on failure.
            self.assertEqual(len(getattr(run, attr).data_frame), 0)
|
ics/doc8643 | app/__init__.py | Python | mit | 930 | 0.009677 | from flask import Flask, render_template
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.mail import Mail
from flask.ext.assets import Environment
#from flask.ext.admin import Admin
#from flask_debugtoolbar import DebugToolbarExtension
# Application factory-less setup: single module-level app configured from
# the top-level `config` module.
app = Flask(__name__)
app.config.from_object('config')
# SelectiveHTMLCompress {% strip %} ... {% endstrip %}
# Repaired: the extension dotted path was corrupted by a stray '|'.
app.jinja_env.add_extension('app.helpers.jinja2htmlcompress.HTMLCompress')
assets = Environment(app)
#js = Bundle("js/vendor/jquery-1.9.1.min.js", "js/vendor/bootstrap.min.js",
#            "js/vendor/jquery.masonry.min.js", "js/vendor/jquery.infinitescroll.min.js",
#            filters='jsmin', output='gen/packed.js')
#assets.register('js_all', js)
db = SQLAlchemy(app)
#admin = Admin(app)
#toolbar = DebugToolbarExtension(app)
mail = Mail(app)
@app.errorhandler(404)
def not_found(error):
    """Render the custom 404 page for unknown URLs."""
    return render_template('404.html'), 404
# Imported last so views/models can import `app` and `db` without a cycle.
from app import views, models
wathen/PhD | MHD/FEniCS/FieldSplit/NSpicard.py | Python | mit | 8,721 | 0.014104 | #!/usr/bin/env python
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
# from MatrixOperations import *
from dolfin import *
import numpy as np
import matplotlib.pylab as plt
import os
import scipy.io
from PyTrilinos import Epetra, EpetraExt, AztecOO, ML, Amesos
from scipy2Trilinos import scipy_csr_matrix2CrsMatrix
import PETScIO as IO
import time
#MO.SwapBackend('epetra')
#os.system("echo $PATH")
m = 4
errL2u = np.zeros((m-1,1))
errL2p = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Vdim = np.zeros((m-1,1))
Qdim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
nonlinear = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
nn = 2
dim = 2
Solving = 'No'
Saving = 'no'
case = 1
parameters['linear_algebra_backend'] = 'PETSc'
for xx in xrange(1,m):
print xx
nn = 2**(xx)
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn
mesh = RectangleMesh(-1, -1, 1, 1, nn, nn,'right')
# tic()
parameters['reorder_dofs_serial'] = False
V = VectorFunctionSpace(mesh, "CG", 2)
Q = FunctionSpace(mesh, "CG", 1)
parameters['reorder_dofs_serial'] = False
# print 'time to create function spaces', toc(),'\n\n'
W = V*Q
Vdim[xx-1] = V.dim()
Qdim[xx-1] = Q.dim()
Wdim[xx-1] = W.dim()
print "\n\nV: ",Vdim[xx-1],"Q: ",Qdim[xx-1],"W: ",Wdim[xx-1],"\n\n"
def boundary(x, on_boundary):
return on_boundary
if case == 1:
u0 = Expression(("20*x[0]*pow(x[1],3)","5*pow(x[0],4)-5*pow(x[1],4)"))
p0 = Expression("60*pow(x[0],2)*x[1]-20*pow(x[1],3)")
elif case == 2:
Su0 = Expression(("pow(x[1],2)-x[1]","pow(x[0],2)-x[0]"))
p0 = Expression("x[1]+x[0]-1")
elif case == 3:
u0 = Expression(("cos(2*pi*x[1])*sin(2*pi*x[0]) ","-cos(2*pi*x[0])*sin(2*pi*x[1]) "))
p0 = Expression("sin(2*pi*x[0])*sin(2*pi*x[1]) ")
bc = DirichletBC(W.sub(0),u0, boundary)
bcs = [bc]
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
if case == 1:
f = Expression(("120*x[0]*x[1]*(1-mu)+ 400*x[0]*pow(x[1],6)+(5*pow(x[0],4)-5*pow(x[1],4))*60*x[0]*x[1]*x[1]","60*(pow(x[0],2)-pow(x[1],2))*(1-mu)+400*pow(x[0],4)*pow(x[1],3)-(5*pow(x[0],4)-5*pow(x[1],4))*20*x[1]*x[1]*x[1]"), mu = 1e0)
elif case == 2:
f = -Expression(("-1","-1"))
elif case == 3:
f = -Expression(("8*pi*pi*cos(2*pi*x[1])*sin(2*pi*x[0]) + 2*pi*cos(2*pi*x[0])*sin(2*pi*x[1])","2*pi*cos(2*pi*x[0])*sin(2*pi*x[1]) - 8*pi*pi*cos(2*pi*x[0])*sin(2*pi*x[1])"))
u_k = Function(V)
mu = Constant(1e0)
u_k.vector()[:] = u_k.vector()[:]*0
n = FacetNormal(mesh)
h = CellSize(mesh)
h_avg =avg(h)
d = 0
a11 = mu*inner(grad(v), grad(u))*dx + inner((grad(u)*u_k),v)*dx+(1/2)*div(u_k)*inner(u,v)*dx- (1/2)*inner(u_k,n)*inner(u,v)*ds
a12 = div(v)*p*dx
a21 = div(u)*q*dx
L1 = inner(v, f)*dx
a = a11-a12-a21
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-4 # tolerance
iter = 0 # iteration counter
maxiter = 100 # max no of iterations allowed
while eps > tol and iter < maxiter:
iter += 1
x = Function(W)
uu = Function(W)
tic()
AA, bb = assemble_system(a, L1, bcs)
A = as_backend_type(AA).mat()
print toc()
b = bb.array()
zeros = 0*b
bb = IO.arrayToVec(b)
x = IO.arrayToVec(zeros)
tic()
u_is = PETSc.IS().createGeneral(range(V.dim()))
p_is = PETSc.IS().createGeneral(range(V.dim(),V.dim()+Q.dim()))
ksp = PETSc.KSP().create()
ksp.setOperators(A,A)
ksp.setTolerances(1e-6)
ksp.setType('gmres')
pc = ksp.getPC()
pc.setType(pc.Type.FIELDSPLIT)
fields = [ ("field1", u_is), ("field2", p_is)]
pc.setFieldSplitIS(*fields)
pc.setFieldSplitType(0)
OptDB = PETSc.Options()
# OptDB['pc_fieldsplit_detect_saddle_point'] = ''
OptDB['pc_fieldsplit_type'] = "schur"
OptDB['pc_fieldsplit_schur_factorization_type'] = "full"
OptDB['pc_fieldsplit_schur_precondition'] = "self"
OptDB['fieldsplit_field1_ksp_type'] = "preonly"
OptDB['fieldsplit_field1_pc_type'] = "lu"
OptDB['fieldsplit_1_ksp_type'] = 'gmres'
OptDB['fieldsplit_field2_pc_type'] = "lsc"
ksp.setFromOptions()
# ksp.setFromOptions()
print ksp.view()
print " time to create petsc field split preconditioner", toc(),"\n\n"
tic()
ksp.solve(bb, x)
SolTime[xx-1] = toc()
print "time to solve: ",SolTime[xx-1]
iterations[xx-1] = ksp.its
print "\n\nouter iterations = ", iterations[xx-1]
# print "Inner itations, field 1 = ", ksp1.its, " field 2 = ", ksp2.it
uu = IO.vecToArray(x)
uu = uu[0:Vdim[xx-1][0]]
# time = time+toc()
u1 = Function(V)
u1.vector()[:] = u1.vector()[:] + uu
diff = u1.vector().array() - u_k.vector().array()
eps = np.linalg.norm(diff, ord=np.Inf)
print '\n\n\niter=%d: norm=%g' % (iter, eps)
u_k.assign(u1)
#
if case == 1:
ue = Expression(("20*x[0]*pow(x[1],3)","5*pow(x[0],4)-5*pow(x[1],4)"))
pe = Expression("60*pow(x[0],2)*x[1]-20*pow(x[1],3)+5")
elif case == 2:
ue = Expression(("pow(x[1],2)-x[1]","pow(x[0],2)-x[0]"))
pe = Expression("x[1]+x[0]-1")
elif case == 3:
ue = Expression(("cos(2*pi*x[1])*sin(2*pi*x[0]) ","-cos(2*pi*x[0])*sin(2*pi*x[1]) "))
pe = Expression("sin(2*pi*x[0])*sin(2*pi*x[1]) ")
u = interpolate(ue,V)
p = interpolate(pe,Q)
ua = Function(V)
ua.vector()[:] = u_k.vector().array()
nonlinear[xx-1] = assemble(inner((grad(ua)*ua),ua)*dx+(1/2)*div(ua)*inner(ua,ua)*dx- (1/2)*inner(ua,n)*inner(ua,ua)*ds)
Nv = u.vector().array().shape
X = IO.vecToArray(x)
x = X[0:Vdim[xx-1][0]]
# x = x_epetra[0:Nv[0]]
ua = Function(V)
ua.vector()[:] = x
pp = X[Nv[0]:]
n = pp.shape
# pp = np.insert(pp,n,0)
pa = Function(Q)
pa.vector()[:] = pp
pend = assemble(pa*dx)
ones = Function(Q)
ones.vector()[:]=(0*pp+1)
pp = Function(Q)
pp.vector()[:] = pa.vector().array()- assemble(pa*dx)/assemble(ones*dx)
pInterp = interpolate(pe,Q)
pe = Function(Q)
pe.vector()[:] = pInterp.vector().array()
const = - assemble(pe*dx)/assemble(ones*dx)
pe.vector()[:] = pe.vector()[:]+const
errL2u[xx-1] = errornorm(ue,ua,norm_type="L2", degree_rise=4,mesh=mesh)
errL2p[xx-1] = errornorm(pe,pp,norm_type="L2", degree_rise=4,mesh=mesh)
if xx == 1:
l2uorder[xx-1] = 0
l2porder[xx-1] = 0
else:
l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
print errL2u[xx-1]
print errL2p[xx-1]
# del solver
# scipy.io.savemat('Vdim.mat', {'VDoF':Vdim})
# scipy.io.savemat('DoF.mat', {'DoF':DoF})
# plt.loglog(NN,errL2u)
# plt.title('Error plot for CG2 elements - Velocity L2 convergence = %f' % np.log2(np.average((errL2u[0:m-2]/errL2u[1:m-1]))))
# plt.xlabel('N')
# plt.ylabel('L2 error')
# plt.figure()
# plt.loglog(NN,errL2p)
# plt.title('Error plot for CG1 elements - Pressure L2 convergence = %f' % np.log2(np.average((errL2p[0:m-2]/errL2p[1:m-1]))))
# plt.xlabel('N')
# plt.ylabel('L2 error')
# plt.show()
print nonlinear
print "Velocity Elements rate of convergence ", np.log2(np.average((errL2u[0:m-2]/errL2u[1:m-1])))
print "Pressure Elements rate of convergence ", np.log2(np.average((errL2p[0:m-2]/errL2p[1:m-1])))
import pandas as pd
tableTitles = ["Total DoF","V DoF","Q DoF","V-L2","V-order","P-L2","P-order"]
tableValues = np.concatenate((Wdim,Vdim,Qdim,errL2u,l2uorder,errL2p,l2porder),axis=1)
df = pd.DataFrame |
EvilMcJerkface/crate | devs/tools/create_certs.py | Python | apache-2.0 | 4,616 | 0.002166 | #!/usr/bin/env python3
"""Script to generate a keystore with node and client certificates.
Requires keystore and openssl to be available in $PATH
"""
import os
import argparse
from os.path import join, splitext, basename
from subprocess import run
def int_or(val, default):
    """Return ``int(val)`` when *val* is truthy, otherwise *default*."""
    return int(val) if val else default
def create_key_and_csr(key, csr):
    """Generate a 2048-bit RSA key at *key* and a CSR at *csr*.
    The certificate CN is the CSR file's base name without extension.
    """
    common_name = splitext(basename(csr))[0]
    subject = f'/C=AT/ST=Dummy State/L=Dummy Country/O=Dummy Company/CN={common_name}'
    cmd = ['openssl', 'req', '-newkey', 'rsa:2048', '-nodes',
           '-subj', subject,
           '-keyout', key,
           '-out', csr]
    run(cmd)
def create_crt(csr, crt, root_ca_crt, root_ca_key, out_dir):
    """Sign `csr` with the root CA, writing the certificate to `crt`.

    Writes a temporary x509v3 extension file (`ssl.ext`) into `out_dir`
    declaring the CN as a SAN DNS entry, then invokes openssl.
    NOTE(review): the openssl exit status is not checked; a signing failure
    continues silently -- confirm whether that is intentional.
    """
    cn = splitext(basename(csr))[0]
    # Extension file content must be column-0 inside the triple-quoted string.
    ssl_ext_template = f"""authorityKeyIdentifier=keyid,issuer
basicConstraints=CA:FALSE
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = {cn}
"""
    with open(join(out_dir, 'ssl.ext'),'w') as f:
        f.write(ssl_ext_template)
    # Sign the CSR for one year using the extension file written above.
    run(['openssl', 'x509', '-req',
         '-in', csr,
         '-CA', root_ca_crt,
         '-CAkey', root_ca_key,
         '-CAcreateserial',
         '-out', crt,
         '-sha256',
         '-days', '365',
         '-extfile', join(out_dir, 'ssl.ext')
    ])
def generate_for(root_ca_key, root_ca_crt, out_dir, entity, num_default):
    """Interactively generate key/CSR/cert triples for `entity` ("node" or
    "client"), signed by the given root CA.

    Prompts on stdin for the number of certificates and each CN (defaulting
    to e.g. "node1", "node2", ...).  Returns a list of (crt_path, key_path)
    tuples for the generated certificates.
    """
    num = int_or(input(f'How many {entity} certs do you want to generate? [{num_default}]: '), num_default)
    certs_and_keys = []
    for i in range(num):
        name = entity + str(i + 1)
        supplied_name = input(f'Name (CN) of {entity} {i + 1} [{name}]: ')
        # Empty input keeps the default name.
        name = supplied_name or name
        key = join(out_dir, name + '.key')
        csr = join(out_dir, name + '.csr')
        crt = join(out_dir, name + '.crt')
        certs_and_keys.append((crt, key))
        print(f'Creating {entity} key, csr and cert for {name}')
        create_key_and_csr(key, csr)
        create_crt(csr, crt, root_ca_crt, root_ca_key, out_dir)
        print('')
    print('')
    return certs_and_keys
def import_into_keystores(certs_and_keys, entity, keystore_pw, ca_crt, keystore, keystorep12):
    """Import each (crt, key) pair into a JKS keystore.

    For every pair the cert+key are first bundled (with the CA chain) into a
    PKCS12 file `keystorep12`, which is then imported into the JKS `keystore`
    via keytool.  The PKCS12 file is overwritten on each iteration.
    `openssl pkcs12 -export` prompts for an export password on stdin; the
    user is told to enter `keystore_pw` so the subsequent keytool import
    (which passes it as -srcstorepass) succeeds.
    """
    print(f'Importing {entity} certificates into keystore, Use "{keystore_pw}" as pw.')
    for (cert, key) in certs_and_keys:
        run([
            'openssl', 'pkcs12', '-export',
            '-in', cert,
            '-inkey', key,
            '-out', keystorep12,
            '-name', splitext(cert)[0],
            '-CAfile', ca_crt,
            '-caname', 'myCA',
            '-chain',
        ])
        run([
            'keytool', '-importkeystore',
            '-deststorepass', keystore_pw,
            '-destkeypass', keystore_pw,
            '-destkeystore', keystore,
            '-srckeystore', keystorep12,
            '-srcstoretype', 'PKCS12',
            '-srcstorepass', keystore_pw,
            '-alias', splitext(cert)[0]
        ])
def create_certs(out_dir, keystore_pw):
    """Create a root CA plus node and client certificates under `out_dir`.

    Node certificates (and the CA certificate itself) are imported into
    `keystore.jks`; client certificates into `keystore_client.jks`.
    Repaired: two assignment statements were corrupted by stray '|'
    characters (`keystore = ...` and `certs_and_keys = ...`).
    """
    ca_key = join(out_dir, 'rootCA.key')
    ca_crt = join(out_dir, 'rootCA.crt')
    print(f'Generating rootCA key: {ca_key}')
    print(f'Generating rootCA certificate: {ca_crt}')
    # Self-signed root CA used to sign both node and client certificates.
    run([
        'openssl', 'req', '-x509', '-sha256', '-nodes',
        '-days', '365',
        '-subj', '/C=AT/ST=Dummy State/L=Dummy Country/O=Dummy Company/CN=myCA',
        '-newkey', 'rsa:2048',
        '-keyout', ca_key,
        '-out', ca_crt
    ])
    certs_and_keys = generate_for(ca_key, ca_crt, out_dir, 'node', 1)
    keystore = join(out_dir, 'keystore.jks')
    keystore_p12 = join(out_dir, 'keystore.p12')
    import_into_keystores(certs_and_keys, 'node', keystore_pw, ca_crt, keystore, keystore_p12)
    # the CA certificate should also be in the keystore for the
    # node to be able to verify the client certificate
    run(['keytool', '-importcert',
         '-storepass', keystore_pw,
         '-keystore', keystore,
         '-file', ca_crt,
         '-alias', 'therootca'
    ])
    certs_and_keys = generate_for(ca_key, ca_crt, out_dir, 'client', 1)
    keystore_client = join(out_dir, 'keystore_client.jks')
    keystore_client_p12 = join(out_dir, 'keystore_client.p12')
    import_into_keystores(certs_and_keys, 'client', keystore_pw, ca_crt, keystore_client, keystore_client_p12)
def main():
    """Parse CLI options, ensure the output directory exists, and generate
    the keystores with node and client certificates."""
    arg_parser = argparse.ArgumentParser(description=__doc__)
    arg_parser.add_argument('--out-dir', type=str, required=True)
    arg_parser.add_argument('--keystore-pw', type=str, default='changeit')
    opts = arg_parser.parse_args()
    os.makedirs(opts.out_dir, exist_ok=True)
    create_certs(opts.out_dir, opts.keystore_pw)
if __name__ == "__main__":
    main()
|
ruippeixotog/beets | beetsplug/duplicates.py | Python | mit | 13,370 | 0.000374 | # This file is part of beets.
# Copyright 2015, Pedro Silva.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""List duplicate tracks or albums.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import shlex
from beets.plugins import BeetsPlugin
from beets.ui import decargs, print_, vararg_callback, Subcommand, UserError
from beets.util import command_output, displayable_path, subprocess
from beets.library import Item, Album
PLUGIN = 'duplicates'
class DuplicatesPlugin(BeetsPlugin):
"""List duplicate tracks or albums
"""
def __init__(self):
super(DuplicatesPlugin, self).__init__()
self.config.add({
'album': False,
'checksum': '',
'copy': '',
'count': False,
'delete': False,
'format': '',
'full': False,
'keys': [],
'merge': False,
'move': '',
'path': False,
'tiebreak': {},
'strict': False,
'tag': '',
})
self._command = Subcommand('duplicates',
help=__doc__,
aliases=['dup'])
self._command.parser.add_option('-c', '--count', dest='count',
action='store_true',
help='show duplicate counts')
self._command.parser.add_option('-C', '--checksum', dest='checksum',
action='store', metavar='PROG',
| help='report duplicates based on'
' arbitrary command')
self._c | ommand.parser.add_option('-d', '--delete', dest='delete',
action='store_true',
help='delete items from library and '
'disk')
self._command.parser.add_option('-F', '--full', dest='full',
action='store_true',
help='show all versions of duplicate'
' tracks or albums')
self._command.parser.add_option('-s', '--strict', dest='strict',
action='store_true',
help='report duplicates only if all'
' attributes are set')
self._command.parser.add_option('-k', '--keys', dest='keys',
action='callback', metavar='KEY1 KEY2',
callback=vararg_callback,
help='report duplicates based on keys')
self._command.parser.add_option('-M', '--merge', dest='merge',
action='store_true',
help='merge duplicate items')
self._command.parser.add_option('-m', '--move', dest='move',
action='store', metavar='DEST',
help='move items to dest')
self._command.parser.add_option('-o', '--copy', dest='copy',
action='store', metavar='DEST',
help='copy items to dest')
self._command.parser.add_option('-t', '--tag', dest='tag',
action='store',
help='tag matched items with \'k=v\''
' attribute')
self._command.parser.add_all_common_options()
    def commands(self):
        """Return the `duplicates` subcommand with its handler attached."""
        def _dup(lib, opts, args):
            # Merge CLI options into the plugin config so both sources apply.
            self.config.set_args(opts)
            album = self.config['album'].get(bool)
            checksum = self.config['checksum'].get(str)
            copy = self.config['copy'].get(str)
            count = self.config['count'].get(bool)
            delete = self.config['delete'].get(bool)
            fmt = self.config['format'].get(str)
            full = self.config['full'].get(bool)
            keys = self.config['keys'].get(list)
            merge = self.config['merge'].get(bool)
            move = self.config['move'].get(str)
            path = self.config['path'].get(bool)
            tiebreak = self.config['tiebreak'].get(dict)
            strict = self.config['strict'].get(bool)
            tag = self.config['tag'].get(str)
            # Default grouping keys depend on album vs. track mode.
            if album:
                if not keys:
                    keys = ['mb_albumid']
                items = lib.albums(decargs(args))
            else:
                if not keys:
                    keys = ['mb_trackid', 'mb_albumid']
                items = lib.items(decargs(args))
            if path:
                fmt = '$path'
            # Default format string for count mode.
            if count and not fmt:
                if album:
                    fmt = '$albumartist - $album'
                else:
                    fmt = '$albumartist - $album - $title'
                fmt += ': {0}'
            if checksum:
                # Computing the checksum caches it as a flexible attribute.
                # NOTE(review): `keys = [k]` uses `k` from the LAST loop
                # iteration only; all items are assumed to yield the same
                # key (the checksum program name) -- confirm.
                for i in items:
                    k, _ = self._checksum(i, checksum)
                keys = [k]
            for obj_id, obj_count, objs in self._duplicates(items,
                                                            keys=keys,
                                                            full=full,
                                                            strict=strict,
                                                            tiebreak=tiebreak,
                                                            merge=merge):
                if obj_id:  # Skip empty IDs.
                    for o in objs:
                        self._process_item(o,
                                           copy=copy,
                                           move=move,
                                           delete=delete,
                                           tag=tag,
                                           fmt=fmt.format(obj_count))
        self._command.func = _dup
        return [self._command]
def _process_item(self, item, copy=False, move=False, delete=False,
tag=False, fmt=''):
"""Process Item `item`.
"""
print_(format(item, fmt))
if copy:
item.move(basedir=copy, copy=True)
item.store()
if move:
item.move(basedir=move, copy=False)
item.store()
if delete:
item.remove(delete=True)
if tag:
try:
k, v = tag.split('=')
except:
raise UserError('%s: can\'t parse k=v tag: %s' % (PLUGIN, tag))
setattr(item, k, v)
item.store()
def _checksum(self, item, prog):
"""Run external `prog` on file path associated with `item`, cache
output as flexattr on a key that is the name of the program, and
return the key, checksum tuple.
"""
args = [p.format(file=item.path) for p in shlex.split(prog)]
key = args[0]
checksum = getattr(item, key, False)
if not checksum:
self._log.debug(u'key {0} on item {1} not cached:'
'computing checksum',
key, displayable_path(item.path))
try:
checksum = command_output(args)
setattr(item, key, checksum)
item.store()
self._log |
t-mertz/slurmCompanion | django-web/sshcomm/migrations/0003_auto_20170118_1901.py | Python | mit | 697 | 0.001435 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-18 18:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.forms.widgets
class Migration(migrations.Migration):
    # Auto-generated migration altering the CharFields on sshcomm.UserData.
    # Repaired: the dependency tuple and the 'user_password' field name were
    # corrupted by stray '|' characters.
    dependencies = [
        ('sshcomm', '0002_auto_20170118_1702'),
    ]
    operations = [
        migrations.AlterField(
            model_name='userdata',
            name='user_name',
            field=models.CharField(max_length=128),
        ),
        migrations.AlterField(
            model_name='userdata',
            name='user_password',
            # NOTE(review): verbose_name is being given a widget class here;
            # widgets belong on forms and verbose_name should be a string.
            # Preserved as generated -- confirm intent before changing.
            field=models.CharField(max_length=128, verbose_name=django.forms.widgets.PasswordInput),
        ),
    ]
|
apache/qpid-dispatch | tests/system_tests_core_endpoint.py | Python | apache-2.0 | 9,396 | 0.005747 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container
from system_test import TestCase, Qdrouterd, main_module, TIMEOUT, unittest, TestTimeout
class RouterTest(TestCase):
    # System tests for core-endpoint behavior on a single interior router.
    inter_router_port = None
    @classmethod
    def setUpClass(cls):
        """Start a router"""
        super(RouterTest, cls).setUpClass()
        def router(name, connection, args=None):
            # Build a single interior-router config with plain, multi-tenant
            # and route-container listeners plus the standard address set.
            config = [
                ('router', {'mode': 'interior', 'id': name}),
                ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no'}),
                ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no', 'multiTenant': 'yes'}),
                ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no', 'role': 'route-container'}),
                ('address', {'prefix': 'closest', 'distribution': 'closest'}),
                ('address', {'prefix': 'spread', 'distribution': 'balanced'}),
                ('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
                ('address', {'prefix': '0.0.0.0/queue', 'waypoint': 'yes'}),
                connection
            ]
            config = Qdrouterd.Config(config)
            cls.routers.append(cls.tester.qdrouterd(name, config, wait=True, cl_args=args or []))
        cls.routers = []
        inter_router_port = cls.tester.get_port()
        # "-T" enables the router's core test hooks used by these tests.
        router('A', ('listener', {'role': 'inter-router', 'port': inter_router_port}), ["-T"])
    def test_01_denied_link(self):
        test = DenyLinkTest(self.routers[0].addresses[0], "org.apache.qpid.dispatch.router/test/deny")
        test.run()
        self.assertIsNone(test.error)
    def test_02_discard_deliveries(self):
        test = DiscardTest(self.routers[0].addresses[0], "org.apache.qpid.dispatch.router/test/discard")
        test.run()
        self.assertIsNone(test.error)
    def test_03_presettled_source(self):
        test = SourceTest(self.routers[0].addresses[0], "org.apache.qpid.dispatch.router/test/source_ps", 300, 300)
        test.run()
        self.assertIsNone(test.error)
    def test_04_unsettled_source(self):
        test = SourceTest(self.routers[0].addresses[0], "org.apache.qpid.dispatch.router/test/source", 300, 0)
        test.run()
        self.assertIsNone(test.error)
    def test_05_echo_attach_detach(self):
        test = EchoTest(self.routers[0].addresses[0], "org.apache.qpid.dispatch.router/test/echo")
        test.run()
        self.assertIsNone(test.error)
class DenyLinkTest(MessagingHandler):
    # Attaches both a receiver and a sender to an address whose core
    # endpoint denies links; the test passes once BOTH attaches are
    # refused with a link error (self.error stays None on success).
    def __init__(self, host, address):
        super(DenyLinkTest, self).__init__(prefetch=0)
        self.host = host
        self.address = address
        self.conn = None
        self.error = None  # None means success
        self.receiver = None
        self.sender = None
        self.receiver_failed = False
        self.sender_failed = False
    def timeout(self):
        self.error = "Timeout Expired: receiver_failed=%s sender_failed=%s" %\
            ("yes" if self.receiver_failed else "no",
             "yes" if self.sender_failed else "no")
        self.conn.close()
    def on_start(self, event):
        # Guard the whole test with a timer; cancelled on success.
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.host)
        self.receiver = event.container.create_receiver(self.conn, self.address)
        self.sender = event.container.create_sender(self.conn, self.address)
    def on_link_error(self, event):
        if event.receiver == self.receiver:
            self.receiver_failed = True
        if event.sender == self.sender:
            self.sender_failed = True
        if self.receiver_failed and self.sender_failed:
            self.conn.close()
            self.timer.cancel()
    def run(self):
        Container(self).run()
class DiscardTest(MessagingHandler):
    """
    Sends up to `count` messages to an address whose core endpoint discards
    deliveries; the test passes as soon as one REJECTED disposition arrives
    (self.error stays None on success).
    """
    def __init__(self, host, address):
        super(DiscardTest, self).__init__(prefetch=0)
        self.host = host
        self.address = address
        self.conn = None
        self.error = None  # None means success
        self.sender = None
        self.count = 300
        self.sent = 0
        self.rejected = 0
    def timeout(self):
        self.error = "Timeout Expired: n_sent=%d n_rejected=%d" % (self.sent, self.rejected)
        self.conn.close()
    def on_start(self, event):
        # Guard the whole test with a timer; cancelled on success.
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.host)
        self.sender = event.container.create_sender(self.conn, self.address)
    def on_sendable(self, event):
        while self.sender.credit > 0 and self.sent < self.count:
            msg = Message(body="Discard Test")
            self.sender.send(msg)
            self.sent += 1
    def on_rejected(self, event):
        self.rejected += 1
        self.conn.close()
        self.timer.cancel()
    def on_link_error(self, event):
        # BUG FIX: the original handler was copy-pasted from DenyLinkTest and
        # referenced self.receiver / receiver_failed / sender_failed, none of
        # which exist on this class, so any link error raised AttributeError.
        # Record the unexpected failure and stop cleanly instead.
        self.error = "Unexpected link error on %s" % self.address
        self.conn.close()
        self.timer.cancel()
    def run(self):
        Container(self).run()
class SourceTest(MessagingHandler):
    """Receive `count` deliveries from an address, handing out credit manually
    (3 up front, then 5 at a time), and verify that exactly `expected_ps` of
    them arrive pre-settled by the sender."""
    def __init__(self, host, address, count, expected_ps):
        super(SourceTest, self).__init__(prefetch=0)
        self.host = host
        self.address = address
        self.expected_ps = expected_ps   # expected number of pre-settled deliveries
        self.conn = None
        self.error = None                # None means success; a string describes the failure
        self.receiver = None
        self.count = count               # total deliveries to receive
        self.n_credit_given = 0          # cumulative credit issued to the peer
        self.n_rcvd = 0                  # deliveries received so far
        self.n_rcvd_ps = 0               # deliveries that arrived already settled
    def timeout(self):
        # Fired by TestTimeout if the expected deliveries never all arrive.
        self.error = "Timeout Expired: n_rcvd=%d" % (self.n_rcvd)
        self.conn.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.host)
        self.receiver = event.container.create_receiver(self.conn, self.address)
        # prefetch=0 disables automatic flow control; grant 3 credits manually.
        self.receiver.flow(3)
        self.n_credit_given = 3
    def on_message(self, event):
        dlv = event.delivery
        if dlv.settled:
            self.n_rcvd_ps += 1
        self.n_rcvd += 1
        if self.n_rcvd == self.count:
            # All deliveries in: finish and check the pre-settled tally.
            self.conn.close()
            self.timer.cancel()
            if self.n_rcvd_ps != self.expected_ps:
                self.error = "Received %d deliveries, %d were settled (expected %d)" %\
                (self.n_rcvd, self.n_rcvd_ps, self.expected_ps)
        elif self.n_rcvd == self.n_credit_given:
            # Credit exhausted; grant the next batch of 5.
            self.receiver.flow(5)
            self.n_credit_given += 5
    def run(self):
        Container(self).run()
class EchoTest(MessagingHandler):
def __init__(self, host, address):
super(EchoTest, self).__init__(prefetch=0)
self.host = host
self.address = address
self.conn = None
self.error = None
self.action = "Connecting to router"
self.receiver = None
self.sender = None
def timeout(self):
self.error = "Timeout Expired while attempting action: %s" % self.action
self.conn.close()
def fail(self, error):
self.error = error
self.conn |
mosbasik/dotatreasures | create_treasures_json.py | Python | mit | 794 | 0.002519 | #!/usr/bin/py | thon
import json
f = file('treasures.json', 'r')
try:
foo = json.load(f)
json_contents = foo
except ValueError:
json_contents = dict()
f.close()
print 'Type \'q\' to [q]uit'
while True:
name = raw_input('Treasure Name: ')
if name == 'q':
break
print 'Type \'n\' to stop entering heroes and go to [n]ext treasure'
set_contents = dict()
hero = ''
while True:
hero = raw_input('Hero name: ')
if hero | == 'n' or hero == 'q':
break
else:
bundle_rating = raw_input('Item set rating [1-3]: ')
set_contents[hero] = bundle_rating
json_contents[name] = set_contents
if hero == 'q':
break
f = open('treasures.json', 'w')
json.dump(json_contents, f, indent=4)
f.close() |
quuxlabs/goodiff-core | includes/GooDiffProvider.py | Python | agpl-3.0 | 307 | 0.006515 | from GooDiffService import GooDiffService
class GooDiffProvider:
    """A named provider that groups a collection of GooDiff services."""
    def __init__(self, name, services=None):
        self.name = name
        # `services or []` avoids the shared-mutable-default pitfall; note it
        # also replaces an explicitly passed empty list with a fresh one.
        self.services = services or []
    def debug(self):
        # Print this provider's name, then delegate to each service's debug().
        print "Provider name:", self.name
        for service in self.services:
            service.debug()
|
DROPCitizenShip/e-cidadania | tests/unit_tests/helpers/test_cache.py | Python | gpl-3.0 | 2,101 | 0.007139 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2012 Cidadania S. Coop. Galega
#
# This file is part of e-cidadania.
#
# e-cidadania is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# e-cidadania is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.
from django.core.cache import cache
from core.spaces.models import Space
from src.helpers import cache as cache_helper
from tests.test_utils import ECDTestCase
class CacheHelperTest(ECDTestCase):
    """Tests the cache helper functions.
    """
    def setUp(self):
        self.init()
    def testGetOrInsertObjectInCache(self):
        """
        Tests the get_or_insert_object_in_helpers.cache.
        """
        space_props = {'url': 'test_space', 'name': 'some_name'}
        #print Space.__class__.__name__
        # The cache must start out empty for this key.
        space_key = cache_helper._get_cache_key_for_model(Space, 'test_space')
        expected = None
        actual = cache.get(space_key)
        self.assertEqual(expected, actual)
        # First call after saving: the helper fetches the object and caches it.
        space = Space(**space_props)
        space.save()
        expected = space
        actual = cache_helper.get_or_insert_object_in_cache(Space,
                                        space.url, url=space.url)
        self.assertEqual(expected, actual)
        # After an explicit eviction the helper must repopulate the cache.
        cache.delete(space_key)
        self.assertEqual(cache.get(space_key), None)
        expected = space
        actual = cache_helper.get_or_insert_object_in_cache(Space,
                                        space.url, url=space.url)
        self.assertEqual(expected, actual)
|
chubbymaggie/cle | cle/backends/blob.py | Python | bsd-2-clause | 2,119 | 0.006607 | from ..backends import Backend
from ..errors import CLEError
import logging
import os
l = logging.getLogger("cle.blob")
__all__ = ('Blob',)
class Blob(Backend):
    """
    Representation of a binary blob, i.e. an executable in an unknown file format.
    """

    supported_filetypes = ['elf', 'pe', 'mach-o', 'unknown']

    def __init__(self, path, custom_arch=None, custom_offset=None, *args, **kwargs):
        """
        :param custom_arch:   (required) an :class:`archinfo.Arch` for the binary blob.
        :param custom_offset: Skip this many bytes from the beginning of the file.
        """
        if custom_arch is None:
            raise CLEError("Must specify custom_arch when loading blob!")
        super(Blob, self).__init__(path, *args,
                                   custom_arch=custom_arch,
                                   custom_offset=custom_offset, **kwargs)
        self.custom_offset = 0 if custom_offset is None else custom_offset
        if self._custom_entry_point is None:
            l.warning("No custom entry point was specified for blob, assuming 0")
            self._custom_entry_point = 0
        self._entry = self._custom_entry_point
        self._max_addr = 0
        self.os = 'unknown'
        self._load(self.custom_offset)

    def get_min_addr(self):
        # A blob is always mapped starting at address 0.
        return 0

    def get_max_addr(self):
        return self._max_addr

    def _load(self, offset, size=None):
        """
        Load a segment into memory.
        """
        self.binary_stream.seek(offset)
        if size is None:
            contents = self.binary_stream.read()
        else:
            contents = self.binary_stream.read(size)
        self.memory.add_backer(0, contents)
        self._max_addr = len(contents)

    def function_name(self, addr):  #pylint: disable=unused-argument,no-self-use
        """
        Blobs don't support function names.
        """
        return None

    def contains_addr(self, addr):
        return addr in self.memory

    def in_which_segment(self, addr):  #pylint: disable=unused-argument,no-self-use
        """
        Blobs don't support segments.
        """
        return None
|
BlaXpirit/steam-notifier | app/cookies.py | Python | gpl-3.0 | 1,650 | 0.006061 | # Copyright (C) 2014-2015 Oleh Prypin <blaxpirit@gmail.com>
#
# This file is part of Steam Notifier.
#
# Steam Notifier is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Steam Notifier is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Steam Notifier. If not, see <http://www.gnu.org/licenses/>.
from qt.network import QNetworkCookieJar, QNetworkCookie
class PersistentCookieJar(QNetworkCookieJar):
    """A cookie jar that restores cookies from a file on startup and writes
    them back after every update."""

    def __init__(self, filename):
        QNetworkCookieJar.__init__(self)
        self.filename = filename
        try:
            with open(self.filename, 'rb') as store:
                raw = store.read()
        except IOError:
            # No saved cookies yet; start with an empty jar.
            return
        # Cookie records are separated by blank lines (b'\n\n').
        restored = []
        for record in raw.split(b'\n\n'):
            restored.extend(QNetworkCookie.parseCookies(record))
        self.setAllCookies(restored)

    def setCookiesFromUrl(self, cookies, url):
        # Persist immediately so cookies survive an abrupt exit.
        result = QNetworkCookieJar.setCookiesFromUrl(self, cookies, url)
        self.save_to_file(self.filename)
        return result

    def save_to_file(self, filename):
        with open(filename, 'wb') as out:
            for cookie in self.allCookies():
                out.write(cookie.toRawForm().data())
                out.write(b'\n\n')
magnusmorton/pycket | pycket/values_struct.py | Python | mit | 45,589 | 0.002479 | import itertools
from pycket import config
from pycket import values
from pycket import vector as values_vector
from pycket.arity import Arity
from pycket.base import SingleResultMixin, UnhashableType
from pycket.cont import continuation, label
from pycket.error import SchemeException
from pycket.prims.expose import default, make_call_method
from pycket.small_list import inline_small_list
from pycket.util import strip_immutable_field_name
from pycket.values_parameter import W_Parameter
from rpython.rlib import jit
from rpython.rlib.objectmodel import import_from_mixin
from rpython.rlib.unroll import unrolling_iterable
PREFAB = values.W_Symbol.make("prefab")
class W_StructInspector(values.W_Object):
    """A struct inspector. Inspectors form a chain through `super`; an
    inspector controls a struct type when it is a strict ancestor of that
    type's inspector (see has_control)."""
    errorname = "struct-inspector"
    _immutable_fields_ = ["super"]
    _attrs_ = ["super"]
    @staticmethod
    def make(inspector, issibling=False):
        # Build a child of `inspector`, or (issibling=True) a sibling, i.e. a
        # child of inspector's own super.
        assert isinstance(inspector, W_StructInspector)
        super = inspector
        if issibling:
            super = inspector.super if inspector is not None else None
        return W_StructInspector(super)
    def __init__(self, super):
        self.super = super
    @jit.elidable
    def has_control(self, struct_type):
        # True when struct_type's inspector is not a real inspector at all,
        # or when self appears strictly above it in the super chain.
        inspector = struct_type.inspector
        if not isinstance(inspector, W_StructInspector):
            return True
        inspector = inspector.super
        while isinstance(inspector, W_StructInspector):
            if inspector is self:
                return True
            inspector = inspector.super
        return False
current_inspector = W_StructInspector(None)
current_inspector_param = W_Parameter(current_inspector)
class W_StructType(values.W_Object):
errorname = "struct-type-descriptor"
_immutable_fields_ = [
"name", "constructor_name", "super",
"init_field_cnt", "auto_field_cnt", "total_field_cnt",
"total_auto_field_cnt", "total_init_field_cnt",
"auto_v", "props", "inspector", "immutables[*]",
"immutable_fields[*]", "guard", "auto_values[*]", "offsets[*]",
"constructor", "predicate", "accessor", "mutator", "prop_procedure",
"constructor_arity", "procedure_source", "isprefab", "isopaque"]
_attrs_ = map(strip_immutable_field_name, _immutable_fields_)
unbound_prefab_types = {}
@staticmethod
def make(name, super_type, init_field_cnt, auto_field_cnt,
auto_v=values.w_false, props=values.w_null,
inspector=values.w_false, proc_spec=values.w_false,
immutables=[], guard=values.w_false,
constr_name=values.w_false, env=None, cont=None):
"""
This method returns five instances:
W_StructType
W_StructConstructor
W_StructPredicate
W_StructAccessor
W_StructMutator
"""
w_struct_type = W_StructType.make_simple(name, super_type,
init_field_cnt, auto_field_cnt, auto_v, props, inspector,
proc_spec, immutables, guard, constr_name)
return w_struct_type.initialize_props(props, proc_spec, env, cont)
@staticmethod
def make_simple(name, super_type, init_field_cnt, auto_field_cnt,
auto_v=values.w_false, props=values.w_null,
inspector=values.w_false, proc_spec=values.w_false,
immutables=[], guard=values.w_false,
constr_name=values.w_false):
"""
This metho | d returns an instance of W_StructType only.
| It does not support properties.
"""
if inspector is PREFAB:
prefab_key = W_PrefabKey.from_raw_params(name, init_field_cnt,\
auto_field_cnt, auto_v, immutables, super_type)
if prefab_key in W_StructType.unbound_prefab_types:
return W_StructType.unbound_prefab_types.pop(prefab_key)
return W_StructType(name, super_type, init_field_cnt, auto_field_cnt,
auto_v, inspector, proc_spec, immutables, guard, constr_name)
@staticmethod
@jit.elidable
def make_prefab(prefab_key):
if prefab_key in W_StructType.unbound_prefab_types:
w_struct_type = W_StructType.unbound_prefab_types[prefab_key]
else:
name, init_field_cnt, auto_field_cnt, auto_v, mutables, super_key =\
prefab_key.make_key_tuple()
super_type = W_StructType.make_prefab(super_key) if super_key else\
values.w_false
immutables = [i for i in range(init_field_cnt) if i not in mutables]
w_struct_type = W_StructType.make_simple(name,
super_type, init_field_cnt, auto_field_cnt, auto_v, values.w_null,
PREFAB, values.w_false, immutables)
W_StructType.unbound_prefab_types[prefab_key] = w_struct_type
return w_struct_type
@continuation
def save_prop_value(self, props, idx, is_checked, env, cont, _vals):
from pycket.interpreter import check_one_val
prop = props[idx][0]
prop_val = check_one_val(_vals)
props[idx] = (prop, prop_val, None)
return self.attach_prop(props, idx, is_checked, env, cont)
@label
def attach_prop(self, props, idx, is_checked, env, cont):
from pycket.interpreter import return_multi_vals
if idx < len(props):
(prop, prop_val, sub_prop) = props[idx]
if sub_prop is not None:
for p in props:
if p[0] is sub_prop:
return prop_val.call([p[1]], env,
self.save_prop_value(props, idx, False, env, cont))
assert isinstance(prop, W_StructProperty)
if not is_checked and prop.guard.iscallable():
return prop.guard.call([prop_val, values.to_list(self.struct_type_info(cont))],
env, self.save_prop_value(props, idx, True, env, cont))
if prop.isinstance(w_prop_procedure):
self.prop_procedure = prop_val
self.props.append((prop, prop_val))
return self.attach_prop(props, idx + 1, False, env, cont)
# at this point all properties are saved, next step is to copy
# propertyes from super types
struct_type = self.super
while isinstance(struct_type, W_StructType):
self.props = self.props + struct_type.props
if not self.prop_procedure and struct_type.prop_procedure:
self.prop_procedure = struct_type.prop_procedure
self.procedure_source = struct_type.procedure_source
struct_type = struct_type.super
struct_tuple = self.make_struct_tuple()
return return_multi_vals(values.Values.make(struct_tuple), env, cont)
@jit.unroll_safe
def initialize_prop(self, props, p, sub_prop=None):
prop = p.car()
prop_val = p.cdr()
if sub_prop is None:
if prop.isinstance(w_prop_procedure):
if self.prop_procedure is not None and\
self.prop_procedure is not prop_val:
raise SchemeException(
"make-struct-type: duplicate property binding\nproperty: %s" %
prop.tostring())
self.prop_procedure = prop_val
self.procedure_source = self
elif prop.isinstance(w_prop_checked_procedure):
if self.total_field_cnt < 2:
raise SchemeException("need at least two fields in the structure type")
props.append((prop, prop_val, sub_prop))
assert isinstance(prop, W_StructProperty)
for super_p in prop.supers:
self.initialize_prop(props, super_p, prop)
@jit.unroll_safe
def initialize_props(self, props, proc_spec, env, cont):
"""
Properties initialization contains few steps:
1. call initialize_prop for each property from the input list,
it extracts all super values and stores them into props array
with a flat structure
2. recursively call attach_prop for each property from props and
prepare the value:
* |
Teknologforeningen/tf_arsfest | tf_arsfest/forms.py | Python | mit | 366 | 0.016393 | from django.forms import ModelForm, RadioSelect
from models import Registration, Guest
class RegistrationForm(ModelForm):
    """Form for a Registration; the excluded fields are assigned server-side."""
    class Meta:
        model = Registration
        exclude = ('guest', 'avec', 'event', 'reference_number', 'sum')
class GuestForm(ModelForm):
    """Form for a Guest; the owning event is assigned by the view, not the user."""
    class Meta:
        model = Guest
        # Fixed: ('event') is just the string 'event', not a tuple -- Django
        # expects Meta.exclude to be a list/tuple of field names.
        exclude = ('event',)
| |
Daerdemandt/Learning-bioinformatics | LEXF/Solution.py | Python | apache-2.0 | 366 | 0.032787 | #!/usr/bin/env python3
def get_strings(alphabet, length):
    """Yield every string of the given length over `alphabet`, with the
    leftmost position varying slowest (alphabet order first)."""
    suffixes = ['']
    for _ in range(length):
        # Prepend each symbol to every shorter string built so far.
        suffixes = [symbol + tail for symbol in alphabet for tail in suffixes]
    for word in suffixes:
        yield word
def main():
    """Write every length-2 string over the alphabet T, A, G, C to Output.txt,
    one string per line, in the order produced by get_strings."""
    alphabet = "T A G C".split()
    length = 2
    with open("Output.txt", "w") as output_file:
        for st in get_strings(alphabet, length):
            print(st, file=output_file)


# Fixed: the bare call executed main() as a side effect of merely importing
# this module; guard the entry point instead.
if __name__ == '__main__':
    main()
|
ostrokach/bioconda-recipes | recipes/pgdspider/PGDSpider2-cli.py | Python | mit | 3,267 | 0.000918 | #!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
# Program Parameters
#
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
jar_file = 'PGDSpider2-cli.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Return the path of the Java interpreter to invoke.

    Prefer $JAVA_HOME/bin/java when it exists and is executable; otherwise
    fall back to plain 'java' and rely on PATH lookup.
    """
    java_home = os.getenv('JAVA_HOME')
    if java_home:
        candidate = os.path.join(java_home, 'bin', 'java')
        if os.access(candidate, os.X_OK):
            return candidate
    return 'java'
def jvm_opts(argv):
    """Construct list of Java arguments based on our argument list.

    The argument list passed in argv must not include the script name.
    The return value is a 4-tuple of the form:
        (memory_options, prop_options, passthrough_options, exec_dir)
    (the previous docstring said 3-tuple; exec_dir is also returned, and is
    None unless a --exec_dir=DIR argument was seen).

    Side effect: if --exec_dir names a directory that does not exist, this
    script's own distribution directory is copied there.
    """
    mem_opts = []
    prop_opts = []
    pass_args = []
    exec_dir = None
    for arg in argv:
        if arg.startswith('-D'):
            # Java system properties pass straight through to the JVM.
            prop_opts.append(arg)
        elif arg.startswith('-XX'):
            # Advanced JVM options are forwarded like properties.
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            # Explicit -Xms/-Xmx settings override the defaults below.
            mem_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            exec_dir = arg.split('=')[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                # First use of this exec_dir: clone the distribution there.
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None)
        else:
            pass_args.append(arg)
    # In the original shell script the test coded below read:
    # if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
    # To reproduce the behaviour of the above shell code fragment
    # it is important to explictly check for equality with None
    # in the second condition, so a null envar value counts as True!
    if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts
    return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
    """Launch the bundled jar with the resolved JVM options and user arguments.

    The jar is looked up next to this script unless --exec_dir redirected the
    distribution elsewhere (useful in multi-user installs, because the tool
    updates files relative to the jar's own location; if exec_dir does not
    exist, jvm_opts copies the jar, lib, and resources there first).
    """
    mem_opts, prop_opts, pass_args, exec_dir = jvm_opts(sys.argv[1:])
    jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])
    # A leading 'eu...' argument names a main class, so run with -cp instead of -jar.
    if pass_args != [] and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'
    jar_path = os.path.join(jar_dir, jar_file)
    command = [java_executable()] + mem_opts + prop_opts + [jar_arg, jar_path] + pass_args
    sys.exit(subprocess.call(command))
if __name__ == '__main__':
main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.