| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
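Each row below pairs a file's repository metadata with a fill-in-the-middle (FIM) split of its contents: concatenating the prefix, middle, and suffix fields reproduces the original source text. A minimal sketch of that reassembly, assuming rows are available as plain dicts keyed by the column names above (the example row is made up for illustration, not an entry from this table):

# Reassemble one FIM row into the original file text.
def reassemble(row):
    return row["prefix"] + row["middle"] + row["suffix"]

# Hypothetical example row (illustrative only).
example_row = {
    "repo_name": "example/repo",
    "path": "pkg/adder.py",
    "language": "Python",
    "license": "mit",
    "prefix": "def add(a, b):\n    ",
    "middle": "return a + b",
    "suffix": "\n",
}
print(reassemble(example_row))  # prints the reconstructed two-line function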
| dplarson/Mocha.jl | docs/conf.py | Python | mit | 8,570 | 0.006301 |
# -*- coding: utf-8 -*-
#
# Mocha documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 13 00:43:40 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath("sphinx"))
import julia
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
'julia'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Mocha'
copyright = u'2014, pluskid'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.2'
# The full version, including alpha/beta/rc tags.
release = '0.1.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
primary_domain = 'jl'
highlight_language = 'julia'
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except:
pass
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mochadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Mocha.tex', u'Mocha Documentation',
u'pluskid', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mocha', u'Mocha Documentation',
[u'pluskid'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Mocha', u'Mocha Documentation',
u'pluskid', 'Mocha', 'One line descrip
| CptDemocracy/Python | MITx-6.00.1x-EDX-Introduction-to-Computer-Science/Week-4/PSET-4/game.py | Python | mit | 2,366 | 0.005917 |
"""
PSET-4
Word Game Part 9: You and your Computer
"""
PROMPT_STR = "Enter n to deal a new hand, r to replay the last hand, or e to end game: "
NO_REPL_AVAIL_STR = "You have not played a hand yet. Please play a new hand first!"
INVALID_CMD = "Invalid command."
CHOOSE_PLAYER_STR = "Enter u to have yourself play, c to have the computer play: "
def playGame(wordList):
"""
Allow the user to play an arbitrary number of hands.
1) Asks the user to input 'n' or 'r' or 'e'.
* If the user inputs 'e', immediately exit the game.
* If the user inputs anything that's not 'n', 'r', or 'e', keep asking them again.
    2) Asks the user to input a 'u' or a 'c'.
* If the user inputs anything that's not 'c' or 'u', keep asking them again.
3) Switch functionality based on the above choices:
* If the user inputted 'n', play a new (random) hand.
* Else, if the user inputted 'r', play the last hand again.
        But if no hand was played, output "You have not played a hand yet.
Please play a new hand first!"
* If the user inputted 'u', let the user play the game
with the selected hand, using playHand.
* If the user inputted 'c', let the computer play the
game with the selected hand, using compPlayHand.
4) After the computer or user has played the hand, repeat from step 1
wordList: list (string)
"""
firstGame = True
lastHand = {}
while True:
userInput = raw_input(PROMPT_STR)
if userInput == 'n':
hand = dealHand(HAND_SIZE)
lastHand = hand.copy()
playHand_AI_and_human(hand, wordList, HAND_SIZE)
elif userInput == 'r':
if len(lastHand) == 0:
print(NO_REPL_AVAIL_STR)
else:
playHand_AI_and_human(lastHand, wordList, HAND_SIZE)
elif userInput == 'e':
break
else:
print(INVALID_CMD)
print
def playHand_AI_and_human(hand, wordList, n):
userInput = ""
while userInput != 'u' and userInput != 'c':
userInput = raw_input(CHOOSE_PLAYER_STR)
if userInput == 'u':
playHand(hand, wordList, n)
elif userInput == 'c':
compPlayHand(hand, wordList, n)
else:
print(INVALID_CMD)
| j5shi/Thruster | pylibs/test/test_peepholer.py | Python | gpl-2.0 | 9,096 | 0.001539 |
import dis
import sys
from cStringIO import StringIO
import unittest
def disassemble(func):
f = StringIO()
tmp = sys.stdout
sys.stdout = f
dis.dis(func)
sys.stdout = tmp
result = f.getvalue()
f.close()
return result
def dis_single(line):
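    # Compile a single statement and return its disassembly as text.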
return disassemble(compile(line, '', 'single'))
class TestTranforms(unittest.TestCase):
def test_unot(self):
# UNARY_NOT POP_JUMP_IF_FALSE --> POP_JUMP_IF_TRUE
def unot(x):
if not x == 2:
del x
asm = disassemble(unot)
for elem in ('UNARY_NOT', 'POP_JUMP_IF_FALSE'):
self.assertNotIn(elem, asm)
self.assertIn('POP_JUMP_IF_TRUE', asm)
def test_elim_inversion_of_is_or_in(self):
for line, elem in (
('not a is b', '(is not)',),
('not a in b', '(not in)',),
('not a is not b', '(is)',),
('not a not in b', '(in)',),
):
asm = dis_single(line)
self.assertIn(elem, asm)
def test_none_as_constant(self):
# LOAD_GLOBAL None --> LOAD_CONST None
def f(x):
None
return x
asm = disassemble(f)
for elem in ('LOAD_GLOBAL',):
self.assertNotIn(elem, asm)
for elem in ('LOAD_CONST', '(None)'):
self.assertIn(elem, asm)
def f():
'Adding a docstring made this test fail in Py2.5.0'
return None
self.assertIn('LOAD_CONST', disassemble(f))
self.assertNotIn('LOAD_GLOBAL', disassemble(f))
def test_while_one(self):
# Skip over: LOAD_CONST trueconst POP_JUMP_IF_FALSE xx
def f():
while 1:
pass
return list
asm = disassemble(f)
for elem in ('LOAD_CONST', 'POP_JUMP_IF_FALSE'):
self.assertNotIn(elem, asm)
for elem in ('JUMP_ABSOLUTE',):
self.assertIn(elem, asm)
def test_pack_unpack(self):
for line, elem in (
('a, = a,', 'LOAD_CONST',),
('a, b = a, b', 'ROT_TWO',),
('a, b, c = a, b, c', 'ROT_THREE',),
):
asm = dis_single(line)
self.assertIn(elem, asm)
self.assertNotIn('BUILD_TUPLE', asm)
self.assertNotIn('UNPACK_TUPLE', asm)
def test_folding_of_tuples_of_constants(self):
for line, elem in (
('a = 1,2,3', '((1, 2, 3))'),
('("a","b","c")', "(('a', 'b', 'c'))"),
('a,b,c = 1,2,3', '((1, 2, 3))'),
('(None, 1, None)', '((None, 1, None))'),
('((1, 2), 3, 4)', '(((1, 2), 3, 4))'),
):
asm = dis_single(line)
self.assertIn(elem, asm)
self.assertNotIn('BUILD_TUPLE', asm)
# Bug 1053819: Tuple of constants misidentified when presented with:
# . . . opcode_with_arg 100 unary_opcode BUILD_TUPLE 1 . . .
# The following would segfault upon compilation
def crater():
(~[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
],)
def test_folding_of_binops_on_constants(self):
for line, elem in (
('a = 2+3+4', '(9)'), # chained fold
('"@"*4', "('@@@@')"), # check string ops
('a="abc" + "def"', "('abcdef')"), # check string ops
('a = 3**4', '(81)'), # binary power
('a = 3*4', '(12)'), # binary multiply
            ('a = 13//4', '(3)'), # binary floor divide
('a = 14%4', '(2)'), # binary modulo
            ('a = 2+3', '(5)'), # binary add
('a = 13-4', '(9)'), # binary subtract
('a = (12,13)[1]', '(13)'), # binary subscr
('a = 13 << 2', '(52)'), # binary lshift
('a = 13 >> 2', '(3)'), # binary rshift
('a = 13 & 7', '(5)'), # binary and
('a = 13 ^ 7', '(10)'), # binary xor
('a = 13 | 7', '(15)'), # binary or
):
asm = dis_single(line)
self.assertIn(elem, asm, asm)
self.assertNotIn('BINARY_', asm)
# Verify that unfoldables are skipped
asm = dis_single('a=2+"b"')
self.assertIn('(2)', asm)
self.assertIn("('b')", asm)
# Verify that large sequences do not result from folding
asm = dis_single('a="x"*1000')
self.assertIn('(1000)', asm)
def test_binary_subscr_on_unicode(self):
# unicode strings don't get optimized
asm = dis_single('u"foo"[0]')
self.assertNotIn("(u'f')", asm)
self.assertIn('BINARY_SUBSCR', asm)
asm = dis_single('u"\u0061\uffff"[1]')
self.assertNotIn("(u'\\uffff')", asm)
self.assertIn('BINARY_SUBSCR', asm)
# out of range
asm = dis_single('u"fuu"[10]')
self.assertIn('BINARY_SUBSCR', asm)
# non-BMP char (see #5057)
asm = dis_single('u"\U00012345"[0]')
self.assertIn('BINARY_SUBSCR', asm)
asm = dis_single('u"\U00012345abcdef"[3]')
self.assertIn('BINARY_SUBSCR', asm)
def test_folding_of_unaryops_on_constants(self):
for line, elem in (
('`1`', "('1')"), # unary convert
('-0.5', '(-0.5)'), # unary negative
('~-2', '(1)'), # unary invert
):
asm = dis_single(line)
self.assertIn(elem, asm, asm)
self.assertNotIn('UNARY_', asm)
# Verify that unfoldables are skipped
for line, elem in (
('-"abc"', "('abc')"), # unary negative
('~"abc"', "('abc')"), # unary invert
):
asm = dis_single(line)
self.assertIn(elem, asm, asm)
self.assertIn('UNARY_', asm)
def test_elim_extra_return(self):
# RETURN LOAD_CONST None RETURN --> RETURN
def f(x):
return x
asm = disassemble(f)
self.assertNotIn('LOAD_CONST', asm)
self.assertNotIn('(None)', asm)
self.assertEqual(asm.split().count('RETURN_VALUE'), 1)
def test_elim_jump_to_return(self):
# JUMP_FORWARD to RETURN --> RETURN
def f(cond, true_value, false_value):
return true_value if cond else false_value
asm = disassemble(f)
self.assertNotIn('JUMP_FORWARD', asm)
self.assertNotIn('JUMP_ABSOLUTE', asm)
self.assertEqual(asm.split().count('RETURN_VALUE'), 2)
def test_elim_jump_after_return1(self):
# Eliminate dead code: jumps immediately after returns can't be reached
def f(cond1, cond2):
if cond1: return 1
if cond2: return 2
while 1:
return 3
while 1:
if cond1: return 4
return 5
return 6
asm = disassemble(f)
self.assertNotIn('JUMP_FORWARD', asm)
self.assertNotIn('JUMP_ABSOLUTE', asm)
self.assertEqual(asm.split().count('RETURN_VALUE'), 6)
def test_elim_jump_after_return2(self):
# Eliminate dead code: jumps immediately after returns can't be reached
def f(cond1, cond2):
while 1:
if cond1: return 4
asm = disassemble(f)
self.
| conejoninja/xbmc-seriesly | servers/nowdownload.py | Python | gpl-3.0 | 2,851 | 0.025614 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# seriesly - XBMC Plugin
# Connector for nowdownload
# http://blog.tvalacarta.info/plugin-xbmc/seriesly/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[nowdownload.py] get_video_url (page_url='%s')" % page_url)
'''
<a href="http://f02.nowdownload.co/dl/91efaa9ec507ef4de023cd62bb9a0fe2/50ab76ac/6711c9c90ebf3_family.guy.s11e02.italian.subbed.hdtv.xvid_gannico.avi" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>
'''
data = scrapertools.cache_page( page_url )
logger.debug("[nowdownload.py] data:" + data)
try:
        url = scrapertools.get_match(data,'<a href="([^"]*)" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>')
except:
#$.get("/api/token.php?token=7e1ab09df2775dbea02
|
506e1a2651883");
token = scrapertools.get_match(data,'(/api/token.php\?token=[^"]*)')
logger.debug("[nowdownload.py] token:" + token)
d= scrapertools.cache_page( "http://www.nowdownload.co"+ token )
url = scrapertools.get_match(data,'expiryText: \'<a class="btn btn-danger" href="([^"]*)')
logger.debug("[nowdownload.py] url_1:" + url)
data = scrapertools.cache_page("http://www.nowdownload.co" + url )
logger.debug("[nowdownload.py] data:" + data)
#<a href="http://f03.nowdownload.co/dl/8ec5470153bb7a2177847ca7e1638389/50ab71b3/f92882f4d33a5_squadra.antimafia_palermo.oggi.4x01.episodio.01.ita.satrip.xvid_upz.avi" class="btn btn-success">Click here to download !</a>
url = scrapertools.get_match(data,'<a href="([^"]*)" class="btn btn-success">Click here to download !</a>')
logger.debug("[nowdownload.py] url_final:" + url)
video_urls = [url]
return video_urls
# Finds videos for this server in the given text
def find_videos(data):
encontrados = set()
devuelve = []
#http://www.nowdownload.co/dl/9gwahc3577hj9
#http://www.nowdownload.eu/dl/srv4g94wk6j7b
patronvideos = '(nowdownload.\w{2}/dl/[a-z0-9]+)'
logger.info("[nowdownload.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[nowdownload]"
url = "http://www."+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'nowdownload' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
| schleichdi2/OPENNFR-6.1-CORE | opennfr-openembedded-core/meta/lib/oeqa/runtime/cases/df.py | Python | gpl-2.0 | 493 | 0.006085 |
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.oeid import OETestID
class DfTest(OERuntimeTestCase):
@OETestID(234)
@OETestDepends(['ssh.SSHTest.test_ssh'])
def test_df(self):
cmd = "df / | sed -n '2p' | awk '{print $4}'"
(status,output) = self.target.run(cmd)
msg = 'Not enough space on image. Current size is %s' % output
self.assertTrue(int(output)>5120, msg=msg)
| thomasem/nova | nova/virt/netutils.py | Python | apache-2.0 | 6,132 | 0.000163 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Network-related utilities for supporting libvirt connection code."""
import os
import jinja2
import netaddr
from oslo_config import cfg
from nova.network import model
from nova import paths
CONF = cfg.CONF
netutils_opts = [
cfg.StrOpt('injected_network_template',
default=paths.basedir_def('nova/virt/interfaces.template'),
help='Template file for injected network'),
]
CONF.register_opts(netutils_opts)
CONF.import_opt('use_ipv6', 'nova.netconf')
def get_net_and_mask(cidr):
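    # Split a CIDR string (e.g. "10.0.0.0/24") into (network address, netmask) strings.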
net = netaddr.IPNetwork(cidr)
    return str(net.ip), str(net.netmask)
def get_net_and_prefixlen(cidr):
net = netaddr.IPNetwork(cidr)
return str(net.ip), str(net._prefixlen)
def get_ip_version(cidr):
net = netaddr.IPNetwork(cidr)
return int(net.version)
def _get_first_network(network, version):
# Using a generator expression with a next() call for the first element
# of a list since we don't want to evaluate the whole list as we can
# have a lot of subnets
try:
return next(i for i in network['subnets']
if i['version'] == version)
except StopIteration:
pass
def get_injected_network_template(network_info, use_ipv6=None, template=None,
libvirt_virt_type=None):
"""Returns a rendered network template for the given network_info.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param use_ipv6: If False, do not return IPv6 template information
even if an IPv6 subnet is present in network_info.
:param template: Path to the interfaces template file.
:param libvirt_virt_type: The Libvirt `virt_type`, will be `None` for
other hypervisors.
"""
if use_ipv6 is None:
use_ipv6 = CONF.use_ipv6
if not template:
template = CONF.injected_network_template
if not (network_info and template):
return
nets = []
ifc_num = -1
ipv6_is_available = False
for vif in network_info:
if not vif['network'] or not vif['network']['subnets']:
continue
network = vif['network']
# NOTE(bnemec): The template only supports a single subnet per
# interface and I'm not sure how/if that can be fixed, so this
# code only takes the first subnet of the appropriate type.
subnet_v4 = _get_first_network(network, 4)
subnet_v6 = _get_first_network(network, 6)
ifc_num += 1
if not network.get_meta('injected'):
continue
hwaddress = vif.get('address')
address = None
netmask = None
gateway = ''
broadcast = None
dns = None
routes = []
if subnet_v4:
if subnet_v4.get_meta('dhcp_server') is not None:
continue
if subnet_v4['ips']:
ip = subnet_v4['ips'][0]
address = ip['address']
netmask = model.get_netmask(ip, subnet_v4)
if subnet_v4['gateway']:
gateway = subnet_v4['gateway']['address']
broadcast = str(subnet_v4.as_netaddr().broadcast)
dns = ' '.join([i['address'] for i in subnet_v4['dns']])
for route_ref in subnet_v4['routes']:
(net, mask) = get_net_and_mask(route_ref['cidr'])
route = {'gateway': str(route_ref['gateway']['address']),
'cidr': str(route_ref['cidr']),
'network': net,
'netmask': mask}
routes.append(route)
address_v6 = None
gateway_v6 = ''
netmask_v6 = None
dns_v6 = None
have_ipv6 = (use_ipv6 and subnet_v6)
if have_ipv6:
if subnet_v6.get_meta('dhcp_server') is not None:
continue
if subnet_v6['ips']:
ipv6_is_available = True
ip_v6 = subnet_v6['ips'][0]
address_v6 = ip_v6['address']
netmask_v6 = model.get_netmask(ip_v6, subnet_v6)
if subnet_v6['gateway']:
gateway_v6 = subnet_v6['gateway']['address']
dns_v6 = ' '.join([i['address'] for i in subnet_v6['dns']])
net_info = {'name': 'eth%d' % ifc_num,
'hwaddress': hwaddress,
'address': address,
'netmask': netmask,
'gateway': gateway,
'broadcast': broadcast,
'dns': dns,
'routes': routes,
'address_v6': address_v6,
'gateway_v6': gateway_v6,
'netmask_v6': netmask_v6,
'dns_v6': dns_v6,
}
nets.append(net_info)
if not nets:
return
tmpl_path, tmpl_file = os.path.split(CONF.injected_network_template)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path),
trim_blocks=True)
template = env.get_template(tmpl_file)
return template.render({'interfaces': nets,
'use_ipv6': ipv6_is_available,
'libvirt_virt_type': libvirt_virt_type})
| fp7-netide/Engine | tests/of13-parallel-priority-3firewalls/firewall_13-3.py | Python | epl-1.0 | 5,948 | 0.003362 |
# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
logger = logging.getLogger('firewall_3')
logger.setLevel(logging.DEBUG)
PROTO_TCP = 6
PROTO_UDP = 17
ETH_ARP = 0x0806
ETH_IP = 0x0800
FW_OUTPORT = 1
FW_INPORT = 2
class Firewall_3(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(Firewall_3, self).__init__(*args, **kwargs)
self.mac_to_port = {}
def ipv4_to_int(self, ip):
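        # Convert a dotted-quad IPv4 string into its 32-bit integer representation.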
o = map(int, ip.split('.'))
res = (16777216 * o[0]) + (65536 * o[1]) + (256 * o[2]) + o[3]
return res
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# install table-miss flow entry
#
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
        # truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match, actions, idle_to=0, hard_to=0, buffer_id=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
                                    instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst, idle_timeout=idle_to, hard_timeout=hard_to)
datapath.send_msg(mod)
def forwardPacket(self, msg, outPort):
# Does not install a rule. Just forwards this packet.
datapath = msg.datapath
in_port = msg.match['in_port']
parser = datapath.ofproto_parser
data = None
if msg.buffer_id is not None:
if msg.buffer_id == datapath.ofproto.OFP_NO_BUFFER:
data = msg.data
po_actions = [parser.OFPActionOutput(outPort)]
pkt_out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port, actions=po_actions, data=data)
datapath.send_msg(pkt_out)
def Configure_stateful_FW(self, msg):
pkt = packet.Packet(msg.data)
datapath = msg.datapath
parser = datapath.ofproto_parser
ofproto = datapath.ofproto
in_port = msg.match['in_port']
eth = pkt.get_protocols(ethernet.ethernet)[0]
hwdst = eth.dst
hwsrc = eth.src
global COUNTER
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
# ignore lldp packet
return
# Forward all arp
if eth.ethertype == ether_types.ETH_TYPE_ARP:
logger.debug("Received ARP packet, forwarding it via Packet out: %s" % repr(pkt))
if in_port == FW_INPORT:
self.forwardPacket(msg, FW_OUTPORT)
if in_port == FW_OUTPORT:
self.forwardPacket(msg, FW_INPORT)
# Forward packets from inside to outside and also install the reverse rule with idle_to=13 sec
elif in_port == FW_INPORT:
logger.debug("Got packet from inside to outside, allowing it (fwd+flow mod): %s" % repr(pkt))
match = parser.OFPMatch(in_port=FW_INPORT, eth_type = ETH_IP, eth_src=hwsrc, eth_dst=hwdst)
actions = [parser.OFPActionOutput(FW_OUTPORT)]
self.add_flow(datapath, 10, match, actions, 13, 0)
match = parser.OFPMatch(in_port=FW_OUTPORT, eth_type = ETH_IP, eth_src=hwdst, eth_dst=hwsrc)
actions = [parser.OFPActionOutput(FW_INPORT)]
self.add_flow(datapath, 10, match, actions, 13, 0)
# forward the packet
self.forwardPacket(msg, FW_OUTPORT)
print(">>> FW: FORWARD PACKET from %s to port 1" % (in_port))
elif in_port == FW_OUTPORT:
logger.debug("Droping packet from in_port %d: %s" % (in_port,repr(pkt)))
match = parser.OFPMatch(in_port=FW_OUTPORT, eth_type = ETH_IP, eth_src=hwsrc, eth_dst=hwdst)
actions = []
self.add_flow(datapath, 5, match, actions, 13, 0)
# PacketIn handler for reactive actions
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
self.Configure_stateful_FW(msg)
| aothms/hdf5_bim_sparql_query | src/query.py | Python | gpl-3.0 | 22,647 | 0.00892 |
from __future__ import print_function
import operator
import functools
import collections
import ifc2x3_schema as schema
import prefixes
import rdflib.plugins.sparql as sparql
from rdflib.namespace import RDF, XSD
from rdflib.term import Variable, Literal, URIRef, BNode
from rdflib.paths import Path, SequencePath, MulPath
class path(object):
unboxes = False
list_accessor = False
list_range = None
attribute = None
INF = float("inf")
def __repr__(self):
s = "ifc:%s" % self.attribute[len(schema.PREFIX):]
if self.list_accessor:
s += "[%d:%d]" % self.list_range
if self.unboxes:
s += "/expr:getValue"
return s
# Forward startswith and slicing to self.attribute for triple sorting
def __getitem__(self, x):
return self.attribute[x]
def startswith(self, s):
return self.attribute.startswith(s)
def __init__(self, path, is_list=False, is_boxed=False):
self.p, self.is_list, self.is_boxed = path, is_list, is_boxed
self.validate()
def validate(self):
p = self.p
# Needs to be a SequencePath of list_attribute/hasNext*/hasContents
# TODO: Option to refer to specific index (e.g Z-coordinate of point)
assert isinstance(p, SequencePath), "Path root should be a sequence"
assert isinstance(p.args[0], URIRef), "Path root first node should be a schema attribute that points to a list"
assert p.args[0].startswith(schema.PREFIX), "Path root first node should be a schema attribute that points to a list"
# TODO: probably should be boxed, just like in ifcowl, as otherwise it is not possible to retrieve the exact type, other than, say, string, which is serialized as part of the hdf5 model.
        assert self.is_list or self.is_boxed, "Path root first node should be a schema attribute that points to a list or boxed attribute"
self.attribute = p.args[0]
components = p.args[1:] # peel off IFC attribute
if self.is_boxed:
assert components[-1] in prefixes.EXPRESS_DATATYPES, "Path root last node should be an express datatype accessor"
components = components[:-1] # peel off unboxing
self.unboxes = True
if self.is_list:
assert components[-1] == prefixes.list.hasContents, "Path root last node should be contents"
components = components[:-1] # peel off list contents
min_elem, max_elem = 0, 0
for pp in components:
if pp == prefixes.list.hasNext:
min_elem += 1
max_elem += 1
elif isinstance(pp, MulPath):
assert pp.path == prefixes.list.hasNext, "Path root second node should be a multiplication of next"
assert pp.mod == '*', "Path root second node should be a multiplication of next"
self.list_accessor = True
# +1 to account for python slice() syntax
self.list_range = (min_elem, max_elem + 1)
class query(object):
IFC_PREFIX = 'http://ifcowl.openbimstandards.org/IFC2X3_TC1#'
JOIN_TYPE_JOIN = 1
JOIN_TYPE_OPTIONAL = 2
JOIN_TYPE_UNION = 3
class statement(object):
def key(self, **kwargs):
return type(self).key(self, **kwargs)
class filter(statement):
query = None
def _make_Builtin_NOTEXISTS(self, expr):
self.query = query(bgp=expr['graph']['part'][0])
self.vars = set(self.query.vars)
def build(self):
if self.query:
q = self.query
is_triple = lambda st: isinstance(st, query.triple)
def list_immediate_variables(stmts):
# i.e. variables not included in the FILTER we are currently processing
for st in stmts:
if not is_triple(st): continue
for x in st:
if x.__class__ == Variable:
yield x
self.vars = self.vars & set(list_immediate_variables(self.parent_query.statements))
for st in self.parent_query.statements:
if is_triple(st):
vars = set(list_immediate_variables([st]))
if vars <= self.vars:
q.statements.append(st)
q.merge_paths()
q.infer()
q.sort()
q.proj = self.vars
self.s = "filter not exists on {%s}" % ",".join(self.vars)
def init(self, population):
if self.query:
context = population.query(self.query)
# context.print_statistics()
results = set(context.project())
vars = [context.vars.index(str(v)) for v in context.proj]
def filter(tup):
x = tuple(tup[n] for n in vars)
return x not in results
self.fn = filter
def _make_Builtin_REGEX(self, expr):
import re
assert map(type, (expr.text, expr.flags, expr.pattern)) == [Variable, Literal, Literal]
flags = 0
for f in expr.flags:
flags |= {
'i': re.IGNORECASE
}[f]
pat = re.compile(expr.pattern, flags=flags)
k = expr.text
self.fn = lambda t: pat.search(getattr(t, k)) is not None
self.s = "F(?%s) -> {0,1} { ?%s %% /%s/%s }" % (expr.text, expr.text, expr.pattern, expr.flags)
self.vars = {k}
def _make_RelationalExpression(self, expr):
opfn = getattr(operator, {
'>' : 'gt',
'<' : 'lt',
'>=': 'ge',
'<=': 'le',
'=' : 'eq'
}[expr.op])
args = (expr.expr, expr.other)
is_var = [isinstance(x, Variable) for x in args]
s0, s1 = map(str, args)
a0, a1 = map(lambda v: v.value if isinstance(v, Literal) else None, args)
self.vars = set(x for x in args if isinstance(x, Variable))
varnames_c = ",".join("?%s" % v for v in self.vars)
if all(is_var):
self.fn = lambda t: opfn(getattr(t, s0), getattr(t, s1))
self.s = "F(%s) -> {0,1} { ?%s %s ?%s }" % (varnames_c, s0, expr.op, s1)
elif is_var[0]:
                self.fn = lambda t: opfn(getattr(t, s0), a1)
                self.s = "F(%s) -> {0,1} { ?%s %s %s }" % (varnames_c, s0, expr.op, a1)
elif is_var[1]:
self.fn = lambda t: opfn(a0, getattr(t, s1))
self.s = "F(%s) -> {0,1} { %s %s ?%s }" % (varnames_c, a0, expr.op, s1)
else: raise Exception("Not supported")
def __init__(self, q, expr):
self.parent_query = q
getattr(self, "_make_%s" % expr.name)(expr)
def key(self, known_variables, variable_ref_count):
inf = float("inf")
return (
(-inf if self.vars <= known_variables else +inf),
)
def __repr__(self):
return self.s
def __call__(self, t):
return self.fn(t)
class triple(statement):
def __init__(self, q, spo):
self.q, self.spo = q, spo
self.vars = set(x for x in self.spo if isinstance(x, Variable))
def key(self, known_variables, variable_ref_count):
# Adapted from rdflib
is_unknown_variable = lambda v: v not in known_variables and isinstance(v, (Variable, BNode))
f
| jespino/django-rest-framework-apidoc | rest_framework_apidoc/apidoc.py | Python | bsd-3-clause | 2,342 | 0.002989 |
from django.conf import settings
from rest_framework.settings import import_from_string
from .mixins import FileContentMixin, DocStringContentMixin, MarkupProcessMixin, NoProcessMixin, SafeProcessMixin
APIDOC_DEFAULT_DOCUMENTER_CLASSES = getattr(
settings,
'APIDOC_DEFAULT_DOCUMENTER_CLASSES',
    ['rest_framework_apidoc.apidoc.MDDocStringsDocumenter']
)
def get_view_description(view_cls, html=False, request=None):
documenters = []
    if hasattr(view_cls, 'documenter_classes'):
for cls in view_cls.documenter_classes:
documenters.append(cls())
else:
for cls in APIDOC_DEFAULT_DOCUMENTER_CLASSES:
documenter_class = import_from_string(cls, "APIDOC_DEFAULT_DOCUMENTER_CLASS")
documenters.append(documenter_class())
for documenter in documenters:
description = documenter.get_description(view_cls, html, request)
if description:
return description
return ""
class Documenter(object):
def get_description(self, view_cls, html=True, request=None):
if html:
return self.process(self.get_content(view_cls, html, request))
return self.get_content(view_cls, html, request=None)
class RSTFilesDocumenter(Documenter, FileContentMixin, MarkupProcessMixin):
extension = ".rst"
markup = "restructuredtext"
class RSTDocStringsDocumenter(Documenter, DocStringContentMixin, MarkupProcessMixin):
markup = "restructuredtext"
class MDFilesDocumenter(Documenter, FileContentMixin, MarkupProcessMixin):
extension = ".md"
markup = "markdown"
class MDDocStringsDocumenter(Documenter, DocStringContentMixin, MarkupProcessMixin):
markup = "markdown"
class TextileFilesDocumenter(Documenter, FileContentMixin, MarkupProcessMixin):
extension = ".textile"
markup = "textile"
class TextileDocStringsDocumenter(Documenter, DocStringContentMixin, MarkupProcessMixin):
markup = "textile"
class TxtFilesDocumenter(Documenter, FileContentMixin, NoProcessMixin):
extension = ".txt"
class TxtDocStringsDocumenter(Documenter, DocStringContentMixin, NoProcessMixin):
pass
class HtmlFilesDocumenter(Documenter, FileContentMixin, SafeProcessMixin):
extension = ".html"
class HtmlDocStringsDocumenter(Documenter, DocStringContentMixin, SafeProcessMixin):
pass
| dmach/dnf | dnf/logging.py | Python | gpl-2.0 | 7,178 | 0.002786 |
# logging.py
# DNF Logging Subsystem.
#
# Copyright (C) 2013-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import dnf.exceptions
import dnf.const
import dnf.util
import libdnf.repo
import logging
import os
import sys
import time
import warnings
# :api loggers are: 'dnf', 'dnf.plugin', 'dnf.rpm'
SUPERCRITICAL = 100 # do not use this for logging
CRITICAL = logging.CRITICAL
ERROR = logging.ERROR
WARNING = logging.WARNING
INFO = logging.INFO
DEBUG = logging.DEBUG
DDEBUG = 8
SUBDEBUG = 6
TRACE = 4
def only_once(func):
"""Method decorator turning the method into noop on second or later calls."""
def noop(*_args, **_kwargs):
pass
def swan_song(self, *args, **kwargs):
func(self, *args, **kwargs)
setattr(self, func.__name__, noop)
return swan_song
class _MaxLevelFilter(object):
def __init__(self, max_level):
self.max_level = max_level
def filter(self, record):
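        # Reject records at or above max_level (used to keep warnings and errors off the stdout handler).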
if record.levelno >= self.max_level:
return 0
return 1
_VERBOSE_VAL_MAPPING = {
0 : SUPERCRITICAL,
1 : logging.INFO,
2 : logging.INFO, # the default
3 : logging.DEBUG,
4 : logging.DEBUG,
5 : logging.DEBUG,
6 : logging.DEBUG, # verbose value
}
def _cfg_verbose_val2level(cfg_errval):
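    # Map the configured debuglevel (0-10) onto a logging level; values above 6 fall back to DDEBUG.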
assert 0 <= cfg_errval <= 10
return _VERBOSE_VAL_MAPPING.get(cfg_errval, DDEBUG)
# Both the DNF default and the verbose default are WARNING. Note that ERROR has
# no specific level.
_ERR_VAL_MAPPING = {
0: SUPERCRITICAL,
1: logging.CRITICAL,
2: logging.ERROR
}
def _cfg_err_val2level(cfg_errval):
assert 0 <= cfg_errval <= 10
return _ERR_VAL_MAPPING.get(cfg_errval, logging.WARNING)
def _create_filehandler(logfile):
if not os.path.exists(logfile):
dnf.util.ensure_dir(os.path.dirname(logfile))
dnf.util.touch(logfile)
# By default, make logfiles readable by the user (so the reporting ABRT
# user can attach root logfiles).
os.chmod(logfile, 0o644)
handler = logging.FileHandler(logfile)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s",
"%Y-%m-%dT%H:%M:%SZ")
formatter.converter = time.gmtime
handler.setFormatter(formatter)
return handler
def _paint_mark(logger):
logger.log(INFO, dnf.const.LOG_MARKER)
class Logging(object):
def __init__(self):
self.stdout_handler = self.stderr_handler = None
@only_once
def _presetup(self):
logging.addLevelName(DDEBUG, "DDEBUG")
logging.addLevelName(SUBDEBUG, "SUBDEBUG")
logging.addLevelName(TRACE, "TRACE")
logger_dnf = logging.getLogger("dnf")
logger_dnf.setLevel(TRACE)
# setup stdout
stdout = logging.StreamHandler(sys.stdout)
stdout.setLevel(INFO)
stdout.addFilter(_MaxLevelFilter(logging.WARNING))
logger_dnf.addHandler(stdout)
self.stdout_handler = stdout
# setup stderr
stderr = logging.StreamHandler(sys.stderr)
stderr.setLevel(WARNING)
logger_dnf.addHandler(stderr)
self.stderr_handler = stderr
@only_once
def _setup(self, verbose_level, error_level, logdir):
self._presetup()
logger_dnf = logging.getLogger("dnf")
# setup file logger
logfile = os.path.join(logdir, dnf.const.LOG)
handler = _create_filehandler(logfile)
logger_dnf.addHandler(handler)
# temporarily turn off stdout/stderr handlers:
self.stdout_handler.setLevel(SUPERCRITICAL)
self.stderr_handler.setLevel(SUPERCRITICAL)
# put the marker in the file now:
_paint_mark(logger_dnf)
# setup Python warnings
logging.captureWarnings(True)
logger_warnings = logging.getLogger("py.warnings")
logger_warnings.addHandler(self.stderr_handler)
logger_warnings.addHandler(handler)
lr_logfile = os.path.join(logdir, dnf.const.LOG_LIBREPO)
libdnf.repo.LibrepoLog.addHandler(lr_logfile, verbose_level <= DEBUG)
# setup RPM callbacks logger
logger_rpm = logging.getLogger("dnf.rpm")
logger_rpm.propagate = False
logger_rpm.setLevel(SUBDEBUG)
logfile = os.path.join(logdir, dnf.const.LOG_RPM)
handler = _create_filehandler(logfile)
logger_rpm.addHandler(self.stdout_handler)
logger_rpm.addHandler(self.stderr_handler)
logger_rpm.addHandler(handler)
_paint_mark(logger_rpm)
        # bring std handlers to the preferred level
self.stdout_handler.setLevel(verbose_level)
self.stderr_handler.setLevel(error_level)
logging.raiseExceptions = False
def _setup_from_dnf_conf(self, conf):
verbose_level_r = _cfg_verbose_val2level(conf.debuglevel)
error_level_r = _cfg_err_val2level(conf.errorlevel)
logdir = conf.logdir
        return self._setup(verbose_level_r, error_level_r, logdir)
class Timer(object):
def __init__(self, what):
self.what = what
self.start = time.time()
def __call__(self):
diff = time.time() - self.start
msg = 'timer: %s: %d ms' % (self.what, diff * 1000)
logging.getLogger("dnf").log(DDEBUG, msg)
_LIBDNF_TO_DNF_LOGLEVEL_MAPPING = {
libdnf.utils.Logger.Level_CRITICAL: CRITICAL,
libdnf.utils.Logger.Level_ERROR: ERROR,
libdnf.utils.Logger.Level_WARNING: WARNING,
libdnf.utils.Logger.Level_NOTICE: INFO,
libdnf.utils.Logger.Level_INFO: INFO,
libdnf.utils.Logger.Level_DEBUG: DEBUG,
libdnf.utils.Logger.Level_TRACE: TRACE
}
class LibdnfLoggerCB(libdnf.utils.Logger):
def __init__(self):
super(LibdnfLoggerCB, self).__init__()
self._logger = logging.getLogger("dnf")
def write(self, source, *args):
"""Log message.
source -- integer, defines origin (libdnf, librepo, ...) of message, 0 - unknown
"""
if len(args) == 2:
level, message = args
elif len(args) == 4:
time, pid, level, message = args
self._logger.log(_LIBDNF_TO_DNF_LOGLEVEL_MAPPING[level], message)
libdnfLoggerCB = LibdnfLoggerCB()
libdnf.utils.Log.setLogger(libdnfLoggerCB)
| epronk/pyfit2 | examples/chat/User.py | Python | gpl-2.0 | 75 | 0 |
class User(object):
    def __init__(self, name):
        self.name = name
| openwns/wrowser | openwns/wrowser/playgroundPlugins/SimulationCampaign/simcontrol.py | Python | gpl-2.0 | 23,950 | 0.013946 |
#! /usr/bin/env python
###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2007
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 16, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: info@openwns.org
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import pwd
import sys
import shutil
import subprocess
import optparse
import re
import datetime
import time
import openwns.wrowser.Configuration as conf
import openwns.wrowser.simdb.Database as db
import openwns.wrowser.simdb.Parameters as params
import openwns.wrowser.simdb.ProbeDB
import openwns.wrowser.Tools
config = conf.Configuration()
config.read('.campaign.conf')
db.Database.connectConf(config)
def getWrowserDir():
for cand in sys.path:
if os.path.isdir(os.path.join(cand, 'openwns', 'wrowser')):
return cand
return None
def __getFilteredScenarioIds(cursor, stateSpecial = None):
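    # Collect the ids of this campaign's scenarios, optionally narrowed by the parsed state and expression options.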
query = 'SELECT id FROM scenarios WHERE campaign_id = %d' % config.campaignId
if(options.state is not None):
if('state' in options.state):
            query += (' AND ( %s )' % options.state)
else:
query += (' AND state = \'%s\'' % options.state)
if(stateSpecial is not None):
query += (' AND %s' % stateSpecial)
cursor.execute(query)
scenarioIds = [ entry[0] for entry in cursor.fetchall() ]
if(options.expression is not None):
scenarioIds = openwns.wrowser.Tools.objectFilter(options.expression, scenarioIds, viewGetter=__parametersDict)
scenarioIds.sort()
return scenarioIds
def createDatabase(arg = 'unused'):
subprocess.call(['python ./campaignConfiguration.py'], shell = True)
print 'Database entries successfully created.'
def createScenarios(arg = 'unused'):
cursor = db.Database.getCursor()
scenarioIds = __getFilteredScenarioIds(cursor)
cursor.connection.commit()
wdir = getWrowserDir()
if wdir is None:
print "ERROR: Cannot find Wrowser directory! Exiting..."
return
for scenario in scenarioIds:
simId = str(scenario)
simPath = os.path.abspath(os.path.join(os.getcwd(), simId))
if os.path.exists(simPath):
if options.forceOverwrite:
shutil.rmtree(simPath)
else:
print "Skipping %s, it already exists (consider --force switch)" % simPath
continue
os.mkdir(simPath)
os.symlink(os.path.join('..', '..', 'sandbox', options.flavor, 'bin', 'openwns'), os.path.join(simPath, 'openwns'))
if options.flavor == 'opt':
os.symlink(os.path.join('..', '..', 'sandbox', 'dbg', 'bin', 'openwns'), os.path.join(simPath, 'openwns-dbg'))
os.symlink(os.path.join(wdir, 'openwns', 'wrowser', 'simdb', 'SimConfig.py'),
os.path.join(simPath, 'SimConfig.py'))
os.symlink(os.path.join('..', '.campaign.conf'), os.path.join(simPath, '.campaign.conf'))
for f in os.listdir(os.getcwd()):
if f.endswith('.py') or f.endswith('.probes') or f.endswith('.ini'):
if not f == 'simcontrol.py' and not f == 'campaignConfiguration.py' and not f == 'ProbeDB.py':
os.symlink(os.path.join('..', f), os.path.join(simPath, f))
if not os.path.exists(os.path.join(os.getcwd(), 'ProbeDB.py')):
os.symlink(os.path.join(wdir, 'openwns', 'wrowser', 'simdb', 'ProbeDB.py'),
os.path.join(os.getcwd(), 'ProbeDB.py'))
print 'Scenarios successfully created.'
def removeDatabase(arg = 'unused'):
db.Database.truncateCampaign(config.campaignId)
print 'Campaign results successfully removed from database.'
def removeScenarios(arg = 'unused'):
cursor = db.Database.getCursor()
scenarioIds = __getFilteredScenarioIds(cursor)
cursor.connection.commit()
for scenarioId in scenarioIds:
simPath = os.path.abspath(os.path.join(os.getcwd(), str(scenarioId)))
if os.path.exists(simPath):
shutil.rmtree(simPath)
print 'Scenarios successfully removed.'
def __submitJob(scenarioId):
cursor = db.Database.getCursor()
cursor.execute('SELECT state FROM scenarios WHERE id = %d AND campaign_id = %d' % (scenarioId, config.campaignId))
state = cursor.fetchone()[0]
if state == 'Queued':
print >>sys.stderr, 'ERROR: Job is already in queue'
elif state == 'Running':
print >>sys.stderr, 'ERROR: Job is currently running'
simId = str(scenarioId)
simPath = os.path.abspath(os.path.join(os.getcwd(), simId))
if simPath.startswith('/local'):
raise Exception('\n\nYour current dir starts with "/local/...". You must chdir to /net/<hostname>/.... Otherwise your simulations will fail.\n')
print 'Submitting job with scenario id ' + simId
command = os.path.abspath(os.path.join('..', 'sim.py')) + ' -p ' + os.path.abspath(os.getcwd()) + ' -i ' + simId
if options.skipNullTrials == True:
command += ' -n'
process = subprocess.Popen(['qsub -q %s -N job%s -l s_cpu=%i:%i:00 -l h_cpu=%i:%i:00 -o %s -e %s -m a -M %s@comnets.rwth-aachen.de %s' % (options.queue,
simId,
options.cpuTime,
options.cpuMinutes,
options.cpuTime,
options.cpuMinutes + 15,
os.path.join(simPath, 'stdout'),
os.path.join(simPath, 'stderr'),
pwd.getpwuid(os.getuid())[0],
command)],
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
shell = True)
status = process.wait()
if not status == 0:
print >>sys.stderr, 'ERROR: qsub failed!'
print >>sys.stderr, process.stdout.read()
sys.exit(1)
state = 'Queued'
startDate = None
stopDate = None
hostname = None
try:
jobId = int(process.stdout.read().split()[2])
except:
print >>sys.stderr, 'ERROR: Could not get job id. Output of qsub has probably changed'
sys.exit(1)
cursor.execute('UPDATE scenarios
| envycontent/msd.rdfexport | msd/rdfexport/researcher/helper/webpresence.py | Python | gpl-2.0 | 1,575 | 0.013333 |
#!/usr/bin/env python
"""
For the moment we're not using this, but it would be nice to do so
at some point. It isn't really relevant to the researcher RDF export; the
idea would be to export details of a web-presence, to compile a complete directory
of websites in the Medical Sciences Division.
"""
def addWebPresenceElement(rdf, deptElement, deptName):
"""
<foaf:Organization rdf:about="http://www.psy.ox.ac.uk/">
<foaf:homepage>
<res:WebPresence>
<dc:title>Department of Experimental Psychology Website</dc:title>
<dc:creator>
<foaf:Person>
<foaf:mbox>anne.bowtell@medsci.ox.ac.uk</foaf:mbox>
</foaf:Person>
</dc:creator>
</res:WebPresence>
</foaf:homepage>
"""
#TODO - this will be real data in the future
standardWebmaster = 'webmaster@medsci.ox.ac.uk'
homepageProp = rdf.createElement('foaf:homepage')
deptElement.appendChild(homepageProp)
wpElem = rdf.createElement('res:WebPresence')
homepageProp.appendChild(wpElem)
title = rdf.createElement('dc:title')
title.appendChild(rdf.createTextNode(deptName + ' Website'))
wpElem.appendChild(title)
creator = rdf.createElement('dc:creator')
wpElem.appendChild(creator)
person = rdf.createElement('foaf:Person')
creator.appendChild(person)
mbox = rdf.createElement('foaf:mbox')
person.appendChild(mbox)
mbox.appendChild(rdf.createTextNode(standardWebmaster))
| leonth/elude | elude/proxy.py | Python | mit | 3,517 | 0.004834 |
import logging
import re
import asyncio
from functools import partial
import aiohttp
from pandas.io.html import read_html
from elude import config
logger = logging.getLogger(__name__)
class Proxy(object):
test_semaphore = None
def __init__(self, ip=None, port=None, country=None, source=None):
self.ip = ip
self.port = port
self.country = country
self.source = source
self._connector = None
if Proxy.test_semaphore is None:
Proxy.test_semaphore = asyncio.Semaphore(config.PROXY_TEST_MAX_CONCURRENT_CONN)
@property
def id(self):
return '%s:%s' % (self.ip, self.port)
def get_connector(self):
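        # Lazily create and cache an aiohttp connector that routes requests through this proxy.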
if self._connector is None:
self._connector = aiohttp.ProxyConnector(proxy='http://%s:%s' % (self.ip, self.port))
return self._connector
class ProxyGatherer(object):
    def __init__(self):
self.new_proxy_callbacks = []
@asyncio.coroutine
def start_getting_proxies(self):
return asyncio.async(asyncio.gather(self._grab_proxies_from_checkerproxy(), self._grab_proxies_from_letushide()))
@asyncio.coroutine
def _grab_proxies_from_checkerproxy(self):
dfs = yield from _request_and_read_html('http://checkerproxy.net/all_proxy')
df = dfs[0][['ip:port', 'country', 'proxy type', 'proxy status']]
        df_filtered = df[(df['proxy type'] == 'HTTP') & (df['proxy status'].str.contains('Elite proxy'))].drop_duplicates(subset=['ip:port'])
i = 0
for _, row in df_filtered.iterrows():
m = re.match(r'([0-9.]+):([0-9]+)', row['ip:port'])
if m:
ip, port = m.group(1), m.group(2)
self.register_proxy(Proxy(ip.strip(), port.strip(), row['country'], 'checkerproxy.net'))
i += 1
logger.info('checkerproxy: gathered %d proxies out of %d parsed' % (i, len(df)))
@asyncio.coroutine
def _grab_proxies_from_letushide(self):
last_page_indicator = '' # keep track of port:ip of the first proxy. if it is the same as that of the page before, we must be at the last page.
for page_num in range(1, 21): # try until max of 20 pages
dfs = yield from _request_and_read_html('http://letushide.com/filter/http,hap,all/%d/list_of_free_HTTP_High_Anonymity_proxy_servers' % page_num)
#logger.info(dfs[1])
df = dfs[1]
page_indicator = '%s:%s' % (df.loc[0, 'host'], df.loc[0, 'port'])
if last_page_indicator == page_indicator:
logger.debug('letushide terminates at page %d' % (page_num-1))
break
last_page_indicator = page_indicator
logger.info('letushide: gathered %d proxies coming from page %d' % (len(df), page_num))
for _, row in df.iterrows():
self.register_proxy(Proxy(row['host'], row['port'], None, 'letushide.com'))
#logger.debug('Finished inserting candidate proxies for letushide')
def register_proxy(self, proxy):
for cb in self.new_proxy_callbacks:
if asyncio.iscoroutine(cb) or asyncio.iscoroutinefunction(cb):
asyncio.async(cb(proxy))
else:
cb(proxy)
@asyncio.coroutine
def _request_and_read_html(url):
# TODO cache this call
r = yield from aiohttp.request('get', url)
text = yield from r.text()
dfs = yield from asyncio.get_event_loop().run_in_executor(None, partial(read_html, text))
return dfs
| dls-controls/pymalcolm | tests/test_modules/test_scanning/test_attributeprerunpart.py | Python | apache-2.0 | 3,395 | 0.001473 |
import unittest
from mock import Mock, call
from malcolm.modules.scanning.hooks import (
AbortHook,
PauseHook,
PostRunReadyHook,
PreRunHook,
)
from malcolm.modules.scanning.parts import AttributePreRunPart
class TestAttributePreRunPartConstructor(unittest.TestCase):
def setUp(self):
self.name = "ShutterPart"
self.mri = "ML-SHUTTER-01"
self.pre_run_value = "Open"
self.reset_value = "Closed"
def test_attributes_are_initialised_with_defaults(self):
part = AttributePreRunPart(
self.name, self.mri, self.pre_run_value, self.reset_value
)
self.assertEqual(self.name, part.name)
self.assertEqual(self.mri, part.mri)
self.assertEqual(self.pre_run_value, part.pre_run_value)
self.assertEqual(self.reset_value, part.reset_value)
class TestAttributePreRunPartSetupHooks(unittest.TestCase):
def setUp(self):
self.name = "ShutterPart"
self.mri = "ML-SHUTTER-01"
self.pre_run_value = "Open"
self.reset_value = "Closed"
def test_setup_sets_correct_hooks(self):
part = AttributePreRunPart(
self.name, self.mri, self.pre_run_value, self.reset_value
)
registrar_mock = Mock()
part.setup(registrar_mock)
# Check calls
calls = [
call(PreRunHook, part.on_pre_run),
call((PauseHook, AbortHook, PostRunReadyHook), part.on_reset),
]
registrar_mock.hook.assert_has_calls(calls)
class TestAttributePreRunPartWithDefaultNamePutMethods(unittest.TestCase):
def setUp(self):
# Create our part
name = "ShutterPart"
mri = "ML-SHUTTER-01"
self.pre_run_value = "Open"
self.reset_value = "Closed"
self.part = AttributePreRunPart(name, mri, self.pre_run_value, self.reset_value)
# Generate our mocks
self.part.setup(Mock())
self.context = Mock(name="context")
self.child = Mock(name="child")
self.context.block_view.return_value = self.child
def test_puts_pre_run_value_to_child_on_pre_run(self):
self.part.on_pre_run(self.context)
self.child.shutter.put_value.assert_called_once_with(self.pre_run_value)
def test_puts_reset_value_to_child_on_reset(self):
self.part.on_reset(self.context)
self.child.shutter.put_value.assert_called_once_with(self.reset_value)
class TestAttributePreRunPartWithNonDefaultNamePutMethods(unittest.TestCase):
def setUp(self):
# Create our part
name = "AttributePart"
mri = "ML-ATTR-01"
self.pre_run_value = "Set"
self.reset_value = "Reset"
self.part = AttributePreRunPart(
name, mri, self.pre_run_value, self.reset_value, attribute_name="togglePart"
)
# Generate our mocks
self.part.setup(Mock())
self.context = Mock(name="context")
self.child = Mock(name="child")
self.context.block_view.return_value = self.child
def test_puts_pre_run_value_to_child_on_pre_run(self):
self.part.on_pre_run(self.context)
self.child.togglePart.put_value.assert_called_once_with(self.pre_run_value)
def test_puts_reset_value_to_child_on_reset(self):
self.part.on_reset(self.context)
self.child.togglePart.put_value.assert_called_once_with(self.reset_value)
|
jmesmon/layman
|
layman/__init__.py
|
Python
|
gpl-2.0
| 1,585
| 0.005047
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Layman is a complete library for the operation and maintainance
on all gentoo repositories and overlays
"""
import sys
try:
from layman.api import LaymanAPI
from layman.config import BareConfig
from layman.output import Message
except ImportError:
sys.stderr.write("!!! Layman API imports failed.")
raise
class Layman(LaymanAPI):
"""A complete high level interface capable of performing all
overlay repository actions."""
def __init__(self, stdout=sys.stdout, stdin=sys.stdin, stderr=sys.stderr,
                 config=None, read_configfile=True, quiet=False, quietness=4,
                 verbose=False, nocolor=False, width=0, root=None
                 ):
"""Input parameters are optional to override the defaults.
sets up our LaymanAPI with defaults or passed in values
and returns an instance of it"""
self.message = Message(out=stdout, err=stderr)
self.config = BareConfig(
output=self.message,
stdout=stdout,
stdin=stdin,
stderr=stderr,
config=config,
read_configfile=read_configfile,
quiet=quiet,
quietness=quietness,
verbose=verbose,
nocolor=nocolor,
width=width,
root=root
)
LaymanAPI.__init__(self, self.config,
report_errors=True,
output=self.config['output']
)
return
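# A minimal usage sketch (illustrative only, assuming a working layman configuration on
# the host and that LaymanAPI exposes get_installed()): construct the high-level
# interface with defaults and query the overlays installed locally.
def _layman_example():
    layman_inst = Layman(quiet=True)
    return layman_inst.get_installed()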
|
Jokiva/Computational-Physics
|
lecture 15/1a_init.py
|
Python
|
gpl-3.0
| 927
| 0.005394
|
from particle import RandomParticle
import numpy as np
import matplotlib.pyplot as plt
from time import time
t0 = time()
num_of_paths = (50, 500, 5000)
max_num_of_collisions = 1000
particle = RandomParticle(1)
rms = np.zeros((3, max_num_of_collisions))
for n in range(3):
path = num_of_paths[n]
data = np.zeros((path, max_num_of_collisions))
for i in range(path):
particle.reset()
for j in range(max_num_of_collisions):
data[i, j] = particle.displacement
particle._RandomParticle__next()
rms[n] = np.sqrt((data * data / path).sum(0))
print(time() -t0, 's elapsed')
# save the results
np.savetxt('rms_1a.txt', rms)
"""
x = np.linspace(0, 1000)
y = np.sqrt(x)
plt.figure()
for n in range(3):
l = str(num_of_paths[n]) + ' paths'
plt.scatter(list(range(1000)), rms[n], marker='+', label=l)
plt.plot(x, y)
plt.legend()
plt.grid()
plt.show()
"""
|
csdms/packagebuilder
|
setup.py
|
Python
|
mit
| 1,488
| 0.004032
|
from ez_setup import use_setuptools # https://pypi.python.org/pypi/setuptools
use_setuptools()
from setuptools import setup, find_packages
from packager import __version__
# Get the long description from the README file.
def get_long_description():
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
try:
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
except:
return []
else:
return long_description
setup(
name='packagebuilder',
version=__version__,
description='Tools for building rpm and deb packages for CSDMS software',
long_description=get_long_description(),
url='https://github.com/csdms/packagebuilder',
author='Mark Piper',
author_email='mark.piper@colorado.edu',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
],
keywords='CSDMS, earth system modeling, packaging, Linux, rpm, deb',
packages=find_packages(exclude=['*test']),
install_requires=['nose'],
package_data={
'packager': ['repositories.txt'],
},
entry_points={
'console_scripts': [
'build_rpm=packager.rpm.build:main',
],
},
)
|
dbbhattacharya/kitsune
|
vendor/packages/sqlalchemy/lib/sqlalchemy/orm/util.py
|
Python
|
bsd-3-clause
| 23,666
| 0.003085
|
# mapper/util.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer
# mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sqlalchemy.exceptions as sa_exc
from sqlalchemy import sql, util
from sqlalchemy.sql import expression, util as sql_util, operators
from sqlalchemy.orm.interfaces import MapperExtension, EXT_CONTINUE,\
PropComparator, MapperProperty,\
AttributeExtension
from sqlalchemy.orm import attributes, exc
mapperlib = None
all_cascades = frozenset(("delete", "delete-orphan", "all", "merge",
"expunge", "save-update", "refresh-expire",
"none"))
_INSTRUMENTOR = ('mapper', 'instrumentor')
class CascadeOptions(object):
"""Keeps track of the options sent to relationship().cascade"""
def __init__(self, arg=""):
if not arg:
values = set()
else:
values = set(c.strip() for c in arg.split(','))
self.delete_orphan = "delete-orphan" in values
self.delete = "delete" in values or "all" in values
self.save_update = "save-update" in values or "all" in values
self.merge = "merge" in values or "all" in values
self.expunge = "expunge" in values or "all" in values
self.refresh_expire = "refresh-expire" in values or "all" in values
if self.delete_orphan and not self.delete:
util.warn("The 'delete-orphan' cascade option requires "
"'delete'. This will raise an error in 0.6.")
for x in values:
if x not in all_cascades:
raise sa_exc.ArgumentError("Invalid cascade option '%s'" % x)
def __contains__(self, item):
return getattr(self, item.replace("-", "_"), False)
def __repr__(self):
return "CascadeOptions(%s)" % repr(",".join(
[x for x in ['delete', 'save_update', 'merge', 'expunge',
'delete_orphan', 'refresh-expire']
if getattr(self, x, False) is True]))
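# Small illustrative sketch (not part of the original module): how CascadeOptions
# parses the cascade string passed to relationship() and supports membership tests.
def _cascade_options_example():
    opts = CascadeOptions("save-update, merge, delete")
    assert opts.delete and opts.save_update and opts.merge
    assert "delete" in opts and "refresh-expire" not in opts
    return opts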
class Validator(AttributeExtension):
"""Runs a validation method on an attribute value to be set or appended.
The Validator class is used by the :func:`~sqlalchemy.orm.validates`
decorator, and direct access is usually not needed.
"""
def __init__(self, key, validator):
"""Construct a new Validator.
key - name of the attribute to be validated;
will be passed as the second argument to
the validation method (the first is the object instance itself).
validator - an function or instance method which accepts
three arguments; an instance (usually just 'self' for a method),
the key name of the attribute, and the value. The function should
return the same value given, unless it wishes to modify it.
"""
self.key = key
self.validator = validator
def append(self, state, value, initiator):
return self.validator(state.obj(), self.key, value)
def set(self, state, value, oldvalue, initiator):
return self.validator(state.obj(), self.key, value)
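# Illustrative validator with the signature described in the docstring above: it
# receives the instance, the attribute key and the incoming value, and must return
# the (possibly modified) value. Purely a sketch; not part of the original module.
def _email_validator(instance, key, value):
    if "@" not in value:
        raise ValueError("%r is not a valid %s" % (value, key))
    return value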
def polymorphic_union(table_map, typecolname, aliasname='p_union'):
"""Create a ``UNION`` statement used by a polymorphic mapper.
See :ref:`concrete_inheritance` for an example of how
this is used.
"""
colnames = set()
colnamemaps = {}
types = {}
for key in table_map.keys():
table = table_map[key]
        # mysql doesn't like selecting from a select;
# make it an alias of the select
if isinstance(table, sql.Select):
table = table.alias()
table_map[key] = table
m = {}
for c in table.c:
colnames.add(c.key)
m[c.key] = c
types[c.key] = c.type
colnamemaps[table] = m
def col(name, table):
try:
return colnamemaps[table][name]
except KeyError:
return sql.cast(sql.null(), types[name]).label(name)
result = []
for type, table in table_map.iteritems():
if typecolname is not None:
result.append(
sql.select([col(name, table) for name in colnames] +
[sql.literal_column(sql_util._quote_ddl_expr(type)).
label(typecolname)],
from_obj=[table]))
else:
result.append(sql.select([col(name, table) for name in colnames],
from_obj=[table]))
return sql.union_all(*result).alias(aliasname)
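# Hypothetical usage sketch: given two concrete tables (not defined in this module),
# build the UNION selectable with a 'type' discriminator column, as described above.
def _polymorphic_union_example(managers_table, engineers_table):
    return polymorphic_union(
        {'manager': managers_table, 'engineer': engineers_table},
        'type', aliasname='pjoin')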
def identity_key(*args, **kwargs):
"""Get an identity key.
Valid call signatures:
* ``identity_key(class, ident)``
class
mapped class (must be a positional argument)
ident
primary key, if the key is composite this is a tuple
* ``identity_key(instance=instance)``
instance
object instance (must be given as a keyword arg)
* ``identity_key(class, row=row)``
class
mapped class (must be a positional argument)
row
result proxy row (must be given as a keyword arg)
"""
if args:
if len(args) == 1:
class_ = args[0]
try:
row = kwargs.pop("row")
except KeyError:
ident = kwargs.pop("ident")
elif len(args) == 2:
class_, ident = args
elif len(args) == 3:
class_, ident = args
else:
raise sa_exc.ArgumentError("expected up to three "
"positional arguments, got %s" % len(args))
if kwargs:
raise sa_exc.ArgumentError("unknown keyword arguments: %s"
% ", ".join(kwargs.keys()))
mapper = class_mapper(class_)
if "ident" in locals():
return mapper.identity_key_from_primary_key(ident)
return mapper.identity_key_from_row(row)
instance = kwargs.pop("instance")
if kwargs:
raise sa_exc.ArgumentError("unknown keyword arguments: %s"
% ", ".join(kwargs.keys()))
mapper = object_mapper(instance)
return mapper.identity_key_from_instance(instance)
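# Illustrative calls matching the three signatures documented above; User is a
# hypothetical mapped class and some_user/some_row are placeholders.
def _identity_key_examples(User, some_user, some_row):
    from_primary_key = identity_key(User, 7)
    from_instance = identity_key(instance=some_user)
    from_row = identity_key(User, row=some_row)
    return from_primary_key, from_instance, from_row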
class ExtensionCarrier(dict):
"""Fronts an ordered collection of MapperExtension objects.
Bundles multiple MapperExtensions into a unified callable unit,
encapsulating ordering, looping and EXT_CONTINUE logic. The
ExtensionCarrier implements the MapperExtension interface, e.g.::
carrier.after_insert(...args...)
The dictionary interface provides containment for implemented
method names mapped to a callable which executes that method
for participating extensions.
"""
interface = set(method for method in dir(MapperExtension)
if not method.startswith('_'))
    def __init__(self, extensions=None):
self._extensions = []
for ext in extensions or ():
self.append(ext)
def copy(self):
return ExtensionCarrier(self._extensions)
def push(self, extension):
"""Insert a MapperExtension at the beginning of the collection."""
self._register(extension)
self._extensions.insert(0, extension)
def append(self, extension):
"""Append a MapperExtension at the end of the collection."""
self._register(extension)
self._extensions.append(extension)
def __iter__(self):
"""Iterate over MapperExtensions in the collection."""
return iter(self._extensions)
def _register(self, extension):
"""Register callable fronts for overridden interface methods."""
for method in self.interface.difference(self):
impl = getattr(extension, method, None)
if impl and impl is not getattr(MapperExtension, method):
self[method] = self._create_do(method)
def _create_do(self, method):
"""Return a closure that loops over impls of the named method."""
def _do(*args, **kwargs):
for e
|
carquois/blobon
|
blobon/punns/migrations/0028_auto__add_unique_cat_slug__add_field_punn_publish_on_facebook.py
|
Python
|
mit
| 8,119
| 0.008006
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'Cat', fields ['slug']
db.create_unique('punns_cat', ['slug'])
# Adding field 'Punn.publish_on_facebook'
db.add_column('punns_punn', 'publish_on_facebook',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Removing unique constraint on 'Cat', fields ['slug']
db.delete_unique('punns_cat', ['slug'])
# Deleting field 'Punn.publish_on_facebook'
db.delete_column('punns_punn', 'publish_on_facebook')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'punns.cat': {
'Meta': {'object_name': 'Cat'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'})
},
'punns.favorite': {
'Meta': {'object_name': 'Favorite'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'punn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['punns.Punn']"})
},
'punns.punn': {
'Meta': {'object_name': 'Punn'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'cat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['punns.Cat']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_video': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'original_punn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['punns.Punn']", 'null': 'True', 'blank': 'True'}),
'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'publish_on_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['punns.Tags']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'punns.reblog': {
'Meta': {'object_name': 'Reblog'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'origin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['punns.Punn']"})
},
'punns.tags': {
'Meta': {'object_name': 'Tags'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '140'})
}
}
complete_apps = ['punns']
|
watsonyanghx/CS231n
|
assignment3/cs231n/classifiers/rnn.py
|
Python
|
mit
| 13,249
| 0.008378
|
import numpy as np
from cs231n.layers import *
from cs231n.rnn_layers import *
class CaptioningRNN(object):
"""
A CaptioningRNN produces captions from image features using a recurrent
neural network.
The RNN receives input vectors of size D, has a vocab size of V, works on
sequences of length T, has an RNN hidden dimension of H, uses word vectors
of dimension W, and operates on minibatches of size N.
Note that we don't use any regularization for the CaptioningRNN.
"""
def __init__(self, word_to_idx, input_dim=512, wordvec_dim=128,
hidden_dim=128, cell_type='rnn', dtype=np.float32):
"""
Construct a new CaptioningRNN instance.
Inputs:
- word_to_idx: A dictionary giving the vocabulary. It contains V entries,
and maps each string to a unique integer in the range [0, V).
- input_dim: Dimension D of input image feature vectors.
- wordvec_dim: Dimension W of word vectors.
- hidden_dim: Dimension H for the hidden state of the RNN.
- cell_type: What type of RNN to use; either 'rnn' or 'lstm'.
- dtype: numpy datatype to use; use float32 for training and float64 for
numeric gradient checking.
"""
if cell_type not in {'rnn', 'lstm'}:
raise ValueError('Invalid cell_type "%s"' % cell_type)
self.cell_type = cell_type
self.dtype = dtype
self.word_to_idx = word_to_idx
self.idx_to_word = {i: w for w, i in word_to_idx.iteritems()}
self.params = {}
vocab_size = len(word_to_idx)
self._null = word_to_idx['<NULL>']
self._start = word_to_idx.get('<START>', None)
self._end = word_to_idx.get('<END>', None)
# Initialize word vectors
self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)
self.params['W_embed'] /= 100
# Initialize CNN -> hidden state projection parameters
self.params['W_proj'] = np.random.randn(input_dim, hidden_dim)
self.params['W_proj'] /= np.sqrt(input_dim)
self.params['b_proj'] = np.zeros(hidden_dim)
# Initialize parameters for the RNN
dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]
self.params['Wx'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)
self.params['Wx'] /= np.sqrt(wordvec_dim)
self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
self.params['Wh'] /= np.sqrt(hidden_dim)
self.params['b'] = np.zeros(dim_mul * hidden_dim)
# Initialize output to vocab weights
self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)
self.params['W_vocab'] /= np.sqrt(hidden_dim)
self.params['b_vocab'] = np.zeros(vocab_size)
# Cast parameters to correct dtype
for k, v in self.params.iteritems():
self.params[k] = v.astype(self.dtype)
def loss(self, features, captions):
"""
Compute training-time loss for the RNN. We input image features and
ground-truth captions for those images, and use an RNN (or LSTM) to compute
loss and gradients on all parameters.
Inputs:
- features: Input image features, of shape (N, D)
- captions: Ground-truth captions; an integer array of shape (N, T) where
each element is in the range 0 <= y[i, t] < V
Returns a tuple of:
- loss: Scalar loss
- grads: Dictionary of gradients parallel to self.params
"""
# Cut captions into two pieces: captions_in has everything but the last word
# and will be input to the RNN; captions_out has everything but the first
# word and this is what we will expect the RNN to generate. These are offset
# by one relative to each other because the RNN should produce word (t+1)
# after receiving word t. The first element of captions_in will be the START
# token, and the first element of captions_out will be the first word.
captions_in = captions[:, :-1]
captions_out = captions[:, 1:]
# You'll need this
mask = (captions_out != self._null)
# Weight and bias for the affine transform from image features to initial
# hidden state
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
# Word embedding matrix
W_embed = self.params['W_embed']
# Input-to-hidden, hidden-to-hidden, and biases for the RNN
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
# Weight and bias for the hidden-to-vocab transformation.
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
loss, grads = 0.0, {}
############################################################################
# TODO: Implement the forward and backward passes for the CaptioningRNN. #
# In the forward pass you will need to do the following: #
# (1) Use an affine transformation to compute the initial hidden state #
# from the image features. This should produce an array of shape (N, H)#
# (2) Use a word embedding layer to transform the words in captions_in #
# from indices to vectors, giving an array of shape (N, T, W). #
# (3) Use either a vanilla RNN or LSTM (depending on self.cell_type) to #
# process the sequence of input word vectors and produce hidden state #
# vectors for all timesteps, producing an array of shape (N, T, H). #
# (4) Use a (temporal) affine transformation to compute scores over the #
    #     vocabulary at every timestep using the hidden states, giving an      #
# array of shape (N, T, V). #
# (5) Use (temporal) softmax to compute loss using captions_out, ignoring #
# the points where the output word is <NULL> using the mask above. #
# #
# In the backward pass you will need to compute the gradient of the loss #
# with respect to all model parameters. Use the loss and grads variables #
# defined above to store loss and gradients; grads[k] should give the #
# gradients for self.params[k]. #
############################################################################
    # First we must do an affine transformation to change from image features to the initial hidden state.
    # features has shape (N, D); we transform it into shape (N, H) with a standard affine layer.
affine_out, affine_cache = affine_forward(features, W_proj, b_proj) # (N, H)
    # Second, we need to change captions_in (not captions_out; check the softmax function) into actual embeddings.
# captions_in is in the shape (num_samples, number of captions_in words for the given sample), or (N, M)
# We should change it into:
# (num samples, number of captions_in words for the given sample, dimensions we should represent the words in)
embedding_out, embedding_cache = word_embedding_forward(captions_in, W_embed) # (N, T, W)
# Third, if we're using LSTM/Vanilla, we need to specify which step_forward we need.
# Given the correct one, we just plug in our prev_h, which for the first step is affine_out, as well as words
# Since the same h and rnn_cache will be used for both LSTM and vanilla, I used the same variables for both.
if self.cell_type == 'rnn':
h, rnn_cache = rnn_forward(embedding_out, affine_out, Wx, Wh, b)
elif self.cell_type == 'lstm':
h, rnn_cache = lstm_forward(embedding_out, affine_out, Wx, Wh, b)
# Fourth, now that we have the hidden states, let's find the temporal_affine_forward outputs.
# These are interpreted as our predicted outputs.
t_affine_out, t_affine_cache = temporal_affine_forward(h, W_vocab, b_vocab)
# Lastly, we need to compute the softmax loss given the predicted outputs and the output captions.
loss, dx = temporal_softmax_loss(t_affine_out, captions_out, mask, verbose=False)
###################### Backward Pass ######################
# Perform backward pass on Step 4.
dx, grads['W_vocab'], grads['b_vocab'] = temporal_affine_backward(dx, t_affine_cache)
# Perform backward pass on Step 3. Conditional - rnn would have a different gradient than ls
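# A small usage sketch (not part of the original assignment file): build a tiny
# CaptioningRNN and evaluate loss() on random data. The vocabulary and dimensions
# are made up; the cs231n layer functions imported at the top must be importable.
def _captioning_rnn_smoke_test():
    word_to_idx = {'<NULL>': 0, '<START>': 1, '<END>': 2, 'a': 3, 'cat': 4}
    model = CaptioningRNN(word_to_idx, input_dim=10, wordvec_dim=8,
                          hidden_dim=12, cell_type='rnn', dtype=np.float64)
    features = np.random.randn(2, 10)           # N=2 images with D=10 features
    captions = np.random.randint(0, 5, (2, 6))  # N=2 captions of length T=6
    return model.loss(features, captions)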
|
alirizakeles/zato
|
code/zato-server/src/zato/server/service/internal/pubsub/__init__.py
|
Python
|
gpl-3.0
| 12,954
| 0.004477
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from httplib import FORBIDDEN, INTERNAL_SERVER_ERROR, OK
from json import dumps, loads
from logging import getLogger
from traceback import format_exc
# gevent
from gevent import sleep, spawn
# huTools
from huTools.structured import dict2xml
# Zato
from zato.common import DATA_FORMAT, PUB_SUB, ZATO_ERROR, ZATO_NONE, ZATO_OK
from zato.common.pubsub import ItemFull, PermissionDenied
from zato.common.util import get_basic_auth_credentials
from zato.server.connection.http_soap import BadRequest, Forbidden, TooManyRequests, Unauthorized
from zato.server.service import AsIs, Bool, Int, Service
from zato.server.service.internal import AdminService
logger_overflown = getLogger('zato_pubsub_overflown')
# ################################################################################################################################
class DeleteExpired(AdminService):
""" Invoked when a server is starting - periodically spawns a greenlet deleting expired messages.
"""
def _delete_expired(self):
self.logger.debug('Deleted expired messages %s', self.pubsub.impl.delete_expired())
def handle(self):
interval = float(self.server.fs_server_config.pubsub.delete_expired_interval)
while True:
self.logger.debug('Deleting expired messages, interval %rs', interval)
spawn(self._delete_expired)
sleep(interval)
# ################################################################################################################################
class InvokeCallbacks(AdminService):
""" Invoked when a server is starting - periodically spawns a greenlet invoking consumer URL callbacks.
"""
def _reject(self, msg_ids, sub_key, consumer, reason):
self.pubsub.reject(sub_key, msg_ids)
self.logger.error('Could not deliver messages `%s`, sub_key `%s` to `%s`, reason `%s`', msg_ids, sub_key, consumer, reason)
def _invoke_callbacks(self):
callback_consumers = list(self.pubsub.impl.get_callback_consumers())
self.logger.debug('Callback consumers found `%s`', callback_consumers)
for consumer in callback_consumers:
with self.lock(consumer.sub_key):
msg_ids = []
out = {
'status': ZATO_OK,
'results_count': 0,
'results': []
}
messages = self.pubsub.get(consumer.sub_key, get_format=PUB_SUB.GET_FORMAT.JSON.id)
for msg in messages:
msg_ids.append(msg['metadata']['msg_id'])
out['results_count'] += 1
out['results'].append(msg)
# messages is a generator so we still don't know if we had anything.
if msg_ids:
outconn = self.outgoing.plain_http[consumer.callback_name]
if outconn.config['data_format'] == DATA_FORMAT.XML:
out = dict2xml(out)
content_type = 'application/xml'
else:
out = dumps(out)
content_type = 'application/json'
try:
response = outconn.conn.post(self.cid, data=out, headers={'content-type': content_type})
except Exception, e:
self._reject(msg_ids, consumer.sub_key, consumer, format_exc(e))
else:
if response.status_code == OK:
self.pubsub.acknowledge(consumer.sub_key, msg_ids)
else:
self._reject(
msg_ids, consumer.sub_key, consumer, '`{}` `{}`'.format(response.status_code, response.text))
def handle(self):
        # TODO: self.logger's name should be 'zato_pubsub' so it gets logged to the same
        # location as the rest of pub/sub.
interval = float(self.server.fs_server_config.pubsub.invoke_callbacks_interval)
while True:
self.logger.debug('Invoking pub/sub callbacks, interval %rs', interval)
spawn(self._invoke_callbacks)
sleep(interval)
# ################################################################################################################################
class MoveToTargetQueues(AdminService):
""" Invoked when a server is starting - periodically spawns a greenlet moving published messages to recipient queues.
"""
def _move_to_target_queues(self):
overflown = []
for item in self.pubsub.impl.move_to_target_queues():
for result, target_queue, msg_id in item:
if result == PUB_SUB.MOVE_RESULT.OVERFLOW:
self.logger.warn('Message overflow, queue:`%s`, msg_id:`%s`', target_queue, msg_id)
overflown.append((target_queue[target_queue.rfind(':')+1:], msg_id))
if overflown:
self.invoke_async(StoreOverflownMessages.get_name(), overflown, to_json_string=True)
self.logger.debug('Messages moved to target queues')
def handle(self):
interval = float(self.server.fs_server_config.pubsub.move_to_target_queues_interval)
while True:
self.logger.debug('Moving messages to target queues, interval %rs', interval)
spawn(self._move_to_target_queues)
sleep(interval)
# ################################################################################################################################
class StoreOverflownMessages(AdminService):
""" Stores on filesystem messages that were above a consumer's max backlog and marks them as rejected by the consumer.
"""
def handle(self):
acks = {}
for sub_key, msg_id in loads(self.request.payload):
logger_overflown.warn('%s - %s - %s', msg_id, self.pubsub.get_consumer_by_sub_key(sub_key).name,
self.pubsub.get_message(msg_id))
msg_ids = acks.setdefault(sub_key, [])
msg_ids.append(msg_id)
for consumer_sub_key, msg_ids in acks.iteritems():
            self.pubsub.acknowledge(consumer_sub_key, msg_ids)
# ################################################################################################################################
class RESTHandler(Service):
""" Handles calls to pub/sub from REST clients.
"""
class SimpleIO(object):
input_required = ('item_type', 'item')
input_optional = ('max', 'dir', 'format', 'mime_type', Int('priority'), Int('expiration'), AsIs('msg_id'),
Bool('ack'), Bool('reject'))
default = ZATO_NONE
use_channel_params_only = True
# ################################################################################################################################
def _raise_unauthorized(self):
raise Unauthorized(self.cid, 'You are not authorized to access this resource', 'Zato pub/sub')
def validate_input(self):
username, password = get_basic_auth_credentials(self.wsgi_environ.get('HTTP_AUTHORIZATION'))
if not username:
self._raise_unauthorized()
for item in self.server.worker_store.request_dispatcher.url_data.basic_auth_config.values():
if item.config.username == username and item.config.password == password:
client = item
break
else:
self._raise_unauthorized()
if self.request.input.item_type not in PUB_SUB.URL_ITEM_TYPE:
raise BadRequest(self.cid, 'None of the supported resources `{}` found in URL path'.format(
', '.join(PUB_SUB.URL_ITEM_TYPE)))
sub_key = self.wsgi_environ.get('HTTP_X_ZATO_PUBSUB_KEY', ZATO_NONE)
is_consumer = self.request.input.item_type == PUB_SUB.URL_ITEM_TYPE.MESSAGES.id
# Deletes don't access topics, they operate on messages
|
Canpio/Paddle
|
python/paddle/fluid/tests/unittests/test_smooth_l1_loss_op.py
|
Python
|
apache-2.0
| 3,483
| 0
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
def smooth_l1_loss_forward(val, sigma2):
abs_val = abs(val)
if abs_val < 1.0 / sigma2:
return 0.5 * val * val * sigma2
else:
return abs_val - 0.5 / sigma2
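# Quick numeric check of the two branches above (illustrative only): with sigma = 3,
# sigma2 = 9, the quadratic branch applies for |x| < 1/9 and the linear branch beyond.
def _smooth_l1_branch_examples():
    sigma2 = 9.0
    quadratic = smooth_l1_loss_forward(0.1, sigma2)  # 0.5 * 0.1 * 0.1 * 9 = 0.045
    linear = smooth_l1_loss_forward(1.0, sigma2)     # 1.0 - 0.5 / 9 ~= 0.944
    return quadratic, linear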
class TestSmoothL1LossOp1(OpTest):
def setUp(self):
self.op_type = "smooth_l1_loss"
dims = (5, 10)
self.inputs = {
'X': np.random.random(dims).astype("float32"),
'Y': np.random.random(dims).astype("float32")
}
sigma = 3.0
self.attrs = {'sigma': sigma}
sigma2 = sigma * sigma
diff = self.inputs['X'] - self.inputs['Y']
loss = np.vectorize(smooth_l1_loss_forward)(diff, sigma2).sum(1)
loss = loss.reshape((dims[0], 1))
self.outputs = {
'Diff': diff.astype('float32'),
'Out': loss.astype('float32')
}
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.02)
def test_check_grad_ingore_x(self):
self.check_grad(
['Y'], 'Out', max_relative_error=0.03, no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
self.check_grad(
['X'], 'Out', max_relative_error=0.03, no_grad_set=set('Y'))
class TestSmoothL1LossOp2(OpTest):
def setUp(self):
self.op_type = "smooth_l1_loss"
dims = (5, 10)
self.inputs = {
            'X': np.random.random(dims).astype("float32"),
            'Y': np.random.random(dims).astype("float32"),
            'InsideWeight': np.random.random(dims).astype("float32"),
            'OutsideWeight': np.random.random(dims).astype("float32")
'OutsideWeight': np.random.random(dims).astype("float32")
}
sigma = 3.0
self.attrs = {'sigma': sigma}
sigma2 = sigma * sigma
diff = self.inputs['X'] - self.inputs['Y']
diff = diff * self.inputs['InsideWeight']
loss = np.vectorize(smooth_l1_loss_forward)(diff, sigma2)
loss = loss * self.inputs['OutsideWeight']
loss = loss.sum(1).reshape((dims[0], 1))
self.outputs = {
'Diff': diff.astype('float32'),
'Out': loss.astype('float32')
}
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.03)
def test_check_grad_ingore_x(self):
self.check_grad(
['Y'],
'Out',
max_relative_error=0.03,
no_grad_set=set(['X', 'InsideWeight', 'OutsideWeight']))
def test_check_grad_ingore_y(self):
self.check_grad(
['X'],
'Out',
max_relative_error=0.03,
no_grad_set=set(['Y', 'InsideWeight', 'OutsideWeight']))
if __name__ == '__main__':
unittest.main()
|
lqez/korean
|
koreantests.py
|
Python
|
bsd-3-clause
| 20,731
| 0.000232
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, with_statement
import contextlib
import sys
import textwrap
from pytest import deprecated_call, raises
from korean import *
@contextlib.contextmanager
def disable_imports(*names):
"""Stolen from Attest."""
import __builtin__
import_ = __builtin__.__import__
def __import__(name, *args, **kwargs):
if name in names:
raise ImportError('%r is disabled' % name)
return import_(name, *args, **kwargs)
__builtin__.__import__ = __import__
try:
yield
finally:
__builtin__.__import__ = import_
class TestParticle(object):
def test_allomorph(self):
# case clitics
assert Particle('가') is Particle('이')
assert Particle('를') is Particle('을')
assert Particle('로') is Particle('으로')
assert Particle('와') is Particle('과')
assert Particle('랑') is Particle('이랑')
        # informational clitics
assert Particle('는') is Particle('은')
assert Particle('나') is Particle('이나')
def test_naive(self):
assert Particle('을').naive() == \
            ('를(을)', '을(를)', '(를)을', '(을)를')
assert Particle('로').naive() == ('(으)로',)
def test_pick_allomorph_with_noun(self):
pick_allomorph = morphology.pick_allomorph
P, N = Particle, Noun
assert pick_allomorph(P('가'), suffix_of=N('받침')) == '이'
assert pick_allomorph(P('가'), suffix_of=N('나비')) == '가'
assert pick_allomorph(P('로'), suffix_of=N('마을')) == '로'
assert pick_allomorph(P('로'), suffix_of=N('파이썬')) == '으로'
assert pick_allomorph(P('다'), suffix_of=N('파이썬')) == '이다'
assert pick_allomorph(P('일랑'), suffix_of=N('게임')) == '일랑'
assert pick_allomorph(P('일랑'), suffix_of=N('서버')) == 'ㄹ랑'
def test_pick_allomorph_with_number_word(self):
pick_allomorph = morphology.pick_allomorph
P, Nw = Particle, NumberWord
assert pick_allomorph(P('가'), suffix_of=Nw(1)) == '이'
assert pick_allomorph(P('가'), suffix_of=Nw(2)) == '가'
assert pick_allomorph(P('일랑'), suffix_of=Nw(3)) == '일랑'
#assert pick_allomorph(P('일랑'), suffix_of=Nw(4)) == '일랑'
def test_pick_allomorph_with_loanword(self):
pick_allomorph = morphology.pick_allomorph
P, Lw = Particle, Loanword
assert pick_allomorph(P('가'), suffix_of=Lw('Emil', 'ron')) == '이'
def test_merge_with_noun(self):
merge = morphology.merge
P, N = Particle, Noun
assert merge(N('게임'), P('일랑')) == '게임일랑'
assert merge(N('서버'), P('일랑')) == '서벌랑'
class TestNoun(object):
def test_read(self):
assert Noun('주인공').read() == '주인공'
assert Noun('컴퓨터').read() == '컴퓨터'
assert Noun('한국어').read() == '한국어'
def test_read_with_number(self):
assert Noun('레벨 4').read() == '레벨 사'
assert Noun('레벨 50').read() == '레벨 오십'
assert Noun('64렙').read() == '육십사렙'
def test_null_format(self):
assert '{0}'.format(Noun('소년')) == '소년'
def test_unicode_format(self):
assert '{0:6}'.format(Noun('소년')) == '소년 '
assert '{0:^6}'.format(Noun('소녀')) == ' 소녀 '
assert '{0:>6}'.format(Noun('한국어')) == ' 한국어'
def test_particle_format(self):
assert '{0:는}'.format(Noun('소년')) == '소년은'
assert '{0:는}'.format(Noun('소녀')) == '소녀는'
assert '{0:을}'.format(Noun('한국어')) == '한국어를'
assert '{0:이}'.format(Noun('레벨 2')) == '레벨 2가'
def test_undefined_particle_format(self):
assert '{0:에게}'.format(Noun('소년')) == '소년에게'
def test_guessable_particle_format(self):
assert '{0:로서}'.format(Noun('학생')) == '학생으로서'
assert '{0:로써}'.format(Noun('컴퓨터')) == '컴퓨터로써'
assert '{0:로써}'.format(Noun('칼')) == '칼로써'
assert '{0:로써}'.format(Noun('음식')) == '음식으로써'
assert '{0:랑은}'.format(Noun('녀석')) == '녀석이랑은'
def test_combination_format(self):
with raises(ValueError):
'{0:을:를}'.format(Noun('한국어'))
assert '{0:는:5}'.format(Noun('소년')) == '소년은 '
assert '{0:는:^5}'.format(Noun('소녀')) == ' 소녀는 '
assert '{0:을:>5}'.format(Noun('한국어')) == ' 한국어를'
class TestNumberWord(object):
def test_read(self):
assert NumberWord(5).read() == '오'
assert NumberWord(32).read() == '삼십이'
assert NumberWord(42).read() == '사십이'
assert NumberWord(152400).read() == '십오만이천사백'
assert NumberWord(600000109).read() == '육억백구'
assert NumberWord(72009852).read() == '칠천이백만구천팔백오십이'
assert NumberWord(-8).read() == '마이너스팔'
def test_read_phases(self):
assert NumberWord.read_phases(32) == ('삼십이',)
assert NumberWord.read_phases(42) == ('사십이',)
assert NumberWord.read_phases(152400) == ('십오만', '이천사백')
assert NumberWord.read_phases(600000109) == ('육억', '', '백구')
assert NumberWord.read_phases(-8) == ('마이너스', '팔')
def test_null_format(self):
assert '{0}'.format(NumberWord(12)) == '12'
def test_number_format(self):
assert '{0:.1f}'.format(NumberWord(4)) == '4.0'
assert '{0:4d}'.format(NumberWord(4)) == ' 4'
def test_particle_format(self):
assert '레벨 {0:이}'.format(NumberWord(4)) == '레벨 4가'
assert '레벨 {0:이}'.format(NumberWord(3)) == '레벨 3이'
assert '레벨 {0:이}'.format(NumberWord(15)) == '레벨 15가'
def test_combination_format(self):
with raises(ValueError):
'{0:을:를}'.format(NumberWord(19891212))
if sys.version_info > (2, 7):
# Python 2.6 doesn't support PEP 378
assert '{0:,:을}'.format(NumberWord(19891212)) == '19,891,212를'
class TestLoanword(object):
def test_need_hangulize(self):
with disable_imports('hangulize'):
with raises(ImportError):
Loanword('štěstí', 'ces')
def test_read(self):
assert Loanword('italia', 'ita').read() == '이탈리아'
assert Loanword('gloria', 'ita').read() == '글로리아'
assert Loanword('Αλεξάνδρεια', 'ell').read() == '알렉산드리아'
def test_null_format(self):
assert '{0}'.format(Loanword('Вадзім Махнеў', 'bel')) == \
'Вадзім Махнеў'
def test_particle_format(self):
assert '{0:으로} 여행 가자'.format(Loanword('Italia', 'ita')) == \
'Italia로 여행 가자'
van_gogh = Loanword('Vincent Willem van Gogh', 'nld')
assert '이 작품은 {0:이} 그렸다.'.format(van_gogh) == \
'이 작품은 Vincent Willem van Gogh가 그렸다.'
class TestLocalization(object):
def test_template(self):
assert l10n.Template('{0:로}').format(123) == '123으로'
if sys.version_info < (3,):
assert l10n.Template('{0:로}').format(long(123)) == '123으로'
def test_proofreading(self):
assert l10n.proofread('사과은(는) 맛있다.') == '사과는 맛있다.'
assert l10n.proofread('집(으)로 가자.') == '집으로 가자.'
assert l10n.proofread('용사은(는) 검을(를) 획득했다.') == \
'용사는 검을 획득했다.'
def test_meaningless_proofreading(self):
assert l10n.proofread('사과다.') == '사과다.'
assert l10n.proofread('집') == '집'
assert l10n.proofread('의 식 주') == '의 식 주'
assert l10n.proofread('the grammatical rules of a language') == \
'the grammatical rules of a language'
def test_unworkable_proofreading(self):
assert l10n.proofread('Korean를(을)') == 'Korean를(을)'
assert l10n.proofread('Korean을(를)') == 'Korean를(을)'
assert l10n.proofread('Korean(을)를') == 'Korean를(을)'
def test_complex_proofreading(self):
assert l10n.proofread('말을(를)(를)') == '말을(를)'
def test_proofreading_lyrics(self):
assert textwrap.dedent(l10n.proofread('''
나의 영혼 물어다줄 평화시장 비둘기 위(으)로 떨어지는 투명한 소나기
다음날엔 햇빛 쏟아지길 바라며 참아왔던 고통이(가) 찢겨져 버린 가지
될 때까지 묵묵히 지켜만 보던 벙어리 몰아치는 회오리 속에 지친 모습이(가)
말해주는 가슴에 맺힌 응어리 여전히 가슴속에 쏟아지는 빛줄기
''')) == textwrap.dedent('''
나의 영혼 물어다줄 평화시장 비둘기 위로 떨어지는 투명한 소나기
다음날엔 햇빛 쏟아지길 바라며 참아왔던 고통이 찢겨져 버린 가지
될 때까지 묵묵히 지켜만 보던 벙어리 몰아치는 회오리 속에 지친 모습이
말해주는 가슴에 맺힌 응어리 여전히 가슴속에 쏟아지는 빛줄기
''')
assert textwrap.dedent(l10n.proofread('''
빨간
|
napjon/moocs_solution
|
robotics-udacity/1.3.py
|
Python
|
mit
| 626
| 0.036741
|
p=[0.2,0.2,0.2,0.2,0.2]
world=['green', 'red', 'red', 'green', 'green']
measurements = ['red','green']
Z = 'red'
pHit = 0.6
pMiss = 0.2
def sense(p, Z):
q=[]
for i in range(len(p)):
        hit = (Z == world[i]) # hit is 1 (True) when the measurement matches the world cell, 0 otherwise
        q.append(p[i] * (hit * pHit + (1-hit) * pMiss)) # weight by pHit on a match, by pMiss otherwise
s=sum(q)
    for i in range(len(p)):
q[i] = q[i]/s
        #q[i] = q[i]/sum(q) --> WRONG: sum(q) would change on every iteration once q is modified
return q
#def move(p,U)
for k in range(len(measurements)):
p = sense (p,measurements[k])
print p
print sense(p,Z)
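# Worked example (illustrative): with the uniform prior above and Z = 'red', the two
# 'red' cells get weight 0.2*0.6 = 0.12 and the three 'green' cells 0.2*0.2 = 0.04,
# so sum(q) = 0.36 and the normalized posterior is [1/9, 1/3, 1/3, 1/9, 1/9].
def _sense_example():
    return sense([0.2] * 5, 'red')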
|
aadithyamd/QandA
|
QA/forms.py
|
Python
|
mit
| 4,443
| 0.011479
|
from django import forms
from .models import Question, Answer, Categories, Customuser
from django.contrib import auth
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
class add_Question_Form(forms.ModelForm): # just a regular form
question_text = forms.CharField(label=_("question_text"),
widget=forms.Textarea({'cols': '40', 'rows': '5'}))
class Meta:
model = Question
fields = ['question_text', 'upload',
'category1','category2',
'category3','category4']
def clean_text(self):
if question_text == "":
raise forms.ValidationError(
"Need a question",)
else:
return True
def save(self,commit=True):
question = super(add_Question_Form, self).save(commit=False)
question.question_text = self.cleaned_data["question_text"]
if commit:
question.save()
return question
class add_Answer_Form(forms.ModelForm):
class Meta:
model = Answer
fields = ['answer_text']
def clean_text(self):
return self.cleaned_data.get('answer_text')
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("P
|
assword confirmation"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as before, for verification."))
# User's username field and our own 2 fields pass1 and pass2 are used. Later
# we shall set the User's password by user.set_password.
class Meta:
model = Customuser
fields = ("username","email","first_name","department")
def clean_password2(self):
        password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
self.instance.username = self.cleaned_data.get('username')
# To remove invalid passwords like short words, number only cases
auth.password_validation.validate_password(self.cleaned_data.get('password2'), self.instance)
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password2"])
if commit:
user.save()
return user
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = forms.CharField( max_length=254,
widget=forms.TextInput( attrs={'autofocus': ''}),
)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct username and password. "
"Note that both fields may be case-sensitive."),
'inactive': _("This account is inactive."),
}
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = auth.authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
)
else:
return self.cleaned_data
class UserForm(forms.ModelForm):
class Meta:
model = Customuser
fields = ('categories',)
class CustomuserAdminForm(forms.ModelForm):
class Meta:
model = Customuser
fields = ("username","email","first_name","last_name",
'department','groups','is_active','is_staff','is_superuser')
# fields = ['username','password','verify,'first_name','last_name','email','batch',]
################### Django classes ##########################
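# Hypothetical usage sketch (field values are invented; the Customuser model and the
# Django settings are assumed to be configured): validating and saving a signup form.
def _user_creation_example():
    form = UserCreationForm(data={
        'username': 'alice', 'email': 'alice@example.com',
        'first_name': 'Alice', 'department': 'physics',
        'password1': 'a-sufficiently-long-password',
        'password2': 'a-sufficiently-long-password',
    })
    return form.save() if form.is_valid() else form.errors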
|
inferrna/neurolabcl
|
replacer.py
|
Python
|
lgpl-3.0
| 407
| 0.034398
|
import re
s = open("neurolab/tool.py", "r").read()
s = re.sub('^([ \t\r\f\v]*)(.+?)\.shape = (.*?), (.*?)$', '\g<1>\g<2> = \g<2>.reshape((\g<3>, \g<4>,)) #replacement for \"\\g<0>\"', s, flags=re.MULTILINE)
s = re.sub('^([ \t\r\f\v]*)(.+?)\.shape = (\S+?)$', '\g<1>\g<2> = \g<2>.reshape(\g<3>) #replacement for \"\\g<0>\"', s, flags=re.MULTILINE)
f = open("neuro
|
lab/newtool.py", "w")
f.write(s)
f.close()
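# Illustrative round-trip of the two substitutions above (not part of the original
# script): tuple-shaped assignments become reshape((a, b,)), single values reshape(x).
def _replacer_example():
    sample = "    out.shape = n, m\n    vec.shape = (k,)\n"
    step1 = re.sub('^([ \t\r\f\v]*)(.+?)\.shape = (.*?), (.*?)$',
                   '\g<1>\g<2> = \g<2>.reshape((\g<3>, \g<4>,))',
                   sample, flags=re.MULTILINE)
    return re.sub('^([ \t\r\f\v]*)(.+?)\.shape = (\S+?)$',
                  '\g<1>\g<2> = \g<2>.reshape(\g<3>)',
                  step1, flags=re.MULTILINE)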
|
dobbscoin/dobbscoin-source
|
qa/rpc-tests/getblocktemplate_proposals.py
|
Python
|
mit
| 6,302
| 0.004919
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Dobbscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework import DobbscoinTestFramework
from dobbscoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
from binascii import a2b_hex, b2a_hex
from hashlib import sha256
from struct import pack
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
def b2x(b):
return b2a_hex(b).decode('ascii')
# NOTE: This does not work for signed numbers (set the high bit) or zero (use b'\0')
def encodeUNum(n):
s = bytearray(b'\1')
while n > 127:
s[0] += 1
s.append(n % 256)
n //= 256
s.append(n)
return bytes(s)
def varlenEncode(n):
if n < 0xfd:
return pack('<B', n)
if n <= 0xffff:
return b'\xfd' + pack('<H', n)
if n <= 0xffffffff:
return b'\xfe' + pack('<L', n)
return b'\xff' + pack('<Q', n)
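# Illustrative values for the compact-size encoding above (not part of the original test):
def _varlen_examples():
    assert varlenEncode(0xfc) == b'\xfc'                     # one byte below the 0xfd marker
    assert varlenEncode(0xfd) == b'\xfd\xfd\x00'             # 0xfd marker + uint16 little-endian
    assert varlenEncode(0x10000) == b'\xfe\x00\x00\x01\x00'  # 0xfe marker + uint32 little-endian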
def dblsha(b):
return sha256(sha256(b).digest()).digest()
def genmrklroot(leaflist):
cur = leaflist
while len(cur) > 1:
n = []
if len(cur) & 1:
cur.append(cur[-1])
for i in range(0, len(cur), 2):
n.append(dblsha(cur[i] + cur[i+1]))
cur = n
return cur[0]
def template_to_bytes(tmpl, txlist):
blkver = pack('<L', tmpl['version'])
mrklroot = genmrklroot(list(dblsha(a) for a in txlist))
timestamp = pack('<L', tmpl['curtime'])
nonce = b'\0\0\0\0'
blk = blkver + a2b_hex(tmpl['previousblockhash'])[::-1] + mrklroot + timestamp + a2b_hex(tmpl['bits'])[::-1] + nonce
blk += varlenEncode(len(txlist))
for tx in txlist:
blk += tx
return blk
def template_to_hex(tmpl, txlist):
return b2x(template_to_bytes(tmpl, txlist))
def assert_template(node, tmpl, txlist, expect):
rsp = node.getblocktemplate({'data':template_to_hex(tmpl, txlist),'mode':'proposal'})
if rsp != expect:
raise AssertionError('unexpected: %s' % (rsp,))
class GetBlockTemplateProposalTest(DobbscoinTestFramework):
'''
Test block proposals with getblocktemplate.
'''
def run_test(self):
node = self.nodes[0]
tmpl = node.getblocktemplate()
if 'coinbasetxn' not in tmpl:
rawcoinbase = encodeUNum(tmpl['height'])
rawcoinbase += b'\x01-'
hexcoinbase = b2x(rawcoinbase)
hexoutval = b2x(pack('<Q', tmpl['coinbasevalue']))
tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + ('%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'}
txlist = list(bytearray(a2b_hex(a['data'])) for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions']))
# Test 0: Capability advertised
assert('proposal' in tmpl['capabilities'])
# NOTE: This test currently FAILS (regtest mode doesn't enforce block height in coinbase)
## Test 1: Bad height in coinbase
#txlist[0][4+1+36+1+1] += 1
#assert_template(node, tmpl, txlist, 'FIXME')
#txlist[0][4+1+36+1+1] -= 1
# Test 2: Bad input hash for gen tx
txlist[0][4+1] += 1
assert_template(node, tmpl, txlist, 'bad-cb-missing')
txlist[0][4+1] -= 1
# Test 3: Truncated final tx
lastbyte = txlist[-1].pop()
try:
assert_template(node, tmpl, txlist, 'n/a')
except JSONRPCException:
pass # Expected
txlist[-1].append(lastbyte)
# Test 4: Add an invalid tx to the end (duplicate of gen tx)
txlist.append(txlist[0])
assert_template(node, tmpl, txlist, 'bad-txns-duplicate')
txlist.pop()
# Test 5: Add an invalid tx to the end (non-duplicate)
txlist.append(bytearray(txlist[0]))
txlist[-1][4+1] = b'\xff'
assert_template(node, tmpl, txlist, 'bad-txns-inputs-missingorspent')
txlist.pop()
# Test 6: Future tx lock time
        txlist[0][-4:] = b'\xff\xff\xff\xff'
assert_template(node, tmpl, txlist, 'bad-txns-nonfinal')
txlist[0][-4:] = b'\0\0\0\0'
# Test 7: Bad tx count
txlist.append(b'')
try:
            assert_template(node, tmpl, txlist, 'n/a')
except JSONRPCException:
pass # Expected
txlist.pop()
# Test 8: Bad bits
realbits = tmpl['bits']
tmpl['bits'] = '1c0000ff' # impossible in the real world
assert_template(node, tmpl, txlist, 'bad-diffbits')
tmpl['bits'] = realbits
# Test 9: Bad merkle root
rawtmpl = template_to_bytes(tmpl, txlist)
rawtmpl[4+32] = (rawtmpl[4+32] + 1) % 0x100
rsp = node.getblocktemplate({'data':b2x(rawtmpl),'mode':'proposal'})
if rsp != 'bad-txnmrklroot':
raise AssertionError('unexpected: %s' % (rsp,))
# Test 10: Bad timestamps
realtime = tmpl['curtime']
tmpl['curtime'] = 0x7fffffff
assert_template(node, tmpl, txlist, 'time-too-new')
tmpl['curtime'] = 0
assert_template(node, tmpl, txlist, 'time-too-old')
tmpl['curtime'] = realtime
# Test 11: Valid block
assert_template(node, tmpl, txlist, None)
# Test 12: Orphan block
tmpl['previousblockhash'] = 'ff00' * 16
assert_template(node, tmpl, txlist, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
GetBlockTemplateProposalTest().main()
|
roderickvd/nzbToMedia
|
nzbtomedia/linktastic/linktastic.py
|
Python
|
gpl-3.0
| 4,080
| 0.004902
|
# Linktastic Module
# - A python2/3 compatible module that can create hardlinks/symlinks on windows-based systems
#
# Linktastic is distributed under the MIT License. The follow are the terms and conditions of using Linktastic.
#
# The MIT License (MIT)
# Copyright (c) 2012 Solipsis Development
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import subprocess
from subprocess import CalledProcessError
import os
# Prevent spaces from messing with us!
def _escape_param(param):
return '"%s"' % param
# Private function to create link on nt-based systems
def _link_windows(src, dest):
try:
subprocess.check_output(
'cmd /C mklink /H %s %s' % (_escape_param(dest), _escape_param(src)),
stderr=subprocess.STDOUT)
except CalledProcessError as err:
raise IOError(err.output.decode('utf-8'))
# TODO, find out what kind of messages Windows sends us from mklink
# print(stdout)
# assume if they ret-coded 0 we're good
def _symlink_windows(src, dest):
try:
subprocess.check_output(
'cmd /C mklink %s %s' % (_escape_param(dest), _escape_param(src)),
stderr=subprocess.STDOUT)
except CalledProcessError as err:
raise IOError(err.output.decode('utf-8'))
# TODO, find out what kind of messages Windows sends us from mklink
# print(stdout)
# assume if they ret-coded 0 we're good
def _dirlink_windows(src, dest):
try:
subprocess.check_output(
'cmd /C mklink /J %s %s' % (_escape_param(dest), _escape_param(src)),
stderr=subprocess.STDOUT)
except CalledProcessError as err:
raise IOError(err.output.decode('utf-8'))
# TODO, find out what kind of messages Windows sends us from mklink
# print(stdout)
# assume if they ret-coded 0 we're good
def _junctionlink_windows(src, dest):
try:
subprocess.check_output(
'cmd /C mklink /D %s %s' % (_escape_param(dest), _escape_param(src)),
stderr=subprocess.STDOUT)
except CalledProcessError as err:
raise IOError(err.output.decode('utf-8'))
# TODO, find out what kind of messages Windows sends us from mklink
# print(stdout)
# assume if they ret-coded 0 we're good
# Create a hard link to src named as dest
# This version of link, unlike os.link, supports nt systems as well
def link(src, dest):
if os.name == 'nt':
_link_windows(src, dest)
else:
os.link(src, dest)
# Create a symlink to src named as dest, but don't fail if you're on nt
def symlink(src, dest):
if os.name == 'nt':
_symlink_windows(src, dest)
else:
os.symlink(src, dest)
# Create a symlink to src named as dest, but don't fail if you're on nt
def dirlink(src, dest):
if os.name == 'nt':
_dirlink_windows(src, dest)
else:
os.symlink(src, dest)
# Create a symlink to src named as dest, but don't fail if you're on nt
def junctionlink(src, dest):
if os.name == 'nt':
_junctionlink_windows(src, dest)
else:
os.symlink(src, dest)
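# Minimal usage sketch (illustrative): create a hard link and a symlink to an existing
# file using the platform-appropriate wrappers defined above.
def _linktastic_example(tmpdir):
    src = os.path.join(tmpdir, 'original.txt')
    open(src, 'w').close()
    link(src, os.path.join(tmpdir, 'hardlink.txt'))
    symlink(src, os.path.join(tmpdir, 'symlink.txt'))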
|
pelucid/salesforce_bulk
|
test_salesforce_bulk.py
|
Python
|
mit
| 4,845
| 0.008462
|
import re
import time
import unittest
import salesforce_oauth_request
from salesforce_bulk import SalesforceBulk
class SalesforceBulkTest(unittest.TestCase):
def __init__(self, testName, endpoint, sessionId):
super(SalesforceBulkTest, self).__init__(testName)
self.endpoint = endpoint
self.sessionId = sessionId
def setUp(self):
self.jobs = []
def tearDown(self):
if hasattr(self, 'bulk'):
for job_id in self.jobs:
print "Closing job: %s" % job_id
self.bulk.close_job(job_id)
def test_raw_query(self):
bulk = SalesforceBulk(self.sessionId, self.endpoint)
self.bulk = bulk
job_id = bulk.create_query_job("Contact")
self.jobs.append(job_id)
self.assertIsNotNone(re.match("\w+", job_id))
batch_id = bulk.query(job_id, "Select Id,Name,Email from Contact Limit 1000")
self.assertIsNotNone(re.match("\w+", batch_id))
while not bulk.is_batch_done(job_id, batch_id):
print "Job not done yet..."
print bulk.batch_status(job_id, batch_id)
time.sleep(2)
self.results = ""
def save_results(tfile, **kwargs):
print "in save results"
self.results = tfile.read()
flag = bulk.get_batch_results(job_id, batch_id, callback = save_results)
self.assertTrue(flag)
self.assertTrue(len(self.results) > 0)
self.assertIn('"', self.results)
def test_csv_query(self):
bulk = SalesforceBulk(self.sessionId, self.endpoint)
self.bulk = bulk
job_id = bulk.create_query_job("Account")
self.jobs.append(job_id)
self.assertIsNotNone(re.match("\w+", job_id))
batch_id = bulk.query(job_id, "Select Id,Name,Description from Account Limit 10000")
self.assertIsNotNone(re.match("\w+", batch_id))
bulk.wait_for_batch(job_id, batch_id, timeout=120)
self.results = None
def save_results1(rows, **kwargs):
self.results = rows
flag = bulk.get_batch_results(job_id, batch_id, callback = save_results1, parse_csv=True)
self.assertTrue(flag)
results = self.results
self.assertTrue(len(results) > 0)
self.assertTrue(isinstance(results,list))
self.assertEqual(results[0], ['Id','Name','Description'])
self.assertTrue(len(results) > 3)
self.results = None
self.callback_count = 0
def save_results2(rows, **kwargs):
self.results = rows
print rows
self.callback_count += 1
batch = len(results) / 3
self.callback_count = 0
flag = bulk.get_batch_results(job_id, batch_id, callback = save_results2, parse_csv=True, batch_size=batch)
self.assertTrue(self.callback_count >= 3)
def test_csv_upload(self):
bulk = SalesforceBulk(self.sessionId, self.endpoint)
self.bulk = bulk
job_id = bulk.create_insert_job("Contact")
self.jobs.append(job_id)
self.assertIsNotNone(re.match("\w+", job_id))
batch_ids = []
content = open("example.csv").read()
for i in range(5):
batch_id = bulk.query(job_id, content)
self.assertIsNotNone(re.match("\w+", batch_id))
batch_ids.append(batch_id)
for batch_id in batch_ids:
bulk.wait_for_batch(job_id, batch_id, timeout=120)
self.results = None
def save_results1(rows, failed, remaining):
self.results = rows
for batch_id in batch_ids:
flag = bulk.get_upload_results(job_id, batch_id, callback = save_results1)
self.assertTrue(flag)
results = self.results
self.assertTrue(len(results) > 0)
self.assertTrue(isinstance(results,list))
self.assertEqual(results[0], UploadResult('Id','Success','Created','Error'))
self.assertEqual(len(results), 3)
self.results = None
self.callback_count = 0
def save_results2(rows, failed, remaining):
self.results = rows
self.callback_count += 1
batch = len(results) / 3
self.callback_count = 0
flag = bulk.get_upload_results(job_id, batch_id, callback = save_results2, batch_size=batch)
self.assertTrue(self.callback_count >= 3)
if __name__ == '__main__':
username = raw_input("Salesforce username: ")
password = raw_input("Salesforce password: ")
login = salesforce_oauth_request.login(username=username, password=password, cache_session=True)
endpoint = login['endpoint']
sessionId = login['access_token']
    suite = unittest.TestSuite()
    suite.addTest(SalesforceBulkTest("test_csv_upload", endpoint, sessionId))
unittest.TextTestRunner().run(suite)
|
evelkey/vahun
|
experiment_FINAL.py
|
Python
|
apache-2.0
| 4,608
| 0.021267
|
import sys
import tensorflow as tf
from vahun.Text import Text
import numpy as np
from vahun.tools import Timer
from vahun.tools import explog
from vahun.autoencoder import Autoencoder_ffnn
from vahun.variational_autoencoder import Variational_autoencoder
from vahun.genetic import evolution
from vahun.genetic import experiment
from vahun.tools import show_performance
from vahun.genetic import Settings
import argparse
def main(args=None):
timer=Timer()
size=args.corp_len
corpuses=[Text(corpus_path='/mnt/store/velkey/mnsz2/filt.200k.maxlen20',size=size),
Text(corpus_path='/mnt/store/velkey/mnsz2/filt.200k_random.maxlen20',size=size),
              Text(corpus_path='/mnt/store/velkey/mnsz2/filt.200k.maxlen20.digraph_repl',size=size),
Text(corpus_path='/mnt/store/velkey/mnsz2/filt.200k_random.maxlen20.digraph_repl',size=size)]
print("Corpus list ready")
exps = []
ranger=range(args.low,args.high)
i=0
with open('/mnt/store/velkey/experiments') as f:
for line in f:
if(i in ranger):
exps.append(line.strip().split('\t'))
i+=1
    for exper in exps:
exper=[int(item) for item in exper]
layerlist=exper[3:]
settings=Settings(layerlist)
typ=0
if exper[1]==0 and exper[2]==0:
corpus_path='/mnt/store/velkey/mnsz2/filt.200k.maxlen20'
typ=0
if exper[1]==1 and exper[2]==0:
corpus_path='/mnt/store/velkey/mnsz2/filt.200k_random.maxlen20'
typ=1
if exper[1]==0 and exper[2]==1:
corpus_path='/mnt/store/velkey/mnsz2/filt.200k.maxlen20.digraph_repl'
typ=2
if exper[1]==1 and exper[2]==1:
corpus_path='/mnt/store/velkey/mnsz2/filt.200k_random.maxlen20.digraph_repl'
typ=3
corpus=corpuses[typ]
name=(str("uniq_"+("variational_" if exper[0]==1 else "autoencoder_")+
("top_" if exper[1]==0 else "random_")+
("bigraph_" if exper[2]==1 else "uni_")))
logger=explog(encoder_type=name,
encoding_dim=min(settings.weights),
feature_len=20,
lang=corpus_path,
unique_words=len(set(corpus.wordlist)),
name=name,
population_size=0,
words=len(corpus.wordlist))
for k in range(2):
print("starting experiment: ",exper)
timer.add("experiment")
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
if(exper[0]==1):
encoder=Variational_autoencoder(logger=logger,
tf_session=sess,
inputdim=len(corpus.abc)*20,
encoding_size=settings.weights[0],
corpus=corpus,
optimizer =tf.train.AdamOptimizer(learning_rate = 0.001),
nonlinear=tf.sigmoid,charnum=len(corpus.abc))
else:
encoder=Autoencoder_ffnn(
experiment=settings,
logger = logger,
tf_session=sess,
inputdim = len(corpus.abc)*20,
layerlist = settings.weights,
encode_index = int(len(settings.weights)/2),
corpus = corpus,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
nonlinear = tf.sigmoid,
charnum=len(corpus.abc))
encoder.train(corpus.x_train,corpus.x_valid,corpus.x_test,512,80)
print("Finished in:", timer.get("experiment") ,"s")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Autoencoder experiment')
parser.add_argument("--corp_len", dest="corp_len", default=0, type=int, help="Words to read from corpus")
parser.add_argument("--feature_len", dest="feature_len", default=20, type=int, help="Feature size")
parser.add_argument("--from", dest="low", type=int, help="lower boundary")
parser.add_argument("--to", dest="high", type=int, help="upper boundary")
args = parser.parse_args()
main(args)
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py
|
Python
|
apache-2.0
| 1,687
| 0.001778
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteModelDeploymentMonitoringJob
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_async]
from google.cloud import aiplatform_v1
async def sample_delete_model_deployment_monitoring_job():
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteModelDeploymentMonitoringJobRequest(
name="na
|
me_value",
)
# Make the request
operation = client.delete_model_deployment_monitoring_job(request=request)
print("Waiting for operation to complet
|
e...")
response = await operation.result()
# Handle the response
print(response)
# [END aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_async]
|
nikesh-mahalka/cinder
|
cinder/tests/unit/api/contrib/test_availability_zones.py
|
Python
|
apache-2.0
| 2,932
| 0
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
from oslo_utils import timeutils
import cinder.api.contrib.availability_zones
import cinder.context
import cinder.test
import cinder.volume.api
created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099)
current_time = timeutils.utcnow()
def list_availability_zones(self):
return (
{'name': 'ping', 'available': True},
{'name': 'pong', 'available': False},
)
class FakeRequest(object):
    environ = {'cinder.context': cinder.context.get_admin_context()}
GET = {}
class ControllerTestCase(cinder.test.TestCase):
def setUp(self):
super(ControllerTestCase, self).setUp()
        self.controller = cinder.api.contrib.availability_zones.Controller()
self.req = FakeRequest()
self.stubs.Set(cinder.volume.api.API,
'list_availability_zones',
list_availability_zones)
def test_list_hosts(self):
"""Verify that the volume hosts are returned."""
actual = self.controller.index(self.req)
expected = {
'availabilityZoneInfo': [
{'zoneName': 'ping', 'zoneState': {'available': True}},
{'zoneName': 'pong', 'zoneState': {'available': False}},
],
}
self.assertEqual(expected, actual)
class XMLSerializerTest(cinder.test.TestCase):
def test_index_xml(self):
fixture = {
'availabilityZoneInfo': [
{'zoneName': 'ping', 'zoneState': {'available': True}},
{'zoneName': 'pong', 'zoneState': {'available': False}},
],
}
serializer = cinder.api.contrib.availability_zones.ListTemplate()
text = serializer.serialize(fixture)
tree = etree.fromstring(text)
self.assertEqual('availabilityZones', tree.tag)
self.assertEqual(2, len(tree))
self.assertEqual('availabilityZone', tree[0].tag)
self.assertEqual('ping', tree[0].get('name'))
self.assertEqual('zoneState', tree[0][0].tag)
self.assertEqual('True', tree[0][0].get('available'))
self.assertEqual('pong', tree[1].get('name'))
self.assertEqual('zoneState', tree[1][0].tag)
self.assertEqual('False', tree[1][0].get('available'))
|
ruchee/vimrc
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/u/use/used_before_assignment_488.py
|
Python
|
mit
| 251
| 0.003984
|
# pylint: disable=missing-docstring
def func():
"""Test that a variable defined in a finally clause does not trigger a false positive"""
try:
variable = 1
yield variable
    finally:
variable = 2
yield variable
|
khalidm/ExAC_genes
|
src/annotations.py
|
Python
|
mit
| 3,862
| 0.009063
|
'''
A tool to annotate and print variants in tabular format.
Author: Khalid Mahmood (khalid.mahmood@unimelb.edu.au).
Copyright: 2015
'''
#!/usr/bin/python
#from utils import findlist
import sys
import os
import argparse
import getopt
#import vcf
import array
import pysam
#class Error(Exception):
# """Base-class for exceptions in this module."""
#class UsageError(Error):
# def __init__(self, msg):
# self.msg = msg
def getcadd(cadd_tbx, current_chr, current_pos, current_ref, current_alt):
current_chr = current_chr.translate(None, 'chr')
data = cadd_tbx.fetch(current_chr, current_pos-1, current_pos)
cadd_phred, cadd_priPhCons, cadd_GerpRS = '','',''
    cadd_polysift, cadd_test1, cadd_test2 = '','',''
if data is not None:
for row in data:
row_info = row.split("\t")
cadd_ref = row_info[2]
cadd_alt = row_info[4]
if(cadd_ref == current_ref and cadd_alt == current_alt):
cadd_phred = row_info[115]
cadd_priPhCons = row_info[18]
cadd_GerpRS = row_info[26]
#cadd_annotation = row_info[]
if "damaging" in row_info[110] or "deleterious" in row_info[112]:
cadd_polysift = "del"
break
else:
cadd_phred = 'NA'
    # cadd_annotation is never assigned above, so return the values that were
    # actually computed (phred, priPhCons, GerpRS and the damaging/deleterious flag).
    return cadd_phred, cadd_priPhCons, cadd_GerpRS, cadd_polysift
def getExAC(record, ac_eth, an_eth, index):
#ac = ','.join(str(v) for v in record.INFO[eth])
ac = record.INFO[ac_eth][index]
an = record.INFO[an_eth]
if ac>0:
af = round((float(ac*1.0)/float(an*1.0)),7)
else:
af = 0.0
samples = an/2
return str(ac), str(an), str(af), str(samples)
def getTabixVal(input_tbx, current_chr, current_pos, current_ref, current_alt):
#current_chr = current_chr.translate(None, 'chr')
data = input_tbx.fetch(current_chr, current_pos-1, current_pos)
value = '.'
if data is not None:
for row in data:
row_info = row.split("\t")
value = row_info[3]
#else:
# value = '.'
return value
def getTabixValCondel(input_tbx, current_chr, current_pos, current_ref, current_alt):
#current_chr = current_chr.translate(None, 'chr')
data = input_tbx.fetch(current_chr, current_pos-1, current_pos)
value = 0.0001
if data is not None:
for row in data:
row_info = row.split("\t")
if( current_ref == row_info[3] and current_alt == row_info[4] ):
value = row_info[7]
break
return round(float(value), 4)
def getfathmm(fathmm_tbx, current_chr, current_pos, current_ref, current_alt):
#current_chr = current_chr.translate(None, 'chr')
data = fathmm_tbx.fetch(current_chr, current_pos-1, current_pos)
fathmm_score = 0.0
if data is not None:
for row in data:
row_info = row.split("\t")
fathmm_ref = row_info[3]
fathmm_alt = row_info[4]
if(fathmm_ref == current_ref and fathmm_alt == current_alt):
fathmm_score = row_info[5]
break
return fathmm_score
def getTabixBool(input_tbx, current_chr, current_pos, current_ref, current_alt):
#current_chr = current_chr.translate(None, 'chr')
data = input_tbx.fetch(current_chr, current_pos-1, current_pos)
val = '.'
if data is not None:
for row in data:
#print current_chr + ":" + str(current_pos) + ":" + str(row.split("\t"))
val = "T"
return val
def adjust_scores(condel, sift, polyphen, fathmm, annotation):
if( ("stop_lost" in annotation) or ("stop_gained" in annotation) ):
condel = 0.9999
sift = 0.0001
polyphen = 0.9999
fathmm = 0.9999
return condel, sift, polyphen, fathmm
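# --- Illustrative worked example (not part of the original script) ---
# getExAC() turns raw ExAC INFO counts into an allele frequency: af = AC/AN
# rounded to 7 decimal places, and the sample count is AN/2 (two alleles per
# individual). The numbers below are made up purely to show the arithmetic.
def _demo_exac_math():
    ac, an = 12, 121412   # hypothetical alternate-allele count and total allele number
    af = round((float(ac * 1.0) / float(an * 1.0)), 7)   # 12/121412 -> 0.0000988
    samples = an / 2                                      # 121412/2 -> 60706 individuals
    return af, samples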
|
galaxyproject/pulsar
|
pulsar/client/transport/ssh.py
|
Python
|
apache-2.0
| 1,799
| 0.000556
|
import os
import subprocess
SSH_OPTIONS = ['-o', 'StrictHostKeyChecking=no', '-o', 'PreferredAuthentications=publickey', '-o', 'PubkeyAuthentication=yes']
def rsync_get_file(uri_from, uri_to, user, host, port, key):
    cmd = [
        'rsync',
'-e',
'ssh -i {} -p {} {}'.format(key, port, ' '.join(SSH_OPTIONS)),
'{}@{}:{}'.format(user, host, uri_from),
uri_to,
]
_call(cmd)
def rsync_post_file(uri_from, uri_to, user, host, port, key):
_ensure_dir(uri_to, key, port, user, host)
cmd = [
'rsync',
'-e',
'ssh -i {} -p {} {}'.format(key, port, ' '.join(SSH_OPTIONS)),
uri_from,
'{}@{}:{}'.format(user, host, uri_to),
]
_call(cmd)
def scp_get_file(uri_from, uri_to, user, host, port, key):
cmd = [
'scp',
'-P', str(port),
'-i', key
] + SSH_OPTIONS + [
'{}@{}:{}'.format(user, host, uri_from),
uri_to,
]
_call(cmd)
def scp_post_file(uri_from, uri_to, user, host, port, key):
_ensure_dir(uri_to, key, port, user, host)
cmd = [
'scp',
'-P', str(port),
'-i', key,
] + SSH_OPTIONS + [
uri_from,
'{}@{}:{}'.format(user, host, uri_to),
]
_call(cmd)
def _ensure_dir(uri_to, key, port, user, host):
directory = os.path.dirname(uri_to)
cmd = [
'ssh',
'-i', key,
'-p', str(port),
] + SSH_OPTIONS + [
'{}@{}'.format(user, host),
'mkdir', '-p', directory,
]
_call(cmd)
def _call(cmd):
exit_code = subprocess.check_call(cmd)
if exit_code != 0:
raise Exception("{} exited with code {}".format(cmd[0], exit_code))
__all__ = [
'rsync_post_file',
'rsync_get_file',
'scp_post_file',
'scp_get_file'
]
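# --- Illustrative usage sketch (not part of the original module) ---
# The public helpers shell out to rsync/scp/ssh with a private key and a fixed
# set of non-interactive SSH options. Host, user, port and key path below are
# placeholders; running this for real requires a reachable SSH server.
def _demo_transfer():
    key = os.path.expanduser('~/.ssh/id_rsa')   # hypothetical key path
    # Pull a remote file to the local machine with scp.
    scp_get_file('/remote/staging/input.dat', '/tmp/input.dat',
                 user='pulsar', host='compute.example.org', port=22, key=key)
    # Push a local file back; _ensure_dir() creates the remote parent directory
    # first, so nested destinations are fine.
    rsync_post_file('/tmp/output.dat', '/remote/staging/results/output.dat',
                    user='pulsar', host='compute.example.org', port=22, key=key)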
|
simion/django-trampoline
|
tests/test_paginator.py
|
Python
|
mit
| 2,454
| 0
|
"""
Test paginator for trampoline.
"""
from elasticsearch_dsl import Index
from elasticsearch_dsl import Search
from trampoline.paginator import ESSearchPaginator
from tests.base import BaseTestCase
from tests.models import Token
from tests.views import PaginatedContentView
class TestPaginator(BaseTestCase):
def setUp(self):
super(TestPaginator, self).setUp()
self.doc_type = Token.get_es_doc_type()
self.index = Index(self.doc_type._doc_type.index)
self.index.doc_type(self.doc_type)
self.index.create()
self.refresh()
for i in range(3):
Token.objects.create(name='token {0}'.format(i))
self.refresh()
def tearDown(self):
super(TestPaginator, self).tearDown()
self.index.delete()
def test_paginator(self):
search = Search(
index=Token.es_doc_type._doc_type.index,
doc_type=Token.es_doc_type._doc_type.name
)
search = search.sort('name')
page_size = 2
paginator = ESSearchPaginator(search, page_size)
page = paginator.page(1)
self.assertTrue(page.has_other_pages)
self.assertEqual(len(page.hits), page_size)
self.assertEqual(page.total_count, 3)
self.assertEqual(page.hits[0]['name'], 'token 0')
self.assertEqual(page.hits[1]['name'], 'token 1')
self.assertEqual(page.paginator, paginator)
self.assertEqual(page.number, 1)
self.assertIsNotNone(page.response)
page = paginator.page(2)
self.assertFalse(page.has_other_pages)
self.assertEqual(len(page.hits), 1)
self.assertEqual(page.hits[0]['name'], 'token 2')
def test_pagination_mixin(self):
class Request(object):
GET = {}
view = PaginatedContentView()
view.request = Request()
self.assertEqual(view.page_size, 2)
view.request.GET = {}
self.assertEqual(view.get_page_number(), 1)
view.request.GET = {'page': -2}
self.assertEqual(view.get_page_number(), 1)
view.request.GET = {'page': 'foobar'}
self.assertEqual(view.get_page_number(), 1)
view.request.GET = {'page': 5}
self.assertEqual(view.get_page_number(), 5)
        page = view.paginate_search()
self.assertIsNotNone(page)
self.assertIsNotNone(view.page)
        self.assertEqual(view.get_context_data()['page'], view.page)
|
uwekamper/flutterspark
|
flutterbrain_web/mapview/clustering.py
|
Python
|
mit
| 3,178
| 0.003776
|
# -*- coding: utf-8 -*-
import re
import requests
import base64
import struct
import random
from sklearn.cluster import KMeans
from sklearn.preprocessing import scale
from dateutil.parser import parse
class Tweet(object):
def __init__(self, status):
self.screen_name = status['screen_name']
self.text = status['text']
self.classification = None
self.created_at = parse(status['created_at'])
self.image_url = status['user']['profile_image_url_https']
self.lat = self.vector[-2]
self.long = self.vector[-1]
def classify(self, classification):
self.classification = classification
@property
def vector(self):
val = extract_brainstate(self.text)
if val == None:
return (1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
else:
return val
@property
def theta(self):
return abs(self.vector[0])
@property
def alpha(self):
return abs(self.vector[1])
@property
def beta(self):
return abs(self.vector[2])
@property
def fmax(self):
return abs(self.vector[3])
@property
def color(self):
overall_density = self.theta + self.alpha + self.beta
r = int(self.alpha / overall_density * 255)
g = int(self.beta / overall_density * 255)
b = int(self.theta / overall_density * 255)
return u'#%02x%02x%02x' % (r, g, b)
def __str__(self):
if self.vector != None:
return u'<@{}: {} {} [{}]>'.format(self.screen_name, self.text[:20], self.vector,
self.classification)
else:
            return u'<@{}: {} [{}]>'.format(self.screen_name, self.text[:20], self.classification)
def get_tweets(hashtag):
    url = u'http://loklak.org:9000/api/search.json?q={}&minified=true'.format(hashtag)
resp = requests.get(url)
data = resp.json()['statuses']
tweets = []
for status in data:
tweets.append(Tweet(status))
return tweets
def extract_brainstate(text):
match = re.search(r' [A-Za-z0-9+/=]{24,32} ', text)
if not match:
return None
else:
raw_brains = match.group(0).strip()
decoded_brains = base64.b64decode(raw_brains)
if len(raw_brains) == 24:
bla = struct.unpack('4f', decoded_brains)
cooked_brains = (bla[0], bla[1], bla[2], bla[3],
52.541576 + random.random() / 500, 13.390394 + random.random() / 500)
else:
cooked_brains = struct.unpack('6f', decoded_brains)
return cooked_brains
def cluster_brains(brains):
vectors = [t.vector for t in brains]
scaled_vectors = scale(vectors)
model = KMeans(n_clusters=2)
results = model.fit_predict(scaled_vectors)
for result, brain in zip(results, brains):
brain.classify(result)
return model, results
def find_latest(tweets):
latest = {}
for t in tweets:
if t.screen_name in latest.keys():
if latest[t.screen_name].created_at < t.created_at:
latest[t.screen_name] = t
else:
latest[t.screen_name] = t
return latest
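# --- Illustrative round-trip (not part of the original module) ---
# extract_brainstate() looks for a space-delimited base64 token of 24-32
# characters in the tweet text and unpacks it as 4 or 6 packed floats. This
# sketch builds such a text locally and decodes it again; the numbers are
# arbitrary placeholders.
def _demo_brainstate_roundtrip():
    packed = struct.pack('6f', 0.5, 0.3, 0.2, 11.0, 52.54, 13.39)
    token = base64.b64encode(packed).decode('ascii')   # 24 bytes -> 32 base64 chars
    text = u'my head right now ' + token + u' #flutterspark'
    return extract_brainstate(text)   # -> the six floats back (up to float32 precision)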
|
bpeschier/reek-pages
|
pages/apps.py
|
Python
|
mit
| 265
| 0
|
from django.apps import AppConfig
from django.utils.module_loading import autodiscover_modules
class PagesAppConfig(AppConfig):
name = 'pages'
def ready(self):
        # Walk over all views-modules to discover views
autodiscover_modules('views')
|
Thortoise/Super-Snake
|
Blender/animation_nodes-master/node_link_conversion.py
|
Python
|
gpl-3.0
| 11,610
| 0.008183
|
import bpy
from . import tree_info
from mathutils import Vector
from . utils.nodes import idToSocket
from . sockets.info import toBaseIdName, isList
from . tree_info import getAllDataLinkIDs, getDirectlyLinkedSocket
def correctForbiddenNodeLinks():
for dataOrigin, target in iterLinksThatNeedToBeCorrectedOrRemoved():
directOrigin = getDirectlyLinkedSocket(target)
if not tryToCorrectLink(dataOrigin, directOrigin, target):
removeLink(directOrigin, target)
tree_info.updateIfNecessary()
approvedLinkTypes = set()
def iterLinksThatNeedToBeCorrectedOrRemoved():
for originID, targetID, originType, targetType in getAllDataLinkIDs():
if (originType, targetType) in approvedLinkTypes:
continue
origin = idToSocket(originID)
target = idToSocket(targetID)
if isConnectionValid(origin, target):
approvedLinkTypes.add((originType, targetType))
else:
yield (origin, target)
def isConnectionValid(origin, target):
return origin.dataType in target.allowedInputTypes or target.allowedInputTypes[0] == "all"
def tryToCorrectLink(dataOrigin, directOrigin, target):
for corrector in linkCorrectors:
if corrector.check(dataOrigin, target):
nodeTree = target.getNodeTree()
corrector.insert(nodeTree, directOrigin, target, dataOrigin)
return True
return False
def removeLink(origin, target):
nodeTree = origin.getNodeTree()
for link in nodeTree.links:
if link.from_socket == origin and link.to_socket == target:
nodeTree.links.remove(link)
class LinkCorrection:
# subclasses need a check and insert function
pass
class SimpleConvert(LinkCorrection):
rules = {
("Boolean", "Integer") : "an_BooleanToIntegerNode",
("Boolean", "Float") : "an_BooleanToIntegerNode",
("Float", "Integer") : "an_FloatToIntegerNode",
("Vector", "Matrix") : "an_TranslationMatrixNode",
("Text Block", "String") : "an_TextBlockReaderNode",
("Vector", "Float") : "an_SeparateVectorNode",
("Float", "Vector") : "an_CombineVectorNode",
("Integer", "Vector") : "an_CombineVectorNode",
("Integer List", "Polygon Indices") : "an_CreatePolygonIndicesNode",
("Polygon Indices List", "Edge Indices List") : "an_EdgesOfPolygonsNode",
("Vector List", "Mesh Data") : "an_CombineMeshDataNode",
("Mesh Data", "Vector List") : "an_SeparateMeshDataNode",
("Mesh Data", "BMesh") : "an_CreateBMeshFromMeshData",
("Particle System", "Particle List") : "an_GetParticlesNode",
("Integer", "Euler") : "an_CombineEulerNode",
("Float", "Euler") : "an_CombineEulerNode",
("Euler", "Float") : "an_SeparateEulerNode",
("Object", "Vector") : "an_ObjectTransformsInputNode",
("Object", "Matrix") : "an_ObjectMatrixInputNode",
("Polygon List", "Mesh Data") : "an_MeshDataFromPolygonsNode",
("Object", "Shape Key List") : "an_ShapeKeysFromObjectNode",
("String", "Float") : "an_ParseNumberNode",
("Vector", "Euler") : "an_DirectionToRotationNode",
("Euler", "Vector") : "an_RotationToDirectionNode"
}
def check(self, origin, target):
return (origin.dataType, target.dataType) in self.rules
def insert(self, nodeTree, origin, target, dataOrigin):
nodeIdName = self.rules[(dataOrigin.dataType, target.dataType)]
node = insertLinkedNode(nodeTree, nodeIdName, origin, target)
class ConvertToIntegerList(LinkCorrection):
def check(self, origin, target):
return origin.dataType in ("Float List", "Edge Indices", "Polygon Indices") and target.dataType == "Integer List"
def insert(self, nodeTree, origin, target, dataOrigin):
node = insertLinkedNode(nodeTree, "an_ConvertToIntegerListNode", origin, target)
node.setOriginType(dataOrigin.dataType)
node.inputs[0].linkWith(origin)
class ConvertFloatToScale(LinkCorrection):
def check(self, origin, target):
return origin.dataType in ("Float", "Integer") and target.dataType == "Vector" and "scale" in target.name.lower()
def insert(self, nodeTree, origin, target, dataOrigin):
insertLinkedNode(nodeTree, "an_VectorFromValueNode", origin, target)
class ConvertNormalToEuler(LinkCorrection):
def check(self, origin, target):
return origin.dataType == "Vector" and origin.name == "Normal" and target.dataType == "Euler"
def insert(self, nodeTree, origin, target, dataOrigin):
insertLinkedNode(nodeTree, "an_DirectionToRotationNode", origin, target)
class ConvertEulerToQuaternion(LinkCorrection):
def check(self, origin, target):
return origin.dataType == "Euler" and target.dataType == "Quaternion"
def insert(self, nodeTree, origin, target, dataOrigin):
node = insertLinkedNode(nodeTree, "an_ConvertRotationsNode", origin, target)
node.conversionType = "EULER_TO_QUATERNION"
node.inputs[0].linkWith(origin)
node.outputs[0].linkWith(target)
class ConvertQuaternionToEuler(LinkCorrection):
def check(self, origin, target):
return origin.dataType == "Quaternion" and target.dataType == "Euler"
def insert(self, nodeTree, origin, target, dataOrigin):
node = insertLinkedNode(nodeTree, "an_ConvertRotationsNode", origin, target)
node.conversionType = "QUATERNION_TO_EULER"
node.inputs[0].linkWith(origin)
node.outputs[0].linkWith(target)
class ConvertListToElement(LinkCorrection):
def check(self, origin, target):
return toBaseIdName(origin.bl_idname) == target.bl_idname
def insert(self, nodeTree, origin, target, dataOrigin):
node = insertNode(nodeTree, "an_GetListElementNode", origin, target)
node.assignType(target.dataType)
insertBasicLinking(nodeTree, origin, node, target)
class ConvertElementToList(LinkCorrection):
def check(self, origin, target):
return origin.bl_idname == toBaseIdName(target.bl_idname)
def insert(self, nodeTree, origin, target, dataOrigin):
node = insertNode(nodeTree, "an_CreateListNode", origin, target)
node.assignBaseDataType(dataOrigin.dataType, inputAmount = 1)
insertBasicLinking(nodeTree, origin, node, target)
class ConvertVectorListToSplineList(LinkCorrection):
def check(self, origin, target):
return origin.dataType == "Vector List" and target.dataType == "Spline List"
def insert(self, nodeTree, origin, target, dataOrigin):
splineFromPoints, createList = insertNodes(nodeTree, ["an_SplineFromPointsNode", "an_CreateListNode"], origin, target)
createList.assignBaseDataType("Spline", inputAmount = 1)
nodeTree.links.new(splineFromPoints.inputs[0], origin)
nodeTree.links.new(createList.inputs[0], splineFromPoints.outputs[0])
nodeTree.links.new(createList.outputs[0], target)
class ConvertObjectToShapeKey(LinkCorrection):
def check(self, origin, target):
        return origin.dataType == "Object" and target.dataType == "Shape Key"
def insert(self, nodeTree, origin, target, dataOrigin):
getShapeKeys, getListElement = insertNodes(nodeTree, ["an_ShapeKeysFromObjectNode", "an_GetListElementNode"], origin, target)
        getListElement.inputs[1].value = 1
nodeTree.links.new(getShapeKeys.inputs[0], origin)
nodeTree.links.new(getListElement.inputs[0], getShapeKeys.outputs[0])
nodeTree.links.new(getListElement.outputs[0], target)
class ConvertSeparatedMeshDataToBMesh(LinkCorrection):
separatedMeshDataTypes = ["Vector List", "Edge Indices List", "Polygon Indices List"]
def check(self, origin, target):
return origin.dataType in self.separatedMeshDataTypes and target.dataType == "BMesh"
def insert(self, nodeTree, origin, target, dataOrigin):
toMeshData, toMesh = insertNodes(nodeTree, ["an_CombineMeshDataNode", "an_CreateBMeshFromMeshData"], origin, target)
nodeTree.links.new(toMeshData.inputs[self.separatedMeshDataTypes.index(origin.dataType)], origin)
        nodeTree.links.new(toMesh.inputs[0], toMeshData.outputs[0])
|
marcusmoller/pyorpg-client
|
src/pgu/gui/test.py
|
Python
|
mit
| 226
| 0.00885
|
class Test(object):
    @property
def something(self):
return 12
@something.setter
def something(self, value):
self.other = value
t = Test()
print t.something
t.something = 123
print t.other
|
dw9694/django_blog
|
blog/templatetags/image_tags.py
|
Python
|
gpl-3.0
| 2,209
| 0.000453
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import os.path
from django import template
FMT = 'JPEG'
EXT = 'jpg'
QUAL = 75
register = template.Library()
def resized_path(path, size, method):
"Returns the path for the resized image."
dir, name = os.path.split(path)
image_name, ext = name.rsplit('.', 1)
return os.path.join(dir, '%s_%s_%s.%s' % (image_name, method, size, EXT))
def scale(imagefield, size, method='scale'):
"
|
""
Template filter used to scale an image
that will fit inside the defined area.
Returns the url of the resized image.
{% load image_tags %}
{{ profile.picture|scale:"48x48" }}
"""
# imagefield can be a dict with "path" and "url" keys
if imagefield.__class__.__name__ == 'dict':
imagefield = type('imageobj', (object,), imagefield)
image_path = resized_path(imagefield.path, size, method)
if not os.path.exists(image_path):
try:
import Image
except ImportError:
try:
from PIL import Image
except ImportError:
raise ImportError('Cannot import the Python Image Library.')
image = Image.open(imagefield.path)
# normalize image mode
if image.mode != 'RGB':
image = image.convert('RGB')
# parse size string 'WIDTHxHEIGHT'
width, height = [int(i) for i in size.split('x')]
# use PIL methods to edit images
if method == 'scale':
image.thumbnail((width, height), Image.ANTIALIAS)
image.save(image_path, FMT)
elif method == 'crop':
try:
import ImageOps
except ImportError:
from PIL import ImageOps
ImageOps.fit(image, (width, height), Image.ANTIALIAS
).save(image_path, FMT)
return resized_path(imagefield.url, size, method)
def crop(imagefield, size):
"""
Template filter used to crop an image
to make it fill the defined area.
{% load image_tags %}
{{ profile.picture|crop:"48x48" }}
"""
return scale(imagefield, size, 'crop')
register.filter('scale', scale)
register.filter('crop', crop)
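# --- Illustrative sketch (not part of the original module) ---
# Besides ImageFieldFile objects, the filters accept a plain dict with "path"
# and "url" keys (see the type() trick at the top of scale()). The paths below
# are placeholders; the source image must exist for the resize to happen.
def _demo_scale():
    avatar = {
        'path': '/srv/media/avatars/alice.png',   # filesystem location of the original
        'url': '/media/avatars/alice.png',        # public URL of the original
    }
    # First call writes /srv/media/avatars/alice_scale_48x48.jpg and returns
    # /media/avatars/alice_scale_48x48.jpg; later calls just return the URL.
    return scale(avatar, '48x48')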
|
lordakshaya/pyexcel
|
examples/basics/read_cell_by_cell.py
|
Python
|
bsd-3-clause
| 1,031
| 0.00291
|
"""
read_cell_by_cell.py
:copyright: (c) 2014-2015 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
This shows how to use **Reader** class to go through a single
page spreadsheet. The output is::
1.0
2.0
3.0
4.0
5.0
6.0
7.0
8.0
9.0
"""
import os
import pyexcel as pe
def main(base_dir):
    # Simply give the file name to **Reader**
# "example.xls","example.xlsx","example.ods", "example.xlsm"
spreadsheet = pe.load(os.path.join(base_dir,"example.csv"))
# row_range() gives [0 .. number of rows]
for r in spreadsheet.row_range():
        # column_range() gives [0 .. number of columns]
        for c in spreadsheet.column_range():
# cell_value(row_index, column_index)
# return the value at the specified
# position
# please note that both row_index
# and column_index starts from 0
print(spreadsheet.cell_value(r, c))
if __name__ == '__main__':
main(os.getcwd())
|
crossbario/crossbarexamples
|
exclude_subscribers/carol.py
|
Python
|
apache-2.0
| 2,561
| 0.000781
|
import random
from os import environ, urandom
from os.path import exists
from functools import partial
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
from autobahn.wamp import cryptosign
from autobahn.wamp.types import PublishOptions
if not exists('carol.priv'):
with open('carol.priv', 'wb') as f:
f.write(urandom(32))
class Component(ApplicationSession):
"""
"""
key = cryptosign.SigningKey.from_raw_key('carol.priv')
@inlineCallbacks
def onJoin(self, details):
print("session attached")
print(" authid: {}".format(details.authid))
print("authrole: {}".format(details.authrole))
def got_heartbeat(name, counter):
print("hearbeat: {}: {}".format(name, counter))
for name in ['alice', 'bob', 'carol', 'dave', 'erin']:
yield self.subscribe(
partial(got_heartbeat, name),
'public.heartbeat.{}'.format(name),
)
counter = 0
topic = 'public.heartbeat.carol'
while True:
print("publish '{}'".format(topic))
self.publish(topic, counter)
            counter += 1
yield sleep(3)
def onConnect(self):
extra = {
            'pubkey': self.key.public_key(),
'channel_binding': 'tls-unique'
}
# now request to join ..
self.join(self.config.realm,
authmethods=['cryptosign'],
authid='carol',
authextra=extra)
def onChallenge(self, challenge):
self.log.info("authentication challenge received: {challenge}", challenge=challenge)
# alright, we've got a challenge from the router.
# not yet implemented. check the trustchain the router provided against
# our trustroot, and check the signature provided by the
# router for our previous challenge. if both are ok, everything
# is fine - the router is authentic wrt our trustroot.
# sign the challenge with our private key.
signed_challenge = self.key.sign_challenge(self, challenge)
# send back the signed challenge for verification
return signed_challenge
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", "ws://127.0.0.1:8080/ws"),
"crossbardemo",
)
print("Carol pubkey: {}".format(Component.key.public_key()))
runner.run(Component)
|
namili/blueman
|
blueman/gui/MessageArea.py
|
Python
|
gpl-3.0
| 5,072
| 0.043573
|
# Copyright (C) 2008 Valmantas Paliksa <walmis at balticum-tv dot lt>
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import gtk
import pango
from blueman.gui.GtkAnimation import WidgetFade
from blueman.Functions import get_icon
class MessageArea(gtk.EventBox):
_inst_ = None
def __new__(cls):
if not MessageArea._inst_:
MessageArea._inst_ = super(MessageArea, cls).__new__(cls)
return MessageArea._inst_
def __init__(self):
gtk.EventBox.__init__(self)
self.hbox = gtk.HBox()
self.hbox.show()
self.text = ""
self.set_app_paintable(True)
self.anim = WidgetFade(self.hbox, self.hbox.style.base[0])
self.hl_anim = WidgetFade(self.hbox, gtk.gdk.Color(65535,0,0))
self.setting_style = False
self.hbox.props.spacing = 4
self.hbox.set_border_width(2)
self.icon = gtk.Image()
self.icon.props.xpad = 4
self.label = gtk.Label()
self.label.props.xalign = 0
self.label.set_ellipsize(pango.ELLIPSIZE_END)
self.label.set_single_line_mode(True)
self.label.set_selectable(True)
self.b_more = gtk.Button(_("More"))
im = gtk.Image()
im.set_from_stock(gtk.STOCK_DIALOG_INFO, gtk.ICON_SIZE_MENU)
im.show()
self.b_more.set_image(im)
self.b_more.props.relief = gtk.RELIEF_NONE
im = gtk.Image()
im.set_from_stock(gtk.STOCK_CANCEL, gtk.ICON_SIZE_MENU)
im.show()
self.b_close = gtk.Button()
self.b_close.add(im)
self.b_close.props.relief = gtk.RELIEF_NONE
self.b_close.props.tooltip_text = _("Close")
self.hbox.pack_start(self.icon, False,)
self.hbox.pack_start(self.label, True)
self.hbox.pack_start(self.b_more, False)
self.hbox.pack_start(self.b_close, False)
self.add(self.hbox)
self.icon.show()
self.b_close.show()
self.label.show()
self.b_more.show()
self.b_close.connect("clicked", self.on_close)
self.b_more.connect("clicked", self.on_more)
self.hbox.connect("expose-event", self.expose_event)
self.b_close.connect("style-set", self.style_set)
def on_more(self, button):
d = gtk.MessageDialog(parent=None, flags=0, type=gtk.MESSAGE_INFO, buttons=gtk.BUTTONS_CLOSE)
d.props.text = self.text
d.run()
d.destroy()
def style_set(self, widget, prev_style):
if self.setting_style:
return
#This is a hack needed to use the tooltip background color
window = gtk.Window(gtk.WINDOW_POPUP)
window.set_name("gtk-tooltip")
window.ensure_style()
style = window.get_style()
window.destroy()
self.setting_style = True
#recursively set style
def _set_style(wg):
if isinstance(wg, gtk.Container):
for w in wg:
if not isinstance(w, gtk.Button):
_set_style(w)
wg.set_style(style)
_set_style(self)
self.anim.color = self.hbox.style.base[0]
self.queue_draw()
self.setting_style = False
def on_close(self, button):
def on_finished(anim):
anim.disconnect(sig)
self.props.visible = False
anim.freeze()
sig = self.anim.connect("animation-finished", on_finished)
self.anim.thaw()
self.anim.animate(start=1.0, end=0.0, duration=500)
@staticmethod
def close():
MessageArea._inst_.on_close(None)
@staticmethod
def show_message(*args):
MessageArea._inst_._show_message(*args)
def _show_message(self, text, icon=gtk.STOCK_DIALOG_WARNING):
self.text = text
self.label.set_tooltip_text(text)
self.icon.set_from_stock(icon, gtk.ICON_SIZE_MENU)
if icon == gtk.STOCK_DIALOG_WARNING:
self.hl_anim.color = gtk.gdk.Color(65535,0,0)
else:
self.hl_anim.color = gtk.gdk.Color(0,0,65535)
def on_finished(anim):
anim.disconnect(sig)
anim.freeze()
if not self.props.visible:
sig = self.anim.connect("animation-finished", on_finished)
self.anim.thaw()
self.show()
self.anim.animate(start=0.0, end=1.0, duration=500)
else:
sig = self.hl_anim.connect("animation-finished", on_finished)
self.hl_anim.thaw()
self.hl_anim.animate(start=0.7, end=1.0, duration=1000)
lines = text.split("\n")
if len(lines) > 1:
self.label.props.label = lines[0] + "..."
self.b_more.props.visible = True
else:
self.label.props.label = text
self.b_more.props.visible = False
def expose_event(self, window, event):
window.style.paint_box(window.window,
gtk.STATE_NORMAL, gtk.SHADOW_IN,
None, window, "tooltip",
window.allocation.x, window.allocation.y, window.allocation.width, window.allocation.height)
return False
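# --- Illustrative usage sketch (not part of the original module) ---
# MessageArea is a singleton widget: pack the single instance into a window
# once, then any code can raise or dismiss messages via the static helpers.
# This only outlines the calls; displaying them needs a running GTK main loop.
def _demo_message_area(container_vbox):
    area = MessageArea()                  # __new__ always returns the same instance
    container_vbox.pack_start(area, False)
    # Multi-line text collapses to the first line plus a "More" button.
    MessageArea.show_message("Connection failed\nDevice did not respond in time")
    # ... later, fade the bar out again.
    MessageArea.close()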
|
monodokimes/pythonmon
|
controller/component/__init__.py
|
Python
|
gpl-3.0
| 403
| 0
|
from controller.component.component import Component
from controller.component.graphics_component import GraphicsComponent
from controller.component.movement_component import MovementComponent
from controller.component.input_component import InputComponent
from controller.component.player_input_component import PlayerInputComponent
from controller.component.tile_map_component import TileMapComponent
|
laffra/pava
|
pava/implementation/natives/jdk/management/resource/__init__.py
|
Python
|
mit
| 215
| 0.009302
|
"""
This is the Python implementation for the Java package "jdk.management.resource.internal", compiled by Pava.
"""
import pava
from pava import nan, inf
pava_classes = {}
pava.module(__name__)
import internal
|
franciscod/python-telegram-bot
|
tests/test_inlinequeryresultvideo.py
|
Python
|
gpl-2.0
| 3,826
| 0.001307
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains a object that represents Tests for Telegram
InlineQueryResultVideo"""
import sys
if sys.version_info[0:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
sys.path.append('.')
import telegram
from tests.base import BaseTest
class InlineQueryResultVideoTest(BaseTest, unittest.TestCase):
"""This object represents Tests for Telegram InlineQueryResultVideo."""
def setUp(self):
self.id = 'id'
self.type = 'video'
self.video_url = 'video url'
self.mime_type = 'mime type'
self.video_width = 10
self.video_height = 15
self.video_duration = 15
self.thumb_url = 'thumb url'
self.title = 'title'
self.caption = 'caption'
self.description = 'description'
self.input_message_content = telegram.InputTextMessageContent('input_message_content')
self.reply_markup = telegram.InlineKeyboardMarkup([[
            telegram.InlineKeyboardButton('reply_markup')
]])
self.json_dict = {
'type': self.type,
'id': self.id,
'video_url': self.video_url,
'mime_type': self.mime_type,
'video_width': self.video_width,
'video_height': self.video_height,
'video_duration': self.video_duration,
'thumb_url': self.thumb_url,
'title': self.title,
'caption': self.caption,
            'description': self.description,
'input_message_content': self.input_message_content.to_dict(),
'reply_markup': self.reply_markup.to_dict(),
}
def test_video_de_json(self):
video = telegram.InlineQueryResultVideo.de_json(self.json_dict)
self.assertEqual(video.type, self.type)
self.assertEqual(video.id, self.id)
self.assertEqual(video.video_url, self.video_url)
self.assertEqual(video.mime_type, self.mime_type)
self.assertEqual(video.video_width, self.video_width)
self.assertEqual(video.video_height, self.video_height)
self.assertEqual(video.video_duration, self.video_duration)
self.assertEqual(video.thumb_url, self.thumb_url)
self.assertEqual(video.title, self.title)
self.assertEqual(video.description, self.description)
self.assertEqual(video.caption, self.caption)
self.assertDictEqual(video.input_message_content.to_dict(),
self.input_message_content.to_dict())
self.assertDictEqual(video.reply_markup.to_dict(), self.reply_markup.to_dict())
def test_video_to_json(self):
video = telegram.InlineQueryResultVideo.de_json(self.json_dict)
self.assertTrue(self.is_json(video.to_json()))
def test_video_to_dict(self):
video = telegram.InlineQueryResultVideo.de_json(self.json_dict).to_dict()
self.assertTrue(self.is_dict(video))
self.assertDictEqual(self.json_dict, video)
if __name__ == '__main__':
unittest.main()
|
psiinon/addons-server
|
src/olympia/lib/crypto/signing.py
|
Python
|
bsd-3-clause
| 9,286
| 0
|
import hashlib
import os
import zipfile
from base64 import b64decode, b64encode
from django.db import transaction
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.exceptions import ObjectDoesNotExist
from django.utils.encoding import force_bytes, force_text
import requests
import waffle
from django_statsd.clients import statsd
from requests_hawk import HawkAuth
from asn1crypto import cms
import olympia.core.logger
from olympia import amo
log = olympia.core.logger.getLogger('z.crypto')
SIGN_FOR_APPS = (amo.FIREFOX.id, amo.ANDROID.id)
class SigningError(Exception):
pass
def supports_firefox(file_obj):
"""Return True if the file supports Firefox or Firefox for Android.
We only sign files that are at least compatible with Firefox/Firefox for
Android.
"""
apps = file_obj.version.apps.all()
return apps.filter(max__application__in=SIGN_FOR_APPS)
def get_id(addon):
"""Return the addon GUID if <= 64 chars, or its sha256 hash otherwise.
We don't want GUIDs longer than 64 chars: bug 1203365.
"""
guid = force_bytes(addon.guid)
if len(guid) <= 64:
# Return guid as original unicode string.
return addon.guid
return force_text(hashlib.sha256(guid).hexdigest())
def use_recommendation_signer(file_obj):
try:
item = file_obj.version.addon.discoveryitem
except ObjectDoesNotExist:
return False
return item.recommendable
def call_signing(file_obj):
"""Sign `file_obj` via autographs /sign/file endpoint.
:returns: The certificates serial number.
"""
conf = settings.AUTOGRAPH_CONFIG
with storage.open(file_obj.current_file_path) as fobj:
input_data = force_text(b64encode(fobj.read()))
signing_data = {
'input': input_data,
'keyid': conf['signer'],
'options': {
'id': get_id(file_obj.version.addon),
# "Add-on variant A params (PKCS7 SHA1 and COSE ES256) work in
            # Fx <57, so we can switch to that without breaking backwards
# compatibility"
# https://github.com/mozilla/addons-server/issues/9308
# This means, the pkcs7 sha1 signature is used for backwards
# compatibility and cose sha256 will be used for newer
# Firefox versions.
            # The relevant pref in Firefox is
# "security.signed_app_signatures.policy"
# where it's set to COSEAndPKCS7WithSHA1OrSHA256 to match
# these settings.
'pkcs7_digest': 'SHA1',
'cose_algorithms': ['ES256']
},
}
hawk_auth = HawkAuth(id=conf['user_id'], key=conf['key'])
# We are using a separate signer that adds the mozilla-recommendation.json
# file. There is currently only `recommended` as a type but more may be
# added later, e.g partner.
if use_recommendation_signer(file_obj):
signing_data['keyid'] = conf['recommendation_signer']
signing_data['options']['recommendations'] = ['recommended']
hawk_auth = HawkAuth(
id=conf['recommendation_signer_user_id'],
key=conf['recommendation_signer_key'])
with statsd.timer('services.sign.addon.autograph'):
response = requests.post(
'{server}/sign/file'.format(server=conf['server_url']),
json=[signing_data],
auth=hawk_auth)
if response.status_code != requests.codes.CREATED:
msg = u'Posting to add-on signing failed: {0} {1}'.format(
response.reason, response.text)
log.error(msg)
raise SigningError(msg)
# Save the returned file in our storage.
with storage.open(file_obj.current_file_path, 'wb') as fobj:
fobj.write(b64decode(response.json()[0]['signed_file']))
# Now fetch the certificates serial number. Future versions of
# autograph may return this in the response.
# https://github.com/mozilla-services/autograph/issues/214
# Now extract the file and fetch the pkcs signature
with zipfile.ZipFile(file_obj.current_file_path, mode='r') as zip_fobj:
return get_signer_serial_number(zip_fobj.read(
os.path.join('META-INF', 'mozilla.rsa')))
def sign_file(file_obj):
"""Sign a File if necessary.
If it's not necessary (file exists but it's a mozilla signed one, or it's
a search plugin) then return the file directly.
If there's no endpoint (signing is not enabled) or isn't reviewed yet,
or there was an error while signing, raise an exception - it
shouldn't happen.
Otherwise proceed with signing and return the signed file.
"""
from olympia.versions.tasks import extract_version_to_git
if (file_obj.version.addon.type == amo.ADDON_SEARCH and
file_obj.version.is_webextension is False):
# Those aren't meant to be signed, we shouldn't be here.
return file_obj
if not settings.ENABLE_ADDON_SIGNING:
raise SigningError(u'Not signing file {0}: no active endpoint'.format(
file_obj.pk))
# No file? No signature.
if not os.path.exists(file_obj.current_file_path):
raise SigningError(u'File {0} doesn\'t exist on disk'.format(
file_obj.current_file_path))
# Don't sign Mozilla signed extensions (they're already signed).
if file_obj.is_mozilla_signed_extension:
# Don't raise an exception here, just log and return file_obj even
# though we didn't sign, it's not an error - we just don't need to do
# anything in this case.
log.info(u'Not signing file {0}: mozilla signed extension is already '
u'signed'.format(file_obj.pk))
return file_obj
# We only sign files that are compatible with Firefox.
if not supports_firefox(file_obj):
raise SigningError(
u'Not signing version {0}: not for a Firefox version we support'
.format(file_obj.version.pk))
# Sign the file. If there's any exception, we skip the rest.
cert_serial_num = str(call_signing(file_obj))
size = storage.size(file_obj.current_file_path)
# Save the certificate serial number for revocation if needed, and re-hash
# the file now that it's been signed.
file_obj.update(cert_serial_num=cert_serial_num,
hash=file_obj.generate_hash(),
is_signed=True,
size=size)
log.info(u'Signing complete for file {0}'.format(file_obj.pk))
if waffle.switch_is_active('enable-uploads-commit-to-git-storage'):
# Extract this version into git repository
transaction.on_commit(
lambda: extract_version_to_git.delay(
version_id=file_obj.version.pk,
note='after successful signing'))
return file_obj
def is_signed(file_path):
"""Return True if the file has been signed.
This utility function will help detect if a XPI file has been signed by
mozilla (if we can't trust the File.is_signed field).
It will simply check the signature filenames, and assume that if they're
named "mozilla.*" then the xpi has been signed by us.
This is in no way a perfect or correct solution, it's just the way we
do it until we decide to inspect/walk the certificates chain to
validate it comes from Mozilla.
"""
try:
with zipfile.ZipFile(file_path, mode='r') as zf:
filenames = set(zf.namelist())
except (zipfile.BadZipfile, IOError):
filenames = set()
return set([u'META-INF/mozilla.rsa', u'META-INF/mozilla.sf',
u'META-INF/manifest.mf']).issubset(filenames)
class SignatureInfo(object):
def __init__(self, pkcs7):
if isinstance(pkcs7, SignatureInfo):
# Allow passing around SignatureInfo objects to avoid
# re-reading the signature every time.
self.content = pkcs7.content
else:
self.content = cms.ContentInfo.load(pkcs7).native['content']
@property
def signer_serial_number(self):
return self.signer_info['sid']['serial_number']
@property
def signer
|
Venturi/oldcms
|
env/lib/python2.7/site-packages/tests/config.py
|
Python
|
apache-2.0
| 31,993
| 0.00175
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import copy
import os
import sys
from argparse import Namespace
from mock import patch
from six import StringIO, text_type
from tzlocal import get_localzone
import six
from djangocms_installer import config
from djangocms_installer.config.data import CMS_VERSION_MATRIX, DJANGO_VERSION_MATRIX
from djangocms_installer.install import check_install
from djangocms_installer.utils import less_than_version, supported_versions
from .base import BaseTestClass, unittest
class TestConfig(BaseTestClass):
def test_default_config(self):
conf_data = config.parse(['--db=postgres://user:pwd@host/dbname',
'-q', '-p'+self.project_dir, 'example_prj'])
self.assertEqual(conf_data.project_name, 'example_prj')
self.assertEqual(conf_data.cms_version, 3.1)
self.assertEqual(conf_data.django_version, 1.7)
self.assertEqual(conf_data.i18n, 'yes')
self.assertEqual(conf_data.reversion, 'yes')
self.assertEqual(conf_data.permissions, 'yes')
self.assertEqual(conf_data.use_timezone, 'yes')
self.assertEqual(conf_data.db, 'postgres://user:pwd@host/dbname')
self.assertEqual(conf_data.no_db_driver, False)
self.assertEqual(conf_data.no_deps, False)
self.assertEqual(conf_data.no_sync, False)
self.assertEqual(conf_data.plugins, False)
self.assertEqual(conf_data.requirements_file, None)
def test_cli_config(self):
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--cms-version=stable',
'--django-version=1.7',
'--i18n=no',
'--reversion=no',
'--permissions=no',
'--use-tz=no',
'-tEurope/Rome',
'-len', '-lde', '-lit',
'-p'+self.project_dir,
'example_prj'])
self.assertEqual(conf_data.project_name, 'example_prj')
self.assertEqual(conf_data.cms_version, 3.1)
self.assertEqual(conf_data.django_version, 1.7)
self.assertEqual(conf_data.i18n, 'no')
self.assertEqual(conf_data.reversion, 'no')
self.assertEqual(conf_data.permissions, 'no')
self.assertEqual(conf_data.use_timezone, 'no')
self.assertEqual(conf_data.timezone, 'Europe/Rome')
self.assertEqual(conf_data.languages, ['en', 'de', 'it'])
self.assertEqual(conf_data.project_directory, self.project_dir)
self.assertEqual(conf_data.db, 'postgres://user:pwd@host/dbname')
self.assertEqual(conf_data.db_driver, 'psycopg2')
conf_data = config.parse([
'-q',
            '--db=postgres://user:pwd@host/dbname',
'--cms-version=stable',
'--django-version=1.4',
'--cms-version=3.0',
'--i18n=no',
'--reversion=no',
'--permissions=no',
'--use-tz=no',
'-tEurope/Rome',
'-len', '-lde', '-lit',
'-p'+self.project_dir,
'example_prj'])
self.assertEqual(conf_data.project_name, 'example_prj')
self.assertEqual(conf_data.cms_version, 3.0)
self.assertEqual(conf_data.django_version, 1.4)
self.assertEqual(conf_data.i18n, 'no')
self.assertEqual(conf_data.reversion, 'no')
self.assertEqual(conf_data.permissions, 'no')
self.assertEqual(conf_data.use_timezone, 'no')
self.assertEqual(conf_data.timezone, 'Europe/Rome')
self.assertEqual(conf_data.languages, ['en', 'de', 'it'])
self.assertEqual(conf_data.project_directory, self.project_dir)
self.assertEqual(conf_data.db, 'postgres://user:pwd@host/dbname')
self.assertEqual(conf_data.db_driver, 'psycopg2')
def test_version_mismatch(self):
with self.assertRaises(SystemExit):
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--cms-version=stable',
'--django-version=1.4',
'--i18n=no',
'--reversion=no',
'--permissions=no',
'--use-tz=no',
'-tEurope/Rome',
'-len', '-lde', '-lit',
'-p'+self.project_dir,
'example_prj'])
def test_cli_config_commaseparated_languages(self):
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'-len,de,it',
'-p'+self.project_dir,
'example_prj'
])
self.assertEqual(conf_data.languages, ['en', 'de', 'it'])
def test_cli_config_comma_languages_with_space(self):
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'-len , de , it',
'-p'+self.project_dir,
'example_prj'
])
self.assertEqual(conf_data.languages, ['en', 'de', 'it'])
def test_invalid_choices(self):
with patch('sys.stdout', self.stdout):
with patch('sys.stderr', self.stderr):
with self.assertRaises(SystemExit) as error:
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--cms-version=2.6',
'--django-version=1.1',
'--i18n=no',
'-p'+self.project_dir,
'example_prj'])
if six.PY3:
self.assertTrue(self.stderr.getvalue().find('--cms-version/-v: invalid choice: \'2.6\'') > -1)
else:
self.assertTrue(self.stderr.getvalue().find('--cms-version/-v: invalid choice: u\'2.6\'') > -1)
def test_invalid_project_name(self):
with patch('sys.stdout', self.stdout):
stderr_tmp = StringIO()
with patch('sys.stderr', stderr_tmp):
with self.assertRaises(SystemExit) as error:
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'-p'+self.project_dir,
'test'])
self.assertTrue(stderr_tmp.getvalue().find('Project name "test" is not a valid app name') > -1)
stderr_tmp = StringIO()
with patch('sys.stderr', stderr_tmp):
with self.assertRaises(SystemExit) as error:
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'-p'+self.project_dir,
'assert'])
self.assertTrue(stderr_tmp.getvalue().find('Project name "assert" is not a valid app name') > -1)
stderr_tmp = StringIO()
with patch('sys.stderr', stderr_tmp):
with self.assertRaises(SystemExit) as error:
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'-p'+self.project_dir,
'values'])
self.assertTrue(stderr_tmp.getvalue().find('Project name "values" is not a valid app name') > -1)
stderr_tmp = StringIO()
with patch('sys.stderr', stderr_tmp):
with self.assertRaises(SystemExit) as error:
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'-p'+self.project_dir,
'project-name'])
self.assertTrue(stderr_tmp.getvalue().find('Project name "project-name" is not a valid app name') > -1)
def test_invalid_project_path(self):
prj_dir = 'example_prj'
existing_path = os.path.join(self.project_dir, prj_dir)
os.makedirs(existing_path)
with patch('sys.stdout', self.stdout):
with patch('sys.stderr', self.stderr):
with se
|
PiotrZakrzewski/steamweb
|
steamweb/actions/auction_actions.py
|
Python
|
mit
| 4,727
| 0.003385
|
from steamweb.actions_base import PlayerAction
from steamweb.db_utils import get_tender, get_tile, get_resource
from steamweb.utils import InsufficientFunds, get_capacity
from steamweb.database import db_session
from steamweb.models import *
class BidAction(PlayerAction):
def resolution(self, game, player, arguments):
tender = get_tender(player.id)
highest = get_highest_bid(tender.id)
            if not player.cash > highest:  # the new bid will be highest + 1, so the player needs more cash than the current highest bid
raise InsufficientFunds(
"Player {0} has only {1} cash, while highest bid is: {2}".format(player.id, player.cash, highest))
new_bid = Bid()
new_bid.bid = highest + 1
new_bid.player = player.id
new_bid.tender = tender.id
new_bid.game = player.game
db_session.add(new_bid)
db_session.commit()
def get_phases(self):
return ["tile_auction", "specialist_auction"]
class PickSpecialistAuctionPrize(PlayerAction):
def resolution(self, game, player, arguments):
spec_id = arguments['specialist_id']
specialist = Specialist.query.filter(Specialist.id == spec_id).first()
bonus = arguments['bonus']
if bonus not in ['energy', 'water', 'ore', 'quartz', 'None']:
raise ValueError("Invalid resource name: {}".format(bonus))
if not bonus == 'None' and not getattr(game, "bonus_" + bonus):
raise ValueError("Bonus resource {} is not available".format(bonus))
if not specialist or specialist.game != game.id:
raise ValueError("Specialist id must exist and belong to the current game")
if specialist.player:
raise ValueError("Specialist {} is already taken by another player".format(specialist))
player.specialist = spec_id
specialist.player = player.id
if not bonus == 'None':
capacity = get_capacity(player, bonus)
setattr(game, "bonus_" + bonus, False)
current = getattr(player, bonus)
new_stock = current + 1
if new_stock <= capacity:
setattr(player, bonus, new_stock)
else:
r = get_resource(game, bonus)
r.current_supply += 1
def get_phases(self):
return ["special
|
ist_auction_victory"]
def domain(self, game, player):
specs = Specialist.query.filter(Specialist.game == game.id).all()
specs = [spec.id for spec in specs if spec.player is None]
bonus_res = ['None'] # player can always pick no bonus, for instance when there is none to be taken
if game.bonus_energy:
            bonus_res.append('energy')
if game.bonus_ore:
bonus_res.append('ore')
if game.bonus_quartz:
bonus_res.append('quartz')
if game.bonus_water:
bonus_res.append('water')
pick_spec_domain = []
for spec_id in specs:
for bonus in bonus_res:
arg_permutation = {}
arg_permutation['specialist_id'] = spec_id
arg_permutation['bonus'] = bonus
pick_spec_domain.append(arg_permutation)
return ('set', pick_spec_domain)
def get_arguments(self):
return [('specialist_id', int), ('bonus', str)]
class VentureerTileSelect(PlayerAction):
def resolution(self, game, player, arguments):
"""
Select tile for tile auction. Happens in special phase tile_auction_setup where only player who picked ventureer can
perform actions (only this one).
:param command_arguments:
:param game:
:param player:
:return:
"""
x = arguments['x']
y = arguments['y']
if not x < game.map_width:
raise ValueError("Map width: {0} claimed tile X coordinate: {1}".format(game.map_width, x))
if not y < game.map_height:
raise ValueError("Map height: {0} claimed tile Y coordinate: {1}".format(game.map_height, y))
tile = get_tile(game, x, y)
if tile.owner:
raise ValueError("Tile {0} cannot be claimed - it is already owned".format(tile))
tile.auction_target = True
game.phase = "tile_auction"
def get_phases(self):
return ["tile_auction_setup"]
def get_arguments(self):
return [('x', int), ('y', int)]
def domain(self, game, player):
tiles = Tile.query.filter(Tile.game == game.id).all()
domain = [{'x': tile.x, 'y': tile.y} for tile in tiles if not tile.neutral and tile.owner is None]
if not domain:
domain = [{'x': tile.x, 'y': tile.y} for tile in tiles if tile.owner is None]
return ('set', domain)
|
myhearter/dianping
|
ddp/app/merchants/migrations/0007_auto_20171119_0852.py
|
Python
|
mit
| 807
| 0.001239
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('merchants', '0006_classfy'),
]
operations = [
migrations.CreateModel(
name='RegionItem',
fields=[
('name', models.CharField(max_length=20)),
('id', models.CharField(max_length=10, serialize=False, primary_key=True)),
],
            options={
'db_table': 'T_region',
},
),
migrations.RenameModel(
old_name='Classfy',
new_name='ClassfyItem',
),
migrations.RenameModel(
old_name='MerchantInfo',
new_name='MerchantItem',
),
]
|
DirectXMan12/nova-hacking
|
nova/tests/virt/baremetal/test_driver.py
|
Python
|
apache-2.0
| 14,537
| 0.001376
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the base baremetal driver class."""
from oslo.config import cfg
from nova.compute import power_state
from nova import exception
from nova import test
from nova.tests.image import fake as fake_image
from nova.tests import utils
from nova.tests.virt.baremetal.db import base as bm_db_base
from nova.tests.virt.baremetal.db import utils as bm_db_utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import driver as bm_driver
from nova.virt.baremetal import fake
CONF = cfg.CONF
COMMON_FLAGS = dict(
firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
host='test_host',
)
BAREMETAL_FLAGS = dict(
driver='nova.virt.baremetal.fake.FakeDriver',
instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
power_manager='nova.virt.baremetal.fake.FakePowerManager',
vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
group='baremetal',
)
class BareMetalDriverNoDBTestCase(test.TestCase):
def setUp(self):
super(BareMetalDriverNoDBTestCase, self).setUp()
self.flags(**COMMON_FLAGS)
self.flags(**BAREMETAL_FLAGS)
self.driver = bm_driver.BareMetalDriver(None)
def test_validate_driver_loading(self):
self.assertTrue(isinstance(self.driver.driver,
fake.FakeDriver))
self.assertTrue(isinstance(self.driver.vif_driver,
fake.FakeVifDriver))
self.assertTrue(isinstance(self.driver.volume_driver,
fake.FakeVolumeDriver))
self.assertTrue(isinstance(self.driver.firewall_driver,
fake.FakeFirewallDriver))
class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase):
def setUp(self):
super(BareMetalDriverWithDBTestCase, self).setUp()
self.flags(**COMMON_FLAGS)
self.flags(**BAREMETAL_FLAGS)
fake_image.stub_out_image_service(self.stubs)
self.context = utils.get_test_admin_context()
self.driver = bm_driver.BareMetalDriver(None)
self.addCleanup(fake_image.FakeImageService_reset)
def _create_node(self, node_info=None, nic_info=None):
result = {}
if node_info is None:
node_info = bm_db_utils.new_bm_node(
id=123,
service_host='test_host',
cpus=2,
memory_mb=2048,
)
if nic_info is None:
nic_info = [
{'address': '01:23:45:67:89:01', 'datapath_id': '0x1',
'port_no': 1},
{'address': '01:23:45:67:89:02', 'datapath_id': '0x2',
'port_no': 2},
]
result['node_info'] = node_info
result['nic_info'] = nic_info
result['node'] = db.bm_node_create(self.context, node_info)
for nic in nic_info:
db.bm_interface_create(
self.context,
result['node']['id'],
nic['address'],
nic['datapath_id'],
nic['port_no'],
)
result['instance'] = utils.get_test_instance()
result['instance']['node'] = result['node']['uuid']
result['spawn_params'] = dict(
admin_password='test_pass',
block_device_info=None,
context=self.context,
image_meta=utils.get_test_image_info(
None, result['instance']),
injected_files=[('/fake/path', 'hello world')],
instance=result['instance'],
network_info=utils.get_test_network_info(),
)
result['destroy_params'] = dict(
instance=result['instance'],
network_info=result['spawn_params']['network_info'],
block_device_info=result['spawn_params']['block_device_info'],
)
return result
def test_get_host_stats(self):
node = self._create_node()
stats = self.driver.get_host_stats()
self.assertTrue(isinstance(stats, list))
self.assertEqual(len(stats), 1)
stats = stats[0]
self.assertEqual(stats['cpu_arch'], 'test')
self.assertEqual(stats['test_spec'], 'test_value')
self.assertEqual(stats['hypervisor_type'], 'baremetal')
self.assertEqual(stats['hypervisor_hostname'], node['node']['uuid'])
self.assertEqual(stats['host'], 'test_host')
self.assertEqual(stats['vcpus'], 2)
self.assertEqual(stats['host_memory_total'], 2048)
def test_spawn_ok(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
self.assertEqual(row['instance_name'], node['instance']['hostname'])
def test_macs_from_nic_for_instance(self):
node = self._create_node()
expected = set([nic['address'] for nic in node['nic_info']])
self.assertEqual(
expected, self.driver.macs_for_instance(node['instance']))
def test_macs_for_instance_after_spawn(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
expected = set([nic['address'] for nic in node['nic_info']])
        self.assertEqual(
expected, self.driver.macs_for_instance(node['instance']))
def test_macs_for_instance(self):
node = self._create_node()
expected = set(['01:23:45:67:89:01', '01:23:45:67:89:02'])
self.assertEqual(
expected, self.driver.macs_for_instance(node['instance']))
    def test_macs_for_instance_no_interfaces(self):
# Nodes cannot boot with no MACs, so we raise an error if that happens.
node = self._create_node(nic_info=[])
self.assertRaises(exception.NovaException,
self.driver.macs_for_instance, node['instance'])
def test_spawn_node_already_associated(self):
node = self._create_node()
db.bm_node_update(self.context, node['node']['id'],
{'instance_uuid': '1234-5678'})
self.assertRaises(exception.NovaException,
self.driver.spawn, **node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], None)
def test_spawn_node_in_use(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
self.assertRaises(exception.NovaException,
self.driver.spawn, **node['spawn_params'])
def test_spawn_node_not_found(self):
node = self._create_node()
db.bm_node_update(self.context, node['node']['id'],
{'uuid': 'hide-this-node'})
self.assertRaises(exception.NovaException,
self.driver.spawn, **node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
|
dims/heat
|
heat/engine/resources/openstack/keystone/user.py
|
Python
|
apache-2.0
| 9,242
| 0
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.keystone import role_assignments
from heat.engine import support
class KeystoneUser(resource.Resource,
role_assignments.KeystoneRoleAssignmentMixin):
"""Heat Template Resource for Keystone User.
Users represent an individual API consumer. A user itself must be owned by
a specific domain, and hence all user names are not globally unique, but
only unique to their domain.
"""
support_status = support.SupportStatus(
version='2015.1',
message=_('Supported versions: keystone v3'))
default_client_name = 'keystone'
entity = 'users'
PROPERTIES = (
NAME, DOMAIN, DESCRIPTION, ENABLED, EMAIL, PASSWORD,
DEFAULT_PROJECT, GROUPS
) = (
'name', 'domain', 'description', 'enabled', 'email', 'password',
'default_project', 'groups'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of keystone user.'),
update_allowed=True
),
DOMAIN: properties.Schema(
properties.Schema.STRING,
_('Name of keystone domain.'),
default='default',
update_allowed=True,
constraints=[constraints.CustomConstraint('keystone.domain')]
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of keystone user.'),
default='',
update_allowed=True
),
ENABLED: properties.Schema(
properties.Schema.BOOLEAN,
_('Keystone user is enabled or disabled'),
default=True,
update_allowed=True
),
EMAIL: properties.Schema(
properties.Schema.STRING,
_('Email address of keystone user.'),
update_allowed=True
),
PASSWORD: properties.Schema(
properties.Schema.STRING,
_('Password of keystone user.'),
update_allowed=True
),
DEFAULT_PROJECT: properties.Schema(
properties.Schema.STRING,
_('Default project of keystone user.'),
update_allowed=True,
constraints=[constraints.CustomConstraint('keystone.project')]
),
GROUPS: properties.Schema(
properties.Schema.LIST,
_('keystone user groups.'),
update_allowed=True,
schema=properties.Schema(
properties.Schema.STRING,
_('keystone user group.'),
constraints=[constraints.CustomConstraint('keystone.group')]
)
)
}
properties_schema.update(
role_assignments.KeystoneRoleAssignmentMixin.mixin_properties_schema)
def validate(self):
super(KeystoneUser, self).validate()
self.validate_assignment_properties()
def client(self):
return super(KeystoneUser, self).client().client
def _update_user(self,
user_id,
domain,
new_name=None,
new_description=None,
new_email=None,
new_password=None,
new_default_project=None,
enabled=None):
values = dict()
if new_name is not None:
values['name'] = new_name
if new_description is not None:
values['description'] = new_description
if new_email is not None:
values['email'] = new_email
if new_password is not None:
values['password'] = new_password
if new_default_project is not None:
values['default_project'] = new_default_project
if enabled is not None:
values['enabled'] = enabled
values['user'] = user_id
domain = (self.client_plugin().get_domain_id(domain))
values['domain'] = domain
return self.client().users.update(**values)
def _add_user_to_groups(self, user_id, groups):
if groups is not None:
group_ids = [self.client_plugin().get_group_id(group)
for group in groups]
for group_id in group_ids:
self.client().users.add_to_group(user_id,
group_id)
def _remove_user_from_groups(self, user_id, groups):
if groups is not None:
group_ids = [self.client_plugin().get_group_id(group)
for group in groups]
for group_id in group_ids:
self.client().users.remove_from_group(user_id,
group_id)
def _find_diff(self, updated_prps, stored_prps):
new_group_ids = [self.client_plugin().get_group_id(group)
for group in
                          (set(updated_prps or []) -
set(stored_prps or []))]
        removed_group_ids = [self.client_plugin().get_group_id(group)
for group in
(set(stored_prps or []) -
set(updated_prps or []))]
return new_group_ids, removed_group_ids
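    # A standalone illustration of the set-difference idea used above (the
    # group names are purely hypothetical):
    #   updated = {'admins', 'devs'}
    #   stored = {'devs', 'qa'}
    #   updated - stored == {'admins'}   # groups to add
    #   stored - updated == {'qa'}       # groups to remove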
def handle_create(self):
user_name = (self.properties[self.NAME] or
self.physical_resource_name())
description = self.properties[self.DESCRIPTION]
domain = self.client_plugin().get_domain_id(
self.properties[self.DOMAIN])
enabled = self.properties[self.ENABLED]
email = self.properties[self.EMAIL]
password = self.properties[self.PASSWORD]
default_project = self.client_plugin().get_project_id(
self.properties[self.DEFAULT_PROJECT])
groups = self.properties[self.GROUPS]
user = self.client().users.create(
name=user_name,
domain=domain,
description=description,
enabled=enabled,
email=email,
password=password,
default_project=default_project)
self.resource_id_set(user.id)
self._add_user_to_groups(user.id, groups)
self.create_assignment(user_id=user.id)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
name = None
# Don't update the name if no change
if self.NAME in prop_diff:
name = prop_diff[self.NAME] or self.physical_resource_name()
description = prop_diff.get(self.DESCRIPTION)
enabled = prop_diff.get(self.ENABLED)
email = prop_diff.get(self.EMAIL)
password = prop_diff.get(self.PASSWORD)
domain = (prop_diff.get(self.DOMAIN) or
self._stored_properties_data.get(self.DOMAIN))
default_project = prop_diff.get(self.DEFAULT_PROJECT)
self._update_user(
user_id=self.resource_id,
domain=domain,
new_name=name,
new_description=description,
enabled=enabled,
new_default_project=default_project,
new_email=email,
new_password=password
)
if self.GROUPS in prop_diff:
(new_group_ids, removed_group_ids) = self._find_diff(
prop_diff[self.GROUPS],
self._stored_pro
|
shaneknapp/spark
|
python/pyspark/tests/test_context.py
|
Python
|
apache-2.0
| 13,901
| 0.001511
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import stat
import tempfile
import threading
import time
import unittest
from collections import namedtuple
from pyspark import SparkConf, SparkFiles, SparkContext
from pyspark.testing.utils import ReusedPySparkTestCase, PySparkTestCase, QuietTest, SPARK_HOME
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
self.assertFalse(self.sc.getCheckpointDir() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual(
"file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())),
)
self.assertEqual(
self.sc.getCheckpointDir(), os.path.dirname(flatMappedRDD.getCheckpointFile())
)
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(
flatMappedRDD.getCheckpointFile(), flatMappedRDD._jrdd_deserializer
)
self.assertEqual([1, 2, 3, 4], recovered.collect())
class LocalCheckpointTests(ReusedPySparkTestCase):
def test_basic_localcheckpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertFalse(flatMappedRDD.isLocallyCheckpointed())
flatMappedRDD.localCheckpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.isLocallyCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
def func(x):
from userlibrary import UserClass # type: ignore
            return UserClass().hello()
with QuietTest(self.sc):
self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
# Add the file, so the job should now succeed:
        path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
def test_add_file_recursively_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello")
self.sc.addFile(path, True)
download_path = SparkFiles.get("hello")
self.assertNotEqual(path, download_path)
with open(download_path + "/hello.txt") as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
with open(download_path + "/sub_hello/sub_hello.txt") as test_file:
self.assertEqual("Sub Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass # noqa: F401
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
        # this fails due to `userlib` not being on the Python path:
def func():
from userlib import UserClass # type: ignore[import]
UserClass()
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer # type: ignore[import]
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer # type: ignore[import]
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class ContextTests(unittest.TestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
def test_get_or_create(self):
with SparkContext.getOrCreate() as sc:
self.assertTrue(SparkContext.getOrCreate() is sc)
def test_parallelize_eager_cleanup(self):
with SparkContext() as sc:
temp_files = os.listdir(sc._temp_dir)
sc.parallelize([0, 1, 2])
post_parallelize_temp_files = os.listdir(sc._temp_dir)
self.assertEqual(temp_files, post_parallelize_temp_files)
def test_set_conf(self):
# This is for an internal use case. When there is an existing SparkContext,
# SparkSession's builder needs to set configs into SparkContext's conf.
sc = SparkContext()
sc._conf.set("spark.test.SPARK16224", "SPARK16224")
self.assertEqual(sc._jsc.sc().conf().get("spark.test.SPARK16224"), "SPARK16224")
sc.stop()
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
|
sdpython/pyquickhelper
|
src/pyquickhelper/loghelper/buffered_flog.py
|
Python
|
mit
| 844
| 0
|
"""
@file
@brief Buffer as a logging function.
"""
from io import StringIO
class BufferedPrint:
"""
Buffered display. Relies on :epkg:`*py:io:StringIO`.
Use it as follows:
.. runpython::
:showcode:
        def do_something(fLOG=None):
if fLOG:
fLOG("Did something.")
return 3
from pyquickhelper.loghelper import BufferedPrint
buf = BufferedPrint()
do_something(fLOG=buf.fprint)
print(buf)
"""
def __init__(self):
"constructor"
self.buffer = StringIO()
def fprint(self, *args, **kwargs):
"print function"
mes = " ".join(str(_) for _ in args)
self.buffer.write(mes)
self.buffer.write("\n")
def __str__(self):
"Returns the content."
return self.buffer.getvalue()
|
michaelyou/One-piece-forum
|
forum/migrations/0001_initial.py
|
Python
|
mit
| 12,680
| 0.005205
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.auth.models
import django.utils.timezone
from django.conf import settings
import django.core.validators
import forum.models
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='ForumUser',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username')),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=254, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('nickname', models.CharField(max_length=200, null=True, blank=True)),
('avatar', models.CharField(max_length=200, null=True, blank=True)),
('signature', models.CharField(max_length=500, null=True, blank=True)),
('location', models.CharField(max_length=200, null=True, blank=True)),
('website', models.URLField(null=True, blank=True)),
('company', models.CharField(max_length=200, null=True, blank=True)),
('role', models.IntegerField(null=True, blank=True)),
('balance', models.IntegerField(null=True, blank=True)),
('reputation', models.IntegerField(null=True, blank=True)),
('self_intro', models.CharField(max_length=500, null=True, blank=True)),
('updated', models.DateTimeField(null=True, blank=True)),
('twitter', models.CharField(max_length=200, null=True, blank=True)),
('github', models.CharField(max_length=200, null=True, blank=True)),
('douban', models.CharField(max_length=200, null=True, blank=True)),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'abstract': False,
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Favorite',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('involved_type', models.IntegerField(null=True, blank=True)),
('created', models.DateTimeField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Node',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200, null=True, blank=True)),
('slug', models.SlugField(max_length=200, null=True, blank=True)),
('thumb', models.CharField(max_length=200, null=True, blank=True)),
('introduction', models.CharField(max_length=500, null=True, blank=True)),
('created', models.DateTimeField(null=True, blank=True)),
('updated', models.DateTimeField(null=True, blank=True)),
('topic_count', models.IntegerField(null=True, blank=True)),
('custom_style', forum.models.NormalTextField(null=True, blank=True)),
('limit_reputation', models.IntegerField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content', forum.models.NormalTextField(null=True, blank=True)),
('status', models.IntegerField(null=True, blank=True)),
('involved_type', models.IntegerField(null=True, blank=True)),
('occurrence_time', models.DateTimeField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Plane',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200, null=True, blank=True)),
('created', models.DateTimeField(null=True, blank=True)),
('updated', models.DateTimeField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Reply',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content', forum.models.NormalTextField(null=True, blank=True)),
('created', models.DateTimeField(null=True, blank=True)),
('updated', models.DateTimeField(null=True, blank=True)),
('up_vote', models.IntegerField(null=True, blank=True)),
('down_vote', models.IntegerField(null=True, blank=True)),
('last_touched', models.DateTimeField(null=True, blank=True)),
('author', models.ForeignKey(related_name='reply_author', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.CreateModel(
name='Topic',
fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200, null=True, blank=True)),
('slug', models.SlugField(max_length=200, null=True, blank=True)),
('content', forum.models.NormalTextField(null=True, blank=True)),
('status', models.IntegerField(null=True, blank=True)),
('hits', models.IntegerField(null=True, blank=True)),
('created', models.DateTimeField(null=True, blank=True)),
('updated', models.DateTimeField(null=True, blank=True)),
('reply_count', models.IntegerField(null=True, blank=True)),
('last_replied_time', models.DateTimeField(null=True, blank=True)),
|
lsolanka/gridcells
|
tests/unit/test_analysis_signal.py
|
Python
|
gpl-3.0
| 10,163
| 0.000197
|
'''Test the analysis.signal module.'''
from __future__ import absolute_import, print_function, division
import pytest
import numpy as np
import gridcells.analysis.signal as asignal
from gridcells.analysis.signal import (local_extrema, local_maxima,
local_minima, ExtremumTypes,
LocalExtrema)
RTOL = 1e-10
def _data_generator(n_items, sz):
'''Generate pairs of test vectors.'''
it = 0
while it < n_items:
N1 = np.random.randint(sz) + 1
N2 = np.random.randint(sz) + 1
if N1 == 0 and N2 == 0:
continue
a1 = np.random.rand(N1)
a2 = np.random.rand(N2)
yield (a1, a2)
it += 1
class TestCorrelation(object):
'''
    Test the analysis.signal.corr function (and effectively the core of the
    autoCorrelation function).
'''
maxN = 500
maxLoops = 1000
def test_onesided(self):
'''Test the one-sided version of ``corr``.'''
for a1, a2 in _data_generator(self.maxLoops, self.maxN):
c_cpp = asignal.corr(a1, a2, mode='onesided')
c_np = np.correlate(a1, a2, mode='full')[::-1][a1.size - 1:]
np.testing.assert_allclose(c_cpp, c_np, rtol=RTOL)
def test_twosided(self):
'''Test the two-sided version of ``corr``.'''
for a1, a2 in _data_generator(self.maxLoops, self.maxN):
c_cpp = asignal.corr(a1, a2, mode='twosided')
c_np = np.correlate(a1, a2, mode='full')[::-1]
np.testing.assert_allclose(c_cpp, c_np, rtol=RTOL)
def test_range(self):
'''Test the ranged version of ``corr``.'''
# Half the range of both signals
for a1, a2 in _data_generator(self.maxLoops, self.maxN):
if a1.size <= 1 or a2.size <= 1:
continue
lag_start = - (a1.size // 2)
lag_end = a2.size // 2
c_np_centre = a1.size - 1
c_cpp = asignal.corr(a1, a2, mode='range', lag_start=lag_start,
lag_end=lag_end)
c_np = np.correlate(a1, a2, mode='full')[::-1]
np.testing.assert_allclose(
c_cpp,
c_np[c_np_centre + lag_start:c_np_centre + lag_end + 1],
rtol=RTOL)
def test_zero_len(self):
'''Test that an exception is raised when inputs have zero length.'''
a1 = np.array([])
a2 = np.arange(10)
# corr(a1, a2)
lag_start = 0
lag_end = 0
for mode in ("onesided", "twosided", "range"):
with pytest.raises(TypeError):
asignal.corr(a1, a2, mode, lag_start, lag_end)
with pytest.raises(TypeError):
asignal.corr(a2, a1, mode, lag_start, lag_end)
with pytest.raises(TypeError):
asignal.corr(a1, a1, mode, lag_start, lag_end)
def test_non_double(self):
'''Test the corr function when dtype is not double.'''
a1 = np.array([1, 2, 3], dtype=int)
asignal.corr(a1, a1, mode='twosided')
class TestAutoCorrelation(object):
'''Test the acorr function.'''
maxN = 500
maxLoops = 1000
def test_default_params(self):
'''Test default parameters.'''
a = np.arange(10)
c_cpp = asignal.acorr(a)
c_np = np.correlate(a, a, mode='full')[::-1][a.size - 1:]
np.testing.assert_allclose(c_cpp, c_np, rtol=RTOL)
def test_onesided(self):
'''Test the one-sided version of ``corr``.'''
a = np.arange(10)
c_cpp = asignal.acorr(a, mode='onesided', max_lag=5)
c_np = np.correlate(a, a, mode='full')[::-1][a.size - 1:a.size - 1 + 6]
np.testing.assert_allclose(c_cpp, c_np, rtol=RTOL)
def test_twosided(self):
'''Test the two-sided version of ``corr``.'''
a = np.arange(10)
c_cpp = asignal.acorr(a, mode='twosided', max_lag=5)
c_np = np.correlate(a, a, mode='full')[::-1][a.size - 6:a.size + 5]
np.testing.assert_allclose(c_cpp, c_np, rtol=RTOL)
def test_norm(self):
'''Test normalization.'''
# Simple array
a = np.arange(10)
c_cpp = asignal.acorr(a, mode='twosided', norm=True)
c_np = np.correlate(a, a, mode='full')[::-1]
np.testing.assert_allclose(c_cpp, c_np / np.max(c_np), rtol=RTOL)
# A zero array will return zero
zero_array = np.zeros(13)
c_cpp = asignal.acorr(zero_array, mode='twosided', norm=True)
assert np.all(c_cpp == 0.)
def generate_sin(n_half_cycles, resolution=100):
'''Generate a sine function with a number of (full) half cycles.
    Note that the positions of the extrema might be shifted by +/- 1 with
    respect to the exact sine function because of possible rounding errors.
Parameters
----------
n_half_cycles : int
Number of half cycles to generate. Does not have to be even.
resolution : int
Number of data points for each half cycle.
'''
if n_half_cycles < 1:
raise ValueError()
if resolution < 1:
raise ValueError()
f = 1. / (2 * resolution)
t = np.arange(n_half_cycles * resolution, dtype=float)
sig = np.sin(2 * np.pi * f * t)
extrema_positions = np.array(np.arange(n_half_cycles) * resolution +
resolution / 2,
dtype=int)
extrema_types = []
current_type = ExtremumTypes.MAX
for _ in range(n_half_cycles):
extrema_types.append(current_type)
if current_type is ExtremumTypes.MAX:
current_type = ExtremumTypes.MIN
else:
current_type = ExtremumTypes.MAX
return (sig, extrema_positions, np.array(extrema_types))
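# Worked example of the construction above: generate_sin(2, resolution=4)
# returns sig = sin(2*pi*t/8) for t = 0..7, extrema_positions = [2, 6] and
# extrema_types = [MAX, MIN], i.e. the peak of the first half cycle and the
# trough of the second one.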
class TestLocalExtrema(object):
'''Test computation of local extrema.'''
def test_local_extrema(self):
for n_extrema in [1, 2, 51]:
sig, extrema_idx, extrema_types = generate_sin(n_extrema)
extrema = local_extrema(sig)
assert len(extrema) == n_extrema
assert np.all(extrema_idx[extrema_types == ExtremumTypes.MIN] ==
extrema.get_type(ExtremumTypes.MIN))
assert np.all(extrema_idx[extrema_types == ExtremumTypes.MAX] ==
extrema.get_type(ExtremumTypes.MAX))
def test_zero_array(self):
for func in [local_extrema, local_maxima, local_minima]:
extrema = func(np.empty(0))
assert len(extrema) == 0
def test_single_item(self):
'''This should return a zero length array.'''
for func in [local_extrema, local_maxima, local_minima]:
extrema = func(np.array([1.]))
assert len(extrema) == 0
def test_maxima(self):
# One maximum only
for n_extrema in [1, 2]:
sig, extrema_idx, extrema_types = generate_sin(n_extrema)
maxima = local_maxima(sig)
assert len(maxima) == 1
assert np.all(extrema_idx[extrema_types == ExtremumTypes.MAX] ==
maxima)
# 2 maxima
for n_extrema in [3, 4]:
sig, extrema_idx, extrema_types = generate_sin(n_extrema)
maxima = local_maxima(sig)
assert len(maxima) == 2
assert np.all(extrema_idx[extrema_types == ExtremumTypes.MAX] ==
maxima)
def test_minima(self):
# Only one maximum so should return empty
n_extrema = 1
sig, extrema_idx, extrema_types = generate_sin(n_extrema)
minima = local_minima(sig)
assert len(minima) == 0
assert np.all(extrema_idx[extrema_types == ExtremumTypes.MIN] ==
minima)
# One maximum and minimum
n_extrema = 2
sig, extrema_idx, extrema_types = generate_sin(n_extrema)
minima = local_minima(sig)
assert len(minima) == 1
assert np.all(extrema_idx[extrema_types == ExtremumTypes.MIN] ==
minima)
# 2 minima
        for n_extrema in [4, 5]:
sig, extrema_idx, extrema_types = generate_sin(n_extrema)
|
Empire-of-Code-Puzzles/checkio-empire-most-numbers
|
verification/src/tests.py
|
Python
|
gpl-2.0
| 2,790
| 0.001792
|
TESTS = {
"Level_1": [
{
"input": [1, 2, 3],
"answer": 2,
"explanation": "3-1=2"
},
{
"input": [5, -5],
"answer": 10,
"explanation": "5-(-5)=10"
},
{
"input": [10.2, -2.2, 0, 1.1, 0.5],
"answer": 12.4,
"explanation": "10.2-(-2.2)=12.4"
},
{
"input": [],
"answer": 0,
"explanation": "Empty"
},
{"input": [-99.9, 99.9],
"answer": 199.8,
"explanation": "99.9-(-99.9)"},
{"input": [1, 1],
"answer": 0,
"explanation": "1-1"},
{"input": [0, 0, 0, 0],
"answer": 0,
"explanation": "0-0"},
{"input": [36.0, -26.0, -7.5, 0.9, 0.53, -6.6, -71.0, 0.53, -48.0, 57.0, 69.0, 0.063, -4.7, 0.01, 9.2],
"answer": 140.0,
"explanation": "69.0-(-71.0)"},
{"input": [-0.035, 0.0, -0.1, 83.0, 0.28, 60.0],
"answer": 83.1,
"explanation": "83.0-(-0.1)"},
{"input": [0.02, 0.93, 0.066, -94.0, -0.91, -21.0, -7.2, -0.018, 26.0],
"answer": 120.0,
"explanation": "26.0-(-94.0)"},
{"input": [89.0, 0.014, 2.9, -1.2, 5.8],
"answer": 90.2,
"explanation": "89.0-(-1.2)"},
{"input": [-69.0, 0.0, 0.0, -0.051, -0.021, -0.81],
"answer": 69.0,
"explanation": "0.0-(-69.0)"},
{"input": [-0.07],
"answer": 0.0,
"explanation": "-0.07-(-0.07)"},
{"input": [0.074, 0.12, -0.4, 4.0, -1.7, 3.0, -5.1, 0.57, -54.0, -41.0, -5.2, -5.6, 3.8, 0.054, -35.0, -5.0,
-0.005, 0.034],
"answer": 58.0,
"explanation": "4.0-(-54.0)"},
{"input": [29.0, 0.47, -4.5, -6.7, -0.051, -0.82, -0.074, -4.0, -0.015, -0.015, -8.0, -0.43],
"answer": 37.0,
"explanation": "29.0-(-8.0)"},
{"input": [-0.036, -0.11, -0.55, -64.0],
"answer": 63.964,
"explanation": "-0.036-(-64.0)"},
{"input": [-0.092, -0.079, -0.31, -0.87, -28.0, -6.2, -0.097, -5.8
|
, -0.025, -28.0, -4.7, -2.9, -8.0, -0.093,
-13.0, -73.0],
"answer": 72.975,
"explanation": "-0.025-(-73.0)"},
{"inpu
|
t": [-0.015, 7.6],
"answer": 7.615,
"explanation": "7.6-(-0.015)"},
{"input": [-46.0, 0.19, -0.08, -4.0, 4.4, 0.071, -0.029, -0.034, 28.0, 0.043, -97.0],
"answer": 125.0,
"explanation": "28.0-(-97.0)"},
{"input": [32.0, -0.07, -0.056, -6.4, 0.084],
"answer": 38.4,
"explanation": "32.0-(-6.4)"},
{"input": [0.017, 0.015, 0.69, 0.78],
"answer": 0.765,
"explanation": "0.78-0.015"},
]
}
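# A minimal solution sketch consistent with the expected answers above: the
# answer is always the difference between the largest and the smallest number,
# and 0 for an empty list. The function name is illustrative; the verification
# harness that consumes TESTS is not shown here.
def most_numbers(numbers):
    return max(numbers) - min(numbers) if numbers else 0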
|
CODAIT/graph_def_editor
|
tests/function_graph_test.py
|
Python
|
apache-2.0
| 6,316
| 0.006966
|
# Copyright 2021 Google. All Rights Reserved.
# Copyright 2019 IBM. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for function_graph.py in the GraphDef Editor."""
import unittest
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
import shutil
import tempfile
import numpy as np
import graph_def_editor as gde
class FunctionGraphTest(unittest.TestCase):
def setUp(self):
# Create a temporary directory for SavedModel files.
self.temp_dir = tempfile.mkdtemp()
def tearDown(self):
# Remove the directory after the test.
# Comment out this line to prevent deleting temps.
shutil.rmtree(self.temp_dir)
pass # In case previous line gets commented out
def build_tf_graph(self):
"""Builds a tf graph for function (x + y) * 10.0 ."""
@tf.function
def multiplier_function(x):
return tf.constant(10.0, name="function_multiplier") * x
tf_g = tf.Graph()
with tf_g.as_default():
x = tf.placeholder(name="x", dtype=tf.float32, shape=[])
y = tf.placeholder(name="y", dtype=tf.float32, shape=[])
result_op = tf.add(x, y, name="add")
_ = multiplier_function(result_op)
return tf_g
  def run_tf_graph(self, tf_g, x, y):
with tf.Session(graph=tf_g) as sess:
x_tensor = tf_g.get_tensor_by_name("x:0")
y_tensor = tf_g.get_tensor_by_name("y:0")
output_tensor = tf_g.get_tensor_by_name("PartitionedCall:0")
return sess.run(output_tensor, {x_tensor: x, y_tensor: y})
def save_tf_graph(self, tf_g, model_dir):
x_tensor = tf_g.get_tensor_by_name("x:0")
y_tensor = tf_g.get_tensor_by_name("y:0")
output_tensor = tf_g.get_tensor_by_name("PartitionedCall:0")
with tf.Session(graph=tf_g) as sess:
tf.saved_model.simple_save(sess, model_dir,
inputs={"x": x_tensor, "y": y_tensor},
outputs={"out": output_tensor})
def test_function_rewrite(self):
tf_g = self.build_tf_graph()
self.assertEqual(30.0, self.run_tf_graph(tf_g, 1.0, 2.0))
graph = gde.Graph(tf_g)
add_op = graph.get_node_by_name("add")
function_name = add_op.outputs[0].consumers()[0].get_attr("f").name
self.assertIn(function_name, graph.function_names)
function_graph = graph.get_function_graph_by_name(function_name)
function_multiplier_op = \
function_graph.get_node_by_name("function_multiplier")
self.assertEqual(10.0, function_multiplier_op.get_attr("value"))
function_multiplier_op.replace_attr("value",
np.array(1000.0, dtype=np.float32))
self.assertEqual(3000.0, self.run_tf_graph(graph.to_tf_graph(), 1.0, 2.0))
return graph
def test_export_saved_model(self):
g = self.test_function_rewrite()
model_dir = self.temp_dir + "/saved_model"
g.to_saved_model(model_dir)
tf_g = tf.Graph()
with tf.Session(graph=tf_g) as sess:
_ = tf.saved_model.load(sess, [tf.saved_model.tag_constants.SERVING],
model_dir)
self.assertEqual(3000.0, self.run_tf_graph(tf_g, 1.0, 2.0))
def test_import_saved_model(self):
g = self.test_function_rewrite()
model_dir = self.temp_dir + "/saved_model"
self.save_tf_graph(g.to_tf_graph(), model_dir)
g = gde.saved_model_to_graph(model_dir)
self.assertEqual(3000.0, self.run_tf_graph(g.to_tf_graph(), 1.0, 2.0))
def test_number_attr_support(self):
model_dir = self.temp_dir + "/saved_model"
@tf.function
def test_function(c):
cdim = tf.constant(1, tf.int32)
c1 = tf.constant([2, 1, 5], tf.int32, name="FuncConst")
c2 = tf.constant([2, 1, 5], tf.int32)
      # ConcatOffset has a variable number of inputs and outputs,
      # which is why it uses number_attr in functions
concat_offset = tf.raw_ops.ConcatOffset(
concat_dim=cdim, shape=[c, c1, c2])
out = tf.math.reduce_sum(concat_offset)
return out
tf_g = tf.Graph()
with tf_g.as_default():
with tf.Session() as sess:
c = tf.placeholder(name="c", dtype=tf.int32)
out_func = test_function(c)
c = tf_g.get_tensor_by_name("c:0")
self.assertEqual(3, sess.run(out_func, {c: [2, 1, 5]}))
tf.saved_model.simple_save(
sess, model_dir, inputs={"c": c}, outputs={"out_func": out_func})
g = gde.saved_model_to_graph(model_dir)
tf_g = g.to_tf_graph()
with tf.Session(graph=tf_g) as sess:
output_tensor = tf_g.get_tensor_by_name("PartitionedCall:0")
c = tf_g.get_tensor_by_name("c:0")
self.assertEqual(3, sess.run(output_tensor, {c: [2, 1, 5]}))
f = g.get_function_graph_by_name(g.function_names[0])
func_const_op = f.get_node_by_name("FuncConst")
func_const_op.replace_attr("value", np.array([2, 2, 5], dtype=np.int32))
tf_g = g.to_tf_graph()
with tf.Session(graph=tf_g) as sess:
output_tensor = tf_g.get_tensor_by_name("PartitionedCall:0")
c = tf_g.get_tensor_by_name("c:0")
self.assertEqual(4, sess.run(output_tensor, {c: [2, 1, 5]}))
  def test_visualize(self):
try:
import graphviz
except ModuleNotFoundError as error:
print("WARNING: graphviz is not installed, skipping test")
return
tf_g = self.build_tf_graph()
graph = gde.Graph(tf_g)
function_graph = graph.get_function_graph_by_name(graph.function_names[0])
gv_graph = gde.util.parse_graphviz_json(
function_graph.visualize(format="json").decode())
expected_gv_graph = {
"x": ["mul"],
"function_multiplier": ["mul"],
"mul": ["Identity"],
"Identity": []
}
self.assertEqual(expected_gv_graph, gv_graph)
if __name__ == "__main__":
unittest.main()
|
piotras/midgard-core
|
tests/GIR/test_query_select.py
|
Python
|
lgpl-2.1
| 9,821
| 0.020772
|
# coding=utf-8
import sys
import struct
import unittest
from test_000_config import TestConfig
from test_001_connection import TestConnection
from gi.repository import Midgard
from gi.repository import GObject
class TestQuerySelect(unittest.TestCase):
mgd = None
def setUp(self):
if self.mgd == None:
self.mgd = TestConnection.openConnection()
def tearDown(self):
self.mgd.close()
self.mgd = None
# Create three persons for *all* tests
@staticmethod
def setUpClass():
mgd = TestConnection.openConnection()
tr = Midgard.Transaction(connection = mgd)
tr.begin()
# Create three persons for tests
a = Midgard.Object.factory(mgd, "midgard_person", None)
a.set_property("firstname", "Alice")
a.set_property("lastname", "Smith")
a.create()
b = Midgard.Object.factory(mgd, "midgard_person", None)
b.set_property("firstname", "John")
b.set_property("lastname", "Smith")
b.create()
c = Midgard.Object.factory(mgd, "midgard_person", None)
c.set_property("firstname", "Marry")
c.set_property("lastname", "Smith")
c.create()
tr.commit()
# Purge three persons after all tests are done
@staticmethod
def tearDownClass():
mgd = TestConnection.openConnection()
tr = Midgard.Transaction(connection = mgd)
tr.begin()
st = Midgard.QueryStorage(dbclass = "midgard_person")
qs = Midgard.QuerySelect(connection = mgd, storage = st)
qs.set_constraint(
Midgard.QueryConstraint(
property = Midgard.QueryProperty(property = "lastname"),
operator = "=",
holder = Midgard.QueryValue.create_with_value("Smith")
)
)
qs.execute()
for person in qs.list_objects():
person.purge(False)
tr.commit()
def testSelectAllPersons(self):
st = Midgard.QueryStorage(dbclass = "midgard_person")
qs = Midgard.QuerySelect(connection = self.mgd, storage = st)
qs.execute()
objects = qs.list_objects()
# Expect admin person and Smith family
self.assertEqual(len(objects), 4);
def getQSForSmiths(self):
st = Midgard.QueryStorage(dbclass = "midgard_person")
qs = Midgard.QuerySelect(connection = self.mgd, storage = st)
qs.set_constraint(
Midgard.QueryConstraint(
property = Midgard.QueryProperty(property = "lastname"),
operator = "=",
holder = Midgard.QueryValue.create_with_value("Smith")
)
)
return qs
def testSelectSmithFamily(self):
qs = self.getQSForSmiths()
qs.execute()
# Expect Smith family - 3 persons
self.assertEqual(qs.get_results_count(), 3);
def testSelectNothing(self):
st = Midgard.QueryStorage(dbclass = "midgard_person")
qs = Midgard.QuerySelect(connection = self.mgd, storage = st)
qs.set_constraint(
Midgard.QueryConstraint(
property = Midgard.QueryProperty(property = "firstname"),
operator = "=",
holder = Midgard.QueryValue.create_with_value("Sir Lancelot")
)
)
qs.execute()
# Do not expect persons
self.assertEqual(qs.get_results_count(), 0);
def testSelectInvalidType(self):
st = Midgard.QueryStorage(dbclass = "NotExists")
qs = Midgard.QuerySelect(connection = self.mgd, storage = st)
# Check if we have GError
self.assertRaises(GObject.GError, qs.execute)
# Check if we have correct domain
try:
qs.execute()
except GObject.GError as e:
self.assertEqual(e.domain, "midgard-validation-error-quark")
self.assertEqual(e.code, Midgard.ValidationError.TYPE_INVALID)
def testSelectOrderASC(self):
qs = self.getQSForSmiths()
qs.add_order(Midgard.QueryProperty(property = "firstname"), "ASC")
qs.execute()
l = qs.list_objects()
self.assertEqual(l[0].get_property("firstname"), "Alice")
self.assertEqual(l[1].get_property("firstname"), "John")
self.assertEqual(l[2].get_property("firstname"), "Marry")
def testSelectOrderDESC(self):
qs = self.getQSForSmiths()
qs.add_order(Midgard.QueryProperty(property = "firstname"), "DESC")
qs.execute()
l = qs.list_objects()
self.assertEqual(l[0].get_property("firstname"), "Marry")
self.assertEqual(l[1].get_property("firstname"), "John")
self.assertEqual(l[2].get_property("firstname"), "Alice")
def testSelectLimit(self):
qs = self.getQSForSmiths()
qs.add_order(Midgard.QueryProperty(property = "firstname"), "DESC")
qs.set_limit(1)
qs.execute()
self.assertEqual(qs.get_results_count(), 1)
l = qs.list_objects()
self.assertEqual(l[0].get_property("firstname"), "Marry")
def testSelectOffset(self):
qs = self.getQSForSmiths()
qs.add_order(Midgard.QueryProperty(property = "firstname"), "DESC")
qs.set_limit(1)
qs.set_offset(2)
qs.execute()
self.assertEqual(qs.get_results_count(), 1)
l = qs.list_objects()
self.assertEqual(l[0].get_property("firstname"), "Alice")
def testJoin(self):
        storage_one = Midgard.QueryStorage(dbclass = "midgard_person")
storage_two = Midgard.QueryStorage(dbclass = "midgard_person")
qs = Midgard.QuerySelect(connection = self.mgd, storage = storage_one)
group = Midgard.QueryConstraintGroup(grouptype = "AND")
        constraint_one = Midgard.QueryConstraint(
property = Midgard.QueryProperty(property = "firstname"),
operator = "=",
holder = Midgard.QueryValue.create_with_value("Alice")
)
constraint_two = Midgard.QueryConstraint(
property = Midgard.QueryProperty(property = "firstname", storage = storage_two),
operator = "=",
holder = Midgard.QueryValue.create_with_value("John")
)
group.add_constraint(constraint_one)
group.add_constraint(constraint_two)
qs.set_constraint(group)
qs.add_join(
"LEFT",
Midgard.QueryProperty(property = "lastname"),
Midgard.QueryProperty(property = "lastname", storage = storage_two)
)
qs.execute()
# We expect Alice person only
self.assertEqual(qs.get_results_count(), 1)
objects = qs.list_objects()
self.assertEqual(objects[0].get_property("firstname"), "Alice")
def testConstraintGroupType(self):
group = Midgard.QueryConstraintGroup(grouptype = "AND")
self.assertEqual(group.get_property("grouptype"), "AND")
self.assertEqual(group.get_group_type(), "AND")
self.assertTrue(group.set_group_type("OR"))
self.assertFalse(group.set_group_type("INVALID"))
def testConstraint(self):
person_storage = Midgard.QueryStorage(dbclass = "midgard_person")
prop = Midgard.QueryProperty(property = "firstname", storage = person_storage)
constraint = Midgard.QueryConstraint(
storage = person_storage,
operator = "=",
property = prop
)
# test if we get the same
self.assertEqual(constraint.get_storage(), person_storage)
self.assertEqual(constraint.get_operator(), "=")
self.assertEqual(constraint.get_property(), prop)
# then set new ones and test again
new_storage = Midgard.QueryStorage(dbclass = "midgard_person")
new_prop = Midgard.QueryProperty(property = "firstname", storage = person_storage)
new_operator = "<>"
constraint.set_storage(new_storage)
constraint.set_operator(new_operator)
constraint.set_property(new_prop)
self.assertEqual(constraint.get_storage(), new_storage)
self.assertEqual(constraint.get_operator(), new_operator)
self.assertEqual(constraint.get_property(), new_prop)
def testProperty(self):
storage = Midgard.QueryStorage(dbclass = "midgard_person")
prop = Midgard.QueryProperty(property = "firstname", storage = storage)
self.assertEqual(prop.get_property("storage"), storage)
self.assertEqual(prop.get_property("property"), "firstname")
def testStorage(self):
storage = Midgard.QueryStorage(dbclass = "midgard_person")
self.assertEqual(storage.get_property("dbclass"), "midgard_person")
storage.set_property("dbclass", "midgard_snippet")
self.assertEqual(storage.get_property("dbclass"), "midgard_snippet")
def testValue(self):
query_value = Midgard.QueryValue()
# No idea how to test it
#self.assertIs(query_value.get_value(), None)
som
|
JoaquimPatriarca/senpy-for-gis
|
gasp/fromtxt/pnd.py
|
Python
|
gpl-3.0
| 272
| 0.014706
|
"""
Text files to Pandas
"""
import pandas
def txt_to_pandas(csvFile, _delimiter, encoding_='utf8'):
"""
Text file to Pandas Dataframe
"""
return pandas.read_csv(
csvFile, sep=_delimiter, low_memory=False,
#encoding=encoding_
)
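# Example usage (the file path and delimiter below are hypothetical):
#   df = txt_to_pandas('/tmp/points.csv', ';')
#   print(df.shape)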
| |
comocheng/RMG-Py
|
rmgpy/rmg/input.py
|
Python
|
mit
| 22,111
| 0.007281
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import logging
import quantities
import os
from rmgpy import settings
from rmgpy.molecule import Molecule
from rmgpy.quantity import Quantity
from rmgpy.solver.base import TerminationTime, TerminationConversion
from rmgpy.solver.simple import SimpleReactor
from rmgpy.solver.liquid import LiquidReactor
from model import CoreEdgeReactionModel
################################################################################
class InputError(Exception): pass
################################################################################
rmg = None
speciesDict = {}
def database(
thermoLibraries = None,
reactionLibraries = None,
frequenciesLibraries = None,
seedMechanisms = None,
kineticsFamilies = 'default',
kineticsDepositories = 'default',
kineticsEstimator = 'group additivity',
):
# This function just stores the information about the database to be loaded
# We don't actually load the database until after we're finished reading
# the input file
if isinstance(thermoLibraries, str): thermoLibraries = [thermoLibraries]
if isinstance(reactionLibraries, str): reactionLibraries = [reactionLibraries]
if isinstance(seedMechanisms, str): seedMechanisms = [seedMechanisms]
if isinstance(frequenciesLibraries, str): frequenciesLibraries = [frequenciesLibraries]
rmg.databaseDirectory = settings['database.directory']
rmg.thermoLibraries = thermoLibraries or []
rmg.reactionLibraries = reactionLibraries or []
rmg.seedMechanisms = seedMechanisms or []
rmg.statmechLibraries = frequenciesLibraries or []
rmg.kineticsEstimator = kineticsEstimator
if kineticsDepositories == 'default':
rmg.kineticsDepositories = ['training']
elif kineticsDepositories == 'all':
rmg.kineticsDepositories = None
else:
if not isinstance(kineticsDepositories,list):
raise InputError("kineticsDepositories should be either 'default', 'all', or a list of names eg. ['training','PrIMe'].")
rmg.kineticsDepositories = kineticsDepositories
if kineticsFamilies in ('default', 'all', 'none'):
rmg.kineticsFamilies = kineticsFamilies
else:
if not isinstance(kineticsFamilies,list):
raise InputError("kineticsFamilies should be either 'default', 'all', 'none', or a list of names eg. ['H_Abstraction','R_Recombination'] or ['!Intra_Disproportionation'].")
rmg.kineticsFamilies = kineticsFamilies
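# Illustrative input-file usage of database() (library names are assumptions,
# not a tested RMG input; the global rmg object must be initialised before this
# is actually executed):
# database(
#     thermoLibraries=['primaryThermoLibrary'],
#     reactionLibraries=[],
#     seedMechanisms=[],
#     kineticsFamilies='default',
#     kineticsEstimator='group additivity',
# )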
def species(label, structure, reactive=True):
logging.debug('Found {0} species "{1}" ({2})'.format('reactive' if reactive else 'nonreactive', label, structure.toSMILES()))
spec, isNew = rmg.reactionModel.makeNewSpecies(structure, label=label, reactive=reactive)
if not isNew:
raise InputError("Species {0} is a duplicate of {1}. Species in input file must be unique".format(label,spec.label))
rmg.initialSpecies.append(spec)
speciesDict[label] = spec
def SMARTS(string):
return Molecule().fromSMARTS(string)
def SMILES(string):
return Molecule().fromSMILES(string)
def InChI(string):
return Molecule().fromInChI(string)
def adjacencyList(string):
return Molecule().fromAdjacencyList(string)
# Reaction systems
def simpleReactor(temperature,
pressure,
initialMoleFractions,
terminationConversion=None,
terminationTime=None,
sensitivity=None,
sensitivityThreshold=1e-3
):
logging.debug('Found SimpleReactor reaction system')
for value in initialMoleFractions.values():
if value < 0:
raise InputError('Initial mole fractions cannot be negative.')
    if sum(initialMoleFractions.values()) != 1:
        logging.warning('Initial mole fractions do not sum to one; renormalizing.')
        totalInitialMoles = sum(initialMoleFractions.values())
        for spec in initialMoleFractions:
            initialMoleFractions[spec] /= totalInitialMoles
T = Quantity(temperature)
P = Quantity(pressure)
termination = []
if terminationConversion is not None:
for spec, conv in terminationConversion.iteritems():
termination.append(TerminationConversion(speciesDict[spec], conv))
if terminationTime is not None:
termination.append(TerminationTime(Quantity(terminationTime)))
if len(termination) == 0:
raise InputError('No termination conditions specified for reaction system #{0}.'.format(len(rmg.reactionSystems)+2))
sensitiveSpecies = []
if sensitivity:
for spec in sensitivity:
sensitiveSpecies.append(speciesDict[spec])
system = SimpleReactor(T, P, initialMoleFractions, termination, sensitiveSpecies, sensitivityThreshold)
rmg.reactionSystems.append(system)
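# Illustrative input-file usage of simpleReactor() (species labels and values
# are assumptions; a real input would first declare these species via species()):
# simpleReactor(
#     temperature=(1350, 'K'),
#     pressure=(1.0, 'bar'),
#     initialMoleFractions={'CH4': 0.104, 'O2': 0.208, 'N2': 0.688},
#     terminationConversion={'CH4': 0.9},
#     terminationTime=(1.0, 's'),
# )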
# Reaction systems
def liquidReactor(temperature,
initialConcentrations,
terminationConversion=None,
terminationTime=None,
sensitivity=None,
                  sensitivityThreshold=1e-3):
logging.debug('Found LiquidReactor reaction system')
T = Quantity(temperature)
for spec,conc in initialConcentrations.iteritems():
concentration = Quantity(conc)
# check the dimensions are ok
# convert to mol/m^3 (or something numerically nice? or must it be SI)
initialConcentrations[spec] = concentration.value_si
termination = []
if terminationConversion is not None:
for spec, conv in terminationConversion.iteritems():
termination.append(TerminationConversion(speciesDict[spec], conv))
if terminationTime is not None:
termination.append(TerminationTime(Quantity(terminationTime)))
if len(termination) == 0:
raise InputError('No termination conditions specified for reaction system #{0}.'.format(len(rmg.reactionSystems)+2))
sensitiveSpecies = []
if sensitivity:
for spec in sensitivity:
sensitiveSpecies.append(speciesDict[spec])
system = LiquidReactor(T, initialConcentrations, termination, sensitiveSpecies, sensitivityThreshold)
rmg.reactionSystems.append(system)
def simulator(atol, rtol, sens_atol=1e-6, sens_rtol=1e-4):
rmg.absoluteTolerance = atol
rmg.relativeTolerance = rtol
rmg.sensitivityAbsoluteTolerance = sens_atol
rmg.sensitivityRelativeTolerance = sens_rtol
def solvation(solvent):
# If solvation module in input file, set the RMG solvent variable
if not isinstance(solvent,str):
raise InputError("solvent should be a string like 'water'")
rmg.solvent = solvent
def model(toleranceMoveToCore=None, toleranceKe
|
zqfan/leetcode
|
algorithms/664. Strange Printer/solution.py
|
Python
|
gpl-3.0
| 657
| 0.001522
|
class Solution(object):
def strangePrinter(self, s):
"""
:type s: str
:rtype: int
"""
n = len(s)
dp = [[0] * n for i in xrange(n + 1)]
for i in xrange(n):
dp[i][i] = 1
        for l in xrange(1, n):
for i in xrange(n-l):
e = i + l
dp[i][e] = dp[i+1][e] + 1
for j in xrange(i+1, e+1):
if s[i] == s[j] and dp[i][j-1] + dp[j+1][e] < dp[i][e]:
dp[i][e] = dp[i][j-1] + dp[j+1][e]
return dp[0][-1] if s else 0
# 201 / 201 test cases passed.
# Status: Accepted
# Runtime: 852 ms
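# A minimal usage sketch (inputs and expected outputs are illustrative):
# print(Solution().strangePrinter("aba"))     # 2 -- print "aaa", then "b" over the middle
# print(Solution().strangePrinter("aaabbb"))  # 2 -- one turn per distinct run of characters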
|
ganesh-95/python-programs
|
thoughtworks/strrev.py
|
Python
|
mit
| 109
| 0.036697
|
#reversing the words in a string
s = raw_input('enter the string:')
print ' '.join(s.split(' ')[::-1])
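# e.g. entering "hello brave world" prints "world brave hello": the word order is
# reversed while the characters inside each word are left untouched.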
|
supertom/ansible-modules-core
|
cloud/digital_ocean/digital_ocean_block_storage.py
|
Python
|
gpl-3.0
| 11,293
| 0.00487
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import json
import time
DOCUMENTATION = '''
---
module: digital_ocean_block_storage
short_description: Create/destroy or attach/detach Block Storage volumes in DigitalOcean
description:
- Create/destroy Block Storage volume in DigitalOcean, or attach/detach Block Storage volume to a droplet.
version_added: "2.2"
author: "Harnek Sidhu"
options:
command:
description:
- Which operation do you want to perform.
choices: ['create', 'attach']
required: true
state:
description:
- Indicate desired state of the target.
choices: ['present', 'absent']
required: true
api_token:
description:
- DigitalOcean api token.
required: true
block_size:
description:
- The size of the Block Storage volume in gigabytes. Required when command=create and state=present.
volume_name:
description:
- The name of the Block Storage volume.
required: true
description:
description:
- Description of the Block Storage volume.
region:
description:
- The slug of the region where your Block Storage volume should be located in.
required: true
droplet_id:
description:
- The droplet id you want to operate on. Required when command=attach.
timeout:
description:
- The timeout in seconds used for polling DigitalOcean's API.
default: 10
notes:
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN.
They both refer to the v2 token.
author:
- "Harnek Sidhu (github: @harneksidhu)"
'''
EXAMPLES = '''
# Create new Block Storage
- digital_ocean_block_storage:
state: present
command: create
api_token: <TOKEN>
region: nyc1
block_size: 10
volume_name: nyc1-block-storage
# Delete Block Storage
- digital_ocean_block_storage:
state: absent
command: create
api_token: <TOKEN>
region: nyc1
volume_name: nyc1-block-storage
# Attach Block Storage to a Droplet
- digital_ocean_block_storage:
state: present
command: attach
api_token: <TOKEN>
volume_name: nyc1-block-storage
region: nyc1
droplet_id: <ID>
# Detach Block Storage from a Droplet
- digital_ocean_block_storage:
state: absent
command: attach
api_token: <TOKEN>
volume_name: nyc1-block-storage
region: nyc1
droplet_id: <ID>
'''
RETURN = '''
id:
description: Unique identifier of a Block Storage volume returned during creation.
returned: changed
type: string
sample: "69b25d9a-494c-12e6-a5af-001f53126b44"
'''
class DOBlockStorageException(Exception):
pass
class Response(object):
def __init__(self, resp, info):
self.body = None
if resp:
self.body = resp.read()
self.info = info
@property
def json(self):
if self.body:
return json.loads(self.body)
elif "body" in self.info:
return json.loads(self.info["body"])
else:
return None
@property
def status_code(self):
return self.info["status"]
class Rest(object):
def __init__(self, module, headers):
self.module = module
self.headers = headers
self.baseurl = 'https://api.digitalocean.com/v2'
def _url_builder(self, path):
if path[0] == '/':
path = path[1:]
return '%s/%s' % (self.baseurl, path)
def send(self, method, path, data=None, headers=None):
url = self._url_builder(path)
data = self.module.jsonify(data)
resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method)
return Response(resp, info)
def get(self, path, data=None, headers=None):
return self.send('GET', path, data, headers)
def put(self, path, data=None, headers=None):
return self.send('PUT', path, data, headers)
def post(self, path, data=None, headers=None):
return self.send('POST', path, data, headers)
def delete(self, path, data=None, headers=None):
return self.send('DELETE', path, data, headers)
class DOBlockStorage(object):
def __init__(self, module):
api_token = module.params['api_token'] or \
os.environ['DO_API_TOKEN'] or os.environ['DO_API_KEY']
self.module = module
self.rest = Rest(module, {'Authorization': 'Bearer {}'.format(api_token),
'Content-type': 'application/json'})
def get_key_or_fail(self, k):
v = self.module.params[k]
if v is None:
self.module.fail_json(msg='Unable to load %s' % k)
return v
def poll_action_for_complete_status(self, action_id):
url = 'actions/{}'.format(action_id)
end_time = time.time() + self.module.params['timeout']
while time.time() < end_time:
time.sleep(2)
response = self.rest.get(url)
status = response.status_code
json = response.json
if status == 200:
if json['action']['status'] == 'completed':
return True
elif json['action']['status'] == 'errored':
raise DOBlockStorageException(json['message'])
raise DOBlockStorageException('Unable to reach api.digitalocean.com')
def get_attached_droplet_ID(self, volume_name, region):
        url = 'volumes?name={}&region={}'.format(volume_name, region)
response = self.rest.get(url)
status = response.status_code
json = response.json
if status == 200:
volumes = json['volumes']
if len(volumes)>0:
droplet_ids = volumes[0]['droplet_ids']
if len(droplet_ids)>0:
return droplet_ids[0]
return None
else:
raise DOBlockStorageException(json['message'])
def attach_detach_block_storage(self, method, volume_name, region, droplet_id):
data = {
'type' : method,
'volume_name' : volume_name,
'region' : region,
'droplet_id' : droplet_id
}
response = self.rest.post('volumes/actions', data=data)
status = response.status_code
json = response.json
if status == 202:
return self.poll_action_for_complete_status(json['action']['id'])
elif status == 200:
return True
elif status == 422:
return False
else:
raise DOBlockStorageException(json['message'])
def create_block_storage(self):
block_size = self.get_key_or_fail('block_size')
volume_name = self.get_key_or_fail('volume_name')
region = self.get_key_or_fail('region')
description = self.module.params['description']
data = {
'size_gigabytes' : block_size,
'name' : volume_name,
'description' : description,
'region' : region
}
response = self.rest.post("volumes", data=data)
status = response.status_code
json = response.json
if status == 201:
self.module.exit_json(changed=True, id=json['volume']['id'])
elif status == 409 and json['id'] == 'already_exists':
self.module.exit_json(changed=False)
else:
raise DOBlockStorageException(json['message'])
def delete_block_storage(self):
volume_name = self.get_key_or_fail('volume_name')
region = self.get_key_or_fail('region')
        url = 'volumes?name={}&region={}'.format(volume_name, region)
|
hfeeki/django-guardian
|
guardian/mixins.py
|
Python
|
bsd-2-clause
| 6,473
| 0.002935
|
from collections import Iterable
from django.conf import settings
from django.contrib.auth.decorators import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ImproperlyConfigured
from django.core.exceptions import PermissionDenied
from guardian.utils import get_403_or_None
class LoginRequiredMixin(object):
"""
A login required mixin for use with class based views. This Class is a
light wrapper around the `login_required` decorator and hence function
parameters are just attributes defined on the class.
Due to parent class order traversal this mixin must be added as the left
most mixin of a view.
    The mixin has exactly the same flow as the `login_required` decorator:
If the user isn't logged in, redirect to ``settings.LOGIN_URL``, passing
the current absolute path in the query string. Example:
``/accounts/login/?next=/polls/3/``.
If the user is logged in, execute the view normally. The view code is
free to assume the user is logged in.
**Class Settings**
``LoginRequiredMixin.redirect_field_name``
*Default*: ``'next'``
``LoginRequiredMixin.login_url``
*Default*: ``settings.LOGIN_URL``
"""
redirect_field_name = REDIRECT_FIELD_NAME
login_url = settings.LOGIN_URL
def dispatch(self, request, *args, **kwargs):
return login_required(redirect_field_name=self.redirect_field_name,
login_url=self.login_url)(
super(LoginRequiredMixin, self).dispatch
)(request, *args, **kwargs)
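# A minimal usage sketch (the view and template names are assumptions, not part
# of django-guardian itself):
# from django.views.generic import TemplateView
#
# class DashboardView(LoginRequiredMixin, TemplateView):
#     template_name = 'dashboard.html'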
class PermissionRequiredMixin(object):
"""
A view mixin that verifies if the current logged in user has the specified
permission by wrapping the ``request.user.has_perm(..)`` method.
If a `get_object()` method is defined either manually or by including
another mixin (for example ``SingleObjectMixin``) or ``self.object`` is
    defined, then the permission will be tested against that specific instance.
.. note:
Testing of a permission against a specific object instance requires an
        authentication backend that supports it. Please see ``django-guardian`` to
add object level permissions to your project.
The mixin does the following:
If the user isn't logged in, redirect to settings.LOGIN_URL, passing
the current absolute path in the query string. Example:
/accounts/login/?next=/polls/3/.
If the `raise_exception` is set to True than rather than redirect to
login page a `PermissionDenied` (403) is raised.
If the user is logged in, and passes the permission check than the view
is executed normally.
**Example Usage**::
class SecureView(PermissionRequiredMixin, View):
...
permission_required = 'auth.change_user'
...
**Class Settings**
``PermissionRequiredMixin.permission_required``
*Default*: ``None``, must be set to either a string or list of strings
in format: *<app_label>.<permission_codename>*.
``PermissionRequiredMixin.login_url``
*Default*: ``settings.LOGIN_URL``
``PermissionRequiredMixin.redirect_field_name``
*Default*: ``'next'``
``PermissionRequiredMixin.return_403``
*Default*: ``False``. Returns 403 error page instead of redirecting
user.
``PermissionRequiredMixin.raise_exception``
*Default*: ``False``
`permission_required` - the permission to check of form "<app_label>.<permission codename>"
i.e. 'polls.can_vote' for a permission on a model in the polls application.
"""
### default class view settings
login_url = settings.LOGIN_URL
permission_required = None
redirect_field_name = REDIRECT_FIELD_NAME
return_403 = False
raise_exception = False
def get_required_permissions(self, request=None):
"""
Returns list of permissions in format *<app_label>.<codename>* that
should be checked against *request.user* and *object*. By default, it
returns list from ``permission_required`` attribute.
:param request: Original request.
"""
if isinstance(self.permission_required, basestring):
perms = [self.permission_required]
elif isinstance(self.permission_required, Iterable):
perms = [p for p in self.permission_required]
else:
raise ImproperlyConfigured("'PermissionRequiredMixin' requires "
"'permission_required' attribute to be set to "
"'<app_label>.<permission codename>' but is set to '%s' instead"
% self.permission_required)
return perms
def check_permissions(self, request):
"""
Checks if *request.user* has all permissions returned by
*get_required_permissions* method.
:param request: Original request.
"""
        obj = hasattr(self, 'get_object') and self.get_object() \
or getattr(self, 'object', None)
forbidden = get_403_or_None(request,
perms=self.get_required_permissions(request),
obj=obj,
login_url=self.login_url,
redirect_field_name=self.redirect_field_name,
return_403=self.return_403,
)
if forbidden:
self.on_permission_check_fail(request, forbidden, obj=obj)
if forbidden and self.raise_exception:
raise PermissionDenied()
return forbidden
def on_permission_check_fail(self, request, response, obj=None):
"""
Method called upon permission check fail. By default it does nothing and
should be overridden, if needed.
:param request: Original request
:param response: 403 response returned by *check_permissions* method.
:param obj: Object that was fetched from the view (using ``get_object``
method or ``object`` attribute, in that order).
"""
def dispatch(self, request, *args, **kwargs):
response = self.check_permissions(request)
if response:
return response
return super(PermissionRequiredMixin, self).dispatch(request, *args,
**kwargs)
|
kobejean/tensorflow
|
tensorflow/python/framework/random_seed.py
|
Python
|
apache-2.0
| 5,902
| 0.003219
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""For seeding individual ops based on a graph-level seed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.util.tf_export import tf_export
DEFAULT_GRAPH_SEED = 87654321
_MAXINT32 = 2**31 - 1
def _truncate_seed(seed):
return seed % _MAXINT32 # Truncate to fit into 32-bit integer
@tf_export('get_seed')
def get_seed(op_seed):
"""Returns the local seeds an operation should use given an op-specific seed.
Given operation-specific seed, `op_seed`, this helper function returns two
seeds derived from graph-level and op-level seeds. Many random operations
internally use the two seeds to allow user to change the seed globally for a
graph, or for only specific operations.
For details on how the graph-level seed interacts with op seeds, see
`tf.set_random_seed`.
Args:
op_seed: integer.
Returns:
A tuple of two integers that should be used for the local seed of this
operation.
"""
eager = context.executing_eagerly()
if eager:
global_seed = context.global_seed()
else:
global_seed = ops.get_default_graph().seed
if global_seed is not None:
if op_seed is None:
# pylint: disable=protected-access
if eager:
op_seed = context.internal_operation_seed()
else:
op_seed = ops.get_default_graph()._last_id
seeds = _truncate_seed(global_seed), _truncate_seed(op_seed)
else:
if op_seed is not None:
seeds = DEFAULT_GRAPH_SEED, _truncate_seed(op_seed)
else:
seeds = None, None
# Avoid (0, 0) as the C++ ops interpret it as nondeterminism, which would
  # be unexpected since Python docs say nondeterminism is (None, None).
if seeds == (0, 0):
return (0, _MAXINT32)
return seeds
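# Illustrative sketch of the derivation above (graph mode assumed; the numbers
# are arbitrary):
# with ops.Graph().as_default() as g:
#     g.seed = 1234
#     print(get_seed(7))     # -> (1234, 7)
#     print(get_seed(None))  # -> (1234, <id of the most recently created op>)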
@tf_export('set_random_seed')
def set_random_seed(seed):
"""Sets the graph-level random seed.
Operations that rely on a random seed actually derive it from two seeds:
the graph-level and operation-level seeds. This sets the graph-level seed.
Its interactions with operation-level seeds is as follows:
  1. If neither the graph-level nor the operation seed is set:
A random seed is used for this op.
2. If the graph-level seed is set, but the operation seed is not:
The system deterministically picks an operation seed in conjunction
with the graph-level seed so that it gets a unique random sequence.
3. If the graph-level seed is not set, but the operation seed is set:
A default graph-level seed and the specified operation seed are used to
determine the random sequence.
4. If both the graph-level and the operation seed are set:
Both seeds are used in conjunction to determine the random sequence.
To illustrate the user-visible effects, consider these examples:
To generate different sequences across sessions, set neither
graph-level nor op-level seeds:
```python
a = tf.random_uniform([1])
b = tf.random_normal([1])
print("Session 1")
with tf.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.Session() as sess2:
print(sess2.run(a)) # generates 'A3'
print(sess2.run(a)) # generates 'A4'
print(sess2.run(b)) # generates 'B3'
print(sess2.run(b)) # generates 'B4'
```
To generate the same repeatable sequence for an op across sessions, set the
seed for the op:
```python
a = tf.random_uniform([1], seed=1)
b = tf.random_normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequence of values for 'a', but different sequences of values for 'b'.
print("Session 1")
with tf.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.Session() as sess2:
print(sess2.run(a)) # generates 'A1'
print(sess2.run(a)) # generates 'A2'
print(sess2.run(b)) # generates 'B3'
print(sess2.run(b)) # generates 'B4'
```
To make the random sequences generated by all ops be repeatable across
sessions, set a graph-level seed:
```python
tf.set_random_seed(1234)
a = tf.random_uniform([1])
b = tf.random_normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequences of 'a' and 'b'.
print("Session 1")
with tf.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.Session() as sess2:
print(sess2.run(a)) # generates 'A1'
print(sess2.run(a)) # generates 'A2'
print(sess2.run(b)) # generates 'B1'
print(sess2.run(b)) # generates 'B2'
```
Args:
seed: integer.
"""
if context.executing_eagerly():
context.set_global_seed(seed)
else:
ops.get_default_graph().seed = seed
|
lferr/charm
|
charm/test/schemes/dabenc_test.py
|
Python
|
lgpl-3.0
| 3,191
| 0.015042
|
from charm.schemes.dabe_aw11 import Dabe
from charm.adapters.dabenc_adapt_hybrid import HybridABEncMA
from charm.toolbox.pairinggroup import PairingGroup, GT
import unittest
debug = False
class DabeTest(unittest.TestCase):
def testDabe(self):
groupObj = PairingGroup('SS512')
dabe = Dabe(groupObj)
GP = dabe.setup()
#Setup an authority
auth_attrs= ['ONE', 'TWO', 'THREE', 'FOUR']
(SK, PK) = dabe.authsetup(GP, auth_attrs)
if debug: print("Authority SK")
if debug: print(SK)
#Setup a user and give him some keys
gid, K = "bob", {}
usr_attrs = ['THREE', 'ONE', 'TWO']
for i in usr_attrs: dabe.keygen(GP, SK, i, gid, K)
if debug: print('User credential list: %s' % usr_attrs)
if debug: print("\nSecret key:")
if debug: groupObj.debug(K)
#Encrypt a random element in GT
m = groupObj.random(GT)
policy = '((one or three) and (TWO or FOUR))'
        if debug: print('Access Policy: %s' % policy)
CT = dabe.encrypt(PK, GP, m, policy)
if debug: print("\nCiphertext..."
|
)
if debug: groupObj.debug(CT)
orig_m = dabe.decrypt(GP, K, CT)
        assert m == orig_m, 'FAILED Decryption!!!'
if debug: print('Successful Decryption!')
class HybridABEncMATest(unittest.TestCase):
def testHybridABEncMA(self):
groupObj = PairingGroup('SS512')
dabe = Dabe(groupObj)
hyb_abema = HybridABEncMA(dabe, groupObj)
#Setup global parameters for all new authorities
gp = hyb_abema.setup()
#Instantiate a few authorities
#Attribute names must be globally unique. HybridABEncMA
#Two authorities may not issue keys for the same attribute.
#Otherwise, the decryption algorithm will not know which private key to use
jhu_attributes = ['jhu.professor', 'jhu.staff', 'jhu.student']
jhmi_attributes = ['jhmi.doctor', 'jhmi.nurse', 'jhmi.staff', 'jhmi.researcher']
(jhuSK, jhuPK) = hyb_abema.authsetup(gp, jhu_attributes)
(jhmiSK, jhmiPK) = hyb_abema.authsetup(gp, jhmi_attributes)
allAuthPK = {}; allAuthPK.update(jhuPK); allAuthPK.update(jhmiPK)
#Setup a user with a few keys
bobs_gid = "20110615 bob@gmail.com cryptokey"
K = {}
hyb_abema.keygen(gp, jhuSK,'jhu.professor', bobs_gid, K)
hyb_abema.keygen(gp, jhmiSK,'jhmi.researcher', bobs_gid, K)
msg = b'Hello World, I am a sensitive record!'
size = len(msg)
policy_str = "(jhmi.doctor or (jhmi.researcher and jhu.professor))"
ct = hyb_abema.encrypt(allAuthPK, gp, msg, policy_str)
if debug:
print("Ciphertext")
print("c1 =>", ct['c1'])
print("c2 =>", ct['c2'])
decrypted_msg = hyb_abema.decrypt(gp, K, ct)
if debug: print("Result =>", decrypted_msg)
assert decrypted_msg == msg, "Failed Decryption!!!"
if debug: print("Successful Decryption!!!")
del groupObj
if __name__ == "__main__":
unittest.main()
|
fras2560/Baseball-Simulator
|
simulator/__init__.py
|
Python
|
apache-2.0
| 5,251
| 0.003047
|
"""
-------------------------------------------------------
Simulator
a package that helps simulate a baseball game based on
previous batting history
-------------------------------------------------------
Author: Dallas Fraser
ID: 110242560
Email: fras2560@mylaurier.ca
Version: 2014-09-11
-------------------------------------------------------
"""
import logging
from simulator.player import Player
from simulator.game import Game
from simulator.helpers import pstdev
import random
from copy import deepcopy
from pprint import PrettyPrinter
import sys
from simulator.tqdm import tqdm
SEPERATOR = "----------------------------------------"
class Simulator():
def __init__(self, file, logger=None):
if logger is None:
logging.basicConfig(level=logging.DEBUG,
                                format='%(asctime)s %(message)s')
logger = logging.getLogger(__name__)
self.logger = logger
self.import_players(file)
self.pp = PrettyPrinter(indent=4)
def import_players(self, file):
# assuming csv
self.girls = []
self.boys = []
with open(file) as f:
for line in f:
data = line.split(",")
                name = data[0]
gender = data[1]
hits = data[2:]
if gender.strip().upper() == "F":
self.girls.append(Player(name,
hits,
gender,
logger=self.logger))
else:
self.boys.append(Player(name,
hits,
gender,
logger=self.logger))
def run(self, lineups, games):
self.stats = []
for options in tqdm(range(0, lineups)):
#self.update_progress(options, lineups)
lineup = self.assemble_lineup()
scores = []
for game in range(0, games):
score = Game(lineup, logger=self.logger).run()
scores.append(score)
self.stats.append((sum(scores)/games, pstdev(scores), lineup))
self.display_results()
def display_results(self):
optimal = self.find_agressive()
conservative = self.find_conservative()
print("Conservative: {0:.2f} +- {1:.2f}".format(conservative[0], conservative[1]))
print(SEPERATOR)
for player in conservative[2]:
print(player)
print(SEPERATOR)
print("Agressive: {0:.2f} +- {1:.2f}".format(optimal[0], optimal[1]))
print(SEPERATOR)
for player in optimal[2]:
print(player)
print(SEPERATOR)
print("Simulation of Conservative")
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(message)s')
self.logger = logging.getLogger(__name__)
Game(conservative[2], logger=self.logger).run_p()
def update_progress(self, runs, total):
progress = runs / total
sys.stdout.write("\r{0:.2f}%".format(progress))
sys.stdout.flush()
def find_agressive(self):
result = self.stats[0]
for sim in self.stats:
if sim[0] > result[0]:
result = sim
return result
def find_conservative(self):
result = self.stats[0]
for sim in self.stats:
if sim[0]- 3*sim[1] > result[0] - 3 * result[1]:
result = sim
return result
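    # Worked example of the conservative criterion above (numbers are illustrative):
    # a lineup averaging 10.0 runs with stdev 1.0 ranks above one averaging 11.0
    # with stdev 3.0, because 10.0 - 3*1.0 = 7.0 exceeds 11.0 - 3*3.0 = 2.0.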
def assemble_lineup(self):
guy_count = 0
girls = self.copy_list(self.girls)
guys = self.copy_list(self.boys)
lineup = []
while len(girls) > 0 or len(guys) > 0:
if len(girls) > 0 and len(guys) > 0 and guy_count < 4:
coin = random.randint(0,1)
if coin == 0:
lineup.append(girls.pop(random.randint(0, len(girls)-1)))
else:
guy_count += 1
lineup.append(guys.pop(random.randint(0, len(guys) - 1)))
elif len(girls) > 0:
guy_count = 0
lineup.append(girls.pop(random.randint(0, len(girls) - 1)))
elif len(guys) > 0:
lineup.append(guys.pop(random.randint(0, len(guys) - 1)))
return lineup
def copy_list(self, l):
c = []
for x in l:
c.append(x)
return c
import unittest
import os
class TestSimulator(unittest.TestCase):
def setUp(self):
directory = os.getcwd()
while "simulator" in directory:
directory = os.path.dirname(directory)
directory = os.path.join(directory, "Tests")
tests = ["test1.csv"]
for test in tests:
self.simulator = Simulator(os.path.join(directory,test))
def tearDown(self):
pass
def testAssembleLineup(self):
lineup = self.simulator.assemble_lineup()
self.assertEqual(len(lineup), 3)
|
MOOCworkbench/MOOCworkbench
|
marketplace/migrations/0011_auto_20170526_1215.py
|
Python
|
mit
| 462
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-26 12:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
        ('marketplace', '0010_packageresource_recommended'),
]
operations = [
migrations.AlterField(
model_name='packageresource',
name='resource',
field=models.TextField(),
),
]
|
ostree/plaso
|
plaso/cli/hexdump.py
|
Python
|
apache-2.0
| 2,539
| 0.006302
|
# -*- coding: utf-8 -*-
"""Class to represent binary data as hexadecimal."""
class Hexdump(object):
"""Class that defines a hexadecimal representation formatter (hexdump)."""
@classmethod
def _FormatDataLine(cls, data, data_offset, data_size):
"""Formats binary data in a single line of hexadecimal representation.
Args:
data: String containing the binary data.
data_offset: Offset of the data.
data_size: Size of the data.
Returns:
      A Unicode string containing a hexadecimal representation of
the binary data.
Raises:
ValueError: if the data offset is out of bounds.
"""
if data_offset < 0 or data_offset >= data_size:
raise ValueError(u'Data offset value out of bounds.')
if data_size - data_offset > 16:
data_size = data_offset + 16
word_values = []
for byte_offset in range(data_offset, data_size, 2):
word_value = u'{0:02x}{1:02x}'.format(
ord(data[byte_offset]), ord(data[byte_offset + 1]))
word_values.append(word_value)
byte_values = []
for byte_offset in range(data_offset, data_size):
byte_value = ord(data[byte_offset])
if byte_value > 31 and byte_value < 127:
byte_value = data[byte_offset]
else:
byte_value = u'.'
byte_values.append(byte_value)
return u'{0:07x}: {1:s} {2:s}'.format(
data_offset, u' '.join(word_values), u''.join(byte_values))
@classmethod
def FormatData(cls, data, data_offset=0, maximum_data_size=None):
"""Formats binary data in hexadecimal representation.
All ASCII characters in the hexadecimal representation (hexdump) are
translated back to their character representation.
Args:
data: String containing the binary data.
data_offset: Optional offset within the data to start formatting.
The default is 0.
maximum_data_size: Optional maximum size of the data to format.
The default is None which represents all of
the binary data.
Returns:
A Unicode string containing a hexadecimal representation of
the binary data.
"""
data_size = len(data)
if maximum_data_size is not None and maximum_data_size < data_size:
data_size = maximum_data_size
output_strings = []
for line_offset in range(data_offset, data_size, 16):
hexdump_line = cls._FormatDataLine(data, line_offset, data_size)
output_strings.append(hexdump_line)
return u'\n'.join(output_strings)
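# A minimal usage sketch (Python 2 string semantics assumed, since the formatter
# calls ord() on indexed data; the sample bytes are illustrative):
# print(Hexdump.FormatData('abcdefghijklmnop'))
# # 0000000: 6162 6364 6566 6768 696a 6b6c 6d6e 6f70  abcdefghijklmnop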
|
dougli1sqrd/yamldown
|
setup.py
|
Python
|
bsd-3-clause
| 555
| 0.003604
|
from setuptools import setup, find_packages
setup(
name="yamldown",
version="0.1.8",
packages=["yamldown"],
author="edou
|
glass",
author_email="edouglass@lbl.gov",
url="https://github.com/dougli1sqrd/yamldown",
download_url="https://github.com/dougli1sqrd/yamldown/archive/0.1.8.tar.gz",
description="Python library for loading and dumping \"yamldown\" (markdown with embedded yaml) files.",
    long_description=open("README.md").read(),
keywords=["yaml", "markdown"],
install_requires=[
"pyYaml",
],
)
|
uq-eresearch/uqam
|
location/views.py
|
Python
|
bsd-3-clause
| 4,790
| 0.003549
|
from django.http import HttpResponse, Http404
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render, get_object_or_404
from django.core.urlresolvers import reverse
from django.utils.xmlutils import SimplerXMLGenerator
from models import Place, Region
from models import Locality
from models import GlobalRegion
from utils.utils import do_paging, split_list
from django.db.models import Count
from django.contrib.contenttypes.models import ContentType
import json
def place_detail(request, place_id):
"""
Lookup a ``Place`` based on its id. Pagination its objects.
"""
place = get_object_or_404(Place, pk=place_id)
try:
region = Region.objects.get(name=place.region)
except:
region = None
    place_objects = place.museumobject_set.filter(public=True)
objects = do_paging(request, place_objects)
return render(request, "location/place_detail.html",
{'place': place, 'objects': objects,
'region': region})
def place_json(request, encoding='utf-8', mimetype='text/plain'):
places = Locality.objects.exclude(
latitude=None).annotate(Count('museumobject')).values(
'id', 'name', 'latitude', 'longitude',
'museumobject__count')
return HttpResponse(json.dumps(list(places), indent=2))
def place_kml(request, encoding='utf-8', mimetype='text/plain'):
"""
Write out all the known places to KML
"""
# mimetype = "application/vnd.google-earth.kml+xml"
# mimetype = "text/html"
places = Locality.objects.exclude(
latitude=None).annotate(Count('museumobject'))
response = HttpResponse(mimetype=mimetype)
handler = SimplerXMLGenerator(response, encoding)
handler.startDocument()
handler.startElement(u"kml",
{u"xmlns": u"http://www.opengis.net/kml/2.2"})
handler.startElement(u"Document", {})
for place in places:
place_url = request.build_absolute_uri(place.get_absolute_url())
handler.startElement(u"Placemark", {})
handler.addQuickElement(u"name",
"%s (%s)" % (place.name, place.museumobject__count))
handler.addQuickElement(u"description",
'<a href="%s">%s</a>' % (place_url, place.__unicode__()))
handler.startElement(u"Point", {})
handler.addQuickElement(u"coordinates", place.get_kml_coordinates())
handler.endElement(u"Point")
handler.endElement(u"Placemark")
handler.endElement(u"Document")
handler.endElement(u"kml")
return response
def place_duplicates(request):
'''
Used for finding duplicate places, by Geoname ID
'''
places = Place.objects.values(
'gn_id').order_by().annotate(
count=Count('gn_id')).filter(count__gt=1)
return render(request, "location/place_dups_list.html",
{'places': places})
def place_geoname(request, geoname_id):
places = Place.objects.filter(gn_id=geoname_id)
return render(request, "location/place_geoname.html", {'places': places})
def tree_view(request):
global_regions = GlobalRegion.objects.all()
return render(request, "location/tree_view.html",
{'global_regions': global_regions})
def find_location(model_type, id):
element_type = ContentType.objects.get(app_label='location', model=model_type)
return element_type.get_object_for_this_type(id=id)
def view_places(request):
grs = GlobalRegion.objects.exclude(icon_path="").prefetch_related('children')
d = dict((g.name, g) for g in grs)
grs = [d['Australia'], d['Pacific'], d['Asia'], d['Europe'], d['Americas'], d['Africa'],
d['Middle East']]
kml_url = request.build_absolute_uri(reverse('place_kml'))
return render(request, 'location/map.html',
{'global_regions': grs,
'kml_url': kml_url})
def view_geoloc(request, loctype, id, columns=3):
try:
geolocation = find_location(loctype, id)
except ObjectDoesNotExist:
raise Http404
items = geolocation.museumobject_set.select_related().filter(public=True
).prefetch_related('category', 'country', 'global_region'
).extra(
select={'public_images_count': 'select count(*) from mediaman_artefactrepresentation a WHERE a.artefact_id = cat_museumobject.id AND a.public'}
).order_by('-public_images_count', 'registration_number')
children = []
if hasattr(geolocation, 'children'):
children = geolocation.children.all()
objects = do_paging(request, items)
return render(request, 'location/geolocation.html',
{'geolocation': geolocation,
'objects': objects,
'num_children': len(children),
'children': split_list(children, parts=columns)})
|
Lcaracol/ideasbox.lan
|
ideasbox/fields.py
|
Python
|
mit
| 778
| 0
|
from select_multiple_field.models import SelectMultipleField
class CommaSeparatedCharField(SelectMultipleField):
def contribute_to_class(self, cls, name, **kwargs):
"""Contribute to the Model subclass.
We just set our custom get_FIELD_display(),
which returns a comma-separated list of displays.
"""
super(CommaSeparatedCharField, self).contribute_to_class(cls, name,
**kwargs)
def _get_FIELD_display(instance):
choices = dict(self.choices)
values = getattr(instance, self.attname)
return ", ".join(unicode(ch
|
oices.get(c, c)) for c in values if c)
setattr(cls, 'get_%s_display' % self.name, _get_FIELD_display)
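# A sketch of the resulting behaviour (the model and choices are assumptions): for
# a field declared with choices=[('en', 'English'), ('fr', 'French')] and a stored
# value of ['en', 'fr'], instance.get_<field>_display() returns "English, French".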
|
paulrouget/servo
|
tests/wpt/web-platform-tests/tools/third_party/hyper/hyper/common/headers.py
|
Python
|
mpl-2.0
| 8,888
| 0
|
# -*- coding: utf-8 -*-
"""
hyper/common/headers
~~~~~~~~~~~~~~~~~~~~~
Contains hyper's structures for storing and working with HTTP headers.
"""
import collections
from hyper.common.util import to_bytestring, to_bytestring_tuple
class HTTPHeaderMap(collections.MutableMapping):
"""
A structure that contains HTTP headers.
HTTP headers are a curious beast. At the surface level they look roughly
like a name-value set, but in practice they have many variations that
make them tricky:
- duplicate keys are allowed
- keys are compared case-insensitively
- duplicate keys are isomorphic to comma-separated values, *except when
they aren't*!
- they logically contain a form of ordering
This data structure is an attempt to preserve all of that information
while being as user-friendly as possible. It retains all of the mapping
convenience methods (allowing by-name indexing), while avoiding using a
dictionary for storage.
When iterated over, this structure returns headers in 'canonical form'.
This form is a tuple, where the first entry is the header name (in
lower-case), and the second entry is a list of header values (in original
case).
The mapping always emits both names and values in the form of bytestrings:
never unicode strings. It can accept names and values in unicode form, and
will automatically be encoded to bytestrings using UTF-8. The reason for
what appears to be a user-unfriendly decision here is primarily to allow
the broadest-possible compatibility (to make it possible to send headers in
unusual encodings) while ensuring that users are never confused about what
type of data they will receive.
.. warning:: Note that this data structure makes none of the performance
guarantees of a dictionary. Lookup and deletion is not an O(1)
operation. Inserting a new value *is* O(1), all other
operations are O(n), including *replacing* a header entirely.
"""
def __init__(self, *args, **kwargs):
# The meat of the structure. In practice, headers are an ordered list
# of tuples. This early version of the data structure simply uses this
# directly under the covers.
#
# An important curiosity here is that the headers are not stored in
# 'canonical form', but are instead stored in the form they were
        # provided in. This is to ensure that it is always possible to
# reproduce the original header structure if necessary. This leads to
        # some unfortunate performance costs on structure access where it is
# often necessary to transform the data into canonical form on access.
# This cost is judged acceptable in low-level code like `hyper`, but
# higher-level abstractions should consider if they really require this
# logic.
self._items = []
for arg in args:
self._items.extend(map(lambda x: to_bytestring_tuple(*x), arg))
for k, v in kwargs.items():
self._items.append(to_bytestring_tuple(k, v))
def __getitem__(self, key):
"""
Unlike the dict __getitem__, this returns a list of items in the order
they were added. These items are returned in 'canonical form', meaning
that comma-separated values are split into multiple values.
"""
key = to_bytestring(key)
values = []
for k, v in self._items:
if _keys_equal(k, key):
values.extend(x[1] for x in canonical_form(k, v))
if not values:
raise KeyError("Nonexistent header key: {}".format(key))
return values
def __setitem__(self, key, value):
"""
Unlike the dict __setitem__, this appends to the list of items.
"""
self._items.append(to_bytestring_tuple(key, value))
def __delitem__(self, key):
"""
Sadly, __delitem__ is kind of stupid here, but the best we can do is
delete all headers with a given key. To correctly achieve the 'KeyError
on missing key' logic from dictionaries, we need to do this slowly.
"""
key = to_bytestring(key)
indices = []
for (i, (k, v)) in enumerate(self._items):
if _keys_equal(k, key):
indices.append(i)
if not indices:
raise KeyError("Nonexistent header key: {}".format(key))
for i in indices[::-1]:
self._items.pop(i)
def __iter__(self):
"""
This mapping iterates like the list of tuples it is. The headers are
returned in canonical form.
"""
for pair in self._items:
for value in canonical_form(*pair):
yield value
def __len__(self):
"""
The length of this mapping is the number of individual headers in
canonical form. Sadly, this is a somewhat expensive operation.
"""
size = 0
for _ in self:
size += 1
return size
def __contains__(self, key):
"""
If any header is present with this key, returns True.
"""
key = to_bytestring(key)
return any(_keys_equal(key, k) for k, _ in self._items)
def keys(self):
"""
Returns an iterable of the header keys in the mapping. This explicitly
does not filter duplicates, ensuring that it's the same length as
len().
"""
for n, _ in self:
yield n
def items(self):
"""
This mapping iterates like the list of tuples it is.
"""
return self.__iter__()
def values(self):
"""
This is an almost nonsensical query on a header dictionary, but we
satisfy it in the exact same way we satisfy 'keys'.
"""
for _, v in self:
yield v
def get(self, name, default=None):
"""
Unlike the dict get, this returns a list of items in the order
they were added.
"""
try:
return self[name]
except KeyError:
return default
def iter_raw(self):
"""
Allows iterating over the headers in 'raw' form: that is, the form in
which they were added to the structure. This iteration is in order,
and can be used to rebuild the original headers (e.g. to determine
exactly what a server sent).
"""
for item in self._items:
yield item
def replace(self, key, value):
"""
Replace existing header with new value. If header doesn't exist this
method work like ``__setitem__``. Replacing leads to deletion of all
existing headers with the same name.
"""
key, value = to_bytestring_tuple(key, value)
indices = []
for (i, (k, v)) in enumerate(self._items):
if _keys_equal(k, key):
indices.append(i)
# If the key isn't present, this is easy: just append and abort early.
if not indices:
self._items.append((key, value))
return
# Delete all but the first. I swear, this is the correct slicing
# syntax!
base_index = indices[0]
for i in indices[:0:-1]:
self._items.pop(i)
del self._items[base_index]
self._items.insert(base_index, (key, value))
def merge(self, other):
"""
Merge another header set or any other dict-like into this one.
"""
# Short circuit to avoid infinite loops in case we try to merge into
# ourselves.
if other is self:
return
if isinstance(other, HTTPHeaderMap):
self._items.extend(other.iter_raw())
return
for k, v in other.items():
self._items.append(to_bytestring_tuple(k, v))
def __eq__(self, other):
return self._items == other._items
def __ne__(self, other):
return self._items != other._items
def __str__(self): # pragma: no cover
return 'HTTPHeaderMap(%s)' % self._items
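# A minimal usage sketch of the canonical form described above (header values
# are illustrative):
# h = HTTPHeaderMap()
# h['Accept'] = 'text/html, text/plain'
# h['accept'] = 'application/json'
# h[b'accept']  # -> [b'text/html', b'text/plain', b'application/json']
# len(h)        # -> 3, one entry per canonical-form value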
|
frankrousseau/weboob
|
modules/allocine/module.py
|
Python
|
agpl-3.0
| 8,218
| 0.001583
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
from weboob.capabilities.base import UserError
from weboob.capabilities.calendar import CapCalendarEvent, CATEGORIES, BaseCalendarEvent
from weboob.capabilities.video import CapVideo, BaseVideo
from weboob.capabilities.collection import CapCollection, CollectionNotFound, Collection
from weboob.capabilities.cinema import CapCinema, Person, Movie
from weboob.tools.backend import Module
from .browser import AllocineBrowser
__all__ = ['AllocineModule']
class AllocineModule(Module, CapCinema, CapVideo, CapCalendarEvent, CapCollection):
NAME = 'allocine'
MAINTAINER = u'Julien Veyssier'
EMAIL = 'julien.veyssier@aiur.fr'
VERSION = '1.1'
DESCRIPTION = u'AlloCiné French cinema database service'
LICENSE = 'AGPLv3+'
BROWSER = AllocineBrowser
ASSOCIATED_CATEGORIES = [CATEGORIES.CINE]
def get_movie(self, id):
return self.browser.get_movie(id)
def get_person(self, id):
return self.browser.get_person(id)
def iter_movies(self, pattern):
return self.browser.iter_movies(pattern.encode('utf-8'))
def iter_persons(self, pattern):
return self.browser.iter_persons(pattern.encode('utf-8'))
def iter_movie_persons(self, id, role=None):
return self.browser.iter_movie_persons(id, role)
def iter_person_movies(self, id, role=None):
return self.browser.iter_person_movies(id, role)
def iter_person_movies_ids(self, id):
return self.browser.iter_person_movies_ids(id)
def iter_movie_persons_ids(self, id):
return self.browser.iter_movie_persons_ids(id)
def get_person_biography(self, id):
return self.browser.get_person_biography(id)
def get_movie_releases(self, id, country=None):
return self.browser.get_movie_releases(id, country)
def fill_person(self, person, fields):
if 'real_name' in fields or 'birth_place' in fields\
or 'death_date' in fields or 'nationality' in fields\
or 'short_biography' in fields or 'roles' in fields\
                or 'birth_date' in fields or 'thumbnail_url' in fields\
or 'biography' in fields\
or 'gender' in fields or fields is None:
per = self.get_person(person.id)
person.real_name = per.real_name
person.birth_date = per.birth_date
person.death_date = per.death_date
person.birth_place = per.birth_place
person.gender = per.gender
person.nationality = per.nationality
            person.short_biography = per.short_biography
person.short_description = per.short_description
person.roles = per.roles
person.biography = per.biography
person.thumbnail_url = per.thumbnail_url
return person
def fill_movie(self, movie, fields):
if 'other_titles' in fields or 'release_date' in fields\
or 'duration' in fields or 'country' in fields\
or 'roles' in fields or 'note' in fields\
or 'thumbnail_url' in fields:
mov = self.get_movie(movie.id)
movie.other_titles = mov.other_titles
movie.release_date = mov.release_date
movie.duration = mov.duration
movie.pitch = mov.pitch
movie.country = mov.country
movie.note = mov.note
movie.roles = mov.roles
movie.genres = mov.genres
movie.short_description = mov.short_description
movie.thumbnail_url = mov.thumbnail_url
if 'all_release_dates' in fields:
movie.all_release_dates = self.get_movie_releases(movie.id)
return movie
def fill_video(self, video, fields):
if 'url' in fields:
with self.browser:
if not isinstance(video, BaseVideo):
video = self.get_video(self, video.id)
if hasattr(video, '_video_code'):
video.url = unicode(self.browser.get_video_url(video._video_code))
if 'thumbnail' in fields and video and video.thumbnail:
with self.browser:
video.thumbnail.data = self.browser.readurl(video.thumbnail.url)
return video
def get_video(self, _id):
with self.browser:
split_id = _id.split('#')
if split_id[-1] == 'movie':
return self.browser.get_movie_from_id(split_id[0])
return self.browser.get_video_from_id(split_id[0], split_id[-1])
def iter_resources(self, objs, split_path):
with self.browser:
if BaseVideo in objs:
collection = self.get_collection(objs, split_path)
if collection.path_level == 0:
yield Collection([u'comingsoon'], u'Films prochainement au cinéma')
yield Collection([u'nowshowing'], u'Films au cinéma')
yield Collection([u'acshow'], u'Émissions')
yield Collection([u'interview'], u'Interviews')
if collection.path_level == 1:
if collection.basename == u'acshow':
emissions = self.browser.get_emissions(collection.basename)
if emissions:
for emission in emissions:
yield emission
elif collection.basename == u'interview':
videos = self.browser.get_categories_videos(collection.basename)
if videos:
for video in videos:
yield video
else:
videos = self.browser.get_categories_movies(collection.basename)
if videos:
for video in videos:
yield video
if collection.path_level == 2:
videos = self.browser.get_categories_videos(':'.join(collection.split_path))
if videos:
for video in videos:
yield video
def validate_collection(self, objs, collection):
if collection.path_level == 0:
return
if collection.path_level == 1 and (collection.basename in
[u'comingsoon', u'nowshowing', u'acshow', u'interview']):
return
if collection.path_level == 2 and collection.parent_path == [u'acshow']:
return
raise CollectionNotFound(collection.split_path)
def search_events(self, query):
with self.browser:
if CATEGORIES.CINE in query.categories:
if query.city and re.match('\d{5}', query.city):
events = list(self.browser.search_events(query))
events.sort(key=lambda x: x.start_date, reverse=False)
return events
raise UserError('You must enter a zip code in city field')
def get_event(self, id):
return self.browser.get_event(id)
def fill_event(self, event, fields):
if 'description' in fields:
movieCode = event.id.split('#')[2]
movie = self.get_movie(movieCode)
event.description = movie.pitch
return event
OBJECTS = {
Person: fill_person,
Movie: fill_movie,
BaseVideo: fill_video,
        BaseCalendarEvent: fill_event,
        }
|
zerolab/wagtail
|
wagtail/admin/forms/tags.py
|
Python
|
bsd-3-clause
| 1,354
| 0.001477
|
from taggit.forms import TagField as TaggitTagField
from taggit.models import Tag
from wagtail.admin.widgets import AdminTagWidget
class TagField(TaggitTagField):
"""
Extends taggit's TagField with the option to prevent creating tags that do not already exist
"""
widget = AdminTagWidget
def __init__(self, *args, **kwargs):
self.tag_model = kwargs.pop('tag_model', None)
self.free_tagging = kwargs.pop('free_tagging', None)
super().__init__(*args, **kwargs)
# pass on tag_model and free_tagging kwargs to the widget,
# if (and only if) they have been passed explicitly here.
        # Otherwise, set default values for clean() to use
if self.tag_model is None:
self.tag_model = Tag
else:
self.widget.tag_model = self.tag_model
if self.free_tagging is None:
self.free_tagging = getattr(self.tag_model, 'free_tagging', True)
else:
self.widget.free_tagging = self.free_tagging
def clean(self, value):
value = super().clean(value)
if not self.free_tagging:
# filter value to just the tags that already exist in tag_model
value = list(
self.tag_model.objects.filter(name__in=value).values_list('name', flat=True)
)
return value
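# A minimal usage sketch (the form is an assumption, not part of wagtail.admin):
# class PageMetadataForm(forms.Form):
#     tags = TagField(free_tagging=False)  # drops tags that do not already exist in tag_model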
|
pgmillon/ansible
|
lib/ansible/module_utils/network/common/network.py
|
Python
|
gpl-3.0
| 8,128
| 0.002461
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import traceback
import json
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.module_utils.network.common.netconf import NetconfConnection
from ansible.module_utils.network.common.parsing import Cli
from ansible.module_utils.six import iteritems
NET_TRANSPORT_ARGS = dict(
host=dict(required=True),
port=dict(type='int'),
username=dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
password=dict(no_log=True, fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD'])),
ssh_keyfile=dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
authorize=dict(default=False, fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
auth_pass=dict(no_log=True, fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS'])),
provider=dict(type='dict', no_log=True),
transport=dict(choices=list()),
timeout=dict(default=10, type='int')
)
NET_CONNECTION_ARGS = dict()
NET_CONNECTIONS = dict()
def _transitional_argument_spec():
argument_spec = {}
for key, value in iteritems(NET_TRANSPORT_ARGS):
value['required'] = False
argument_spec[key] = value
return argument_spec
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class ModuleStub(object):
def __init__(self, argument_spec, fail_json):
self.params = dict()
for key, value in argument_spec.items():
self.params[key] = value.get('default')
self.fail_json = fail_json
class NetworkError(Exception):
def __init__(self, msg, **kwargs):
super(NetworkError, self).__init__(msg)
self.kwargs = kwargs
class Config(object):
def __init__(self, connection):
self.connection = connection
def __call__(self, commands, **kwargs):
lines = to_list(commands)
return self.connection.configure(lines, **kwargs)
def load_config(self, commands, **kwargs):
commands = to_list(commands)
return self.connection.load_config(commands, **kwargs)
def get_config(self, **kwargs):
return self.connection.get_config(**kwargs)
def save_config(self):
return self.connection.save_config()
class NetworkModule(AnsibleModule):
def __init__(self, *args, **kwargs):
connect_on_load = kwargs.pop('connect_on_load', True)
argument_spec = NET_TRANSPORT_ARGS.copy()
argument_spec['transport']['choices'] = NET_CONNECTIONS.keys()
argument_spec.update(NET_CONNECTION_ARGS.copy())
if kwargs.get('argument_spec'):
argument_spec.update(kwargs['argument_spec'])
kwargs['argument_spec'] = argument_spec
super(NetworkModule, self).__init__(*args, **kwargs)
self.connection = None
self._cli = None
self._config = None
try:
transport = self.params['transport'] or '__default__'
cls = NET_CONNECTIONS[transport]
self.connection = cls()
except KeyError:
            self.fail_json(msg='Unknown transport or no default transport specified')
except (TypeError, NetworkError) as exc:
            self.fail_json(msg=to_native(exc), exception=traceback.format_exc())
if connect_on_load:
self.connect()
@property
def cli(self):
if not self.connected:
self.connect()
if self._cli:
return self._cli
self._cli = Cli(self.connection)
return self._cli
@property
def config(self):
if not self.connected:
self.connect()
if self._config:
return self._config
self._config = Config(self.connection)
return self._config
@property
def connected(self):
return self.connection._connected
def _load_params(self):
super(NetworkModule, self)._load_params()
provider = self.params.get('provider') or dict()
for key, value in provider.items():
for args in [NET_TRANSPORT_ARGS, NET_CONNECTION_ARGS]:
if key in args:
if self.params.get(key) is None and value is not None:
self.params[key] = value
def connect(self):
try:
if not self.connected:
self.connection.connect(self.params)
if self.params['authorize']:
self.connection.authorize(self.params)
self.log('connected to %s:%s using %s' % (self.params['host'],
self.params['port'], self.params['transport']))
except NetworkError as exc:
self.fail_json(msg=to_native(exc), exception=traceback.format_exc())
def disconnect(self):
try:
if self.connected:
self.connection.disconnect()
self.log('disconnected from %s' % self.params['host'])
except NetworkError as exc:
self.fail_json(msg=to_native(exc), exception=traceback.format_exc())
def register_transport(transport, default=False):
def register(cls):
NET_CONNECTIONS[transport] = cls
if default:
NET_CONNECTIONS['__default__'] = cls
return cls
return register
def add_argument(key, value):
NET_CONNECTION_ARGS[key] = value
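# Illustrative sketch (hypothetical class, not part of this file): a concrete
# connection would register itself so NetworkModule can resolve it through the
# 'transport' parameter, optionally becoming the '__default__' entry, and extra
# module arguments can be contributed via add_argument():
#
#   @register_transport('cli', default=True)
#   class ExampleCliConnection(object):
#       _connected = False
#       def connect(self, params):
#           self._connected = True
#       def disconnect(self):
#           self._connected = False
#
#   add_argument('use_ssl', dict(type='bool', default=False))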
def get_resource_connection(module):
if hasattr(module, '_connection'):
return module._connection
capabilities = get_capabilities(module)
network_api = capabilities.get('network_api')
if network_api == 'cliconf':
module._connection = Connection(module._socket_path)
elif network_api == 'netconf':
module._connection = NetconfConnection(module._socket_path)
else:
module.fail_json(msg='Invalid connection type {0!s}'.format(network_api))
return module._connection
def get_capabilities(module):
if hasattr(module, 'capabilities'):
return module._capabilities
try:
capabilities = Connection(module._socket_path).get_capabilities()
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
module._capabilities = json.loads(capabilities)
return module._capabilities
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-compute/setup.py
|
Python
|
mit
| 2,784
| 0.001078
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
try:
from azure_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
cmdclass = {}
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-compute"
PACKAGE_PPRINT_NAME = "Compute Management"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
            'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.rst', encoding='utf-8') as f:
readme = f.read()
with open('HISTORY.rst', encoding='utf-8') as f:
history = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + history,
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=["tests"]),
install_requires=[
'msrestazure>=0.4.20,<2.0.0',
'azure-common~=1.1',
],
cmdclass=cmdclass
)
|
thaim/ansible
|
lib/ansible/modules/network/cumulus/nclu.py
|
Python
|
mit
| 7,641
| 0.001309
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016-2018, Cumulus Networks <ce-ceng@cumulusnetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nclu
version_added: "2.3"
author: "Cumulus Networks (@isharacomix)"
short_description: Configure network interfaces using NCLU
description:
- Interface to the Network Command Line Utility, developed to make it easier
to configure operating systems running ifupdown2 and Quagga, such as
Cumulus Linux. Command documentation is available at
U(https://docs.cumulusnetworks.com/cumulus-linux/System-Configuration/Network-Command-Line-Utility-NCLU/)
options:
commands:
description:
- A list of strings containing the net commands to run. Mutually
exclusive with I(template).
template:
description:
- A single, multi-line string with jinja2 formatting. This string
will be broken by lines, and each line will be run through net.
Mutually exclusive with I(commands).
commit:
description:
- When true, performs a 'net commit' at the end of the block.
Mutually exclusive with I(atomic).
default: false
type: bool
abort:
description:
- Boolean. When true, perform a 'net abort' before the block.
This cleans out any uncommitted changes in the buffer.
Mutually exclusive with I(atomic).
default: false
type: bool
atomic:
description:
- When true, equivalent to both I(commit) and I(abort) being true.
        Mutually exclusive with I(commit) and I(abort).
default: false
type: bool
description:
description:
- Commit description that will be recorded to the commit log if
I(commit) or I(atomic) are true.
default: "Ansible-originated commit"
'''
EXAMPLES = '''
- name: Add two interfaces without committing any changes
nclu:
commands:
- add int swp1
- add int swp2
- name: Modify hostname to Cumulus-1 and commit the change
nclu:
commands:
- add hostname Cumulus-1
commit: true
- name: Add 48 interfaces and commit the change.
nclu:
template: |
{% for iface in range(1,49) %}
add int swp{{iface}}
{% endfor %}
commit: true
description: "Ansible - add swps1-48"
- name: Fetch Status Of Interface
nclu:
commands:
- show interface swp1
register: output
- name: Print Status Of Interface
debug:
var: output
- name: Fetch Details From All Interfaces In JSON Format
nclu:
commands:
- show interface json
register: output
- name: Print Interface Details
debug:
var: output["msg"]
- name: Atomically add an interface
nclu:
commands:
- add int swp1
atomic: true
description: "Ansible - add swp1"
- name: Remove IP address from interface swp1
nclu:
commands:
- del int swp1 ip address 1.1.1.1/24
- name: Configure BGP AS and add 2 EBGP neighbors using BGP Unnumbered
nclu:
commands:
- add bgp autonomous-system 65000
- add bgp neighbor swp51 interface remote-as external
- add bgp neighbor swp52 interface remote-as external
commit: true
- name: Configure BGP AS and Add 2 EBGP neighbors Using BGP Unnumbered via Template
nclu:
template: |
{% for neighbor in range(51,53) %}
add bgp neighbor swp{{neighbor}} interface remote-as external
add bgp autonomous-system 65000
{% endfor %}
atomic: true
- name: Check BGP Status
nclu:
commands:
      - show bgp summary json
register: output
- name: Print BGP Status In JSON
debug:
var: output["msg"]
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: str
sample: "interface bond0 config updated"
'''
from ansible.module_utils.basic import AnsibleModule
def command_helper(module, command, errmsg=None):
"""Run a command, catch any nclu errors"""
(_rc, output, _err) = module.run_command("/usr/bin/net %s" % command)
if _rc or 'ERROR' in output or 'ERROR' in _err:
module.fail_json(msg=errmsg or output)
return str(output)
def check_pending(module):
"""Check the pending diff of the nclu buffer."""
pending = command_helper(module, "pending", "Error in pending config. You may want to view `net pending` on this target.")
delimeter1 = "net add/del commands since the last 'net commit'"
color1 = '\x1b[94m'
if delimeter1 in pending:
pending = pending.split(delimeter1)[0]
pending = pending.replace(color1, '')
return pending.strip()
def run_nclu(module, command_list, command_string, commit, atomic, abort, description):
_changed = False
commands = []
if command_list:
commands = command_list
elif command_string:
commands = command_string.splitlines()
do_commit = False
do_abort = abort
if commit or atomic:
do_commit = True
if atomic:
do_abort = True
if do_abort:
command_helper(module, "abort")
# First, look at the staged commands.
before = check_pending(module)
# Run all of the net commands
output_lines = []
for line in commands:
if line.strip():
output_lines += [command_helper(module, line.strip(), "Failed on line %s" % line)]
output = "\n".join(output_lines)
# If pending changes changed, report a change.
after = check_pending(module)
if before == after:
_changed = False
else:
_changed = True
# Do the commit.
if do_commit:
result = command_helper(module, "commit description '%s'" % description)
if "commit ignored" in result:
_changed = False
command_helper(module, "abort")
elif command_helper(module, "show commit last") == "":
_changed = False
return _changed, output
def main(testing=False):
module = AnsibleModule(argument_spec=dict(
commands=dict(required=False, type='list'),
template=dict(required=False, type='str'),
description=dict(required=False, type='str', default="Ansible-originated commit"),
abort=dict(required=False, type='bool', default=False),
commit=dict(required=False, type='bool', default=False),
atomic=dict(required=False, type='bool', default=False)),
mutually_exclusive=[('commands', 'template'),
('commit', 'atomic'),
('abort', 'atomic')]
)
command_list = module.params.get('commands', None)
command_string = module.params.get('template', None)
commit = module.params.get('commit')
atomic = module.params.get('atomic')
abort = module.params.get('abort')
description = module.params.get('description')
_changed, output = run_nclu(module, command_list, command_string, commit, atomic, abort, description)
if not testing:
module.exit_json(changed=_changed, msg=output)
elif testing:
return {"changed": _changed, "msg": output}
if __name__ == '__main__':
main()
|
OCA/l10n-brazil
|
l10n_br_fiscal/wizards/__init__.py
|
Python
|
agpl-3.0
| 252
| 0
|
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import base_wizard_mixin
from . import document_cancel_wizard
from . import document_correction_wizard
from . import document_status_wizard
from . import invalidate_number_wizard
|
vijayendrabvs/ssl-neutron
|
neutron/plugins/ml2/plugin.py
|
Python
|
apache-2.0
| 35,120
| 0.000028
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from sqlalchemy import exc as sql_exc
from sqlalchemy.orm import exc as sa_exc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.common import exceptions as exc
from neutron.common import topics
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import models_v2
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import multiprovidernet as mpnet
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron import manager
from neutron.openstack.common import db as os_db
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log
from neutron.openstack.common import rpc as c_rpc
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import config # noqa
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2 import managers
from neutron.plugins.ml2 import models
from neutron.plugins.ml2 import rpc
LOG = log.getLogger(__name__)
# REVISIT(rkukura): Move this and other network_type constants to
# providernet.py?
TYPE_MULTI_SEGMENT = 'multi-segment'
class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
addr_pair_db.AllowedAddressPairsMixin,
extradhcpopt_db.ExtraDhcpOptMixin):
"""Implement the Neutron L2 abstractions using modules.
Ml2Plugin is a Neutron plugin based on separately extensible sets
of network types and mechanisms for connecting to networks of
those types. The network types and mechanisms are implemented as
drivers loaded via Python entry points. Networks can be made up of
multiple segments (not yet fully implemented).
"""
# This attribute specifies whether the plugin supports or not
# bulk/pagination/sorting operations. Name mangling is used in
# order to ensure it is qualified by class
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
# List of supported extensions
_supported_extension_aliases = ["provider", "external-net", "binding",
"quotas", "security-group", "agent",
"dhcp_agent_scheduler",
"multi-provider", "allowed-address-pairs",
"extra_dhcp_opt"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_if_noop_driver(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
# First load drivers, then initialize DB, then initialize drivers
self.type_manager = managers.TypeManager()
self.mechanism_manager = managers.MechanismManager()
super(Ml2Plugin, self).__init__()
self.type_manager.initialize()
self.mechanism_manager.initialize()
# bulk support depends on the underlying drivers
self.__native_bulk_support = self.mechanism_manager.native_bulk_support
self._setup_rpc()
# REVISIT(rkukura): Use stevedore for these?
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
LOG.info(_("Modular L2 Plugin initialization complete"))
def _setup_rpc(self):
self.notifier = rpc.AgentNotifierApi(topics.AGENT)
self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
)
def start_rpc_listener(self):
self.callbacks = rpc.RpcCallbacks(self.notifier, self.type_manager)
self.topic = topics.PLUGIN
self.conn = c_rpc.create_connection(new=True)
        self.dispatcher = self.callbacks.create_rpc_dispatcher()
self.conn.create_consumer(self.topic, self.dispatcher,
fanout=False)
return self.conn.consume_in_thread()
def _process_provider_segment(self, segment):
network_type = self._get_attribute(segment, provider.NETWORK_TYPE)
physical_network = self._get_attribute(segment,
provider.PHYSICAL_NETWORK)
segmentation_id = self._get_attribute(segment,
provider.SEGMENTATION_ID)
if attributes.is_attr_set(network_type):
segment = {api.NETWORK_TYPE: network_type,
api.PHYSICAL_NETWORK: physical_network,
api.SEGMENTATION_ID: segmentation_id}
self.type_manager.validate_provider_segment(segment)
return segment
msg = _("network_type required")
raise exc.InvalidInput(error_message=msg)
def _process_provider_create(self, network):
segments = []
if any(attributes.is_attr_set(network.get(f))
for f in (provider.NETWORK_TYPE, provider.PHYSICAL_NETWORK,
provider.SEGMENTATION_ID)):
# Verify that multiprovider and provider attributes are not set
# at the same time.
if attributes.is_attr_set(network.get(mpnet.SEGMENTS)):
raise mpnet.SegmentsSetInConjunctionWithProviders()
network_type = self._get_attribute(network, provider.NETWORK_TYPE)
physical_network = self._get_attribute(network,
provider.PHYSICAL_NETWORK)
segmentation_id = self._get_attribute(network,
provider.SEGMENTATION_ID)
segments = [{provider.NETWORK_TYPE: network_type,
provider.PHYSICAL_NETWORK: physical_network,
provider.SEGMENTATION_ID: segmentation_id}]
elif attributes.is_attr_set(network.get(mpnet.SEGMENTS)):
segments = network[mpnet.SEGMENTS]
else:
return
return [self._process_provider_segment(s) for s in segments]
def _get_attribute(self, attrs, key):
value = attrs.get(key)
if value is attributes.ATTR_NOT_SPECIFIED:
value = None
return value
def _extend_network_dict_provider(self, context, network):
id = network['id']
segments = db.get_network_segments(context.session, id)
if not segments:
LOG.error(_("Network %s has no segments"), id)
network[provider.NETWORK_TYPE] = None
            network[provider.PHYSICAL_NETWORK] = None
|
CropCircleSys/bsd-cloudinit
|
cloudbaseinit/metadata/services/ec2service.py
|
Python
|
apache-2.0
| 3,464
| 0
|
# Copyright 2014 Cloudbase Solutions Srl
# Copyright 2012 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import posixpath
from oslo.config import cfg
from six.moves.urllib import error
from six.moves.urllib import request
from cloudbaseinit.metadata.services import base
from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.utils import network
opts = [
cfg.StrOpt('ec2_metadata_base_url',
default='http://169.254.169.254/',
help='The base URL where the service looks for metadata'),
cfg.BoolOpt('ec2_add_metadata_private_ip_route', default=True,
help='Add a route for the metadata ip address to the gateway'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
LOG = logging.getLogger(__name__)
class EC2Service(base.BaseMetadataService):
_metadata_version = '2009-04-04'
def __init__(self):
super(EC2Service, self).__init__()
self._enable_retry = True
def load(self):
super(EC2Service, self).load()
if CONF.ec2_add_metadata_private_ip_route:
network.check_metadata_ip_route(CONF.ec2_metadata_base_url)
try:
self.get_host_name()
return True
except Exception as ex:
LOG.exception(ex)
LOG.debug('Metadata not found at URL \'%s\'' %
CONF.ec2_metadata_base_url)
return False
def _get_response(self, req):
try:
return request.urlopen(req)
except error.HTTPError as ex:
if ex.code == 404:
raise base.NotExistingMetadataException()
else:
raise
def _get_data(self, path):
norm_path = posixpath.join(CONF.ec2_metadata_base_url, path)
LOG.debug('Getting metadata from: %(norm_path)s',
{'norm_path': norm_path})
req = request.Request(norm_path)
response = self._get_response(req)
return response.read()
def get_host_name(self):
return self._get_cache_data('%s/meta-data/local-hostname' %
self._metadata_version)
def get_instance_id(self):
return self._get_cache_data('%s/meta-data/instance-id' %
self._metadata_version)
def get_public_keys(self):
ssh_keys = []
keys_info = self._get_cache_data(
'%s/meta-data/public-keys' %
self._metadata_version).split("\n")
for key_info in keys_info:
(idx, key_name) = key_info.split('=')
ssh_key = self._get_cache_data(
                '%(version)s/meta-data/public-keys/%(idx)s/openssh-key' %
{'version': self._metadata_version, 'idx': idx})
ssh_keys.append(ssh_key)
return ssh_keys
def get_network_details(self):
# TODO(cpoieana): add static network config support
pass
|
murphyke/avocado
|
tests/cases/query/tests/parsers.py
|
Python
|
bsd-2-clause
| 19,203
| 0
|
from copy import deepcopy
from django.test import TestCase
from django.core.exceptions import ValidationError
from django.core import management
from avocado.query import oldparsers as parsers
from avocado.models import DataConcept, DataField, DataConceptField
from ....models import Employee
class DataContextParserTestCase(TestCase):
fixtures = ['employee_data.json']
def setUp(self):
management.call_command('avocado', 'init', 'tests', quiet=True)
def test_valid(self):
title = DataField.objects.get_by_natural_key('tests.title.name')
# Single by id (deprecated)
attrs = {
'id': title.pk,
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
'language': 'Name is CEO'
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
# Single by dotted label
attrs = {
'field': 'tests.title.name',
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
'language': 'Name is CEO'
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
# Single by label list
attrs = {
            'field': ['tests', 'title', 'name'],
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
'language': 'Name is CEO'
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
# Single by field
attrs = {
'field': title.pk,
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
'language': 'Name is CEO'
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
# Branch node
attrs = {
'type': 'and',
'children': [{
'field': 'tests.title.name',
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
'language': 'Name is CEO'
}, {
'field': 'tests.employee.first_name',
'operator': 'exact',
'value': 'John',
'cleaned_value': {'value': 'John', 'label': 'John'},
'language': 'First Name is John'
}],
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
# No children
attrs = {
'type': 'and',
'children': [],
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
# 1 child
attrs = {
'type': 'and',
'children': [{
'field': 'tests.title.name',
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
'language': 'Name is CEO'
}]
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
def test_invalid(self):
# Non-existent data field
attrs = parsers.datacontext.validate({
'field': 999,
'operator': 'exact',
'value': 'CEO'
})
self.assertFalse(attrs['enabled'])
# Object must be a dict
self.assertRaises(ValidationError, parsers.datacontext.validate, 1)
# Invalid logical operator
attrs = parsers.datacontext.validate({'type': 'foo', 'children': []})
self.assertFalse(attrs['enabled'])
# Missing 'value' key in first condition
attrs = parsers.datacontext.validate({
'type': 'and',
'children': [{
'field': 'tests.title.name',
'operator': 'exact'
}, {
'field': 'tests.title.name',
'operator': 'exact',
'value': 'CEO'
}]
}, tree=Employee)
self.assertTrue(attrs.get('enabled', True))
self.assertFalse(attrs['children'][0]['enabled'])
self.assertTrue(attrs['children'][1].get('enabled', True))
def test_field_for_concept(self):
f = DataField.objects.get(model_name='title', field_name='name')
c1 = DataConcept()
c2 = DataConcept()
c1.save()
c2.save()
cf = DataConceptField(concept=c1, field=f)
cf.save()
attrs = {
'concept': c1.pk,
'field': f.pk,
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
'language': 'Name is CEO'
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
# Invalid concept
attrs = parsers.datacontext.validate({
'concept': c2.pk,
'field': f.pk,
'operator': 'exact',
'value': 'CEO',
}, tree=Employee)
self.assertFalse(attrs['enabled'])
def test_parsed_node(self):
node = parsers.datacontext.parse({
'type': 'and',
'children': [],
}, tree=Employee)
# No condition has been defined..
self.assertEqual(node.condition, None)
node = parsers.datacontext.parse({
'type': 'and',
'children': [{
'field': 'tests.title.name',
'operator': 'exact',
'value': 'CEO',
}]
}, tree=Employee)
# Only the one condition is represented
self.assertEqual(str(node.condition),
"(AND: ('title__name__exact', u'CEO'))")
def test_apply(self):
f = DataField.objects.get_by_natural_key('tests',
'title',
'boss')
f1 = DataField.objects.get_by_natural_key('tests',
'employee',
'first_name')
node = parsers.datacontext.parse({
'field': 'tests.title.boss',
'operator': 'exact',
'value': True
}, tree=Employee)
self.assertEqual(
unicode(node.apply().values('id').query).replace(' ', ''),
'SELECT DISTINCT "tests_employee"."id" FROM "tests_employee" '
'INNER JOIN "tests_title" ON ("tests_employee"."title_id" = '
'"tests_title"."id") WHERE "tests_title"."boss" = True '
.replace(' ', ''))
self.assertEqual(node.language, {
'operator': 'exact',
'language': u'Boss is True',
'field': f.pk,
'value': True
})
# Branch node
node = parsers.datacontext.parse({
'type': 'and',
'children': [{
'field': 'tests.title.boss',
'operator': 'exact',
'value': True,
}, {
'field': 'tests.employee.first_name',
'operator': 'exact',
'value': 'John',
}]
}, tree=Employee)
self.assertEqual(
unicode(node.apply().values('id').query).replace(' ', ''),
'SELECT DISTINCT "tests_employee"."id" FROM "tests_employee" '
'INNER JOIN "tests_title" ON ("tests_employee"."title_id" = '
'"tests_title"."id") WHERE ("tests_employee"."first_name" = John '
'AND "tests_title"."boss" = True )'.replace(' ', ''))
self.assertEqual(node.language, {
'type': 'and',
'children': [{
'field': f.pk,
|
ratnania/pyccel
|
tests/internal/scripts/blas/ex2.py
|
Python
|
mit
| 611
| 0.036007
|
from pyccel.stdlib.internal.blas import dgemv
from numpy import zeros
n = 4
m = 5
a = zeros((n,m), 'double')
x = zeros(m, 'double')
y = zeros(n, 'double')
# ...
a[0,0] = 1.0
a[1,0] = 6.0
a[2,0] = 11.0
a[3,0] = 16.0
a[0,1] = 2.0
a[1,1] = 7.0
a[2,1] = 12.0
a[3,1] = 17.0
a[0,2] = 3.0
a[1,2] = 8.0
a[2,2] = 13.0
a[3,2] = 18.0
a[0,3] = 4.0
a[1,3] = 9.0
a[2,3] = 14.0
a[3,3] = 19.0
a[0,4] = 5.0
a[1,4] = 10.0
a[2,4] = 15.0
a[3,4] = 20.0
# ...
# ...
x[0] = 2.0
x[1] = 3.0
x[2] = 4.0
x[3] = 5.0
x[4] = 6.0
# ...
alpha = 2.0
beta = 0.0
incx = 1
incy = 1
dgemv('N', n, m, alpha, a, n, x, incx, beta, y, incy)
|
DevangS/CoralNet
|
images/tests/__init__.py
|
Python
|
bsd-2-clause
| 144
| 0
|
# Import tests from each test file.
# e.g. if we have tests/sources.py, then "from sources import *"
from sources import *
from tasks import *
|
ESOedX/edx-platform
|
cms/djangoapps/contentstore/views/tests/utils.py
|
Python
|
agpl-3.0
| 3,337
| 0.004195
|
"""
Utilities for view tests.
"""
from __future__ import absolute_import
import json
from contentstore.tests.utils import CourseTestCase
from contentstore.views.helpers import xblock_studio_url
from xmodule.modulestore.tests.factories import ItemFactory
class StudioPageTestCase(CourseTestCase):
"""
Base class for all tests of Studio pages.
"""
def setUp(self):
super(StudioPageTestCase, self).setUp()
self.chapter = ItemFactory.create(parent_location=self.course.location,
category='chapter', display_name="Week 1")
self.sequential = ItemFactory.create(parent_location=self.chapter.location,
category='sequential', display_name="Lesson 1")
def get_page_html(self, xblock):
"""
Returns the HTML for the page representing the xblock.
"""
url = xblock_studio_url(xblock)
self.assertIsNotNone(url)
resp = self.client.get_html(url)
self.assertEqual(resp.status_code, 200)
return resp.content.decode(resp.charset)
def get_preview_html(self, xblock, view_name):
"""
Returns the HTML for the xblock when shown within a unit or container page.
"""
preview_url = '/xblock/{usage_key}/{view_name}'.format(usage_key=xblock.location, view_name=view_name)
resp = self.client.get_json(preview_url)
self.assertEqual(resp.status_code, 200)
resp_content = json.loads(resp.content.decode('utf-8'))
return resp_content['html']
def validate_preview_html(self, xblock, view_name, can_add=True, can_reorder=True, can_move=True,
can_edit=True, can_duplicate=True, can_delete=True):
"""
Verify that the specified xblock's preview has the expected HTML elements.
"""
        html = self.get_preview_html(xblock, view_name)
self.validate_html_for_action_button(
html,
'<div class="add-xblock-component new-component-item adding"></div>',
can_add
)
self.validate_html_for_action_button(
html,
'<span data-tooltip="Drag to reorder" class="drag-handle action"></span>',
can_reorder
)
self.validate_html_for_action_button(
html,
            '<button data-tooltip="Move" class="btn-default move-button action-button">',
can_move
)
self.validate_html_for_action_button(
html,
'button class="btn-default edit-button action-button">',
can_edit
)
self.validate_html_for_action_button(
html,
'<button data-tooltip="Delete" class="btn-default delete-button action-button">',
can_duplicate
)
self.validate_html_for_action_button(
html,
'<button data-tooltip="Duplicate" class="btn-default duplicate-button action-button">',
can_delete
)
def validate_html_for_action_button(self, html, expected_html, can_action=True):
"""
Validate that the specified HTML has specific action..
"""
if can_action:
self.assertIn(expected_html, html)
else:
self.assertNotIn(expected_html, html)
|
botswana-harvard/edc-map
|
edc_map/mapper.py
|
Python
|
gpl-2.0
| 2,309
| 0.000433
|
import sys
from geopy import Point
from django.apps import apps as django_apps
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from .geo_mixin import GeoMixin
LANDMARK_NAME = 0
LATITUDE = 2
LETTERS = list(map(chr, range(65, 91)))
LONGITUDE = 1
style = color_style()
class Mapper(GeoMixin):
center_lat = None
center_lon = None
landmarks = None # format ((name, longitude, latitude), )
map_area = None
radius = 5.5
mapper_model = None
def __init__(self):
self.name = self.map_area or f'mapper {self.__class__.__name__}'
app_config = django_apps.get_app_config('edc_map')
mapper_model = self.mapper_model or app_config.mapper_model
if not mapper_model:
raise ImproperlyConfigured(
f'Invalid mapper_model. Got None. See {repr(self)}.')
try:
self.item_model = django_apps.get_model(*mapper_model.split('.'))
except LookupError as e:
sys.stdout.write(style.WARNING(
f'\n Warning. Lookup error in mapper. See {repr(self)}. Got {e} '
'edc_map.apps.AppConfig\n'))
else:
self.item_model_cls = self.item_model
self.item_label = self.item_model._meta.verbose_name
self.load()
def __repr__(self):
return 'Mapper({0.map_area!r})'.format(self)
def __str__(self):
return '({0.map_area!r})'.format(self)
def load(self):
return None
@property
def __dict__(self):
return {
'map_area': self.map_area,
'center_lat': self.center_lat,
'center_lon': self.center_lon,
'radius': self.radius}
@property
def area_center_point(self):
return Point(self.center_lat, self.center_lon)
@property
def area_radius(self):
return self.radius
    def point_in_map_area(self, point):
        """Return True if point is within mapper area radius."""
return self.point_in_radius(
point, self.area_center_point, self.area_radius)
    def raise_if_not_in_map_area(self, point):
self.raise_if_not_in_radius(
point, self.area_center_point, self.area_radius,
units='km', label=self.map_area)
|
moagstar/python-uncompyle6
|
test/simple_source/bug26/04_comp_for.py
|
Python
|
mit
| 156
| 0
|
# From python2.6/_abcoll.py
# Bug was wrong code for "comp_for" giving
# "for in x" instead of: "for x in y"
chain = (e for s in (self, other) for x in y)
|
cjaymes/pyscap
|
src/scap/model/xnl_2_0/TitleElement.py
|
Python
|
gpl-3.0
| 946
| 0.001057
|
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
from scap.Model import Model
import logging
logger = logging.getLogger(__name__)
class TitleElement(Model):
    MODEL_MAP = {
'tag_name': 'Title',
'attributes': {
'Type': {},
'Code': {},
'*': {},
}
}
|
aaxelb/osf.io
|
api_tests/metaschemas/views/test_metaschemas_detail.py
|
Python
|
apache-2.0
| 2,170
| 0.004608
|
import pytest
from api.base.settings.defaults import API_BASE
from osf.models import MetaSchema
from osf_tests.factories import (
AuthUserFactory,
)
from website.project.metadata.schemas import LATEST_SCHEMA_VERSION
@pytest.mark.django_db
class TestMetaSchemaDetail:
def test_metaschemas_detail_visibility(self, app):
user = AuthUserFactory()
schema = MetaSchema.objects.filter(name='Prereg Challenge', schema_version=LATEST_SCHEMA_VERSION).first()
#test_pass_authenticated_user_can_retrieve_schema
url = '/{}metaschemas/{}/'.format(API_BASE, schema._id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
data = res.json['data']['attributes']
assert data['name'] == 'Prereg Challenge'
assert data['schema_version'] == 2
assert data['active']
assert res.json['data']['id'] == schema._id
#test_pass_unauthenticated_user_can_view_schemas
res = app.get(url)
assert res.status_code == 200
#test_inactive_metaschema_returned
inactive_schema = MetaSchema.objects.get(name='Election Research Preacceptance Competition', active=False)
url = '/{}metaschemas/{}/'.format(API_BASE, inactive_schema._id)
res = app.get(url)
assert res.status_code == 200
assert res.json['data']['attributes']['name'] == 'Election Research Preacceptance Competition'
assert res.json['data']['attributes']['active'] is False
#test_non_latest_version_metaschema_returned
old_schema = MetaSchema.objects.get(name='OSF-Standard Pre-Data Collection Registration', schema_version=1)
url = '/{}metaschemas/{}/'.format(API_BASE, old_schema._id)
res = app.get(url)
assert res.status_code == 200
        assert res.json['data']['attributes']['name'] == 'OSF-Standard Pre-Data Collection Registration'
assert res.json['data']['attributes']['schema_version'] == 1
#test_invalid_metaschema_not_found
url = '/{}metaschemas/garbage/'.format(API_BASE)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 404
|
Psycojoker/HamlPy
|
hamlpy/parser/filters.py
|
Python
|
mit
| 5,088
| 0.001769
|
from __future__ import unicode_literals
"""
Core HamlPy filters.
The implementation of these should match https://github.com/haml/haml/blob/master/lib/haml/filters.rb as closely as
possible. Where we differ is that we don't compile Stylus, Coffeescript etc into CSS or Javascript - but place the
content into suitable <style> and <script> that can be transformed later by something like django-compressor.
"""
import sys
from io import StringIO
# Pygments and Markdown are optional dependencies which may or may not be available
try:
import pygments
from pygments.formatters import HtmlFormatter
from pygments.lexers import guess_lexer, PythonLexer
from pygments.util import ClassNotFound
_pygments_available = True
except ImportError: # pragma: no cover
_pygments_available = False
try:
from markdown import markdown as markdown_lib
_markdown_available = True
except ImportError: # pragma: no cover
_markdown_available = False
from .core import ParseException
from .utils import html_escape
# ----------------------------------------------------------------------------------
# Core filters
# ----------------------------------------------------------------------------------
def plain(text, options):
return text
def preserve(text, options):
text = text.rstrip()
    text = text.replace('\n', '
')
return text.replace('\r', '')
def escaped(text, options):
return html_escape(text)
def cdata(text, options):
text = '\n' + text.rstrip()
text = text.replace("\n", "\n ")
return '<![CDATA[%s\n]]>' % text
def css(text, options):
return style_filter(text, 'text/css', options)
def stylus(text, options):
return style_filter(text, 'text/stylus', options)
def less(text, options):
return style_filter(text, 'text/less', options)
def sass(text, options):
return style_filter(text, 'text/sass', options)
def javascript(text, options):
return script_filter(text, 'text/javascript', '//', options)
def coffee(text, options):
return script_filter(text, 'text/coffeescript', '#', options)
def markdown(content, options):
if not _markdown_available:
raise ParseException("Markdown is not available")
return markdown_lib(content)
def highlight(content, options):
if not _pygments_available:
raise ParseException("Pygments is not available")
if content:
# let Pygments try to guess syntax but default to Python
try:
lexer = guess_lexer(content)
except ClassNotFound:
lexer = PythonLexer()
return pygments.highlight(content, lexer, HtmlFormatter())
else:
return ''
def python(content, options):
if content:
compiled_code = compile(content, "", "exec")
output_buffer = StringIO()
sys.stdout = output_buffer
try:
exec(compiled_code)
except Exception as e:
raise ParseException('Error whilst executing python filter node') from e
finally:
# restore the original stdout
sys.stdout = sys.__stdout__
return output_buffer.getvalue()
else:
return ''
# ----------------------------------------------------------------------------------
# Helper functions
# ----------------------------------------------------------------------------------
def style_filter(text, mime_type, options):
indent = ' ' if options.cdata else ' '
text = text.rstrip().replace('\n', '\n' + indent)
type_attr = ' type=%(attr_wrapper)s%(mime_type)s%(attr_wrapper)s' % \
{'attr_wrapper': options.attr_wrapper, 'mime_type': mime_type}
before, after = (' /*<![CDATA[*/\n', ' /*]]>*/\n') if options.cdata else ('', '')
return '<style%s>\n%s%s%s\n%s</style>' % (type_attr, before, indent, text, after)
def script_filter(text, mime_type, comment, options):
indent = ' ' if options.cdata else ' '
text = text.rstrip().replace('\n', '\n' + indent)
type_attr = ' type=%(attr_wrapper)s%(mime_type)s%(attr_wrapper)s' % \
{'attr_wrapper': options.attr_wrapper, 'mime_type': mime_type}
    before, after = (' %s<![CDATA[\n' % comment, ' %s]]>\n' % comment) if options.cdata else ('', '')
return '<script%s>\n%s%s%s\n%s</script>' % (type_attr, before, indent, text, after)
# ----------------------------------------------------------------------------------
# Filter registration
# ----------------------------------------------------------------------------------
FILTERS = {
    'plain': plain,
'preserve': preserve,
'escaped': escaped,
'cdata': cdata,
'css': css,
'stylus': stylus,
'less': less,
'sass': sass,
'javascript': javascript,
'coffee': coffee,
'coffeescript': coffee,
'markdown': markdown,
'highlight': highlight,
'python': python
}
def register_filter(name, callback):
FILTERS[name] = callback
def get_filter(name):
if name not in FILTERS:
raise ParseException("No such filter: " + name)
return FILTERS.get(name)
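# Minimal usage sketch (hypothetical filter name, for illustration only):
#
#   def upcase(text, options):
#       return text.upper()
#
#   register_filter('upcase', upcase)
#   get_filter('upcase')('hello', None)   # -> 'HELLO'
#   get_filter('missing')                 # -> raises ParseException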
|
taras1k/finance
|
server/utils.py
|
Python
|
gpl-2.0
| 237
| 0.004219
|
from collections import OrderedDict
class SerializableModel(object):
def _asdict(self):
result = OrderedDict()
for key in self.__mapper__.c.keys():
result[key] = getattr(self, key)
        return result
|
ThinkboxSoftware/Deadline
|
Examples/DeadlineCommand/JobTaskTimes/GetJobTaskTimes.py
|
Python
|
apache-2.0
| 6,747
| 0.006966
|
'''
This script shows an example of working with the output of the GetJobTasks subcommand.
The script takes a single argument, which is the JobID of the job. When calculating the elapsed time for
rendering tasks, the script does not account for daylight savings time differences or time zone differences.
Elapsed time for rendering tasks is rounded to the nearest second.
IMPORTANT:
Adjust the DEADLINECOMMAND_FULLPATH below to point your deadlinecommand executable.
USAGE EXAMPLE:
"C:\Program Files\Thinkbox\Deadline7\bin\deadlinecommand.exe" ExecuteScript GetJobTaskTimes.py 563c31ef2f359219f8745420
OUTPUT EXAMPLE:
0: 00:00:05.0140000 (Completed)
1: 00:00:02.4510000 (Completed)
2: 00:00:03.3090000 (Completed)
3: 00:00:02.7700000 (Completed)
4: 00:00:04.6160000 (Completed)
5: 00:00:02.8120000 (Completed)
6: 00:00:02.0000000 (Rendering)
7: 00:00:00.0000000 (Queued)
8: 00:00:00.0000000 (Queued)
9: 00:00:00.0000000 (Queued)
10: 00:00:00.0000000 (Queued)
11: 00:00:00.0000000 (Queued)
'''
# ======== Imports ========
import datetime
from time import time
import copy
import pprint
import subprocess
from Deadline.Scripting import *
from Deadline.Jobs import *
# ======== Constants ========
DEADLINECOMMAND_FULLPATH = "C:\\Program Files\\Thinkbox\\Deadline7\\bin\\deadlinecommand.exe"
# ======== Function Definitions ========
def secToHHMMSS(seconds):
"""
Converts input seconds into the desired output display format.
Rounds to the nearest second.
"""
rndSec = int(seconds + 0.5) # rounded seconds
hrs = int(rndSec / 3600)
min = int((rndSec - hrs * 3600) / 60)
sec = rndSec - hrs * 3600 - min * 60
return str(hrs).zfill(2) + ":" + str(min).zfill(2) + ":" + str(sec).zfill(2) + ".0000000"
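# Worked example: secToHHMMSS(3661.6) rounds to 3662 seconds, which is
# 1 hour, 1 minute and 2 seconds, formatted as "01:01:02.0000000".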
def FixRenderTime(TaskDict):
"""
Estimates the render time for tasks that are rendering, and handles some special cases.
"""
# Can't help you if the required keys are missing.
if ("TaskStatus" not in TaskDict) or ("RenderStartTime" not in TaskDict):
return
# Estimate the Render time when rendering
if ("Rendering" == TaskDict["TaskStatus"]):
# Handle a special case:
if ("Jan 01/01 00:00:00" == TaskDict["RenderStartTime"]):
TaskDict["RenderTime"] = "00:00:00.0000000"
TaskDict["TaskRenderTime"] = TaskDict["RenderTime"]
return
# Parse the string into a Python datetime
# Expected format is 'Nov 09/15 11:16:30'
# See: http://strftime.org/
dtStart = datetime.datetime.strptime(TaskDict["RenderStartTime"], "%b %d/%y %H:%M:%S")
dtNow = datetime.datetime.now()
# print ("%s --> %s" % (TaskDict["RenderStartTime"], dtStart.isoformat()))
timeDelta = dtNow - dtStart
TaskDict["RenderTime"] = secToHHMMSS(timeDelta.seconds)
TaskDict["TaskRenderTime"] = TaskDict["RenderTime"]
else:
        # ASSUMPTION: Assume zero for all other TaskStatus values.
TaskDict["RenderTime"] = "00:00:00.0000000"
        TaskDict["TaskRenderTime"] = TaskDict["RenderTime"]
return
def ParseGetJobTasksOutput(output):
"""
Parses the output of the call to GetJobTasks. The result is a "TaskList", mean a list of "TaskDict"s,
where a "TaskDict" is a dictionary of key,value pairs of information about the Task.
"""
TaskList = []
# All entries in this dictionary must be lower case.
IntegerKeysDict = {'averageram', 'averagerampercentage', 'averageswap',
'cpuutilisation', 'errorcount', 'imagefilesize',
'peakcpuusage', 'peakrampercentage', 'peakramusage',
'peakswap', 'taskaverageram',
'taskaveragerampercentage', 'taskaverageswap',
'taskcpuitilisation', 'taskerrorcount', 'taskid',
'taskimagefilesize', 'taskpeakcpuusage',
'taskpeakrampercentage', 'taskpeakramusage',
'taskpeakswap', 'tasktotalcpuclocks',
'taskusedcpuclocks', 'totalcpuclocks', 'usedcpuclocks'}
# All entries in this dictionary must be lower case.
BooleanKeysDict = {'isstarting', 'taskisstarted', 'taskwaitingtostart',
'waitingtostart'}
# Parse the lines
TaskDict = {}
lines = output.splitlines()
for line in lines:
if (not line.strip()):
if (len(TaskDict) > 0):
if ("10675199.02:48:05.4775807" == TaskDict["TaskRenderTime"]):
FixRenderTime(TaskDict)
TaskList.append(copy.copy(TaskDict))
TaskDict = {}
continue
# Split the non-empty line into key and value.
kv = line.split('=', 1)
# Check for and handle keys with no value.
if (len(kv) < 2):
TaskDict[kv[0]] = None
continue
# Check for and handle keys that should have integer values.
if (kv[0].lower() in IntegerKeysDict):
TaskDict[kv[0]] = int(kv[1])
continue
# Check for and handle keys that should have boolean values.
if (kv[0].lower() in BooleanKeysDict):
TaskDict[kv[0]] = (kv[1].lower() in ("true", "t", "yes", "y", "1"))
continue
# Assume all other keys have string values.
TaskDict[kv[0]] = kv[1]
return TaskList
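# Sketch of the input this parser expects (an assumption based on the keys
# handled above, not verified GetJobTasks output): blank-line-separated blocks
# of "Key=Value" lines, for example:
#
#   TaskId=0
#   TaskStatus=Completed
#   TaskRenderTime=00:00:05.0140000
#
#   TaskId=1
#   ...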
def PrintJobTaskTimes(TaskList):
"""
Prints out the task times for tasks contained in the TaskList.
"""
for TaskDict in TaskList:
TaskID = "?"
if "TaskId" in TaskDict:
TaskID = "%d" % TaskDict["TaskId"]
print("%s: %s (%s)" % (TaskID, TaskDict["TaskRenderTime"], TaskDict["TaskStatus"]))
# ======== Main Entry Point ========
def __main__(jobId):
job = RepositoryUtils.GetJob(jobId, True)
if (not job):
print("The specified Job ID was not found: %s" % jobId)
        return
call_deadline = subprocess.Popen([DEADLINECOMMAND_FULLPATH, 'GetJobTasks', jobId], stdout=subprocess.PIPE)
output = call_deadline.communicate()[0]
TaskList = ParseGetJobTasksOutput(output)
# Uncomment the following lines to see a full readout of each Task's information.
# for TaskDict in TaskList:
# print("--")
# pprint.pprint(TaskDict)
PrintJobTaskTimes(TaskList)
|
ktan2020/jpype
|
jpype/_jcollection.py
|
Python
|
apache-2.0
| 8,284
| 0.003621
|
#*****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
import collections
from . import _jclass
class _WrappedIterator(object):
"""
Wraps a Java iterator to respect the Python 3 iterator API
"""
def __init__(self, iterator):
self.iterator = iterator
def __iter__(self):
return self.iterator
def __next__(self):
return next(self.iterator)
# Compatibility name
next = __next__
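# Minimal illustration (a plain Python iterator stands in for a Java one):
#
#   it = _WrappedIterator(iter([1, 2, 3]))
#   next(it)   # -> 1
#   it.next()  # -> 2  (Python 2 style, via the compatibility alias)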
def _initialize():
_jclass.registerClassCustomizer(CollectionCustomizer())
_jclass.registerClassCustomizer(ListCustomizer())
_jclass.registerClassCustomizer(MapCustomizer())
_jclass.registerClassCustomizer(IteratorCustomizer())
_jclass.registerClassCustomizer(EnumerationCustomizer())
def isPythonSequence(v):
if isinstance(v, collections.Sequence):
if not hasattr(v.__class__, '__metaclass__') \
or v.__class__.__metaclass__ is _jclass._JavaClass:
return True
return False
def _colLength(self):
return self.size()
def _colIter(self):
return _WrappedIterator(self.iterator())
def _colDelItem(self, i):
return self.remove(i)
def _colAddAll(self, v):
if isPythonSequence(v):
r = False
for i in v:
r = self.add(i) or r
return r
else:
return self._addAll(v)
def _colRemoveAll(self, v):
if isPythonSequence(v):
r = False
for i in v:
r = self.remove(i) or r
return r
else:
return self._removeAll(v)
def _colRetainAll(self, v):
if isPythonSequence(v):
r = _jclass.JClass("java.util.ArrayList")(len(v))
for i in v:
r.add(i)
else:
r = v
return self._retainAll(r)
class CollectionCustomizer(object):
_METHODS = {
'__len__': _colLength,
'__iter__': _colIter,
'__delitem__': _colDelItem,
}
def canCustomize(self, name, jc):
if name == 'java.util.Collection':
return True
return jc.isSubclass('java.util.Collection')
def customize(self, name, jc, bases, members):
if name == 'java.util.Collection':
members.update(CollectionCustomizer._METHODS)
else:
# AddAll is handled by List
if (not jc.isSubclass("java.util.List")) and 'addAll' in members:
members['_addAll'] = members['addAll']
members['addAll'] = _colAddAll
if 'removeAll' in members:
members['_removeAll'] = members['removeAll']
members['removeAll'] = _colRemoveAll
if 'retainAll' in members:
members['_retainAll'] = members['retainAll']
members['retainAll'] = _colRetainAll
def _listGetItem(self, ndx):
if isinstance(ndx, slice):
start = ndx.start
stop = ndx.stop
if start < 0:
start = self.size() + start
if stop < 0:
stop = self.size() + stop
return self.subList(start, stop)
else:
if ndx < 0:
ndx = self.size() + ndx
return self.get(ndx)
def _listSetItem(self, ndx, v):
if isinstance(ndx, slice):
start = ndx.start
stop = ndx.stop
if start < 0:
start = self.size() + start
if stop < 0:
stop = self.size() + stop
for i in range(start, stop):
self.remove(start)
if isinstance(v, collections.Sequence):
ndx = start
for i in v:
self.add(ndx, i)
ndx += 1
else:
if ndx < 0:
ndx = self.size() + ndx
self.set(ndx, v)
def _listAddAll(self, v, v2=None):
if isPythonSequence(v):
r = False
if v2 is not None: # assume form (int, values)
for i in range(len(v2)):
r = r or self.add(v + i, v2[i])
else:
for i in v:
r = self.add(i) or r
return r
else:
return self._addAll(v)
class ListCustomizer(object):
_METHODS = {
'__setitem__': _listSetItem,
'__getitem__': _listGetItem,
}
def canCustomize(self, name, jc):
if name == 'java.util.List':
return True
return jc.isSubclass('java.util.List')
def customize(self, name, jc, bases, members):
if name == 'java.util.List':
members.update(ListCustomizer._METHODS)
else:
if 'addAll' in members:
members['_addAll'] = members['addAll']
members['addAll'] = _listAddAll
def isPythonMapping(v):
if isinstance(v, collections.Mapping):
if not hasattr(v.__class__, '__metaclass__') or \
v.__class__.__metaclass__ is _jclass._JavaClass:
return True
return False
def _mapLength(self):
return self.size()
def _mapIter(self):
return _WrappedIterator(self.keySet().iterator())
def _mapDelItem(self, i):
return self.remove(i)
def _mapGetItem(self, ndx):
return self.get(ndx)
def _mapSetItem(self, ndx, v):
self.put(ndx, v)
def _mapPutAll(self, v):
if isPythonMapping(v):
for i in v:
self.put(i, v[i])
else:
# do the regular method ...
self._putAll(v)
class MapCustomizer(object):
_METHODS = {
'__len__': _mapLength,
'__iter__': _mapIter,
'__delitem__': _mapDelItem,
'__getitem__': _mapGetItem,
'__setitem__': _mapSetItem,
}
def canCustomize(self, name, jc):
if name == 'java.util.Map':
return True
return jc.isSubclass('java.util.Map')
def customize(self, name, jc, bases, members):
if name == 'java.util.Map':
members.update(MapCustomizer._METHODS)
else:
if "putAll" in members:
members["_putAll"] = members["putAll"]
members["putAll"] = _mapPutAll
def _iterCustomNext(self):
if self.hasNext():
return self._next()
raise StopIteration
def _iterIteratorNext(self):
if self.hasNext():
return next(self)
raise StopIteration
def _iterIter(self):
return self
class IteratorCustomizer(object):
_METHODS = {
'__iter__': _iterIter,
'__next__': _iterCustomNext,
}
def canCustomize(self, name, jc):
if name == 'java.util.Iterator':
return True
return jc.isSubclass('java.util.Iterator')
def customize(self, name, jc, bases, members):
if name == 'java.util.Iterator':
members.update(IteratorCustomizer._METHODS)
elif jc.isSubclass('java.util.Iterator'):
__next__ = 'next' if 'next' in members else '__next__'
members['_next'] = members[__next__]
members[__next__] = _iterCustomNext
def _enumNext(self):
if self.hasMoreElements():
return self.nextElement()
raise StopIteration
def _enumIter(self):
return self
class EnumerationCustomizer(object):
_METHODS = {
'next': _enumNext,
'__next__': _enumNext,
'__iter__': _enumIter,
}
def canCustomize(self, name, jc):
return name == 'java.util.Enumeration'
|
h4ck3rm1k3/ansible
|
lib/ansible/module_utils/basic.py
|
Python
|
gpl-3.0
| 60,500
| 0.004496
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# == BEGIN DYNAMICALLY INSERTED CODE ==
ANSIBLE_VERSION = "<<ANSIBLE_VERSION>>"
MODULE_ARGS = "<<INCLUDE_ANSIBLE_MODULE_ARGS>>"
MODULE_COMPLEX_ARGS = "<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>"
BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1]
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0]
BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
# ansible modules can be written in any language. To simplify
# development of Python modules, the functions available here
# can be inserted in any module source automatically by including
# #<<INCLUDE_ANSIBLE_MODULE_COMMON>> on a blank line by itself inside
# of an ansible module. The source of this common code lives
# in lib/ansible/module_common.py
import locale
import os
import re
import pipes
import shlex
import subprocess
import sys
import syslog
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import tempfile
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
sys.stderr.write('Error: ansible requires a json module, none found!')
sys.exit(1)
except SyntaxError:
sys.stderr.write('SyntaxError: probably due to json and python being for different versions')
sys.exit(1)
HAVE_SELINUX=False
try:
import selinux
HAVE_SELINUX=True
except ImportError:
pass
HAVE_HASHLIB=False
try:
from hashlib import sha1 as _sha1
HAVE_HASHLIB=True
except ImportError:
from sha import sha as _sha1
try:
from hashlib import md5 as _md5
except ImportError:
try:
from md5 import md5 as _md5
except ImportError:
# MD5 unavailable. Possibly FIPS mode
_md5 = None
try:
from hashlib import sha256 as _sha256
except ImportError:
pass
try:
from systemd import journal
has_journal = True
except ImportError:
import syslog
has_journal = False
try:
from ast import literal_eval as _literal_eval
except ImportError:
# a replacement for literal_eval that works with python 2.4. from:
# https://mail.python.org/pipermail/python-list/2009-September/551880.html
    # which is essentially a cut/paste from an earlier (2.6) version of python's
# ast.py
from compiler import parse
from compiler.ast import *
def _literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {'None': None, 'True': True, 'False': False}
if isinstance(node_or_string, basestring):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.node
def _convert(node):
if isinstance(node, Const) and isinstance(node.value, (basestring, int, float, long, complex)):
return node.value
elif isinstance(node, Tuple):
return tuple(map(_convert, node.nodes))
elif isinstance(node, List):
return list(map(_convert, node.nodes))
elif isinstance(node, Dict):
return dict((_convert(k), _convert(v)) for k, v in node.items)
elif isinstance(node, Name):
if node.name in _safe_names:
return _safe_names[node.name]
elif isinstance(node, UnarySub):
return -_convert(node.expr)
raise ValueError('malformed string')
return _convert(node_or_string)
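# Illustrative example only: both ast.literal_eval and the compiler-based
# fallback above accept the same restricted literal syntax, e.g.
#
#     _literal_eval("{'a': 1, 'b': [True, None, -2.5]}")
#     # -> {'a': 1, 'b': [True, None, -2.5]}
#
# Names other than None/True/False, function calls, and operators beyond unary
# minus make the fallback raise ValueError('malformed string').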
FILE_COMMON_ARGUMENTS=dict(
src = dict(),
mode = dict(),
owner = dict(),
group = dict(),
seuser = dict(),
serole = dict(),
selevel = dict(),
setype = dict(),
follow = dict(type='bool', default=False),
# not taken by the file module, but other modules call file so it must ignore them.
content = dict(no_log=True),
backup = dict(),
force = dict(),
remote_src = dict(), # used by assemble
regexp = dict(), # used by assemble
delimiter = dict(), # used by assemble
directory_mode = dict(), # used by copy
)
def get_platform():
''' what's the platform? example: Linux is a platform. '''
return platform.system()
def get_distribution():
''' return the distribution name '''
if platform.system() == 'Linux':
try:
distribution = platform.linux_distribution()[0].capitalize()
if not distribution and os.path.isfile('/etc/system-release'):
distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
if 'Amazon' in distribution:
distribution = 'Amazon'
else:
distribution = 'OtherLinux'
except:
# FIXME: MethodMissing, I assume?
distribution = platform.dist()[0].capitalize()
else:
distribution = None
return distribution
def get_distribution_version():
''' return the distribution version '''
if platform.system() == 'Linux':
try:
            distribution_version = platform.linux_distribution()[1]
if not distribution_version and os.path.isfile('/etc/system-release'):
distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
except:
# FIXME: MethodMissing, I assume?
distribution_version = platform.dist()[1]
else:
distribution_version = None
return distribution_version
def load_platform_subclass(cls, *args, **kwargs):
'''
used by modules like User to have different implementations based on detected platform. See User
module for an example.
'''
this_platform = get_platform()
distribution = get_distribution()
subclass = None
# get the most specific superclass for this platform
if distribution is not None:
for sc in cls.__subclasses__():
if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
subclass = sc
if subclass is None:
for sc in cls.__subclasses__():
if sc.platform == this_platform and sc.distribution is None:
subclass = sc
if subclass is None:
|
ericmjl/bokeh
|
tests/unit/bokeh/util/test_version.py
|
Python
|
bsd-3-clause
| 3,274
| 0.00733
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import re
# External imports
import mock
# Bokeh imports
from bokeh._version import get_versions
# Module under test
import bokeh.util.version as buv # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
VERSION_PAT = re.compile(r"^(\d+\.\d+\.\d+)$")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test___version__(object):
def test_basic(self) -> None:
assert isinstance(buv.__version__, str)
assert buv.__version__ == get_versions()['version']
class Test_base_version(object):
def test_returns_helper(self) -> None:
with mock.patch('bokeh.util.version._base_version_helper') as helper:
buv.base_version()
assert helper.called
class Test_is_full_release(object):
def test_actual(self) -> None:
assert buv.is_full_release() == bool(VERSION_PAT.match(buv.__version__))
def test_mock_full(self, monkeypatch) -> None:
monkeypatch.setattr(buv, '__version__', "1.5.0")
assert buv.is_full_release()
    @pytest.mark.parametrize('v', ("1.2.3dev2", "1.4.5rc3", "junk"))
def test_mock_not_full(self, monkeypatch, v) -> None:
monkeypatch.setattr(buv, '__version__', v)
assert not buv.is_full_release()
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
class Test__base_version_helper(object):
def test_release_version_unchanged(self) -> None:
assert buv._base_version_helper("0.2.3") == "0.2.3"
assert buv._base_version_helper("1.2.3") == "1.2.3"
def test_dev_version_stripped(self) -> None:
assert buv._base_version_helper("0.2.3dev2") == "0.2.3"
assert buv._base_version_helper("1.2.3dev10") == "1.2.3"
def test_rc_version_stripped(self) -> None:
assert buv._base_version_helper("0.2.3rc2") == "0.2.3"
assert buv._base_version_helper("1.2.3rc10") == "1.2.3"
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
r-castro/Python
|
NewList.py
|
Python
|
gpl-3.0
| 1,219
| 0.001641
|
# Simple class SingleList
class SingleList:
"""Documentation for SingleList
"""
def __init__(self, initial_list=None):
self.__list = []
if initial_list:
for value in initial_list:
if value not in self.__list:
self.__list.append(value)
def __str__(self):
temp_string = ""
i = 0
for i in range(len(self)):
temp_string += "%12d" % self.__list[i]
if (i + 1) % 4 == 0:
temp_string += "\n"
        # add a trailing newline only when the last row is incomplete
        if len(self) % 4 != 0:
temp_string += "\n"
return temp_string
def __len__(self):
return len(self.__list)
def __getitem__(self, index):
return self.__list[index]
    def __setitem__(self, index, value):
        if value in self.__list:
raise ValueError("List already contains value %s" % str(value))
self.__list[index] = value
def __eq__(self, other):
        if len(self) != len(other):
            return False
        for i in range(0, len(self)):
            if self.__list[i] != other.__list[i]:
                return False
        return True
def __ne__(self, other):
return not (self == other)
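# Illustrative usage (not part of the original file): duplicates passed to the
# constructor are dropped, and __setitem__ refuses values already present.
#
#     values = SingleList([1, 2, 2, 3])
#     len(values)        # 3
#     values[0]          # 1
#     values[0] = 2      # raises ValueError: List already contains value 2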
|
vileopratama/vitech
|
src/addons/sale_order_dates/__init__.py
|
Python
|
mit
| 124
| 0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import sale_order_dates
|