| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
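Each row below stores one source file split at an arbitrary point into prefix, middle, and suffix (a fill-in-the-middle layout; the three strings concatenate back to the original file). A minimal sketch of reassembling one row, assuming the dump has been exported as one JSON object per line under the hypothetical name fim-corpus.jsonl:

import json

# Hypothetical export path; the keys are the column names in the header above.
with open('fim-corpus.jsonl', encoding='utf-8') as fh:
    row = json.loads(next(fh))

# The three text columns concatenate back to the original source file.
source = row['prefix'] + row['middle'] + row['suffix']
print(row['repo_name'], row['path'], row['license'], len(source))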
Ikusaba-san/Chiaki-Nanami | chiaki.py | Python | mit | 4,858 | 0.004529 |
#!/usr/bin/env python3
import asyncio
import contextlib
import datetime
import functools
import importlib
import itertools
import logging
import os
import sys
import traceback
import click
import discord
import config
from cogs.utils import db
from core import Chiaki, migration
# use faster event loop, but fall back to default if on Windows or not installed
try:
import uvloop
except ImportError:
pass
else:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
@contextlib.contextmanager
def log(stream=False):
logging.getLogger('discord').setLevel(logging.INFO)
os.makedirs(os.path.join(os.path.dirname(__file__), 'logs'), exist_ok=True)
root = logging.getLogger()
root.setLevel(logging.INFO)
handler = logging.FileHandler(
filename=f'logs/chiaki-{datetime.datetime.now()}.log',
encoding='utf-8',
mode='w'
)
fmt = logging.Formatter('[{asctime}] ({levelname:<7}) {name}: {message}', '%Y-%m-%d %H:%M:%S', style='{')
handler.setFormatter(fmt)
root.addHandler(handler)
if stream:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(fmt)
root.addHandler(stream_handler)
try:
yield
finally:
for hdlr in root.handlers[:]:
hdlr.close()
root.removeHandler(hdlr)
#--------------MAIN---------------
_old_send = discord.abc.Messageable.send
async def new_send(self, content=None, *, allow_everyone=False, **kwargs):
if content is not None:
if not allow_everyone:
content = str(content).replace('@everyone', '@\u200beveryone').replace('@here', '@\u200bhere')
return await _old_send(self, content, **kwargs)
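# A minimal illustration of what the patch above does (the sample string is
# illustrative): a zero-width space is inserted so clients render the text
# without resolving the mass mention.
#   '@everyone hi'.replace('@everyone', '@\u200beveryone')  ->  '@\u200beveryone hi'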
@click.group(invoke_without_command=True)
@click.option('--log-stream', is_flag=True, help='Adds a stderr stream-handler for logging')
@click.pass_context
def main(ctx, log_stream):
if ctx.invoked_subcommand is not None:
return
# This has to be patched first because Chiaki loads her extensions in
# __init__, which means she loads her commands in __init__
from discord.ext import commands
old_commands_group = commands.group
commands.group = functools.partial(old_commands_group, case_insensitive=True)
bot = Chiaki()
discord.abc.Messageable.send = new_send
with log(log_stream):
try:
bot.run()
finally:
discord.abc.Messageable.send = _old_send
commands.group = old_commands_group
return 69 * bot.reset_requested
# ------------- DB-related stuff ------------------
async def _create_pool():
psql = f'postgresql://{config.psql_user}:{config.psql_pass}@{config.psql_host}/{config.psql_db}'
return await db.create_pool(psql, command_timeout=60)
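# _create_pool expects these attributes on the config module; the values
# below are placeholder assumptions, not real settings.
#   psql_user = 'chiaki'
#   psql_pass = 'secret'
#   psql_host = 'localhost'
#   psql_db = 'chiaki'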
def _load_modules(extensions):
for e in itertools.chain.from_iterable(Chiaki.find_extensions(e) or [e] for e in extensions):
try:
importlib.import_module(e)
except:
click.echo(f'Could not load {e}.\n{traceback.format_exc()}', err=True)
raise
async def _migrate(version='', downgrade=False, verbose=False):
# click doesn't like None as a default so we have to settle with an empty string
if not version:
version = None
_load_modules(config.extensions)
pool = await _create_pool()
async with pool.acquire() as conn:
await migration.migrate(version, connection=conn, downgrade=downgrade, verbose=verbose)
def _sync_migrate(version, downgrade, verbose):
run = asyncio.get_event_loop().run_until_complete
run(_migrate(version, downgrade=downgrade, verbose=verbose))
@main.command()
@click.option('--version', default='', metavar='[version]', help='Version to migrate to, defaults to latest')
@click.option('-v', '--verbose', is_flag=True)
def upgrade(version, verbose):
"""Upgrade the database to a version"""
_sync_migrate(version, downgrade=False, verbose=verbose)
click.echo('Upgrade successful! <3')
@main.command()
@click.option('--version', default='', metavar='[version]', help='Version to migrate to, defaults to latest')
@click.option('-v', '--verbose', is_flag=True)
def downgrade(version, verbose):
"""Downgrade the database to a version"""
_sync_migrate(version, downgrade=True, verbose=verbose)
click.echo('Downgrade successful! <3')
async def _init(verbose):
_load_modules(config.extensions)
pool = await _create_pool()
async with pool.acquire() as conn:
await migration.init(connection=conn, verbose=verbose)
@main.command(name='init-db')
@click.option('-v', '--verbose', is_flag=True)
def init_db(verbose):
"""Initialize the database"""
run = asyncio.get_event_loop().run_until_complete
run(_init(verbose))
click.echo('Database initialization successful! <3')
if __name__ == '__main__':
sys.exit(main())
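# A hedged usage sketch: the subcommand names come from the click decorators
# above; the invocation paths are assumptions.
#   python chiaki.py --log-stream                  # run the bot, mirroring logs to stderr
#   python chiaki.py init-db -v                    # initialize the database
#   python chiaki.py upgrade --version some_version -v
#   python chiaki.py downgrade --version some_version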
|
brupoon/pipya | pipya.py | Python | gpl-3.0 | 3,093 | 0.005496 |
# -*- coding: utf-8 -*-
"""
pipya main cli file
Copyright (c) 2015 Brunston Poon
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
Full license in LICENCE.txt
"""
#IMPORT IMPORT IMPORT IMPORT IMPORT IMPORT IMPORT IMPORT IMPORT IMPORT IMPORT
#Let's make sure that the user has all the dependencies installed and that
#they are running the correct version of Python
import sys, os
try:
import feedparser
except ImportError:
print("feedparser is a system-agnostic dependency for RSS support")
sys.exit()
import time
import json
import webbrowser
import helper as h
import fetch as fetch
toggle = True
version = sys.version_info[0]
if version != 3:
print("""
Please upgrade to Python3, preferably 3.4.* or greater, before continuing""")
toggle = False
sys.exit()
if os.name == "nt":
try:
from colorama import init
init()
except ImportError:
print("colorama is a windows dependency for ANSI colors support")
sys.exit()
def main():
wapi, user, citystr,newslink = h.kernfig()
h.welcome(user)
while True:
wapi, user, citystr,newslink = h.kernfig()
print(h.ask())
uin = str.lower(input(">"))
if "fetch" in uin:
fetch.main(uin)
elif "visit" in uin:
for itemToVisit in h.giveComputerIndex(uin):
newsfeed = h.grab(newslink)
webbrowser.open(newsfeed.entries[itemToVisit-1].link)
elif "set" in uin:
if "name" in uin:
name = input("What would you like me to call you? ")
h.cfgwriter("settings.cfg",0,name)
if "city" in uin:
city = input("""
Changing weather location? Where to?
Must be in Wunderground form. """)
h.cfgwriter("settings.cfg",1,city)
elif "name" and "pronounce" in uin:
print(h.pipya()+"My name is pronounced Pip-pah. The y is silent :).")
elif "name" and ("how" or "where") and "get" in uin:
print(h.pipya()+"""\
My name started as pypa, for "python personal assistant". It morphed to pipya
for pronounceability. Thanks for asking!""")
elif "what" and "can" and "do" in uin:
h.capabilities()
elif "who" and "are" and ("you" or "pipya") in uin:
print(h.pipya()+"""
I am Pipya, a personal assistant written in python3. My creator is brupoon.
He intended for me to be a jack-of-all-trades personal assistant operated by
a cli. I am a sexless, genderless entity, though my name is similar to the
human feminine "Pippa".
""")
elif uin in ["quit", "goodbye", "exit"]:
print("Goodbye, {0}! 'Till next time.".format(user))
sys.exit()
elif uin in ["jellyfish"]:
h.jellyfish()
else:
print("Pipya: Sorry, {0}, I didn't quite catch that.".format(user))
if __name__ == '__main__' and toggle:
main()
|
hackultura/octofiles | docs/source/conf.py | Python | gpl-3.0 | 11,319 | 0.006538 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Octofiles documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 10 14:51:08 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinxcontrib.httpdomain'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Octofiles'
copyright = '2016, Hackultura'
author = 'Hackultura'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'pt_BR'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Octofilesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Octofiles.tex', 'Octofiles Documentation',
'Hackultura', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
|
n8foo/cigarbox | app.py | Python | apache-2.0 | 223 | 0.004484 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from flask import Flask
from config import *
# create the app
app = Flask(__name__)
# Load default config and override config from config file
app.config.from_object('config')
|
jordanemedlock/psychtruths | temboo/core/Library/Amazon/IAM/GetLoginProfile.py | Python | apache-2.0 | 3,876 | 0.005418 |
# -*- coding: utf-8 -*-
###############################################################################
#
# GetLoginProfile
# Retrieves the user name and password create date for the specified user.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetLoginProfile(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetLoginProfile Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetLoginProfile, self).__init__(temboo_session, '/Library/Amazon/IAM/GetLoginProfile')
def new_input_set(self):
return GetLoginProfileInputSet()
def _make_result_set(self, result, path):
return GetLoginProfileResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetLoginProfileChoreographyExecution(session, exec_id, path)
class GetLoginProfileInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetLoginProfile
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AWSAccessKeyId(self, value):
"""
Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
"""
super(GetLoginProfileInputSet, self)._set_input('AWSAccessKeyId', value)
def set_AWSSecretKeyId(self, value):
"""
Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
"""
super(GetLoginProfileInputSet, self)._set_input('AWSSecretKeyId', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".)
"""
super(GetLoginProfileInputSet, self)._set_input('ResponseFormat', value)
def set_UserName(self, value):
"""
Set the value of the UserName input for this Choreo. ((required, string) Name of the user whose login profile you want to retrieve.)
"""
super(GetLoginProfileInputSet, self)._set_input('UserName', value)
class GetLoginProfileResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetLoginProfile Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon.)
"""
return self._output.get('Response', None)
class GetLoginProfileChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetLoginProfileResultSet(response, path)
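# A hedged usage sketch built from the classes above; constructing the
# TembooSession and the execute_with_results call are assumptions about the
# surrounding Temboo SDK, not shown in this file.
#   choreo = GetLoginProfile(temboo_session)
#   inputs = choreo.new_input_set()
#   inputs.set_AWSAccessKeyId('AKIA...')     # placeholder credential
#   inputs.set_AWSSecretKeyId('...')         # placeholder credential
#   inputs.set_UserName('some-iam-user')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())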
|
Forage/Gramps | gramps/plugins/lib/libsubstkeyword.py | Python | gpl-2.0 | 48,355 | 0.007424 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Craig J. Anderson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Provide the SubstKeywords class that will replace keywords in a passed
string with information about the person/marriage/spouse. For example:
foo = SubstKeywords(database, person_handle)
print foo.replace_and_clean(['$n was born on $b.'])
Will return a value such as:
Mary Smith was born on 3/28/1923.
"""
from __future__ import print_function
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.datehandler import displayer
from gramps.gen.lib import EventType
from gramps.gen.utils.db import get_birth_or_fallback, get_death_or_fallback
from gramps.gen.constfunc import STRTYPE, cuni
#------------------------------------------------------------------------
#
# Local constants
#
#------------------------------------------------------------------------
class TextTypes():
"""Four enumerations that are used to for the four main parts of a string.
and used for states. Separator is not used in states.
text -> remove or display
remove -> display
"""
separator, text, remove, display = list(range(4))
TXT = TextTypes()
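# With the enumeration above: TXT.separator == 0, TXT.text == 1,
# TXT.remove == 2, TXT.display == 3.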
#------------------------------------------------------------------------
#
# Formatting classes
#
#------------------------------------------------------------------------
class GenericFormat(object):
"""A Generic parsing class. Will be subclassed by specific format strings
"""
def __init__(self, string_in):
self.string_in = string_in
def _default_format(self, item):
""" The default format if there is no format string """
pass
def is_blank(self, item):
""" if the information is not known (item is None), remove the format
string information from the input string if any.
"""
if item is None:
self.string_in.remove_start_end("(", ")")
return True
return False
def generic_format(self, item, code, uppr, function):
"""the main parsing engine.
Needed are the following: the input string
code - List of one character (string) codes (all lowercase)
uppr - list of one character (string) codes that can be uppercased
each needs to have a lowercase equivalent in code
function - list of functions.
there is a one to one relationship with character codes and functions.
"""
if self.string_in.this != "(":
return self._default_format(item)
self.string_in.step()
main = VarString()
separator = SeparatorParse(self.string_in)
#code given in args
#function given in args
while self.string_in.this and self.string_in.this != ")":
#Check to see if _in.this is in code
to_upper = False
if uppr.find(self.string_in.this) != -1:
#and the result should be uppercased.
to_upper = True
where = code.find(self.string_in.this.lower())
else:
where = code.find(self.string_in.this)
if where != -1:
self.string_in.step()
tmp = function[where]()
if to_upper:
tmp = tmp.upper()
if tmp == "" or tmp is None:
main.add_remove()
elif isinstance(tmp, VarString): #events cause this
main.extend(tmp)
else:
main.add_variable(tmp)
elif separator.is_a():
main.add_separator(separator.parse_format())
else:
main.add_text(self.string_in.parse_format())
if self.string_in.this == ")":
self.string_in.step()
return main
#------------------------------------------------------------------------
# Name Format strings
#------------------------------------------------------------------------
class NameFormat(GenericFormat):
""" The name format class.
If no format string, the name is displayed as per preference options
otherwise, parse through a format string and put the name parts in
"""
def get_name(self, person):
""" A helper method for retrieving the person's name """
if person:
return person.get_primary_name()
return None
def _default_format(self, name):
""" display the name as set in preferences """
return name_displayer.sorted_name(name)
def parse_format(self, name):
""" Parse the name """
if self.is_blank(name):
return
def common():
""" return the common name of the person """
return (name.get_call_name() or
name.get_first_name().split(' ')[0])
code = "tfcnxslg"
upper = code.upper()
function = [name.get_title, #t
name.get_first_name, #f
name.get_call_name, #c
name.get_nick_name, #n
common, #x
name.get_suffix, #s
name.get_surname, #l
name.get_family_nick_name #g
]
return self.generic_format(name, code, upper, function)
#------------------------------------------------------------------------
# Date Format strings
#------------------------------------------------------------------------
class DateFormat(GenericFormat):
""" The date format class.
If no format string, the date is displayed as per preference options
otherwise, parse through a format string and put the date parts in
"""
def get_date(self, event):
""" A helper method for retrieving a date from an event """
if event:
return event.get_date_object()
return None
def _default_format(self, date):
return displayer.display(date)
def __count_chars(self, char, max_amount):
""" count the year/month/day codes """
count = 1 #already have seen/passed one
while count < max_amount and self.string_in.this == char:
self.string_in.step()
count = count +1
return count
def parse_format(self, date):
""" Parse the name """
if self.is_blank(date):
return
def year():
""" The year part only """
year = cuni(date.get_year())
count = self.__count_chars("y", 4)
if year == "0":
return
if count == 1: #found 'y'
if len(year) == 1:
return year
elif year[-2] == "0":
return year[-1]
else:
return year[-2:]
elif count == 2: #found 'yy'
tmp = "0" + year
return tmp[-2:]
elif count == 3: #found 'yyy'
if len(year) > 2:
return year
else:
tmp = "00" + year
return tmp[-3:]
else:
|
takmid/inasafe | safe_qgis/dock.py | Python | gpl-3.0 | 73,919 | 0.00188 |
"""
InaSAFE Disaster risk assessment tool developed by AusAid - **GUI Dialog.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
.. todo:: Check raster is single band
"""
from safe.common.utilities import temp_dir
__author__ = 'tim@linfiniti.com'
__version__ = '0.5.0'
__revision__ = '$Format:%H$'
__date__ = '10/01/2011'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
__type__ = 'alpha' # beta, final etc will be shown in dock title
import numpy
import os
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import pyqtSlot
from safe_qgis.dock_base import Ui_DockBase
from safe_qgis.aggregation_attribute_dialog_base import\
Ui_AggregationAttributeDialogBase
from safe_qgis.help import Help
from safe_qgis.utilities import (getExceptionWithStacktrace,
getWGS84resolution,
logOnQgsMessageLog)
from qgis.core import (QgsMapLayer,
QgsVectorLayer,
QgsRasterLayer,
QgsMapLayerRegistry,
QgsCoordinateReferenceSystem,
QgsCoordinateTransform,
QGis,
QgsFeature,
QgsRectangle)
from qgis.analysis import QgsZonalStatistics
from safe_qgis.impact_calculator import ImpactCalculator
from safe_qgis.safe_interface import (availableFunctions,
getFunctionTitle,
getOptimalExtent,
getBufferedExtent,
internationalisedNames,
writeKeywordsToFile)
from safe_qgis.keyword_io import KeywordIO
from safe_qgis.clipper import clipLayer
from safe_qgis.exceptions import (KeywordNotFoundException,
InsufficientOverlapException,
InvalidParameterException,
InsufficientParametersException,
HashNotFoundException)
from safe_qgis.map import Map
from safe.api import write_keywords, read_keywords, ReadLayerError
from safe_qgis.utilities import (htmlHeader,
htmlFooter,
setVectorStyle,
setRasterStyle,
qgisVersion)
# Don't remove this even if it is flagged as unused by your ide
# it is needed for qrc:/ url resolution. See Qt Resources docs.
import safe_qgis.resources # pylint: disable=W0611
#see if we can import pydev - see development docs for details
try:
from pydevd import * # pylint: disable=F0401
print 'Remote debugging is enabled.'
DEBUG = True
except ImportError:
print 'Debugging was disabled'
class Dock(QtGui.QDockWidget, Ui_DockBase):
"""Dock implementation class for the Risk In A Box plugin."""
def __init__(self, iface):
"""Constructor for the dialog.
This dialog will allow the user to select layers and scenario details
and subsequently run their model.
.. note:: We use the multiple inheritance approach from Qt4 so that
for elements are directly accessible in the form context and we can
use autoconnect to set up slots. See article below:
http://doc.qt.nokia.com/4.7-snapshot/designer-using-a-ui-file.html
Args:
* iface - a Quantum GIS QGisAppInterface instance.
Returns:
not applicable
Raises:
no exceptions explicitly raised
"""
QtGui.QDockWidget.__init__(self, None)
self.setupUi(self)
self.setWindowTitle(self.tr('InaSAFE %s %s' % (
__version__, __type__)))
# Save reference to the QGIS interface
self.iface = iface
self.header = None # for storing html header template
self.footer = None # for storing html footer template
self.calculator = ImpactCalculator()
self.keywordIO = KeywordIO()
self.runner = None
self.helpDialog = None
self.state = None
self.runInThreadFlag = False
self.showOnlyVisibleLayersFlag = True
self.setLayerNameFromTitleFlag = True
self.zoomToImpactFlag = True
self.hideExposureFlag = True
self.hazardLayers = None # array of all hazard layers
self.exposureLayers = None # array of all exposure layers
self.readSettings() # getLayers called by this
self.setOkButtonStatus()
self._aggregationPrefix = 'aggr_'
self.initPostprocessingOutput()
myButton = self.pbnHelp
QtCore.QObject.connect(myButton, QtCore.SIGNAL('clicked()'),
self.showHelp)
myButton = self.pbnPrint
QtCore.QObject.connect(myButton, QtCore.SIGNAL('clicked()'),
self.printMap)
#self.showHelp()
myButton = self.pbnRunStop
QtCore.QObject.connect(myButton, QtCore.SIGNAL('clicked()'),
self.accept)
#myAttribute = QtWebKit.QWebSettings.DeveloperExtrasEnabled
#QtWebKit.QWebSettings.setAttribute(myAttribute, True)
def readSettings(self):
"""Set the dock state from QSettings. Do this on init and after
changing options in the options dialog.
Args:
None
Returns:
None
Raises:
None
"""
mySettings = QtCore.QSettings()
myFlag = mySettings.value('inasafe/useThreadingFlag',
False).toBool()
self.runInThreadFlag = myFlag
myFlag = mySettings.value(
'inasafe/visibleLayersOnlyFlag', True).toBool()
self.showOnlyVisibleLayersFlag = myFlag
myFlag = mySettings.value(
'inasafe/setLayerNameFromTitleFlag', True).toBool()
self.setLayerNameFromTitleFlag = myFlag
myFlag = mySettings.value(
'inasafe/setZoomToImpactFlag', True).toBool()
self.zoomToImpactFlag = myFlag
# whether exposure layer should be hidden after model completes
myFlag = mySettings.value(
'inasafe/setHideExposureFlag', False).toBool()
self.hideExposureFlag = myFlag
# whether to clip hazard and exposure layers to the viewport
myFlag = mySettings.value(
'inasafe/clipToViewport', True).toBool()
self.clipToViewport = myFlag
self.getLayers()
def connectLayerListener(self):
"""Establish a signal/slot to listen for changes in the layers loaded
in QGIS.
..seealso:: disconnectLayerListener
Args:
None
Returns:
None
Raises:
None
"""
if qgisVersion() >= 10800: # 1.8 or newer
QgsMapLayerRegistry.instance().layersWillBeRemoved.connect(
self.layersWillBeRemoved)
QgsMapLayerRegistry.instance().layersAdded.connect(
self.layersAdded)
# All versions of QGIS
QtCore.QObject.connect(self.iface.mapCanvas(),
QtCore.SIGNAL('layersChanged()'),
self.getLayers)
# pylint: disable=W0702
def disconnectLayerListener(self):
"""Destroy the signal/slot to listen for changes in the layers loaded
in QGIS.
..seealso:: connectLayerListener
Args:
None
Returns:
None
Raises:
None
"""
try:
QtCore.QObject.disconnect(QgsMapLayerRegistry.instance(),
|
nmccrory/job-visualization | jobvisualization/apps/data_storage/migrations/0003_auto_20150728_2302.py | Python | apache-2.0 | 953 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('data_storage', '0002_auto_20150727_2312'),
]
operations = [
migrations.RenameField(
model_name='readonly',
old_name='state',
new_name='stateAbbreviation',
),
migrations.RenameField(
model_name='writeonly',
old_name='state',
new_name='stateAbbreviation',
),
migrations.AddField(
model_name='readonly',
name='stateName',
field=models.TextField(default='sexy jens'),
preserve_default=False,
),
migrations.AddField(
model_name='writeonly',
name='stateName',
field=models.TextField(default='sexy cindy'),
preserve_default=False,
),
]
|
jhosmer/find_unicode | find_unicode.py | Python | gpl-2.0 | 1,875 | 0.005867 |
#!/usr/bin/python
# find_unicode.py
#
# Author: Jonathan Hosmer
# Date: Sun Feb 15 14:06:15 2015
#
import os
import sys
def help():
print 'Usage: {} [file, [dir, [file, ..]]]'.format(__file__)
print 'Displays line:character position of all non-ascii Unicode character(s) in a file'
def main():
args = sys.argv[1:]
if '-h' in args or '--help' in args:
sys.exit(help())
if not args:
files = [sys.stdin]
long_fname = len('stdin') + 1
else:
# make a flat list of all files
## if a dir is given as an arg then take all files in that dir [non-recursive]
files = [f for f in args if os.path.isfile(f)] + [x for y in
[[os.path.join(d, f) for f in os.listdir(d) if os.path.isfile(os.path.join(d, f))]
for d in args if os.path.isdir(d)]
for x in y]
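# An equivalent, flatter sketch of the comprehension above (uses only names
# already in scope):
#   files = [f for f in args if os.path.isfile(f)]
#   for d in (a for a in args if os.path.isdir(a)):
#       files += [os.path.join(d, f) for f in os.listdir(d)
#                 if os.path.isfile(os.path.join(d, f))]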
long_fname = max(map(len, files)) + 1
chars = []
out_str = '{{:<{}}} {{:03}}:{{:04}} {{chars:^5}} {{chars!r:^13}}'.format(long_fname)
if sys.stdin not in files:
header = '{{:^{}}} {{}}:{{}} {{:^5}} {{:^10}}'.format(long_fname)
head_out = header.format('File', 'Line', 'Col', 'char', '(ord)')
print head_out + '\n' + '-'*len(head_out)
for f in files:
if f is sys.stdin:
infile = sys.stdin
fname = 'stdin'
else:
fname = f
infile = open(f)
for line_i, line in enumerate(infile):
for char_i, char in enumerate(line):
if ord(char) > 126:
chars.append(char)
else:
if chars:
print out_str.format(fname, line_i+1, char_i+1-len(chars),
chars=''.join(chars))
chars = []
if __name__ == '__main__':
sys.exit(main())
|
Nosferatul/coala | tests/output/JSONEncoderTest.py | Python | agpl-3.0 | 2,948 | 0 |
import json
import re
import unittest
from datetime import datetime
from coalib.output.JSONEncoder import create_json_encoder
class TestClass1(object):
def __init__(self):
self.a = 0
class TestClass2(object):
def __init__(self):
self.a = 0
self.b = TestClass1()
class TestClass3(object):
def __init__(self):
self.a = 0
self.b = TestClass1()
@staticmethod
def __getitem__(key):
return 'val'
@staticmethod
def keys():
return ['key']
class PropertiedClass(object):
def __init__(self):
self._a = 5
@property
def prop(self):
return self._a
class JSONAbleClass(object):
@staticmethod
def __json__():
return ['dont', 'panic']
class JSONEncoderTest(unittest.TestCase):
JSONEncoder = create_json_encoder(use_relpath=True)
kw = {'cls': JSONEncoder, 'sort_keys': True}
def test_builtins(self):
self.assertEqual('"test"', json.dumps('test', **self.kw))
self.assertEqual('1', json.dumps(1, **self.kw))
self.assertEqual('true', json.dumps(True, **self.kw))
self.assertEqual('null', json.dumps(None, **self.kw))
def test_iter(self):
self.assertEqual('[0, 1]', json.dumps([0, 1], **self.kw))
self.assertEqual('[0, 1]', json.dumps((0, 1), **self.kw))
self.assertEqual('[0, 1]', json.dumps(range(2), **self.kw))
def test_dict(self):
self.assertEqual('{"0": 1}', json.dumps({0: 1}, **self.kw))
self.assertEqual('{"0": 1}', json.dumps({'0': 1}, **self.kw))
self.assertEqual('{"0": "1"}', json.dumps({'0': '1'}, **self.kw))
def test_time(self):
tf = datetime.today()
self.assertEqual('"' + tf.isoformat() + '"',
json.dumps(tf, **self.kw))
def test_re_object(self):
uut = re.compile('x')
self.assertEqual('"' + uut.pattern + '"',
json.dumps(uut, **self.kw))
def test_class1(self):
tc1 = TestClass1()
self.assertEqual('{"a": 0}', json.dumps(tc1, **self.kw))
self.assertEqual('[{"a": 0}]', json.dumps([tc1], **self.kw))
self.assertEqual('{"0": {"a": 0}}', json.dumps({0: tc1}, **self.kw))
def test_class2(self):
tc2 = TestClass2()
self.assertEqual('{"a": 0, "b": {"a": 0}}',
json.dumps(tc2, **self.kw))
def test_class3(self):
tc3 = TestClass3()
self.assertEqual('{"key": "val"}',
json.dumps(tc3, **self.kw))
def test_propertied_class(self):
uut = PropertiedClass()
self.assertEqual('{"prop": 5}', json.dumps(uut, **self.kw))
def test_jsonable_class(self):
uut = JSONAbleClass()
self.assertEqual('["dont", "panic"]', json.dumps(uut, **self.kw))
def test_type_error(self):
with self.assertRaises(TypeError):
json.dumps(1j, **self.kw)
|
sandvine/horizon | openstack_dashboard/dashboards/project/routers/views.py | Python | apache-2.0 | 9,121 | 0 |
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Neutron Routers.
"""
from collections import OrderedDict
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.utils import filters
from openstack_dashboard.dashboards.project.routers\
import forms as project_forms
from openstack_dashboard.dashboards.project.routers import tables as rtables
from openstack_dashboard.dashboards.project.routers import tabs as rdtabs
class IndexView(tables.DataTableView):
table_class = rtables.RoutersTable
template_name = 'project/routers/index.html'
page_title = _("Routers")
FILTERS_MAPPING = {'admin_state_up': {_("up"): True, _("down"): False}}
def _get_routers(self, search_opts=None):
try:
search_opts = self.get_filters(
filters=search_opts, filters_map=self.FILTERS_MAPPING)
tenant_id = self.request.user.tenant_id
routers = api.neutron.router_list(self.request,
tenant_id=tenant_id,
**search_opts)
except Exception:
routers = []
exceptions.handle(self.request,
_('Unable to retrieve router list.'))
ext_net_dict = self._list_external_networks()
for r in routers:
r.name = r.name_or_id
self._set_external_network(r, ext_net_dict)
return routers
def get_data(self):
routers = self._get_routers()
return routers
def _list_external_networks(self):
try:
search_opts = {'router:external': True}
ext_nets = api.neutron.network_list(self.request,
**search_opts)
ext_net_dict = OrderedDict((n['id'], n.name_or_id)
for n in ext_nets)
except Exception as e:
msg = _('Unable to retrieve a list of external networks "%s".') % e
exceptions.handle(self.request, msg)
ext_net_dict = {}
return ext_net_dict
def _set_external_network(self, router, ext_net_dict):
gateway_info = router.external_gateway_info
if gateway_info:
ext_net_id = gateway_info['network_id']
if ext_net_id in ext_net_dict:
gateway_info['network'] = ext_net_dict[ext_net_id]
else:
msg_params = {'ext_net_id': ext_net_id, 'router_id': router.id}
msg = _('External network "%(ext_net_id)s" expected but not '
'found for router "%(router_id)s".') % msg_params
messages.error(self.request, msg)
# gateway_info['network'] is just the network name, so putting
# in a smallish error message in the table is reasonable.
# Translators: The usage is "<UUID of ext_net> (Not Found)"
gateway_info['network'] = pgettext_lazy(
'External network not found',
u'%s (Not Found)') % ext_net_id
class DetailView(tabs.TabbedTableView):
tab_group_class = rdtabs.RouterDetailTabs
template_name = 'horizon/common/_detail.html'
failure_url = reverse_lazy('horizon:project:routers:index')
network_url = 'horizon:project:networks:detail'
page_title = "{{ router.name|default:router.id }}"
@memoized.memoized_method
def _get_data(self):
try:
router_id = self.kwargs['router_id']
router = api.neutron.router_get(self.request, router_id)
router.set_id_as_name_if_empty(length=0)
except Exception:
msg = _('Unable to retrieve details for router "%s".') \
% router_id
exceptions.handle(self.request, msg, redirect=self.failure_url)
if router.external_gateway_info:
ext_net_id = router.external_gateway_info['network_id']
router.external_gateway_info['network_url'] = reverse(
self.network_url, args=[ext_net_id])
try:
ext_net = api.neutron.network_get(self.request, ext_net_id,
expand_subnet=False)
ext_net.set_id_as_name_if_empty(length=0)
router.external_gateway_info['network'] = ext_net.name
except Exception:
msg = _('Unable to retrieve an external network "%s".') \
% ext_net_id
exceptions.handle(self.request, msg)
router.external_gateway_info['network'] = ext_net_id
return router
@memoized.memoized_method
def _get_ports(self):
try:
ports = api.neutron.port_list(self.request,
device_id=self.kwargs['router_id'])
except Exception:
ports = []
msg = _('Unable to retrieve port details.')
exceptions.handle(self.request, msg)
return ports
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
router = self._get_data()
table = rtables.RoutersTable(self.request)
context["router"] = router
context["url"] = self.failure_url
context["actions"] = table.render_row_actions(router)
context['dvr_supported'] = api.neutron.get_feature_permission(
self.request, "dvr", "get")
context['ha_supported'] = api.neutron.get_feature_permission(
self.request, "l3-ha", "get")
choices = rtables.STATUS_DISPLAY_CHOICES
router.status_label = filters.get_display_label(choices, router.status)
choices = rtables.ADMIN_STATE_DISPLAY_CHOICES
router.admin_state_label = (
filters.get_display_label(choices, router.admin_state))
return context
def get_tabs(self, request, *args, **kwargs):
router = self._get_data()
ports = self._get_ports()
return self.tab_group_class(request, router=router,
ports=ports, **kwargs)
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateForm
form_id = "create_router_form"
modal_header = _("Create Router")
template_name = 'project/routers/create.html'
success_url = reverse_lazy("horizon:project:routers:index")
page_title = _("Create Router")
submit_label = _("Create Router")
submit_url = reverse_lazy("horizon:project:routers:create")
class UpdateView(forms.ModalFormView):
form_class = project_forms.UpdateForm
form_id = "update_router_form"
modal_header = _("Edit Router")
template_name = 'project/routers/update.html'
success_url = reverse_lazy("horizon:project:routers:index")
page_title = _("Update Router")
submit_label = _("Save Changes")
submit_url = "horizon:project:routers:update"
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
args = (self.kwargs['router_id'],)
context["router_id"] = self.kwargs['router_id']
context['submit_url'] = reverse(self.submit_url, args=args)
|
JoakimLindbom/ago | tellstick/tellstickduo.py | Python | gpl-3.0 | 7,092 | 0.005781 |
#!/usr/bin/python
AGO_TELLSTICK_VERSION = '0.0.9'
"""
############################################
#
# Tellstick Duo class
#
# Date of origin: 2014-01-25
#
__author__ = "Joakim Lindbom"
__copyright__ = "Copyright 2014, Joakim Lindbom"
__credits__ = ["Joakim Lindbom", "The ago control team"]
__license__ = "GPL Public License Version 3"
__maintainer__ = "Joakim Lindbom"
__email__ = 'Joakim.Lindbom@gmail.com'
__status__ = "Experimental"
__version__ = AGO_TELLSTICK_VERSION
############################################
"""
from tellstickbase import tellstickbase
import td
class tellstickduo(tellstickbase):
"""Class used for Tellstick & Tellstick Duo devices"""
def __get__(self, obj, objtype=None):
pass
def __set__(self, obj, val):
pass
def __delete__(self, obj):
pass
def __init__(self):
self.SensorEvent = None
def init(self, SensorPollDelay, TempUnits):
# TELLSTICK_BELL | TELLSTICK_TOGGLE | TELLSTICK_LEARN | TELLSTICK_EXECUTE | TELLSTICK_UP | TELLSTICK_DOWN | TELLSTICK_STOP
td.init(defaultMethods=td.TELLSTICK_TURNON | td.TELLSTICK_TURNOFF | td.TELLSTICK_DIM)
self.log.info("Init executed")
def close(self):
return td.close()
def turnOn(self, devId):
resCode = td.turnOn(devId)
return self.getErrorString(resCode).lower()
def turnOff(self, devId):
resCode = td.turnOff(devId)
return self.getErrorString(resCode).lower()
def getErrorString(self, resCode):
return td.getErrorString(resCode)
def dim(self, devId, level):
resCode = td.dim(devId, level)
return self.getErrorString(resCode).lower()
def getName(self, devId):
return td.getName(devId)
def methodsReadable(self, method, default):
return td.methodsReadable(method, default)
def getNumberOfDevices(self):
return td.getNumberOfDevices()
def getNumberOfSensors(self):
return td.getNumberOfDevices() # wrong
def getDeviceId(self, i):
return td.getDeviceId(i)
def getModel(self, devId):
return td.getModel(devId)
def registerDeviceEvent(self, deviceEvent):
return td.registerDeviceEvent(deviceEvent)
def registerDeviceChangedEvent(self, deviceEvent):
return td.registerDeviceChangedEvent(deviceEvent)
def newTempSensor(self, devId, model, value):
self.log.debug("New temperature sensor intercepted: devId=" + devId + " model=" + model)
s = {
"id" : devId,
"description" : "",
"model" : model,
"new" : True,
"temp" : float(value), # C/F
"offset" : 0.0, # TODO: Add to parameter & config file
"lastTemp" : float(-274.0),
"isTempSensor" : True,
"isHumiditySensor" : False,
"ignore" : False}
return s
def newHumiditySensor(self, devId, model, value):
self.log.debug("New humidity sensor intercepted: devId=" + devId + " model=" + model)
s = {
"id" : devId,
"description" : "",
"model" : model,
"new" : True,
"humidity" : float(value),
"offset" : 0.0, # TODO: Add to parameter & config file
"lastHumidity" : float(-999.0),
"isHumiditySensor" : True,
"isTempSensor" : False,
"ignore" : False}
return s
def SensorEventInterceptor(self, protocol, model, id, dataType, value, timestamp, callbackId):
devId = 'S' + str(id) # Prefix 'S' to make sure name doesn't clash with self-defined devices
devIdT = devId + "-temp"
devIdH = devId + "-hum"
# self.checkIgnore(self, devId) #TODO: Add once moved
self.log.trace("SensorEventInterceptor called for " + devId)
if str(id) not in self.ignoreDevices:
# New temperature sensor?
if devIdT not in self.sensors and dataType & td.TELLSTICK_TEMPERATURE == td.TELLSTICK_TEMPERATURE:
self.sensors[devIdT] = self.newTempSensor(devIdT, model, value)
# New humidity sensor?
if devIdH not in self.sensors and dataType & td.TELLSTICK_HUMIDITY == td.TELLSTICK_HUMIDITY:
self.sensors[devIdH] = self.newHumiditySensor(devIdH, model, value)
# Call registered callback
self.SensorEvent(protocol, model, devId, dataType, value, timestamp, callbackId)
def registerSensorEvent(self, deviceEvent):
self.SensorEvent = deviceEvent
return td.registerSensorEvent(self.SensorEventInterceptor)
def listSensors(self):
sensors = td.listSensors()
if len(sensors) != 0:
for id, value in sensors.iteritems():
self.log.trace("listSensors: devId: %s ", str(id))
if id not in self.ignoreDevices:
devId = str(id) + "-temp"
if devId not in self.sensors:
if value["isTempSensor"]:
# New temp sensor found
self.sensors[devId] = self.newTempSensor(devId, value["model"], value["temp"])
devId = str(id) + "-hum"
if devId not in self.sensors:
if value["isHumiditySensor"]:
# New hum sensor found
self.sensors[devId] = self.newHumiditySensor(devId, value["model"], value["humidity"])
if not value["new"]:
continue
return self.sensors
def listSwitches(self):
if len(self.switches) == 0:
for i in range(self.getNumberOfDevices()):
devId = self.getDeviceId(i)
model = self.getModel(devId)
if ('switch' in model or 'dimmer' in model):
dev = {
"id" : devId,
"name" : self.getName(devId),
"model" : model}
if 'dimmer' in model:
dev["isDimmer"] = True
else:
dev["isDimmer"] = False
self.switches[devId] = dev
return self.switches
def listRemotes(self):
self.log.trace("listRemotes start")
if len(self.remotes) == 0:
self.log.info("getNumberOfDevices=" + str(self.getNumberOfDevices()))
for i in range(self.getNumberOfDevices()):
devId = self.getDeviceId(i)
model = self.getModel(devId)
self.log.info("devId=" + str(devId) + " model=" + model)
if 'switch' not in model and 'dimmer' not in model:
dev = {
"id" : str(devId),
"name" : self.getName(devId),
"model" : model}
self.log.info("devId=" + str(devId) + " model=" + model)
self.remotes[devId] = dev
return self.remotes
|
pquentin/libcloud | libcloud/test/storage/test_azure_blobs.py | Python | apache-2.0 | 38,350 | 0.000078 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
import tempfile
from io import BytesIO
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.utils.py3 import b
from libcloud.utils.py3 import basestring
from libcloud.common.types import InvalidCredsError
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Container, Object
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
from libcloud.storage.drivers.azure_blobs import AzureBlobsStorageDriver
from libcloud.storage.drivers.azure_blobs import AZURE_BLOCK_MAX_SIZE
from libcloud.storage.drivers.azure_blobs import AZURE_PAGE_CHUNK_SIZE
from libcloud.test import unittest
from libcloud.test import MockHttp, generate_random_data # pylint: disable-msg=E0611
from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611
from libcloud.test.secrets import STORAGE_AZURE_BLOBS_PARAMS
class AzureBlobsMockHttp(MockHttp, unittest.TestCase):
fixtures = StorageFileFixtures('azure_blobs')
base_headers = {}
def _UNAUTHORIZED(self, method, url, body, headers):
return (httplib.UNAUTHORIZED,
'',
self.base_headers,
httplib.responses[httplib.UNAUTHORIZED])
def _list_containers_EMPTY(self, method, url, body, headers):
body = self.fixtures.load('list_containers_empty.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_containers(self, method, url, body, headers):
query_string = urlparse.urlsplit(url).query
query = parse_qs(query_string)
if 'marker' not in query:
body = self.fixtures.load('list_containers_1.xml')
else:
body = self.fixtures.load('list_containers_2.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _test_container_EMPTY(self, method, url, body, headers):
if method == 'DELETE':
body = u''
return (httplib.ACCEPTED,
body,
self.base_headers,
httplib.responses[httplib.ACCEPTED])
else:
body = self.fixtures.load('list_objects_empty.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _new__container_INVALID_NAME(self, method, url, body, headers):
return (httplib.BAD_REQUEST,
body,
self.base_headers,
httplib.responses[httplib.BAD_REQUEST])
def _test_container(self, method, url, body, headers):
query_string = urlparse.urlsplit(url).query
query = parse_qs(query_string)
if 'marker' not in query:
body = self.fixtures.load('list_objects_1.xml')
else:
body = self.fixtures.load('list_objects_2.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _test_container100(self, method, url, body, headers):
body = ''
if method != 'HEAD':
return (httplib.BAD_REQUEST,
body,
self.base_headers,
httplib.responses[httplib.BAD_REQUEST])
return (httplib.NOT_FOUND,
body,
self.base_headers,
httplib.responses[httplib.NOT_FOUND])
def _test_container200(self, method, url, body, headers):
body = ''
if method != 'HEAD':
return (httplib.BAD_REQUEST,
body,
self.base_headers,
httplib.responses[httplib.BAD_REQUEST])
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT'
headers['x-ms-lease-status'] = 'unlocked'
headers['x-ms-lease-state'] = 'available'
headers['x-ms-meta-meta1'] = 'value1'
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _test_container200_test(self, method, url, body, headers):
body = ''
if method != 'HEAD':
return (httplib.BAD_REQUEST,
body,
self.base_headers,
httplib.responses[httplib.BAD_REQUEST])
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT'
headers['content-length'] = '12345'
headers['content-type'] = 'application/zip'
headers['x-ms-blob-type'] = 'Block'
headers['x-ms-lease-status'] = 'unlocked'
headers['x-ms-lease-state'] = 'available'
headers['x-ms-meta-rabbits'] = 'monkeys'
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _test2_test_list_containers(self, method, url, body, headers):
# test_get_object
body = self.fixtures.load('list_containers.xml')
headers = {'content-type': 'application/zip',
'etag': '"e31208wqsdoj329jd"',
'x-amz-meta-rabbits': 'monkeys',
'content-length': '12345',
'last-modified': 'Thu, 13 Sep 2012 07:13:22 GMT'
}
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _new_container_ALREADY_EXISTS(self, method, url, body, headers):
# test_create_container
return (httplib.CONFLICT,
body,
headers,
httplib.responses[httplib.CONFLICT])
def _new_container(self, method, url, body, headers):
# test_create_container, test_delete_container
headers = {}
if method == 'PUT':
status = httplib.CREATED
headers['etag'] = '0x8CFB877BB56A6FB'
headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT'
headers['x-ms-lease-status'] = 'unlocked'
headers['x-ms-lease-state'] = 'available'
headers['x-ms-meta-meta1'] = 'value1'
elif method == 'DELETE':
status = httplib.NO_CONTENT
return (status,
body,
headers,
httplib.responses[status])
def _new_container_DOESNT_EXIST(self, method, url, body, headers):
# test_delete_container
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.NOT_FOUND])
def _foo_bar_container_NOT_FOUND(self, method, url, body, headers):
# test_delete_container_not_found
return (httplib.NOT_FOUND,
body,
headers,
|
aroth-arsoft/arsoft-web-crashupload | app/crashdump/model.py | Python | gpl-3.0 | 24,814 | 0.00129 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;
import re
import copy
import os
from trac.resource import Resource, ResourceNotFound
from trac.util.translation import _
from trac.util.datefmt import from_utimestamp, to_utimestamp, utc, utcmax
from trac.util.compat import set, sorted
from trac.util.text import empty
from trac.ticket.model import Ticket
from .api import CrashDumpSystem
from uuid import UUID
from datetime import datetime
def _fixup_cc_list(cc_value):
"""Fix up cc list separators and remove duplicates."""
cclist = []
for cc in re.split(r'[;,\s]+', cc_value):
if cc and cc not in cclist:
cclist.append(cc)
return ', '.join(cclist)
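# A worked example derived from the regex above: mixed separators collapse
# to ', ' and duplicates are dropped.
#   _fixup_cc_list('alice;bob, alice carol')  ->  'alice, bob, carol'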
class CrashDump(object):
# Fields that must not be modified directly by the user
protected_fields = ('resolution', 'status', 'time', 'changetime')
__db_fields = [
'uuid',
'type',
'status',
'priority',
'milestone',
'component',
'severity',
'summary',
'description',
'keywords',
'owner',
'reporter',
'cc',
'crashtime',
'reporttime',
'uploadtime',
'changetime',
'closetime',
'applicationname',
'applicationfile',
'uploadhostname',
'uploadusername',
'crashhostname',
'crashusername',
'productname',
'productcodename',
'productversion',
'producttargetversion',
'buildtype',
'buildpostfix',
'machinetype',
'systemname',
'osversion',
'osrelease',
'osmachine',
'minidumpfile',
'minidumpreporttextfile',
'minidumpreportxmlfile',
'minidumpreporthtmlfile',
'coredumpfile',
'coredumpreporttextfile',
'coredumpreportxmlfile',
'coredumpreporthtmlfile',
]
@staticmethod
def id_is_valid(num):
return 0 < int(num) <= 1L << 31
@staticmethod
def uuid_is_valid(uuid):
if isinstance(uuid, UUID):
return True
else:
try:
UUID(uuid)
return True
            except Exception:
return False
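    # Illustrative: uuid_is_valid('12345678-1234-5678-1234-567812345678') -> True;
    # uuid_is_valid('not-a-uuid') -> False (the failed UUID() parse is caught above).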
def __init__(self, id=None, uuid=None, env=None, version=None, must_exist=True, row=None):
self.id = None
self.status = None
self.uuid = uuid
self.env = env
self._changes = None
self.resource = Resource('crash', uuid, version)
self.fields = CrashDumpSystem(self.env).get_crash_fields()
        self.std_fields, self.custom_fields, self.time_fields = [], [], []
for f in self.fields:
if f.get('custom'):
self.custom_fields.append(f['name'])
else:
self.std_fields.append(f['name'])
if f['type'] == 'time':
self.time_fields.append(f['name'])
self.values = {}
if uuid is not None:
self._fetch_crash_by_uuid(uuid, must_exist=must_exist)
elif id is not None:
crash_id = CrashDumpSystem.get_crash_id(id)
if crash_id is None:
raise ResourceNotFound(_("Crash %(id)s does not exist.",
id=id), _("Invalid crash identifier"))
self._fetch_crash_by_id(crash_id, must_exist=must_exist)
elif row is not None:
self._load_from_record(row)
else:
self._init_defaults()
self._old = {}
def _init_defaults(self):
for field in self.fields:
default = None
if field['name'] in self.protected_fields:
# Ignore for new - only change through workflow
pass
elif not field.get('custom'):
default = self.env.config.get('ticket',
'default_' + field['name'])
else:
default = self._custom_field_default(field)
if default:
self.values.setdefault(field['name'], default)
exists = property(lambda self: self.id is not None)
has_minidump = property(lambda self: self['minidumpfile'] or self['minidumpreporttextfile'] or self['minidumpreportxmlfile'] or self['minidumpreporthtmlfile'] )
has_coredump = property(lambda self: self['coredumpfile'] or self['coredumpreporttextfile'] or self['coredumpreportxmlfile'] or self['coredumpreporthtmlfile'] )
def __getitem__(self, name):
return self.values.get(name)
def __setitem__(self, name, value):
"""Log crash modifications so the table crashdump_change can be updated
"""
if name in self.values and self.values[name] == value:
return
if name not in self._old: # Changed field
if name in self.time_fields:
self._old[name] = to_utimestamp(self.values.get(name))
else:
self._old[name] = self.values.get(name)
elif self._old[name] == value: # Change of field reverted
del self._old[name]
if value:
if isinstance(value, list):
if len(value) == 1:
value = value[0]
else:
raise ValueError(_("Multi-values field %s not supported yet: %s") % (name, value))
field = [field for field in self.fields if field['name'] == name]
if field:
field_type = field[0].get('type')
if field_type == 'time':
pass
elif field_type != 'textarea':
if isinstance(value, basestring):
value = value.strip()
self.values[name] = value
def get_value_or_default(self, name):
"""Return the value of a field or the default value if it is undefined
"""
try:
value = self.values[name]
return value if value is not empty else self.get_default(name)
except KeyError:
pass
def get_default(self, name):
"""Return the default value of a field."""
field = [field for field in self.fields if field['name'] == name]
if field:
return field[0].get('value', '')
def populate(self, values):
"""Populate the ticket with 'suitable' values from a dictionary"""
field_names = [f['name'] for f in self.fields]
for name in [name for name in values.keys() if name in field_names]:
self[name] = values.get(name, '')
# We have to do an extra trick to catch unchecked checkboxes
for name in [name for name in values.keys() if name[9:] in field_names
and name.startswith('checkbox_')]:
if name[9:] not in values:
self[name[9:]] = '0'
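    # Illustrative usage (sketch; 'reproducible' is an assumed custom field):
    #   crash.populate({'summary': 'Segfault on start', 'checkbox_reproducible': ''})
    # copies known fields and resets the unchecked 'reproducible' checkbox to '0'.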
def _load_from_record(self, row):
for i, field in enumerate(self.std_fields):
#print('_load_from_record %i, %s=%s' % (i, field, row[i+1]))
if i == 0:
self.id = row[0]
elif field == 'uuid':
self.uuid = row[i + 1]
else:
value = row[i + 1]
if value is None:
self.values[field] = empty
elif field in self.time_fields:
self.values[field] = from_utimestamp(value)
else:
self.values[field] = value
def _fetch_crash_by_id(self, id, must_exist=True):
row = None
if self.id_is_valid(id):
# Fetch the standard crashdump fields
for row in self.env.db_query("SELECT id,%s FROM crashdump WHERE id=%%s" %
','.join(self.std_fields), (id,)):
break
if not row and must_exist:
raise ResourceNotFound(_("Crash %(id)s does not exist.",
id=id), _("Invalid crash identifier"))
if row:
self.id = id
self._load_from_record(row)
    def _fetch_crash_by_uuid(self, uuid, must_exist=True):
        row = None
        if self.uuid_is_valid(uuid):
            # Fetch the standard crashdump fields by UUID (mirrors _fetch_crash_by_id)
            for row in self.env.db_query("SELECT id,%s FROM crashdump WHERE uuid=%%s" %
                                         ','.join(self.std_fields), (str(uuid),)):
                break
        if not row and must_exist:
            raise ResourceNotFound(_("Crash %(uuid)s does not exist.",
                                     uuid=uuid), _("Invalid crash identifier"))
        if row:
            self._load_from_record(row)
|
michhar/notedown
|
notedown/__init__.py
|
Python
|
bsd-2-clause
| 338
| 0
|
from __future__ import absolute_import
from .notedown import *
from .main import convert, markdown_template, __version__
# avoid having to require the notebook to install notedown
try:
from .contentsmanager import NotedownContentsManager
except ImportError:
    NotedownContentsManager = 'You need to install the jupyter notebook.'
|
andrei4ka/fuel-web-redhat
|
nailgun/nailgun/test/integration/test_db_refresh.py
|
Python
|
apache-2.0
| 1,879
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from unittest import TestCase
from paste.fixture import TestApp
from sqlalchemy import orm
from nailgun.db import engine
from nailgun.db import flush
from nailgun.db import NoCacheQuery
from nailgun.db.sqlalchemy.models import Node
from nailgun.wsgi import build_app
class TestDBRefresh(TestCase):
def setUp(self):
self.app = TestApp(build_app().wsgifunc())
self.db = orm.scoped_session(
orm.sessionmaker(bind=engine, query_cls=NoCacheQuery)
)()
self.db2 = orm.scoped_session(
orm.sessionmaker(bind=engine, query_cls=NoCacheQuery)
)()
self.default_headers = {
"Content-Type": "application/json"
}
flush()
def test_session_update(self):
node = Node()
node.mac = u"ASDFGHJKLMNOPR"
node.timestamp = datetime.now()
self.db.add(node)
self.db.commit()
node2 = self.db2.query(Node).filter(
Node.id == node.id
).first()
node2.mac = u"12345678"
self.db2.add(node2)
self.db2.commit()
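        # Both sessions use NoCacheQuery, so re-querying below should refresh the
        # `node` instance held by the first session with the MAC committed via db2.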
self.db.query(Node).filter(
Node.id == node.id
).first()
self.assertEqual(node.mac, u"12345678")
|
alexykot/bitfund
|
bitfund/project/migrations/0006_auto__del_field_project_dependencies_is_public.py
|
Python
|
gpl-3.0
| 11,471
| 0.008195
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Project_Dependencies.is_public'
db.delete_column('project_project_dependencies', 'is_public')
def backwards(self, orm):
# Adding field 'Project_Dependencies.is_public'
db.add_column('project_project_dependencies', 'is_public',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
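    # Illustrative (South) usage: applied with `./manage.py migrate project`,
    # reversed by migrating back to the previous migration of this app.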
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'project.project': {
'Meta': {'object_name': 'Project'},
'brief': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['project.ProjectCategory']", 'symmetrical': 'False'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 18, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_refused_to_give_to_bitfund': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'maintainer': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['auth.User']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unclaimed'", 'max_length': '80'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'project.project_dependencies': {
'Meta': {'object_name': 'Project_Dependencies'},
'brief': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 18, 0, 0)'}),
'dependee_project': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'related_name': "'dependee_project'", 'to': "orm['project.Project']"}),
'depender_project': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'related_name': "'depender_project'", 'to': "orm['project.Project']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'redonation_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'redonation_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'project.projectcategory': {
'Meta': {'object_name': 'ProjectCategory'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 18, 0, 0)'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'project.projectgoal': {
            'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectGoal'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '0'}),
'brief': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 18, 0, 0)'}),
'date_ending': ('django.db.models.fields.DateTimeField', [], {}),
'date_starting': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 18, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'long_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['project.Project']"})
        }
    }

    complete_apps = ['project']
|
rbuffat/pyidf
|
tests/test_chillerelectricreformulatedeir.py
|
Python
|
apache-2.0
| 10,596
| 0.005379
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.plant_heating_and_cooling_equipment import ChillerElectricReformulatedEir
log = logging.getLogger(__name__)
class TestChillerElectricReformulatedEir(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_chillerelectricreformulatedeir(self):
pyidf.validation_level = ValidationLevel.error
obj = ChillerElectricReformulatedEir()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_reference_capacity = 0.0001
obj.reference_capacity = var_reference_capacity
# real
var_reference_cop = 0.0001
obj.reference_cop = var_reference_cop
# real
var_reference_leaving_chilled_water_temperature = 4.4
obj.reference_leaving_chilled_water_temperature = var_reference_leaving_chilled_water_temperature
# real
var_reference_leaving_condenser_water_temperature = 5.5
obj.reference_leaving_condenser_water_temperature = var_reference_leaving_condenser_water_temperature
# real
var_reference_chilled_water_flow_rate = 0.0001
obj.reference_chilled_water_flow_rate = var_reference_chilled_water_flow_rate
# real
var_reference_condenser_water_flow_rate = 0.0001
obj.reference_condenser_water_flow_rate = var_reference_condenser_water_flow_rate
# object-list
var_cooling_capacity_function_of_temperature_curve_name = "object-list|Cooling Capacity Function of Temperature Curve Name"
obj.cooling_capacity_function_of_temperature_curve_name = var_cooling_capacity_function_of_temperature_curve_name
# object-list
var_electric_input_to_cooling_output_ratio_function_of_temperature_curve_name = "object-list|Electric Input to Cooling Output Ratio Function of Temperature Curve Name"
obj.electric_input_to_cooling_output_ratio_function_of_temperature_curve_name = var_electric_input_to_cooling_output_ratio_function_of_temperature_curve_name
# alpha
var_electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_type = "LeavingCondenserWaterTemperature"
obj.electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_type = var_electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_type
# object-list
var_electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_name = "object-list|Electric Input to Cooling Output Ratio Function of Part Load Ratio Curve Name"
obj.electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_name = var_electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_name
# real
var_minimum_part_load_ratio = 0.0
obj.minimum_part_load_ratio = var_minimum_part_load_ratio
# real
var_maximum_part_load_ratio = 0.0001
obj.maximum_part_load_ratio = var_maximum_part_load_ratio
# real
var_optimum_part_load_ratio = 0.0001
obj.optimum_part_load_ratio = var_optimum_part_load_ratio
# real
var_minimum_unloading_ratio = 0.0
obj.minimum_unloading_ratio = var_minimum_unloading_ratio
# node
var_chilled_water_inlet_node_name = "node|Chilled Water Inlet Node Name"
obj.chilled_water_inlet_node_name = var_chilled_water_inlet_node_name
# node
var_chilled_water_outlet_node_name = "node|Chilled Water Outlet Node Name"
obj.chilled_water_outlet_node_name = var_chilled_water_outlet_node_name
# node
var_condenser_inlet_node_name = "node|Condenser Inlet Node Name"
obj.condenser_inlet_node_name = var_condenser_inlet_node_name
# node
var_condenser_outlet_node_name = "node|Condenser Outlet Node Name"
obj.condenser_outlet_node_name = var_condenser_outlet_node_name
# real
var_fraction_of_compressor_electric_consumption_rejected_by_condenser = 0.50005
obj.fraction_of_compressor_electric_consumption_rejected_by_condenser = var_fraction_of_compressor_electric_consumption_rejected_by_condenser
# real
var_leaving_chilled_water_lower_temperature_limit = 21.21
obj.leaving_chilled_water_lower_temperature_limit = var_leaving_chilled_water_lower_temperature_limit
# alpha
var_chiller_flow_mode_type = "ConstantFlow"
obj.chiller_flow_mode_type = var_chiller_flow_mode_type
# real
var_design_heat_recovery_water_flow_rate = 0.0
obj.design_heat_recovery_water_flow_rate = var_design_heat_recovery_water_flow_rate
# node
var_heat_recovery_inlet_node_name = "node|Heat Recovery Inlet Node Name"
obj.heat_recovery_inlet_node_name = var_heat_recovery_inlet_node_name
# node
var_heat_recovery_outlet_node_name = "node|Heat Recovery Outlet Node Name"
obj.heat_recovery_outlet_node_name = var_heat_recovery_outlet_node_name
# real
var_sizing_factor = 0.0001
obj.sizing_factor = var_sizing_factor
# real
var_condenser_heat_recovery_relative_capacity_fraction = 0.5
obj.condenser_heat_recovery_relative_capacity_fraction = var_condenser_heat_recovery_relative_capacity_fraction
# object-list
var_heat_recovery_inlet_high_temperature_limit_schedule_name = "object-list|Heat Recovery Inlet High Temperature Limit Schedule Name"
        obj.heat_recovery_inlet_high_temperature_limit_schedule_name = var_heat_recovery_inlet_high_temperature_limit_schedule_name
# node
var_heat_recovery_leaving_temperature_setpoint_node_name = "node|Heat Recovery Leaving Temperature Setpoint Node Name"
obj.heat_recovery_leaving_temperature_setpoint_node_name = var_heat_recovery_leaving_temperature_setpoint_node_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.chillerelectricreformulatedeirs[0].name, var_name)
self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].reference_capacity, var_reference_capacity)
self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].reference_cop, var_reference_cop)
self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].reference_leaving_chilled_water_temperature, var_reference_leaving_chilled_water_temperature)
self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].reference_leaving_condenser_water_temperature, var_reference_leaving_condenser_water_temperature)
self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].reference_chilled_water_flow_rate, var_reference_chilled_water_flow_rate)
self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].reference_condenser_water_flow_rate, var_reference_condenser_water_flow_rate)
self.assertEqual(idf2.chillerelectricreformulatedeirs[0].cooling_capacity_function_of_temperature_curve_name, var_cooling_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.chillerelectricreformulatedeirs[0].electric_input_to_cooling_output_ratio_function_of_temperature_curve_name, var_electric_input_to_cooling_output_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.chillerelectricreformulatedeirs[0].electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_type, var_electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_type)
self.assertEqual(idf2.chillerelectricreformulatedeirs[0].electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_name, var_electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_name)
self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].minimum_part_load_ratio, var_minimum_part_load_ratio)
        self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].maximum_part_load_ratio, var_maximum_part_load_ratio)
        self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].optimum_part_load_ratio, var_optimum_part_load_ratio)
        self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].minimum_unloading_ratio, var_minimum_unloading_ratio)
        self.assertEqual(idf2.chillerelectricreformulatedeirs[0].chilled_water_inlet_node_name, var_chilled_water_inlet_node_name)
        self.assertEqual(idf2.chillerelectricreformulatedeirs[0].chilled_water_outlet_node_name, var_chilled_water_outlet_node_name)
        self.assertEqual(idf2.chillerelectricreformulatedeirs[0].condenser_inlet_node_name, var_condenser_inlet_node_name)
        self.assertEqual(idf2.chillerelectricreformulatedeirs[0].condenser_outlet_node_name, var_condenser_outlet_node_name)
        self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].fraction_of_compressor_electric_consumption_rejected_by_condenser, var_fraction_of_compressor_electric_consumption_rejected_by_condenser)
        self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].leaving_chilled_water_lower_temperature_limit, var_leaving_chilled_water_lower_temperature_limit)
        self.assertEqual(idf2.chillerelectricreformulatedeirs[0].chiller_flow_mode_type, var_chiller_flow_mode_type)
        self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].design_heat_recovery_water_flow_rate, var_design_heat_recovery_water_flow_rate)
        self.assertEqual(idf2.chillerelectricreformulatedeirs[0].heat_recovery_inlet_node_name, var_heat_recovery_inlet_node_name)
        self.assertEqual(idf2.chillerelectricreformulatedeirs[0].heat_recovery_outlet_node_name, var_heat_recovery_outlet_node_name)
        self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].sizing_factor, var_sizing_factor)
        self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].condenser_heat_recovery_relative_capacity_fraction, var_condenser_heat_recovery_relative_capacity_fraction)
        self.assertEqual(idf2.chillerelectricreformulatedeirs[0].heat_recovery_inlet_high_temperature_limit_schedule_name, var_heat_recovery_inlet_high_temperature_limit_schedule_name)
        self.assertEqual(idf2.chillerelectricreformulatedeirs[0].heat_recovery_leaving_temperature_setpoint_node_name, var_heat_recovery_leaving_temperature_setpoint_node_name)
|
Thortoise/Super-Snake
|
Blender/animation_nodes-master/nodes/mesh/generation/line.py
|
Python
|
gpl-3.0
| 799
| 0.010013
|
import bpy
from .... base_types.node import AnimationNode
class LineMeshNode(bpy.types.Node, AnimationNode):
bl_idname = "an_LineMeshNode"
bl_label = "Line Mesh"
def create(self):
self.newInput("Vector", "Start", "start")
self.newInput("Vector", "End", "end", value = [0, 0, 10])
self.newInput("Integer", "Steps", "steps", value = 2, minValu
|
e = 2)
self.newOutput("Vector List", "Vertices", "vertices")
self.newOutput("Edge Indices List", "Edge Indices", "edgeIndices")
def execute(self, start, end, steps):
steps = max(steps, 2)
divisor = steps - 1
vertices = [start * (1 - i / divisor) + end * i / divisor for i in range(steps)]
        edges = [(i, i + 1) for i in range(steps - 1)]
return vertices, edges
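# Illustrative output: start=(0,0,0), end=(0,0,10), steps=3 gives vertices at
# z = 0, 5, 10 and edges [(0, 1), (1, 2)] -- `steps` points, steps-1 edges.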
|
uw-it-aca/mdot-developers
|
travis-ci/manage.py
|
Python
|
apache-2.0
| 252
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "travis-ci.settings")
from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
Saruus/drPencilcode
|
app/migrations/0015_survey_question2.py
|
Python
|
agpl-3.0
| 435
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0014_survey'),
]
operations = [
migrations.AddField(
model_name='survey',
name='question2',
field=models.CharField(default=50, max_length=50),
preserve_default=False,
),
]
|
davy39/eric
|
Helpviewer/History/HistoryDialog.py
|
Python
|
gpl-3.0
| 5,112
| 0.003912
|
# -*- coding: utf-8 -*-
# Copyright (c) 2009 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a dialog to manage history.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSignal, Qt, QUrl
from PyQt5.QtGui import QFontMetrics, QCursor
from PyQt5.QtWidgets import QDialog, QMenu, QApplication
from E5Gui.E5TreeSortFilterProxyModel import E5TreeSortFilterProxyModel
from .HistoryModel import HistoryModel
from .Ui_HistoryDialog import Ui_HistoryDialog
class HistoryDialog(QDialog, Ui_HistoryDialog):
"""
Class implementing a dialog to manage history.
@signal openUrl(QUrl, str) emitted to open a URL in the current tab
@signal newUrl(QUrl, str) emitted to open a URL in a new tab
"""
openUrl = pyqtSignal(QUrl, str)
newUrl = pyqtSignal(QUrl, str)
def __init__(self, parent=None, manager=None):
"""
Constructor
        @param parent reference to the parent widget (QWidget)
@param manager reference to the history manager object (HistoryManager)
"""
super(HistoryDialog, self).__init__(parent)
self.setupUi(self)
self.__historyManager = manager
if self.__historyManager is None:
import Helpviewer.HelpWindow
self.__historyManager = \
Helpviewer.HelpWindow.HelpWindow.historyManager()
self.__model = self.__historyManager.historyTreeModel()
self.__proxyModel = E5TreeSortFilterProxyModel(self)
self.__proxyModel.setSortRole(HistoryModel.DateTimeRole)
        self.__proxyModel.setFilterKeyColumn(-1)
self.__proxyModel.setSourceModel(self.__model)
self.historyTree.setModel(self.__proxyModel)
self.historyTree.expandAll()
fm = QFontMetrics(self.font())
header = fm.width("m") * 40
self.historyTree.header().resizeSection(0, header)
self.historyTree.header().setStretchLastSection(True)
self.historyTree.setContextMenuPolicy(Qt.CustomContextMenu)
self.historyTree.activated.connect(self.__activated)
self.historyTree.customContextMenuRequested.connect(
self.__customContextMenuRequested)
self.searchEdit.textChanged.connect(
self.__proxyModel.setFilterFixedString)
self.removeButton.clicked.connect(self.historyTree.removeSelected)
self.removeAllButton.clicked.connect(self.__historyManager.clear)
self.__proxyModel.modelReset.connect(self.__modelReset)
def __modelReset(self):
"""
Private slot handling a reset of the tree view's model.
"""
self.historyTree.expandAll()
def __customContextMenuRequested(self, pos):
"""
        Private slot to handle the context menu request for the history tree.
@param pos position the context menu was requested (QPoint)
"""
menu = QMenu()
idx = self.historyTree.indexAt(pos)
idx = idx.sibling(idx.row(), 0)
if idx.isValid() and not self.historyTree.model().hasChildren(idx):
menu.addAction(
self.tr("&Open"), self.__openHistoryInCurrentTab)
menu.addAction(
self.tr("Open in New &Tab"), self.__openHistoryInNewTab)
menu.addSeparator()
menu.addAction(self.tr("&Copy"), self.__copyHistory)
menu.addAction(self.tr("&Remove"), self.historyTree.removeSelected)
menu.exec_(QCursor.pos())
def __activated(self, idx):
"""
Private slot to handle the activation of an entry.
@param idx reference to the entry index (QModelIndex)
"""
self.__openHistory(
QApplication.keyboardModifiers() & Qt.ControlModifier)
def __openHistoryInCurrentTab(self):
"""
Private slot to open a history entry in the current browser tab.
"""
self.__openHistory(False)
def __openHistoryInNewTab(self):
"""
Private slot to open a history entry in a new browser tab.
"""
self.__openHistory(True)
def __openHistory(self, newTab):
"""
Private method to open a history entry.
@param newTab flag indicating to open the history entry in a new tab
(boolean)
"""
idx = self.historyTree.currentIndex()
if newTab:
self.newUrl.emit(
idx.data(HistoryModel.UrlRole),
idx.data(HistoryModel.TitleRole))
else:
self.openUrl.emit(
idx.data(HistoryModel.UrlRole),
idx.data(HistoryModel.TitleRole))
def __copyHistory(self):
"""
Private slot to copy a history entry's URL to the clipboard.
"""
idx = self.historyTree.currentIndex()
if not idx.parent().isValid():
return
url = idx.data(HistoryModel.UrlStringRole)
clipboard = QApplication.clipboard()
clipboard.setText(url)
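# Minimal usage sketch (host window and slot name assumed):
#   dlg = HistoryDialog(parent=mainWindow)
#   dlg.openUrl.connect(mainWindow.openUrlInCurrentTab)
#   dlg.show()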
|
cgmb/guardonce
|
tests/test_guess_guard.py
|
Python
|
mit
| 4,227
| 0.012302
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2018 Cordell Bloor
# Published under the MIT License
from nose.tools import *
import guardonce.util as go
def test_ok():
contents = '''
#ifndef MATCH_H
#define MATCH_H
'''
g,s,e = go.guess_guard(contents)
assert_equals(g, 'MATCH_H')
assert_equals(s, 1)
assert_equals(e, 32)
def test_ok_space_before_hash():
contents = '''
#ifndef MATCH_H
#define MATCH_H
'''
g,s,e = go.guess_guard(contents)
assert_equals(g, 'MATCH_H')
assert_equals(s, 1)
assert_equals(e, 33)
def test_ok_space_after_hash():
contents = '''
# ifndef MATCH_H
# define MATCH_H
'''
g,s,e = go.guess_guard(contents)
assert_equals(g, 'MATCH_H')
assert_equals(s, 1)
assert_equals(e, 34)
@raises(ValueError)
def test_no_ifndef():
contents = '''
#ifdef MATCH_H
#define MATCH_H
'''
go.guess_guard(contents)
@raises(ValueError)
def test_no_define():
contents = '''
#ifndef MATCH_H
#defne MATCH_H
'''
go.guess_guard(contents)
@raises(ValueError)
def test_mismatched_define_symbol():
contents = '''
#ifndef MATCH_H
#define MISMATCH_H
'''
go.guess_guard(contents)
@raises(ValueError)
def test_extra_junk_on_ifndef():
contents = '''
#ifndef MATCH_H WEIRD_HUH
#define MATCH_H
'''
go.guess_guard(contents)
@raises(ValueError)
def test_extra_junk_on_define():
contents = '''
#ifndef MATCH_H
#define MATCH_H WEIRD_HUH
'''
go.guess_guard(contents)
def test_extra_whitespace_on_ifndef():
contents = '''
#ifndef MATCH_H
#define MATCH_H
'''
g,s,e = go.guess_guard(contents)
assert_equals(g, 'MATCH_H')
assert_equals(s, 1)
assert_equals(e, 33)
def test_extra_whitespace_on_define():
contents = '''
#ifndef MATCH_H
#define MATCH_H
'''
g,s,e = go.guess_guard(contents)
assert_equals(g, 'MATCH_H')
assert_equals(s, 1)
assert_equals(e, 33)
def test_define_with_value_1():
contents = '''
#ifndef MATCH_H
#define MATCH_H 1
'''
g,s,e = go.guess_guard(contents)
assert_equals(g, 'MATCH_H')
assert_equals(s, 1)
assert_equals(e, 34)
@raises(ValueError)
def test_define():
contents = '''
#ifndef ONE
#define ONE 1
#endif
'''
go.guess_guard(contents)
@raises(ValueError)
def test_define_with_space():
contents = '''
#ifndef ONE
#define ONE 1
#endif
'''
go.guess_guard(contents)
def test_if_defined():
contents = '''
#if !defined(MATCH_H)
#define MATCH_H
'''
g,s,e = go.guess_guard(contents)
assert_equals(g, 'MATCH_H')
assert_equals(s, 1)
assert_equals(e, 38)
def test_if_defined_no_parentheses():
contents = '''
#if !defined MATCH_H
#define MATCH_H
'''
g,s,e = go.guess_guard(contents)
assert_equals(g, 'MATCH_H')
assert_equals(s, 1)
assert_equals(e, 37)
def test_if_defined_space_after_bang():
contents = '''
#if ! defined(MATCH_H)
#define MATCH_H
'''
g,s,e = go.guess_guard(contents)
assert_equals(g, 'MATCH_H')
assert_equals(s, 1)
assert_equals(e, 39)
def test_if_defined_space_before_parentheses():
contents = '''
#if !defined (MATCH_H)
#define MATCH_H
'''
g,s,e = go.guess_guard(contents)
assert_equals(g, 'MATCH_H')
assert_equals(s, 1)
assert_equals(e, 39)
def test_if_defined_space_before_symbol():
contents = '''
#if !defined( MATCH_H)
#define MATCH_H
'''
g,s,e = go.guess_guard(contents)
assert_equals(g, 'MATCH_H')
assert_equals(s, 1)
assert_equals(e, 39)
def test_if_defined_space_after_symbol():
    contents = '''
#if !defined(MATCH_H )
#define MATCH_H
'''
g,s,e = go.guess_guard(contents)
assert_equals(g, 'MATCH_H')
assert_equals(s, 1)
assert_equals(e, 39)
def test_if_defined_space_before_newline():
contents = '''
#if !defined(MATCH_H)
#define MATCH_H
'''
g,s,e = go.guess_guard(contents)
assert_equals(g, 'MATCH_H')
assert_equals(s, 1)
assert_equals(e, 39)
@raises(ValueError)
def test_if_defined_extra_junk_before_newline():
contents = '''
#if !defined(MATCH_H) WEIRD_HUH
#define MATCH_H
'''
go.guess_guard(contents)
@raises(ValueError)
def test_if_defined_extra_junk_in_defined():
contents = '''
#if !defined(MATCH_H WEIRD_HUH)
#define MATCH_H
'''
go.guess_guard(contents)
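# Illustrative call (offsets follow the convention asserted above):
#   go.guess_guard('\n#ifndef FOO_H\n#define FOO_H\n') -> ('FOO_H', 1, 28)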
|
wevoice/wesub
|
apps/videos/management/commands/load_thumbnails.py
|
Python
|
agpl-3.0
| 4,320
| 0.00787
|
from django.core.management.base import BaseCommand
from django.conf import settings
from utils.amazon import default_s3_store
from videos.models import Video, VIDEO_TYPE_FLV, VIDEO_TYPE_HTML5
import urllib
import os
import commands
import sys
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.db.models import ObjectDoesNotExist
VIDEO_UPLOAD_PATH = getattr(settings, 'VIDEO_UPLOAD_PATH', \
os.path.join(settings.MEDIA_ROOT, 'videos'))
VIDEO_THUMBNAILS_FOLDER = getattr(settings, 'VIDEO_THUMBNAILS_PATH', 'videos/thumbnails/')
THUMBNAILS_PATH = os.path.join(settings.MEDIA_ROOT, VIDEO_THUMBNAILS_FOLDER)
class Command(BaseCommand):
def handle(self, *args, **options):
print 'Run load thumbnail command'
self.verbosity = int(options.get('verbosity', 1))
self.s3_store = self.init_s3()
if not os.path.exists(VIDEO_UPLOAD_PATH):
os.makedirs(VIDEO_UPLOAD_PATH)
if not os.path.exists(THUMBNAILS_PATH):
os.makedirs(THUMBNAILS_PATH)
qs = Video.objects.filter(thumbnail='', videourl__original=True, videourl__type__in=[VIDEO_TYPE_FLV, VIDEO_TYPE_HTML5])
for video in qs:
self.print_to_console(u'Handling %s' % video.__unicode__())
try:
video_url = video.videourl_set.filter(original=True)[:1].get()
except ObjectDoesNotExist:
continue
path = self.get_file_path(video, video_url)
if not os.path.exists(path):
self.print_to_console(u'Saving...')
                urllib.urlretrieve(video_url.url, path)
self.print_to_console(u'Video saved.')
else:
self.print_to_console(u'File exist.')
self.get_thumbnail(video, path)
self.print_to_console(u'-----------------')
#--- Save original thumbnails to S3 Store ---
self.print_to_console(u'Save original thumbnails to S3 Store...')
        qs = Video.objects.exclude(thumbnail='').filter(s3_thumbnail='')
for video in qs:
self.print_to_console(u'Handling %s' % video.__unicode__())
name = video.thumbnail.strip('/').split('/')[-1]
cf = ContentFile(urllib.urlopen(video.thumbnail).read())
video.s3_thumbnail.save('%s/%s' % (video.video_id, name), cf, True)
def print_to_console(self, msg, min_verbosity=1):
if self.verbosity >= min_verbosity:
print msg
def init_s3(self):
if not default_s3_store:
raise ImproperlyConfigured('Have not settings for thumbnails uploading to S3 Store.')
return default_s3_store
def get_thumbnail(self, video, path):
self.print_to_console(u'Get thumbnail...')
grabimage = "ffmpeg -y -i %s -vframes 1 -ss 00:00:%s -an -vcodec png -f rawvideo %s"
thumbnailfilename = "%s.png" % video.video_id
thumbnailpath = os.path.normpath(os.path.join(THUMBNAILS_PATH, thumbnailfilename))
        grab_result = 'Command has not been run yet'
try:
grab_result = commands.getoutput(grabimage % (path, 10, thumbnailpath))
if not os.path.exists(thumbnailpath):
raise Exception('Error in converting: %s' % grab_result)
if not os.path.getsize(thumbnailpath):
grab_result = commands.getoutput(grabimage % (path, 5, thumbnailpath))
self.print_to_console(u'Saving in S3 Store...')
cf = ContentFile(open(thumbnailpath, 'rb').read())
video.s3_thumbnail.save(thumbnailfilename, cf, True)
video.thumbnail = video.s3_thumbnail.url
video.save()
os.remove(thumbnailpath)
os.remove(path)
except:
if settings.DEBUG:
raise
self.handle_error(sys.exc_info())
def get_file_path(self, video, video_url):
type = video_url.url.split('.')[-1]
name = '%s.%s' % (video.video_id, type)
return os.path.join(VIDEO_UPLOAD_PATH, name)
|
TeachBoost/ansible
|
ansible.py
|
Python
|
mit
| 1,499
| 0
|
#! /usr/bin/python
import bottle
import settings
from controller import admin as admin_controller
from controller import email as email_controller
app = application = bottle.Bottle()
# Base url for regular users
app.route(settings.BASEPATH, 'GET', admin_controller.index)
app.route(settings.BASEPATH + '/', 'GET', admin_controller.index)
app.route(
settings.BASEPATH + '/tasks/<id>',
'GET',
admin_controller.read_user_tasks
)
app.route(
settings.BASEPATH + '/update/<id>',
'POST',
admin_controller.update_self
)
# Email handler
email = bottle.Bottle()
app.mount(settings.EMAIL_PATH, email)
email.route('/', 'POST', email_controller.receive_email)
email.route('/', 'GET', email_controller.test_form)
email.route('', 'GET', email_controller.test_form)
# Ansible admin
admin = bottle.Bottle()
app.mount(settings.ADMIN_PATH, admin)
admin.route('/tasks', 'GET', admin_controller.read_tasks)
admin.route('/create', 'POST', admin_controller.create_person)
admin.route('/delete', 'POST', admin_controller.delete_people)
admin.route('/<id>', 'GET', admin_controller.read_person)
admin.route('/<id>', 'POST', admin_controller.update_person)
admin.route('/', 'GET', admin_controller.admin)
# Static files
app.route(
settings.STATIC_PATH + '/<type>/<filename>',
'GET',
lambda **kwargs: bottle.static_file(
filename=kwargs['filename'], root='static/' + kwargs['type']
)
)
if __name__ == '__main__':
bottle.run(app=app, reloader=True, **settings.SERVER)
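# Illustrative launch (settings.SERVER keys assumed): with
# settings.SERVER = {'host': 'localhost', 'port': 8080}, running
# `python ansible.py` serves the app with bottle's auto-reloader enabled.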
|
heyf/cloaked-octo-adventure
|
leetcode/1420_build-array-where-you-can-find-the-maximum-exactly-k-comparisons.py
|
Python
|
mit
| 550
| 0.003636
|
# https://leetcode.com/problems/build-array-where-you-can-find-the-maximum-exactly-k-comparisons
class Solution:
def numOfArrays(self, n: int, m: int, k: int) -> int:
M = 1000000007
ret = 0
for i in range(k, m+1):
ret += ((i-1)**(k-1)) * (i**(n-k))
return ret % M
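# The scratch harness below checks the closed form against known answers,
# printing "O" on a match and "X" otherwise.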
# n = 2
# m = 3
# k = 1
# ans = 6
# n = 9
# m = 1
# k = 1
# ans = 1
n = 50
m = 100
k = 25
ans = 34549172
# n = 37
# m = 17
# k = 7
# ans = 418930126
sl = Solution()
ret = sl.numOfArrays(n, m, k)
print(ret, "O" if ret==ans else "X")
|
materialsproject/pymatgen
|
pymatgen/phonon/tests/test_bandstructure.py
|
Python
|
mit
| 3,536
| 0.00198
|
import json
import os
import unittest
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
from pymatgen.util.testing import PymatgenTest
class PhononBandStructureSymmLineTest(PymatgenTest):
def setUp(self):
with open(
os.path.join(PymatgenTest.TEST_FILES_DIR, "NaCl_phonon_bandstructure.json"),
encoding="utf-8",
) as f:
d = json.load(f)
self.bs = PhononBandStructureSymmLine.from_dict(d)
with open(
            os.path.join(PymatgenTest.TEST_FILES_DIR, "Si_phonon_bandstructure.json"),
encoding="utf-8",
) as f:
d = json.load(f)
self.bs2 = PhononBandStructureSymmLine.from_dict(d)
def test_basic(self):
self.assertAlmostEqual(self.bs.bands[1][10], 0.7753555184)
self.assertAlmostEqual(self.bs.bands[5][100], 5.2548379776)
self.assertArrayEqual(self.bs.bands.shape, (6, 204))
self.assertArrayEqual(self.bs.eigendisplacements.shape, (6, 204, 2, 3))
self.assertArrayAlmostEqual(
self.bs.eigendisplacements[3][50][0],
[0.0 + 0.0j, 0.14166569 + 0.04098339j, -0.14166569 - 0.04098339j],
)
        self.assertTrue(self.bs.has_eigendisplacements)
self.assertArrayEqual(self.bs.min_freq()[0].frac_coords, [0, 0, 0])
self.assertAlmostEqual(self.bs.min_freq()[1], -0.03700895020)
self.assertTrue(self.bs.has_imaginary_freq())
self.assertFalse(self.bs.has_imaginary_freq(tol=0.5))
self.assertArrayAlmostEqual(self.bs.asr_breaking(), [-0.0370089502, -0.0370089502, -0.0221388897])
self.assertEqual(self.bs.nb_bands, 6)
self.assertEqual(self.bs.nb_qpoints, 204)
self.assertArrayAlmostEqual(self.bs.qpoints[1].frac_coords, [0.01, 0, 0])
def test_nac(self):
self.assertTrue(self.bs.has_nac)
self.assertFalse(self.bs2.has_nac)
self.assertAlmostEqual(self.bs.get_nac_frequencies_along_dir([1, 1, 0])[3], 4.6084532143)
self.assertIsNone(self.bs.get_nac_frequencies_along_dir([0, 1, 1]))
self.assertIsNone(self.bs2.get_nac_frequencies_along_dir([0, 0, 1]))
self.assertArrayAlmostEqual(
self.bs.get_nac_eigendisplacements_along_dir([1, 1, 0])[3][1],
[(0.1063906409128248 + 0j), 0j, 0j],
)
self.assertIsNone(self.bs.get_nac_eigendisplacements_along_dir([0, 1, 1]))
self.assertIsNone(self.bs2.get_nac_eigendisplacements_along_dir([0, 0, 1]))
def test_branches(self):
self.assertEqual(self.bs.branches[0]["end_index"], 50)
self.assertEqual(self.bs.branches[1]["start_index"], 51)
self.assertEqual(self.bs.branches[2]["name"], "Y-Gamma")
        self.assertEqual(self.bs.get_branch(10)[0]["name"], "Gamma-X")
self.assertEqual(len(self.bs.branches), 4)
def test_dict_methods(self):
s = self.bs.as_dict()
self.assertIsNotNone(s)
self.assertIsNotNone(json.dumps(s))
s = self.bs2.as_dict()
self.assertIsNotNone(s)
self.assertIsNotNone(json.dumps(s))
s = self.bs2.as_phononwebsite()
self.assertIsNotNone(s)
self.assertIsNotNone(json.dumps(s))
self.assertMSONable(self.bs)
self.assertMSONable(self.bs2)
def test_write_methods(self):
self.bs2.write_phononwebsite("test.json")
def tearDown(self):
if os.path.isfile("test.json"):
os.remove("test.json")
if __name__ == "__main__":
unittest.main()
|
toenuff/treadmill
|
lib/python/treadmill/cli/show.py
|
Python
|
apache-2.0
| 4,580
| 0
|
"""Manage Treadmill app manifest."""
from __future__ import absolute_import
import logging
import urllib
import click
from .. import cli
from treadmill import restclient
from treadmill import context
_LOGGER = logging.getLogger(__name__)
_STATE_FORMATTER = cli.make_formatter(cli.InstanceStatePrettyFormatter)
_ENDPOINT_FORMATTER = cli.make_formatter(cli.EndpointPrettyFormatter)
_APP_FORMATTER = cli.make_formatter(cli.AppPrettyFormatter)
def _show_state(apis, match):
"""Show cell state."""
url = '/state/'
if match:
url += '?' + urllib.urlencode([('match', match)])
response = restclient.get(apis, url)
cli.out(_STATE_FORMATTER(response.json()))
def _show_list(apis, match, states):
"""Show list of instnces in given state."""
url = '/state/'
if match:
url += '?' + urllib.urlencode([('match', match)])
response = restclient.get(apis, url)
names = [item['name']
for item in response.json() if item['state'] in states]
for name in names:
print name
def _show_endpoints(apis, pattern, endpoint, proto):
"""Show cell endpoints."""
url = '/endpoint/%s' % urllib.quote(pattern)
if endpoint:
if proto:
url += '/' + proto
else:
url += '/*'
url += '/' + endpoint
response = restclient.get(apis, url)
endpoints = [{
'name': end['name'],
'proto': end['proto'],
'endpoint': end['endpoint'],
'hostport': '{0}:{1}'.format(end['host'], end['port'])
} for end in response.json()]
cli.out(_ENDPOINT_FORMATTER(endpoints))
def _show_instance(apis, instance_id):
"""Show instance manifest."""
url = '/instance/%s' % urllib.quote(instance_id)
response = restclient.get(apis, url)
cli.out(_APP_FORMATTER(response.json()))
def init():
"""Return top level command handler."""
ctx = {}
@click.group()
@click.option('--cell', required=True,
envvar='TREADMILL_CELL',
callback=cli.handle_context_opt,
expose_value=False)
@click.option('--api', required=False, help='API url to use.',
metavar='URL',
envvar='TREADMILL_STATEAPI')
def show(api):
"""Show state of scheduled applications."""
ctx['api'] = api
@show.command()
@cli.ON_REST_EXCEPTIONS
@click.option('--match', help='Application name pattern match')
def state(match):
"""Show state of Treadmill scheduled instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_state(apis, match)
@show.command()
@cli.ON_REST_EXCEPTIONS
@click.option('--match', help='Application name pattern match')
def pending(match):
"""Show pending instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['pending'])
@show.command()
@cli.ON_REST_EXCEPTIONS
@click.option('--match', help='Application name pattern match')
def running(match):
"""Show running instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['running'])
@show.command()
@cli.ON_REST_EXCEPTIONS
@click.option('--match', help='Application name pattern match')
def scheduled(match):
"""Show scheduled instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['running', 'scheduled'])
@show.command(name='all')
@cli.ON_REST_EXCEPTIONS
@click.option('--match', help='Application name pattern match')
def _all(match):
"""Show scheduled instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['pending', 'running', 'scheduled'])
@show.command()
    @cli.ON_REST_EXCEPTIONS
@click.argument('pattern')
@click.argument('endpoint', required=False)
@click.argument('proto', required=False)
def endpoints(pattern, endpoint, proto):
"""Show application endpoints."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_endpoints(apis, pattern, endpoint, proto)
@show.command()
@cli.ON_REST_EXCEPTIONS
    @click.argument('instance_id')
def instance(instance_id):
"""Show scheduled instance manifest."""
apis = context.GLOBAL.cell_api(ctx['api'])
return _show_instance(apis, instance_id)
del _all
del running
del scheduled
del pending
del instance
del state
del endpoints
return show
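# Illustrative CLI session (top-level command wiring assumed):
#   treadmill show running --match 'my-app*'
#   treadmill show endpoints 'proid.my-app*' http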
|
mickypaganini/SSI2016-jet-clustering
|
spartyjet-4.0.2_mac/examples_py/pythiaExample.py
|
Python
|
mit
| 3,668
| 0.008179
|
#!/usr/bin/env python
# $Id: pythiaExample.py 545 2012-01-18 06:10:03Z cvermilion $
#----------------------------------------------------------------------
# Copyright (c) 2010-12, Pierre-Antoine Delsart, Kurtis Geerlings, Joey Huston,
# Brian Martin, and Christopher Vermilion
#
#----------------------------------------------------------------------
# This file is part of SpartyJet.
#
# SpartyJet is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# SpartyJet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SpartyJet; if not, write to the Free Software
# Foundation, Inc.:
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#----------------------------------------------------------------------
from spartyjet import SpartyJet as SJ, fastjet as fj, writeCurrentFile
from ROOT import TPythia8, gSystem
#===============================================
# Example showing how to pass events directly from Pythia to SpartyJet.
# Requires that you have built ROOT's Pythia 6 and/or 8 interface.
gSystem.Load('libEG') # import common event gen lib
# Create a jet builder(MessageLevel = INFO)------
builder = SJ.JetBuilder(SJ.INFO)
######### PYTHIA8 #########
# Load Libraries
gSystem.Load('libEGPythia8')
# Create TPythia8 object
pythia = TPythia8()
# Single W production
pythia.ReadString('WeakSingleBoson:ffbar2W = on')
# Force decay W->ev
pythia.ReadString("24:onMode = off")
pythia.ReadString("24:onIfAny = 11 12")
# UE
pythia.ReadString("PartonLevel:MI = off")
# ISR,FSR, beam remnant
pythia.ReadString("PartonLevel:ISR = on")
pythia.ReadString("PartonLevel:FSR = on")
pythia.ReadString("PartonLevel:Remnants = on")
# Hadronize
pythia.ReadString("HadronLevel:all = on")
# Initialize for LHC
pythia.Initialize(2212,2212, 7000.)
# Initialize for Tevatron
#pythia.Initialize( 2212, -2212, 1960.)
###########################
######### PYTHIA6 #########
## Load Library
#ROOT.gSystem.Load("libEGPythia6")
## Create TPythia6 object
#pythia = ROOT.TPythia6()
## Turn on W+jet production
#pythia.SetMSEL(14)
## Turn off all decay modes except W->e+nu
#decayChOff = [190,191,192,194,195,196,198,199,200,207,208]
#for dc in decayChOff:
# pythia.SetMDME(dc,1,0)
## Turn on W->e+nu
#pythia.SetMDME(206,1,1)
## Initialize for LHC
#pythia.Initialize('cms','p+','p+',7000.)
## Initialize for Tevatron
##pythia.Initialize('cms','p+','p-',1960.)
###########################
# Create input object and add to builder --------
input = SJ.PythiaInput(pythia)
# Event Particle printout (Only usable in Pythia8)
#input.printEvent(True)
builder.configure_input(input)
# Create jet finder and add to builder ----------
analysis = SJ.JetAnalysis(SJ.FastJet.FastJetFinder('AntiKt4', fj.antikt_algorithm, 0.4))
builder.add_analysis(analysis)
# Configure text output--------------------------
builder.add_text_output("../data/output/pythia.dat")
# Configure ntuple output------------------------
outfile = "../data/output/pythia.root"
builder.configure_output("SpartyJet_Tree", outfile)
# Run SpartyJet----------------------------------
builder.process_events(10)
# Save this script in the ROOT file (needs to go after process_events or it
# gets over-written!)
writeCurrentFile(outfile)
|
joelstanner/django-imager
|
imager/imager_images/urls.py
|
Python
|
mit
| 1,528
| 0.003927
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
import views
from django.contrib.auth.decorators import login_required
urlpatterns = patterns('',
url(r'^library$', views.library, name='profile-library'),
url(r'^stream$', views.stream, name='profile-stream'),
url(r'^add_photo/', login_required(views.PhotoCreate.as_view(
template_name="imager_images/create_form.html",
success_url='/images/library')),
name='add_photo'),
url(r'^add_album/', login_required(views.AlbumCreate.as_view(
template_name="imager_images/create_form.html",
success_url='/images/library')),
name='add_album'),
url(r'^update_album/(?P<pk>\d+)/$', login_required(views.AlbumUpdate.as_view(
template_name="imager_images/update_album.html",
success_url='/images/library')),
name='update_album'),
url(r'^update_photo/(?P<pk>\d+)/$', login_required(views.PhotoUpdate.as_view(
        template_name="imager_images/update_photo.html",
success_url='/images/library')),
name='update_photo'),
url(r'^delete_photo/(?P<pk>\d+)/$', login_required(views.PhotoDelete.as_view(
template_name="imager_images/delete_form.html",
success_url='/images/library')),
name='delete_photo'),
url(r'^delete_album/(?P<pk>\d+)/$', login_required(views.AlbumDelete.as_view(
template_name="imager_images/delete_form.html",
success_url='/images/library')),
name='delete_album'),
)
|
megaumi/django
|
tests/generic_relations/models.py
|
Python
|
bsd-3-clause
| 4,327
| 0.001156
|
"""
Generic relations
Generic relations let an object have a foreign key to any object through a
content-type/object-id field. A ``GenericForeignKey`` field can point to any
object, be it animal, vegetable, or mineral.
The canonical example is tags (although this example implementation is *far*
from complete).
"""
from __future__ import unicode_literals
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class TaggedItem(models.Model):
"""A tag on an item."""
tag = models.SlugField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
ordering = ["tag", "content_type__model"]
def __str__(self):
return self.tag
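# Illustrative tagging (the `lion` instance is assumed):
#   TaggedItem(content_object=lion, tag='yellow').save()
# attaches a tag to any model through the content-type/object-id pair above.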
class ValuableTaggedItem(TaggedItem):
value = models.PositiveIntegerField()
class AbstractComparison(models.Model):
comparative = models.CharField(max_length=50)
content_type1 = models.ForeignKey(ContentType, models.CASCADE, related_name="comparative1_set")
object_id1 = models.PositiveIntegerField()
first_obj = GenericForeignKey(ct_field="content_type1", fk_field="object_id1")
@python_2_unicode_compatible
class Comparison(AbstractComparison):
"""
    A model that tests having multiple GenericForeignKeys. One is defined
through an inherited abstract model and one defined directly on this class.
"""
content_type2 = models.ForeignKey(ContentType, models.CASCADE, related_name="comparative2_set")
object_id2 = models.PositiveIntegerField()
other_obj = GenericForeignKey(ct_field="content_type2", fk_field="object_id2")
def __str__(self):
return "%s is %s than %s" % (self.first_
|
obj, self.comparative, self.other_obj)
@python_2_unicode_compatible
class Animal(models.Model):
common_name = models.CharField(max_length=150)
latin_name = models.CharField(max_length=150)
tags = GenericRelation(TaggedItem, related_query_name='animal')
comparisons = GenericRelation(Comparison,
object_id_field="object_id1",
content_type_field="content_type1")
def __str__(self):
return self.common_name
@python_2_unicode_compatible
class Vegetable(models.Model):
name = models.CharField(max_length=150)
is_yucky = models.BooleanField(default=True)
tags = GenericRelation(TaggedItem)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Mineral(models.Model):
name = models.CharField(max_length=150)
hardness = models.PositiveSmallIntegerField()
# note the lack of an explicit GenericRelation here...
def __str__(self):
return self.name
class GeckoManager(models.Manager):
def get_queryset(self):
return super(GeckoManager, self).get_queryset().filter(has_tail=True)
class Gecko(models.Model):
has_tail = models.BooleanField(default=False)
objects = GeckoManager()
# To test fix for #11263
class Rock(Mineral):
tags = GenericRelation(TaggedItem)
class ManualPK(models.Model):
id = models.IntegerField(primary_key=True)
tags = GenericRelation(TaggedItem, related_query_name='manualpk')
class ForProxyModelModel(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
obj = GenericForeignKey(for_concrete_model=False)
title = models.CharField(max_length=255, null=True)
class ForConcreteModelModel(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
obj = GenericForeignKey()
class ConcreteRelatedModel(models.Model):
bases = GenericRelation(ForProxyModelModel, for_concrete_model=False)
class ProxyRelatedModel(ConcreteRelatedModel):
class Meta:
proxy = True
# To test fix for #7551
class AllowsNullGFK(models.Model):
content_type = models.ForeignKey(ContentType, models.SET_NULL, null=True)
object_id = models.PositiveIntegerField(null=True)
content_object = GenericForeignKey()
|
biomodels/BIOMD0000000462
|
BIOMD0000000462/model.py
|
Python
|
cc0-1.0
| 427
| 0.009368
|
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'BIOMD0000000462.xml')
with open(sbmlFilePath, 'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
|
toontownfunserver/Panda3D-1.9.0
|
direct/pyinst/archive_rt.py
|
Python
|
bsd-3-clause
| 7,405
| 0.017016
|
#
# Gordon McMillan (as inspired and influenced by Greg Stein)
#
# subclasses may not need marshal or struct, but since they're
# builtin, importing is safe.
#
# While an Archive is really an abstraction for any "filesystem
# within a file", it is tuned for use with imputil.FuncImporter.
# This assumes it contains python code objects, indexed by the
# the internal name (ie, no '.py').
# See carchive.py for a more general archive (contains anything)
# that can be understood by a C program.
#archive_rt is a stripped down version of MEInc.Dist.archive.
#It has had all building logic removed.
#It's purpose is to bootstrap the Python installation.
import marshal
import struct
class Archive:
""" A base class for a repository of python code objects.
The extract method is used by imputil.ArchiveImporter
to get code objects by name (fully qualified name), so
an enduser "import a.b" would become
extract('a.__init__')
extract('a.b')
"""
MAGIC = 'PYL\0'
HDRLEN = 12 # default is MAGIC followed by python's magic, int pos of toc
TOCPOS = 8
TRLLEN = 0 # default - no trailer
TOCTMPLT = {} #
os = None
def __init__(self, path=None, start=0):
"Initialize an Archive. If path is omitted, it will be an empty Archive."
self.toc = None
self.path = path
self.start = start
import imp
self.pymagic = imp.get_magic()
if path is not None:
self.lib = open(self.path, 'rb')
self.checkmagic()
self.loadtoc()
####### Sub-methods of __init__ - override as needed #############
def checkmagic(self):
""" Overridable.
Check to see if the file object self.lib actually has a file
we understand.
"""
self.lib.seek(self.start) #default - magic is at start of file
if self.lib.read(len(self.MAGIC)) != self.MAGIC:
raise RuntimeError, "%s is not a valid %s archive file" \
% (self.path, self.__class__.__name__)
if self.lib.read(len(self.pymagic)) != self.pymagic:
raise RuntimeError, "%s has version mismatch to dll" % (self.path)
    def loadtoc(self):
        """ Overridable.
Default: After magic comes an int (4 byte native) giving the
position of the TOC within self.lib.
Default: The TOC is a marshal-able string.
"""
self.lib.seek(self.start + self.TOCPOS)
(offset,) = struct.unpack('=i', self.lib.read(4))
self.lib.seek(self.start + offset)
self.toc = marshal.load(self.lib)
######## This is what is called by FuncImporter #######
## Since an Archive is flat, we ignore parent and modname.
def get_code(self, parent, modname, fqname):
print "parent: ", parent
print "modname: ", modname
print "fqname: ", fqname
        rslt = self.extract(fqname) # None if not found, (ispkg, code) otherwise
if rslt is None:
return None
ispkg, code = rslt
if ispkg:
return ispkg, code, {'__path__': []}
return rslt
####### Core method - Override as needed #########
def extract(self, name):
""" Get the object corresponding to name, or None.
For use with imputil ArchiveImporter, object is a python code object.
'name' is the name as specified in an 'import name'.
'import a.b' will become:
extract('a') (return None because 'a' is not a code object)
extract('a.__init__') (return a code object)
extract('a.b') (return a code object)
Default implementation:
self.toc is a dict
self.toc[name] is pos
self.lib has the code object marshal-ed at pos
"""
ispkg, pos = self.toc.get(name, (0, None))
if pos is None:
return None
self.lib.seek(self.start + pos)
return ispkg, marshal.load(self.lib)
########################################################################
# Informational methods
def contents(self):
"""Return a list of the contents
Default implementation assumes self.toc is a dict like object.
Not required by ArchiveImporter.
"""
return self.toc.keys()
########################################################################
# Building
####### Top level method - shouldn't need overriding #######
## def build(self, path, lTOC):
## """Create an archive file of name 'path'.
## lTOC is a 'logical TOC' - a list of (name, path, ...)
## where name is the internal name, eg 'a'
## and path is a file to get the object from, eg './a.pyc'.
## """
## self.path = path
## self.lib = open(path, 'wb')
## #reserve space for the header
## if self.HDRLEN:
## self.lib.write('\0'*self.HDRLEN)
##
## #create an empty toc
##
## if type(self.TOCTMPLT) == type({}):
## self.toc = {}
## else: # assume callable
## self.toc = self.TOCTMPLT()
##
## for tocentry in lTOC:
## self.add(tocentry) # the guts of the archive
##
## tocpos = self.lib.tell()
## self.save_toc(tocpos)
## if self.TRLLEN:
## self.save_trailer(tocpos)
## if self.HDRLEN:
## self.update_headers(tocpos)
## self.lib.close()
##
##
## ####### manages keeping the internal TOC and the guts in sync #######
## def add(self, entry):
## """Override this to influence the mechanics of the Archive.
## Assumes entry is a seq beginning with (nm, pth, ...) where
## nm is the key by which we'll be asked for the object.
## pth is the name of where we find the object. Overrides of
## get_obj_from can make use of further elements in entry.
## """
## if self.os is None:
## import os
## self.os = os
## nm = entry[0]
## pth = entry[1]
## ispkg = self.os.path.splitext(self.os.path.basename(pth))[0] == '__init__'
## self.toc[nm] = (ispkg, self.lib.tell())
## f = open(entry[1], 'rb')
## f.seek(8) #skip magic and timestamp
## self.lib.write(f.read())
##
## def save_toc(self, tocpos):
## """Default - toc is a dict
## Gets marshaled to self.lib
## """
## marshal.dump(self.toc, self.lib)
##
## def save_trailer(self, tocpos):
## """Default - not used"""
## pass
##
## def update_headers(self, tocpos):
## """Default - MAGIC + Python's magic + tocpos"""
## self.lib.seek(self.start)
## self.lib.write(self.MAGIC)
## self.lib.write(self.pymagic)
## self.lib.write(struct.pack('=i', tocpos))
##############################################################
#
# ZlibArchive - an archive with compressed entries
#
class ZlibArchive(Archive):
MAGIC = 'PYZ\0'
TOCPOS = 8
HDRLEN = 12
TRLLEN = 0
TOCTMPLT = {}
LEVEL = 9
def __init__(self, path=None, offset=0):
Archive.__init__(self, path, offset)
# dynamic import so not imported if not needed
global zlib
import zlib
def extract(self, name):
(ispkg, pos, lngth) = self.toc.get(name, (0, None, 0))
if pos is None:
return None
self.lib.seek(self.start + pos)
return ispkg, marshal.loads(zlib.decompress(self.lib.read(lngth)))
## def add(self, entry):
## if self.os is None:
## import os
## self.os = os
## nm = entry[0]
## pth = entry[1]
## ispkg = self.os.path.splitext(self.os.path.basename(pth))[0] == '__init__'
## f = open(pth, 'rb')
## f.seek(8) #skip magic and timestamp
## obj = zlib.compress(f.read(), self.LEVEL)
## self.toc[nm] = (ispkg, self.lib.tell(), len(obj))
## self.lib.write(obj)
##
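## A minimal usage sketch, assuming a PYL archive was already built by the
## (commented-out) build logic above; the path below is hypothetical:
##   arch = Archive('/tmp/bundle.pyl')
##   rslt = arch.extract('mypkg.__init__')   # (ispkg, code) or None
##   if rslt:
##       ispkg, code = rslt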
|
acbilson/py-lex-lib
|
point.py
|
Python
|
gpl-3.0
| 593
| 0.016863
|
class Point:
Empty = " "
Full = "XX"
def __init__(self, x, y):
if not type(x) == int \
or not type(y) == int:
raise Exception('Can only assign int type to x or y value')
self.X = x
self.Y = y
self.Value = self.Empty
    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.X == other.X and self.Y == other.Y
        return False
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "P(" + str(self.X) + "," + str(self.Y) + ")"
    def __repr__(self):
return "P(" + str(self.X) + "," + str(self.Y) + ")"
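# A quick equality sketch using the class above:
# p, q = Point(1, 2), Point(1, 2)
# assert p == q and p != Point(2, 1)
# print(p)   # -> P(1,2)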
|
PennNLP/SLURP
|
download.py
|
Python
|
gpl-3.0
| 1,488
| 0.001344
|
#!/usr/bin/env python
"""Download data files needed by SLURP."""
# Copyright (C) 2012-2013 Constantine Lignos
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
import os
import gzip
from datamanager import download, unzip
# Assume that download.py and pennpipeline.py are located in the same
# directory
PIPELINE_NAME = "SUBTLEPipeline-master"
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
PIPELINE_URL = "https://github.com/PennNLP/SUBTLEPipeline/archive/master.zip"
FILENAME = os.path.join(ROOT_DIR, PIPELINE_NAME + ".zip")
download(PIPELINE_URL, FILENAME)
unzip(FILENAME, ROOT_DIR)
# Now we need to additionally unzip the model file contained in the pipeline
print "Decompressing parser model file..."
model_gz = gzip.open(os.path.join(ROOT_DIR, PIPELINE_NAME, "models", "wsjall.obj.gz"))
# Write the decompressed model and close both handles promptly
with open(os.path.join(ROOT_DIR, PIPELINE_NAME, "models", "wsjall.obj"), 'wb') as model_out:
    model_out.write(model_gz.read())
model_gz.close()
|
davidcurrie/ci.docker.websphere-traditional
|
network-deployment/appserver/updateHostName.py
|
Python
|
apache-2.0
| 1,804
| 0.033259
|
#####################################################################################
# #
# Script to update Hostname #
# #
# Usage : wsadmin -lang jython -f updateHostName.py <node name > < host name > #
# #
#####################################################################################
def updateHostName(nodename, hostname):
nlist = AdminConfig.list('ServerIndex')
attr=[["hostName", hostname ]]
AdminConfig.modify(nlist,attr)
AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName BOOTSTRAP_ADDRESS -host '+ hostname +' -port 2809 -modifyShared true]')
    AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName CSIV2_SSL_MUTUALAUTH_LISTENER_ADDRESS -host '+ hostname +' -port 9202 -modifyShared true]')
AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName CSIV2_SSL_SERVERAUTH_LISTENER_ADDRESS -host '+ hostname +' -port 9201 -modifyShared true]')
AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName ORB_LISTENER_ADDRESS -host '+ hostname +' -port 9900 -modifyShared true]')
AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName SAS_SSL_SERVERAUTH_LISTENER_ADDRESS -host '+ hostname +' -port 9901 -modifyShared true]')
AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName SOAP_CONNECTOR_ADDRESS -host '+ hostname +' -port 8878 -modifyShared true]')
AdminConfig.save()
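# Under wsadmin, sys.argv contains only the script's own arguments,
# so argv[0] is the node name and argv[1] the host name.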
updateHostName(sys.argv[0], sys.argv[1])
|
Arvin-X/swarm
|
setup.py
|
Python
|
gpl-3.0
| 1,370
| 0.024818
|
from setuptools import find_packages
from setuptools import setup
with open('README.md') as f:
setup(
name = 'swarm',
version = '0.5.0',
author = 'arvin.x',
author_email = 'arvin.x@icloud.com',
description = 'A modular distributed penetration testing tool',
license = 'GPLv3',
long_description = f.read(),
packages = find_packages(),
scripts = ['swarm.py','swarm_s.py'],
entry_points = {
'console_scripts': [
'swarm = swarm:main',
'swarm-s = swarm_s:main',
]
},
install_requires = [
'pymongo>=3.3.0',
'beautifulsoup4>=4.5.0',
'python-libnmap>=0.7.0',
'requests>=2.7.0',
'IPy>=0.83',
'argparse>=1.2.1',
],
data_files=[
('/etc/swarm',['swarm.conf']),
('/etc/swarm',['etc/dirsc.conf','etc/domainsc.conf','etc/nmap.conf',
'etc/sitemap.conf','etc/intruder.conf']),
],
classifiers = [
'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 2.6',
],
)
|
BobWhitelock/termtris
|
termtris/debug.py
|
Python
|
mit
| 74
| 0.027027
|
import config
def debug(*args):
if config.DEBUG:
        print(*args)
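# A minimal usage sketch, assuming config.py defines DEBUG = True:
# debug("spawned piece at", (4, 0))   # prints only when config.DEBUG is truthy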
|
LumPenPacK/NetworkExtractionFromImages
|
win_build/nefi2_win_amd64_msvc_2015/site-packages/numpy/compat/setup.py
|
Python
|
bsd-2-clause
| 371
| 0.005391
|
#!/usr/bin/env python
from __future__ import division, print_function
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('compat', parent_package, top_path)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
solex/django-odesk
|
django_odesk/auth/decorators.py
|
Python
|
bsd-3-clause
| 446
| 0.004484
|
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.models import Group
def group_required(names, login_url=None):
"""
Checks if the user is a member of a particular group (or at least one
group from the list)
"""
if not hasattr(names,'__iter__'):
names = [names]
    return user_passes_test(lambda u: u.groups.filter(name__in=names),
                            login_url=login_url)
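# A usage sketch with a hypothetical view:
# @group_required(['editors', 'staff'], login_url='/login/')
# def dashboard(request):
#     ...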
|
openlmi/openlmi-doc
|
doc/python/lmi/scripts/logicalfile/lf_cmd.py
|
Python
|
gpl-2.0
| 3,427
| 0.003502
|
# LogicalFile Management Providers
#
# Copyright (C) 2013-2014 Red Hat, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
#
# Authors: Jan Synacek <jsynacek@redhat.com>
#
"""
File and directory management functions.
Usage:
%(cmd)s list <directory> [ <depth> ]
%(cmd)s createdir <directory>
%(cmd)s deletedir <directory>
%(cmd)s show <target>
Commands:
list List a directory. When depth is specified, at most depth levels
will be listed recursively.
The files and directories are listed in a tree-like structure.
Possible listed file types are:
* F : Regular data file.
            * Dev : Device file. Can be either block or character device.
            * Dir : Directory.
            * P   : Pipe file.
* L : Symbolic link.
* S : Unix socket.
createdir Create a directory. The parent directory must exist.
deletedir Delete a directory. The directory must be empty.
show Show detailed information about target. Target can be any file
on the remote system.
"""
from lmi.scripts.common import command
from lmi.scripts.logicalfile import logicalfile
class Lister(command.LmiLister):
COLUMNS = ('Type', 'Name', 'Mode', 'Current SELinux Context')
OPT_NO_UNDERSCORES = True
CALLABLE = logicalfile.lf_list
class Show(command.LmiLister):
COLUMNS = ('Name', 'Value')
OPT_NO_UNDERSCORES = True
CALLABLE = logicalfile.lf_show
class CreateDir(command.LmiCheckResult):
EXPECT = None
CALLABLE = logicalfile.lf_createdir
class DeleteDir(command.LmiCheckResult):
EXPECT = None
CALLABLE = logicalfile.lf_deletedir
Lf = command.register_subcommands(
'Lf', __doc__,
{ 'list' : Lister,
'createdir' : CreateDir,
'deletedir' : DeleteDir,
'show' : Show,
},
)
|
crlang/sublime-text---front-end-config
|
Data/Packages/CodeFormatter/CodeFormatter.py
|
Python
|
mit
| 6,068
| 0.000989
|
# @author Avtandil Kikabidze
# @copyright Copyright (c) 2008-2015, Avtandil Kikabidze aka LONGMAN (akalongman@gmail.com)
# @link http://longman.me
# @license The MIT License (MIT)
import os
import sys
import sublime
import sublime_plugin
st_version = 2
if sublime.version() == '' or int(sublime.version()) > 3000:
st_version = 3
reloader_name = 'codeformatter.reloader'
# ST3 loads each package as a module, so it needs an extra prefix
if st_version == 3:
reloader_name = 'CodeFormatter.' + reloader_name
from imp import reload
if reloader_name in sys.modules:
reload(sys.modules[reloader_name])
try:
# Python 3
from .codeformatter.formatter import Formatter
except (ValueError):
# Python 2
from codeformatter.formatter import Formatter
# fix for ST2
cprint = globals()['__builtins__']['print']
debug_mode = False
def plugin_loaded():
cprint('CodeFormatter: Plugin Initialized')
# settings = sublime.load_settings('CodeFormatter.sublime-settings')
# debug_mode = settings.get('codeformatter_debug', False)
# if debug_mode:
# from pprint import pprint
# pprint(settings)
# debug_write('Debug mode enabled')
# debug_write('Platform ' + sublime.platform() + ' ' + sublime.arch())
# debug_write('Sublime Version ' + sublime.version())
# debug_write('Settings ' + pprint(settings))
if (sublime.platform() != 'windows'):
import stat
path = (
sublime.packages_path() +
'/CodeFormatter/codeformatter/lib/phpbeautifier/fmt.phar'
)
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
if st_version == 2:
plugin_loaded()
class CodeFormatterCommand(sublime_plugin.TextCommand):
def run(self, edit, syntax=None, saving=None):
run_formatter(self.view, edit, syntax=syntax, saving=saving)
class CodeFormatterOpenTabsCommand(sublime_plugin.TextCommand):
def run(self, edit, syntax=None):
        window = sublime.active_window()
for view in window.views():
run_formatter(view, edit, quiet=True)
class CodeFormatterEventListener(sublime_plugin.EventListener):
def on_pre_save(self, view):
view.run_command('code_formatter', {'saving': True})
class CodeFormatterShowPhpTransformationsCommand(sublime_plugin.TextCommand):
def run(self, edit, syntax=False):
import subprocess
import re
platform = sublime.platform()
settings = sublime.load_settings('CodeFormatter.sublime-settings')
opts = settings.get('codeformatter_php_options')
php_path = 'php'
if ('php_path' in opts and opts['php_path']):
php_path = opts['php_path']
php55_compat = False
if ('php55_compat' in opts and opts['php55_compat']):
php55_compat = opts['php55_compat']
cmd = []
cmd.append(str(php_path))
if php55_compat:
cmd.append(
'{}/CodeFormatter/codeformatter/lib/phpbeautifier/fmt.phar'.format(
sublime.packages_path()))
else:
cmd.append(
'{}/CodeFormatter/codeformatter/lib/phpbeautifier/phpf.phar'.format(
sublime.packages_path()))
cmd.append('--list')
#print(cmd)
stderr = ''
stdout = ''
try:
if (platform == 'windows'):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
p = subprocess.Popen(
cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, startupinfo=startupinfo,
shell=False, creationflags=subprocess.SW_HIDE)
else:
p = subprocess.Popen(
cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
except Exception as e:
stderr = str(e)
if (not stderr and not stdout):
stderr = 'Error while gethering list of php transformations'
if len(stderr) == 0 and len(stdout) > 0:
text = stdout.decode('utf-8')
text = re.sub(
'Usage:.*?PASSNAME', 'Available PHP Tranformations:', text)
window = self.view.window()
pt = window.get_output_panel('paneltranformations')
pt.set_read_only(False)
pt.insert(edit, pt.size(), text)
window.run_command(
'show_panel', {'panel': 'output.paneltranformations'})
else:
show_error('Formatter error:\n' + stderr)
def run_formatter(view, edit, *args, **kwargs):
if view.is_scratch():
show_error('File is scratch')
return
# default parameters
syntax = kwargs.get('syntax')
saving = kwargs.get('saving', False)
quiet = kwargs.get('quiet', False)
formatter = Formatter(view, syntax)
if not formatter.exists():
if not quiet and not saving:
show_error('Formatter for this file type ({}) not found.'.format(
formatter.syntax))
return
if (saving and not formatter.format_on_save_enabled()):
return
file_text = sublime.Region(0, view.size())
file_text_utf = view.substr(file_text).encode('utf-8')
if (len(file_text_utf) == 0):
return
stdout, stderr = formatter.format(file_text_utf)
if len(stderr) == 0 and len(stdout) > 0:
view.replace(edit, file_text, stdout)
elif not quiet:
show_error('Format error:\n' + stderr)
def console_write(text, prefix=False):
if prefix:
sys.stdout.write('CodeFormatter: ')
sys.stdout.write(text + '\n')
def debug_write(text, prefix=False):
console_write(text, True)
def show_error(text):
sublime.error_message(u'CodeFormatter\n\n%s' % text)
|
jelmer/xandikos
|
xandikos/vcard.py
|
Python
|
gpl-3.0
| 2,165
| 0.000924
|
# Xandikos
# Copyright (C) 2017 Jelmer Vernooij <jelmer@jelmer.uk>, et al.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 3
# of the License or (at your option) any later version of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""VCard file handling.
"""
from .store import File, InvalidFileContents
class VCardFile(File):
content_type = "text/vcard"
def __init__(self, content, content_type):
super(VCardFile, self).__init__(content, content_type)
self._addressbook = None
def validate(self):
c = b"".join(self.content).strip()
# TODO(jelmer): Do more extensive checking of VCards
if not c.startswith((b"BEGIN:VCARD\r\n", b"BEGIN:VCARD\n")) or not c.endswith(
b"\nEND:VCARD"
):
raise InvalidFileContents(
self.content_type,
self.content,
"Missing header and trailer lines",
)
if not self.addressbook.validate():
# TODO(jelmer): Get data about what is invalid
raise InvalidFileContents(
self.content_type,
self.content,
"Invalid VCard file")
@property
def addressbook(self):
if self._addressbook is None:
import vobject
text = b""
|
.join(self.content).decode('utf-8', 'surrogateescape')
try:
self._addressbook = vobject.readOne(text)
except vobject.base.ParseError as e:
raise InvalidFileContents(self.content_type, self.content, str(e))
return self._addressbook
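# A minimal validation sketch (hypothetical content bytes):
# f = VCardFile([b"BEGIN:VCARD\r\nVERSION:3.0\r\nFN:Jane\r\nEND:VCARD"], "text/vcard")
# f.validate()   # raises InvalidFileContents on malformed input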
|
CiscoSystems/avos
|
openstack_dashboard/dashboards/project/data_processing/job_executions/tests.py
|
Python
|
apache-2.0
| 2,517
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:project:data_processing.job_executions:index')
DETAILS_URL = reverse(
'horizon:project:data_processing.job_executions:details', args=['id'])
class DataProcessingJobExecutionTests(test.TestCase):
@test.create_stubs({api.sahara: ('job_execution_list',)})
def test_index(self):
api.sahara.job_execution_list(IsA(http.HttpRequest)) \
.AndReturn(self.job_executions.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(
res, 'project/data_processing.job_executions/job_executions.html')
self.assertContains(res, 'Executions')
@test.create_stubs({api.sahara: ('job_execution_get',)})
def test_details(self):
api.sahara.job_execution_get(IsA(http.HttpRequest), IsA(unicode)) \
.AndReturn(self.job_executions.list()[0])
self.mox.ReplayAll()
res = self.client.get(DETAILS_URL)
self.assertTemplateUsed(
res, 'project/data_processing.job_executions/details.html')
self.assertContains(res, 'RUNNING')
@test.create_stubs({api.sahara: ('job_execution_list',
'job_execution_delete')})
def test_delete(self):
job_exec = self.job_executions.first()
api.sahara.job_execution_list(IsA(http.HttpRequest)) \
.AndReturn(self.job_executions.list())
api.sahara.job_execution_delete(IsA(http.HttpRequest), job_exec.id)
self.mox.ReplayAll()
form_data = {'action': 'job_executions__delete__%s' % job_exec.id}
res = self.client.post(INDEX_URL, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.assertMessageCount(success=1)
|
Williams224/davinci-scripts
|
kstetappipieta/DNTupleMaker.py
|
Python
|
mit
| 3,046
| 0.017072
|
from Gaudi.Configuration import *
from Configurables import GaudiSequencer
from Configurables import DaVinci
simulation=False
from Configurables import EventNodeKiller
eventNodeKiller = EventNodeKiller('DAQkiller')
eventNodeKiller.Nodes = ['DAQ','pRec']
#MySequencer.Members+=[eventNodeKiller]
from Configurables import DecayTreeTuple
from DecayTreeTuple.Configuration import *
tuple=DecayTreeTuple()
tuple.Decay="[B0 -> ^(K*(892)0 -> ^K+ ^pi-) ^(eta_prime -> ^pi- ^pi+ ^(eta -> ^gamma ^gamma))]CC"
tuple.Branches={"B0":"[B0 -> (K*(892)0 -> K+ pi-) (eta_prime -> pi- pi+ (eta -> gamma gamma))]CC"}
tuple.Inputs=["/Event/Bhadron/Phys/B2XEtaB2etapKstarLine/Particles"]
tuple.ToolList += [
"TupleToolGeometry"
, "TupleToolDira"
, "TupleToolAngles"
, "TupleToolPid"
, "TupleToolKinematic"
, "TupleToolPropertime"
, "TupleToolPrimaries"
, "TupleToolEventInfo"
, "TupleToolTrackInfo"
, "TupleToolVtxIsoln"
, "TupleToolPhotonInfo"
#, "TupleToolMCTruth"
#, "TupleToolMCBackgroundInfo"
, "TupleToolCaloHypo"
#, "TupleToolTrackIsolation"
]
tuple.addTool(TupleToolDecay,name="B0")
from Configurables import TupleToolDecayTreeFitter
tuple.B0.addTool(TupleToolDecayTreeFitter("PVFit"))
tuple.B0.PVFit.Verbose=True
tuple.B0.PVFit.constrainToOriginVertex=True
tuple.B0.PVFit.daughtersToConstrain = ["K*(892)0","eta_prime"]
tuple.B0.ToolList+=["TupleToolDecayTreeFitter/PVFit"]
from Configurables import TupleToolTISTOS
tistos = tuple.B0.addTupleTool(TupleToolTISTOS, name="TupleToolTISTOS")
tistos.VerboseL0=True
tistos.VerboseHlt1=True
tistos.VerboseHlt2=True
tistos.TriggerList=["L0PhotonDecision",
"L0ElectronDecision",
"Hlt1TrackPhotonDecision",
"Hlt1TrackAllL0Decision",
"Hlt1TrackMuonDecision",
"Hlt1TrackForwardPassThroughDecision",
"Hlt1TrackForwardPassThroughLooseDecision",
"Hlt1SingleElectronNoIPDecision",
"L0HadronDecision",
"L0LocalPi0Decision",
"L0GlobalPi0Decision",
"L0MuonDecision",
"Hlt2Topo2BodyBBDTDecision",
"Hlt2Topo3BodyBBDTDecision",
"Hlt2Topo4BodyBBDTDecision",
"Hlt2RadiativeTopoTrackTOSDecision",
"Hlt2RadiativeTopoPhotonL0Decision",
"Hlt2TopoRad2BodyBBDTDecision",
"Hlt2TopoRad2plus1BodyBBDTDecision",
"Hlt2Topo2BodySimpleDecision",
"Hlt2Topo3BodySimpleDecision",
"Hlt2Topo4BodySimpleDecision"]
DaVinci().InputType='MDST'
#DaVinci().RootInTES='/Event/Bhadron/'
DaVinci().UserAlgorithms+=[eventNodeKiller,tuple]
DaVinci().TupleFile="Output.root"
DaVinci().HistogramFile="histos.root"
DaVinci().DataType='2012'
DaVinci().EvtMax=-1
DaVinci().PrintFreq=1000
DaVinci().MoniSequence=[tuple]
DaVinci().Simulation=False
|
vinder/django-instaforms
|
templatetags/instaforms_tags.py
|
Python
|
mit
| 1,416
| 0.003531
|
import logging
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import Http404
from apps.dasforms.forms import ContactForm
from apps.dasforms.utils import get_form
register = template.Library()
logger = logging.getLogger(__name__)
@register.inclusion_tag('dasforms/inline_form.html', takes_context=True)
def render_inline_form(context, formtype):
"""Render inline form."""
formtype = formtype.encode('utf8')
    try:
        formclass = get_form(formtype)
        form = formclass()
        action = reverse("dasform", kwargs={'formtype': formtype})
    except Exception:
logger.warning('Form class could not be found: %s' % formtype)
form = ContactForm()
action = reverse("dasform", kwargs={'formtype':'ContactForm'})
senturl = reverse("sent")
return {
'action': action,
'form': form,
'formtype': formtype,
'senturl': senturl,
}
@register.inclusion_tag('dasforms/honeypot_field.html')
def render_honeypot_field(field_name=None):
"""
Renders honeypot field named field_name (defaults to HONEYPOT_FIELD_NAME).
"""
if not field_name:
field_name = settings.HONEYPOT_FIELD_NAME
value = getattr(settings, 'HONEYPOT_VALUE', '')
if callable(value):
value = value()
return {'fieldname': field_name, 'value': value}
|
mavlyutovrus/light_search
|
lib/utils.py
|
Python
|
mit
| 1,167
| 0.010283
|
#-*- coding:utf8 -*-
def crawl_folder(folder):
import os
os_objects = []
seen = set([folder])
for os_object_name in os.listdir(folder):
full_path = os.path.normpath(os.path.join(folder, os_object_name))
if not full_path in seen:
os_objects.append((full_path, os_object_name,))
seen.add(full_path)
return os_objects
class TCustomCounter:
def __init__(self, name, log_stream, verbosity, interval=10):
self.name = name
self.verbosity = verbosity
self.log_stream = log_stream
self.interval = interval
self.value = 0
def add(self):
from datetime import datetime
self.value += 1
if self.verbosity and self.value % self.interval == 0:
self.log_stream.write("Logger: " + self.name + ", value: " + str(self.value) + ", time: " + str(datetime.now())+ "\n")
self.log_stream.flush()
def log_state(self):
from datetime import datetime
        self.log_stream.write("Logger: " + self.name + ", value: " + str(self.value) + ", time: " + str(datetime.now()) + "\n")
self.log_stream.flush()
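# A minimal usage sketch:
# import sys
# counter = TCustomCounter("docs", sys.stderr, verbosity=1, interval=100)
# for _ in range(250):
#     counter.add()        # logs at values 100 and 200
# counter.log_state()      # final value: 250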
|
qkzk/sncf_lcd
|
adafruit_lcd/examples/red.py
|
Python
|
gpl-3.0
| 1,310
| 0.003817
|
#!/usr/bin/python
# Example using an RGB character LCD wired directly to Raspberry Pi or BeagleBone Black.
import time
import Adafruit_CharLCD as LCD
# Raspberry Pi configuration:
lcd_rs = 27 # Change this to pin 21 on older revision Raspberry Pi's
lcd_en = 22
lcd_d4 = 25
lcd_d5 = 24
lcd_d6 = 23
lcd_d7 = 18
lcd_red = 4
lcd_green = 17
lcd_blue = 7 # Pin 7 is CE1
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
# Initialize the LCD using the pins above.
lcd = LCD.Adafruit_RGBCharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
lcd_columns, lcd_rows, lcd_red, lcd_green, lcd_blue)
# Show some basic colors.
lcd.set_color(1.0, 0.0, 0.0)
lcd.clear()
lcd.message('Joyeux')
time.sleep(3.0)
lcd.set_color(0.0, 1.0, 0.0)
lcd.clear()
lcd.message('Noel')
time.sleep(3.0)
lcd.set_color(0.0, 0.0, 1.0)
lcd.clear()
lcd.message('Je vais')
time.sleep(3.0)
lcd.set_color(1.0, 1.0, 0.0)
lcd.clear()
lcd.message('te faire')
time.sleep(3.0)
lcd.set_color(0.0, 1.0, 1.0)
lcd.clear()
lcd.message('des trucs')
time.sleep(3.0)
lcd.set_color(1.0, 0.0, 1.0)
lcd.clear()
lcd.message('de fou')
time.sleep(3.0)
lcd.set_color(1.0, 1.0, 1.0)
lcd.clear()
lcd.message('MOUAHHH')
time.sleep(3.0)
|
kumar303/addons-server
|
src/olympia/core/tests/test_logger.py
|
Python
|
bsd-3-clause
| 2,279
| 0
|
# -*- coding: utf-8 -*-
import logging
from unittest import mock
import olympia.core.logger
from olympia.amo.tests import TestCase
from olympia.users.models import UserProfile
class LoggerTests(TestCase):
@mock.patch('olympia.core.get_remote_addr', lambda: '127.0.0.1')
@mock.patch('olympia.core.get_user', lambda: UserProfile(username=u'fôo'))
def test_get_logger_adapter(self):
log = olympia.core.logger.getLogger('test')
        expected_kwargs = {
'extra': {
'REMOTE_ADDR': '127.0.0.1',
'USERNAME': u'fôo',
}
}
assert log.process('test msg', {}) == ('test msg', expected_kwargs)
@mock.patch('olympia.core.get_remote_addr', lambda: '127.0.0.1')
@mock.patch('olympia.core.get_user', lambda: None)
def test_logger_adapter_user_is_none(self):
log = olympia.core.logger.getLogger('test')
expected_kwargs = {
'extra': {
'REMOTE_ADDR': '127.0.0.1',
'USERNAME': '<anon>',
}
}
assert log.process('test msg', {}) == ('test msg', expected_kwargs)
@mock.patch('olympia.core.get_remote_addr', lambda: None)
@mock.patch('olympia.core.get_user', lambda: UserProfile(username='bar'))
def test_logger_adapter_addr_is_none(self):
log = olympia.core.logger.getLogger('test')
expected_kwargs = {
'extra': {
'REMOTE_ADDR': '',
'USERNAME': 'bar',
}
}
assert log.process('test msg', {}) == ('test msg', expected_kwargs)
def test_formatter(self):
formatter = olympia.core.logger.Formatter()
record = logging.makeLogRecord({})
formatter.format(record)
assert 'USERNAME' in record.__dict__
assert 'REMOTE_ADDR' in record.__dict__
def test_json_formatter(self):
formatter = olympia.core.logger.JsonFormatter()
record = logging.makeLogRecord({})
# These would be set by the adapter.
record.__dict__['USERNAME'] = 'foo'
record.__dict__['REMOTE_ADDR'] = '127.0.0.1'
formatter.format(record)
assert record.__dict__['uid'] == 'foo'
assert record.__dict__['remoteAddressChain'] == '127.0.0.1'
|
adlnet/ADL_LRS
|
lrs/managers/AgentProfileManager.py
|
Python
|
apache-2.0
| 5,034
| 0.001589
|
import json
import datetime
from django.core.files.base import ContentFile
from django.core.exceptions import ValidationError
from django.utils.timezone import utc
from ..models import AgentProfile
from ..exceptions import IDNotFoundError, ParamError
from ..utils import etag
class AgentProfileManager():
def __init__(self, agent):
self.Agent = agent
def save_non_json_profile(self, p, profile, request_dict):
p.content_type = request_dict['headers']['CONTENT_TYPE']
p.etag = etag.create_tag(profile.read())
if 'updated' in request_dict['headers'] and request_dict['headers']['updated']:
p.updated = request_dict['headers']['updated']
else:
p.updated = datetime.datetime.utcnow().replace(tzinfo=utc)
# Go to beginning of file
profile.seek(0)
fn = "%s_%s" % (p.agent_id, request_dict.get('filename', p.id))
p.profile.save(fn, profile)
p.save()
def post_profile(self, request_dict):
# get/create profile
p, created = AgentProfile.objects.get_or_create(
            profile_id=request_dict['params']['profileId'], agent=self.Agent)
post_profile = request_dict['profile']
# If incoming profile is application/json and if a profile didn't
# already exist with the same agent and profileId
if created:
p.json_profile = post_profile
p.content_type = "application/json"
p.etag = etag.create_tag(post_profile)
# If incoming profile is application/json and if a profile already
# existed with the same agent and profileId
else:
orig_prof = json.loads(p.json_profile)
post_profile = json.loads(post_profile)
merged = json.dumps(
dict(list(orig_prof.items()) + list(post_profile.items())))
p.json_profile = merged
p.etag = etag.create_tag(merged)
# Set updated
if 'updated' in request_dict['headers'] and request_dict['headers']['updated']:
p.updated = request_dict['headers']['updated']
else:
p.updated = datetime.datetime.utcnow().replace(tzinfo=utc)
p.save()
def put_profile(self, request_dict):
# get/create profile
p, created = AgentProfile.objects.get_or_create(
profile_id=request_dict['params']['profileId'], agent=self.Agent)
# Profile being PUT is not json
if "application/json" not in request_dict['headers']['CONTENT_TYPE']:
            try:
                profile = ContentFile(request_dict['profile'].read())
            except Exception:
                try:
                    profile = ContentFile(request_dict['profile'])
                except Exception:
                    profile = ContentFile(str(request_dict['profile']))
etag.check_preconditions(request_dict, p, created)
# If it already exists delete it
if p.profile:
try:
p.profile.delete()
except OSError:
# probably was json before
p.json_profile = {}
self.save_non_json_profile(p, profile, request_dict)
# Profile being PUT is json
else:
# (overwrite existing profile data)
etag.check_preconditions(request_dict, p, created)
the_profile = request_dict['profile']
p.json_profile = the_profile
p.content_type = request_dict['headers']['CONTENT_TYPE']
p.etag = etag.create_tag(the_profile)
# Set updated
if 'updated' in request_dict['headers'] and request_dict['headers']['updated']:
p.updated = request_dict['headers']['updated']
else:
p.updated = datetime.datetime.utcnow().replace(tzinfo=utc)
p.save()
def get_profile(self, profile_id):
try:
return self.Agent.agentprofile_set.get(profile_id=profile_id)
        except AgentProfile.DoesNotExist:
err_msg = 'There is no agent profile associated with the id: %s' % profile_id
raise IDNotFoundError(err_msg)
def get_profile_ids(self, since=None):
ids = []
if since:
try:
# this expects iso6801 date/time format
# "2013-02-15T12:00:00+00:00"
profs = self.Agent.agentprofile_set.filter(updated__gt=since)
except ValidationError:
err_msg = 'Since field is not in correct format for retrieval of agent profiles'
raise ParamError(err_msg)
ids = [p.profile_id for p in profs]
else:
ids = self.Agent.agentprofile_set.values_list(
'profile_id', flat=True)
return ids
def delete_profile(self, profile_id):
try:
self.get_profile(profile_id).delete()
# we don't want it anyway
except AgentProfile.DoesNotExist:
pass
except IDNotFoundError:
pass
|
acsone/hr
|
hr_expense_product_policy/tests/__init__.py
|
Python
|
agpl-3.0
| 156
| 0
|
# -*- coding: utf-8 -*-
# © 2016 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import test_hr_expense
|
sysadminmatmoz/odoo-clearcorp
|
account_invoice_incoterm/models/__init__.py
|
Python
|
agpl-3.0
| 153
| 0
|
# -*- coding: utf-8 -*-
# © 2016 ClearCorp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import account_invoice_incoterm
|
sk-rai/Intro-to-SPARK-with-Python
|
code/sales/sales_schema.py
|
Python
|
unlicense
| 899
| 0.053393
|
import datetime
class Store:
def parse(self,line):
fields=line.split('\t')
self.id = fields[0]
self.name = fields[1]
return self
def __repr__(self):
return "Store: id=%s \t name=%s"%(self.id,self.name)
class Product:
def parse(self,line):
fields=line.split('\t')
self.id = fields[0]
self.name = fields[1]
self.category=fields[2]
return self
def __repr__(self):
return "Product: id=%s \t name=%s"%(self.id,self.name)
class SaleRow:
def parse(self,line):
fields=line.split('\t')
self.day=fields[0] # maybe parse as date? see below:)
# self.day=datetime.datetime.strptime(fields[0],"%Y-%m-%d")
self.store_id=fields[1]
self.product_id=fields[2]
self.quantity=int(fields[3]) # let's parse this
return self
def __repr__(self):
return "SaleRow: day=%s \t store_id=%s \t product_id=%s quantity=%d"%(self.day,self.store_id,self.product_id, self.quantity)
|
googleapis/python-artifact-registry
|
google/cloud/artifactregistry_v1beta2/services/artifact_registry/pagers.py
|
Python
|
apache-2.0
| 25,894
| 0.00197
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Sequence,
Tuple,
Optional,
Iterator,
)
from google.cloud.artifactregistry_v1beta2.types import file
from google.cloud.artifactregistry_v1beta2.types import package
from google.cloud.artifactregistry_v1beta2.types import repository
from google.cloud.artifactregistry_v1beta2.types import tag
from google.cloud.artifactregistry_v1beta2.types import version
class ListRepositoriesPager:
"""A pager for iterating through ``list_repositories`` requests.
This class thinly wraps an initial
:class:`google.cloud.artifactregistry_v1beta2.types.ListRepositoriesResponse` object, and
provides an ``__iter__`` method to iterate through its
``repositories`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListRepositories`` requests and continue to iterate
through the ``repositories`` field on the
corresponding responses.
All the usual :class:`google.cloud.artifactregistry_v1beta2.types.ListRepositoriesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., repository.ListRepositoriesResponse],
request: repository.ListRepositoriesRequest,
response: repository.ListRepositoriesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.artifactregistry_v1beta2.types.ListRepositoriesRequest):
The initial request object.
response (google.cloud.artifactregistry_v1beta2.types.ListRepositoriesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = repository.ListRepositoriesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[repository.ListRepositoriesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[repository.Repository]:
for page in self.pages:
yield from page.repositories
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListRepositoriesAsyncPager:
"""A pager for iterating through ``list_repositories`` requests.
This class thinly wraps an initial
:class:`google.cloud.artifactregistry_v1beta2.types.ListRepositoriesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``repositories`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListRepositories`` requests and continue to iterate
through the ``repositories`` field on the
corresponding responses.
All the usual :class:`google.cloud.artifactregistry_v1beta2.types.ListRepositoriesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[repository.ListRepositoriesResponse]],
request: repository.ListRepositoriesRequest,
response: repository.ListRepositoriesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.artifactregistry_v1beta2.types.ListRepositoriesRequest):
The initial request object.
            response (google.cloud.artifactregistry_v1beta2.types.ListRepositoriesResponse):
The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
"""
self._method = method
self._request = repository.ListRepositoriesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[repository.ListRepositoriesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[repository.Repository]:
async def async_generator():
async for page in self.pages:
for response in page.repositories:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListPackagesPager:
"""A pager for iterating through ``list_packages`` requests.
This class thinly wraps an initial
:class:`google.cloud.artifactregistry_v1beta2.types.ListPackagesResponse` object, and
provides an ``__iter__`` method to iterate through its
``packages`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListPackages`` requests and continue to iterate
through the ``packages`` field on the
corresponding responses.
All the usual :class:`google.cloud.artifactregistry_v1beta2.types.ListPackagesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., package.ListPackagesResponse],
request: package.ListPackagesRequest,
response: package.ListPackagesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.artifactregistry_v1beta2.types.ListPackagesRequest):
The initial request object.
response (google.cloud.artifactregistry_v1beta2.types.ListPackagesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = package.ListPackagesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[package.ListPackagesResponse]:
yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response
|
noahgoldman/adbpy
|
tests/test_adb.py
|
Python
|
mit
| 2,479
| 0.006858
|
from mock import MagicMock, patch
import pytest
from adbpy.adb import Adb
from adbpy import Target, AdbError
@pytest.fixture
def adb():
adb = Adb(())
adb.socket = MagicMock()
return adb
def test_get_transport():
assert Adb._get_transport(Target.ANY) == "host:transport-any"
assert Adb._get_transport(Target.USB) == "host:transport-usb"
assert Adb._get_transport(Target.EMULATOR) == "host:transport-local"
assert Adb._get_transport("950a8ad5") == "host:transport:950a8ad5"
def test_adb_version(adb):
adb.version()
adb.socket.send.assert_called_once_with("host:version")
def test_adb_get_serialno_any(adb):
adb.get_serialno(Target.ANY)
adb.socket.send.assert_called_once_with("host:get-serialno")
def test_adb_get_serialno_serial(adb):
adb.get_serialno("6097191b")
adb.socket.send.assert_called_once_with("host-serial:6097191b:get-serialno")
def test_adb_get_product(adb):
adb.get_product("950a8ad5")
adb.socket.send.assert_called_once_with("host-serial:950a8ad5:get-product")
def test_adb_get_devpath(adb):
adb.get_devpath(Target.USB)
adb.socket.send.assert_called_once_with("host-usb:get-devpath")
def test_adb_get_state(adb):
adb.get_state(Target.EMULATOR)
adb.socket.send.assert_called_once_with("host-local:get-state")
def test_shell(adb):
with patch.object(Adb, "_setup_target"):
adb.shell("ls -l")
adb.socket.send.assert_called_once_with("shell:ls -l")
adb._setup_target.assert_called_once()
def test_forward(adb):
device_id = "950a8ad5"
adb.forward("tcp:6001", "tcp:36001", device_id, norebind=False)
adb.socket.send.assert_called_once_with("host-serial:950a8ad5:"
"forward:tcp:6001;"
"tcp:36001")
def test_forward_rebind(adb):
device_id = "950a8ad5"
adb.forward("tcp:6001", "tcp:36001", device_id, norebind=True)
adb.socket.send.assert_called_once_with("host-serial:950a8ad5:"
"forward:norebind:"
"tcp:6001;tcp:36001")
def test_devices(adb):
adb.socket.receive = MagicMock(return_value="950a8ad5\tdevice\n")
output = adb.devices()
assert output == [("950a8ad5", "device")]
def test_start(adb):
adb.process = MagicMock()
adb.process.running = MagicMock(return_value=False)
with pytest.raises(AdbError):
adb.start()
|
gravufo/commotion-router-testbench
|
ping/mainTest.py
|
Python
|
gpl-2.0
| 479
| 0.004175
|
from netools import nextIpInPool, ping, aliveHost, hostsUnDone
def main():
aliveHosts = []
# pool IP
ipStart = "192.168.56.1"
ipEnd = "192.168.56.5"
|
print"Pools: ", ipStart + " -> " + ipEnd
print"Scanning online Router on network..."
aliveHosts = aliveHost(ipStart, ipEnd)
print "online Router:"
print aliveHosts
# print"New Hosts Alive in Pools:",hostsUnDone(aliveHosts, aliveHost(ipStart,
|
ipEnd))
if __name__ == '__main__':
main()
|
geowurster/Acronym
|
acronym/cmdl/_portable_algorithms/__init__.py
|
Python
|
bsd-3-clause
| 1,916
| 0.004175
|
# This document is part of Acronym
# https://github.com/geowurster/Acronym
# =================================================================================== #
#
# New BSD License
#
# Copyright (c) 2014, Kevin D. Wurster
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * The names of its contributors may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# =================================================================================== #
"""
Algorithms that can be easily ported from one datatype to another with little work
"""
from . import tileindex
|
imrehg/electrum
|
lib/blockchain.py
|
Python
|
mit
| 8,852
| 0.00113
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@ecdsa.org
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import util
from bitcoin import *
MAX_TARGET = 0x00000000FFFF0000000000000000000000000000000000000000000000000000
class Blockchain(util.PrintError):
'''Manages blockchain headers and their verification'''
def __init__(self, config, network):
self.config = config
self.network = network
self.headers_url = "https://headers.electrum.org/blockchain_headers"
self.local_height = 0
self.set_local_height()
def height(self):
return self.local_height
def init(self):
self.init_headers_file()
self.set_local_height()
self.print_error("%d blocks" % self.local_height)
def verify_header(self, header, prev_header, bits, target):
prev_hash = self.hash_header(prev_header)
assert prev_hash == header.get('prev_block_hash'), "prev hash mismatch: %s vs %s" % (prev_hash, header.get('prev_block_hash'))
assert bits == header.get('bits'), "bits mismatch: %s vs %s" % (bits, header.get('bits'))
_hash = self.hash_header(header)
assert int('0x' + _hash, 16) <= target, "insufficient proof of work: %s vs target %s" % (int('0x' + _hash, 16), target)
def verify_chain(self, chain):
first_header = chain[0]
prev_header = self.read_header(first_header.get('block_height') - 1)
for header in chain:
height = header.get('block_height')
bits, target = self.get_target(height / 2016, chain)
self.verify_header(header, prev_header, bits, target)
prev_header = header
def verify_chunk(self, index, data):
num = len(data) / 80
prev_header = None
if index != 0:
prev_header = self.read_header(index*2016 - 1)
bits, target = self.get_target(index)
for i in range(num):
raw_header = data[i*80:(i+1) * 80]
header = self.deserialize_header(raw_header)
self.verify_header(header, prev_header, bits, target)
prev_header = header
def serialize_header(self, res):
s = int_to_hex(res.get('version'), 4) \
+ rev_hex(res.get('prev_block_hash')) \
+ rev_hex(res.get('merkle_root')) \
+ int_to_hex(int(res.get('timestamp')), 4) \
+ int_to_hex(int(res.get('bits')), 4) \
+ int_to_hex(int(res.get('nonce')), 4)
return s
def deserialize_header(self, s):
hex_to_int = lambda s: int('0x' + s[::-1].encode('hex'), 16)
h = {}
h['version'] = hex_to_int(s[0:4])
h['prev_block_hash'] = hash_encode(s[4:36])
h['merkle_root'] = hash_encode(s[36:68])
h['timestamp'] = hex_to_int(s[68:72])
h['bits'] = hex_to_int(s[72:76])
h['nonce'] = hex_to_int(s[76:80])
return h
def hash_header(self, header):
if header is None:
return '0' * 64
return hash_encode(Hash(self.serialize_header(header).decode('hex')))
def path(self):
return util.get_headers_path(self.config)
def init_headers_file(self):
filename = self.path()
if os.path.exists(filename):
return
try:
import urllib, socket
socket.setdefaulttimeout(30)
self.print_error("downloading ", self.headers_url)
urllib.urlretrieve(self.headers_url, filename)
self.print_error("done.")
except Exception:
self.print_error("download failed. creating file", filename)
open(filename, 'wb+').close()
def save_chunk(self, index, chunk):
filename = self.path()
f = open(filename, 'rb+')
f.seek(index * 2016 * 80)
h = f.write(chunk)
f.close()
self.set_local_height()
def save_header(self, header):
data = self.serialize_header(header).decode('hex')
assert len(data) == 80
height = header.get('block_height')
filename = self.path()
f = open(filename, 'rb+')
f.seek(height * 80)
h = f.write(data)
f.close()
self.set_local_height()
def set_local_height(self):
name = self.path()
if os.path.exists(name):
h = os.path.getsize(name)/80 - 1
if self.local_height != h:
self.local_height = h
def read_header(self, block_height):
name = self.path()
if os.path.exists(name):
f = open(name, 'rb')
f.seek(block_height * 80)
h = f.read(80)
f.close()
if len(h) == 80:
h = self.deserialize_header(h)
return h
def get_target(self, index, chain=None):
if index == 0:
return 0x1d00ffff, MAX_TARGET
first = self.read_header((index-1) * 2016)
last = self.read_header(index*2016 - 1)
if last is None:
for h in chain:
if h.get('block_height') == index*2016 - 1:
last = h
assert last is not None
# bits to target
bits = last.get('bits')
bitsN = (bits >> 24) & 0xff
assert bitsN >= 0x03 and bitsN <= 0x1d, "First part of bits should be in [0x03, 0x1d]"
bitsBase = bits & 0xffffff
assert bitsBase >= 0x8000 and bitsBase <= 0x7fffff, "Second part of bits should be in [0x8000, 0x7fffff]"
target = bitsBase << (8 * (bitsN-3))
# new target
nActualTimespan = last.get('timestamp') - first.get('timestamp')
nTargetTimespan = 14 * 24 * 60 * 60
nActualTimespan = max(nActualTimespan, nTargetTimespan / 4)
nActualTimespan = min(nActualTimespan, nTargetTimespan * 4)
new_target = min(MAX_TARGET, (target*nActualTimespan) / nTargetTimespan)
# convert new target to bits
c = ("%064x" % new_target)[2:]
while c[:2] == '00' and len(c) > 6:
c = c[2:]
bitsN, bitsBase = len(c) / 2, int('0x' + c[:6], 16)
if bitsBase >= 0x800000:
bitsN += 1
bitsBase >>= 8
new_bits = bitsN << 24 | bitsBase
return new_bits, bitsBase << (8 * (bitsN-3))
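    # A worked sketch of the compact "bits" encoding handled above:
    #   bits = 0x1d00ffff -> bitsN = 0x1d, bitsBase = 0x00ffff
    #   target = 0xffff << (8 * (0x1d - 3)) == MAX_TARGET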
def connect_header(self, chain, header):
'''Builds a header chain until it connects. Returns True if it has
successfully connected, False if verification failed, otherwise the
height of the next header needed.'''
chain.append(header) # Ordered by decreasing height
previous_height = header['block_height'] - 1
previous_header = self.read_header(previous_height)
# Missing header, request it
if not previous_header:
return previous_height
# Does it connect to my chain?
prev_hash = self.hash_header(previous_header)
if prev_hash != header.get('prev_block_hash'):
self.print_error("reorg")
            return previous_height
# The chain is complete. Reverse to order by increasing height
        chain.reverse()
        try:
            self.verify_chain(chain)
        except Exception as e:
            self.print_error("verify_chain failed", str(e))
            return False
        # Verified; persist the new headers
        for header in chain:
            self.save_header(header)
        return True
|
shawnfuryan/django_website_core
|
django_website/manage.py
|
Python
|
mit
| 257
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_website.settings")
from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
SCSSoftware/BlenderTools
|
addon/io_scs_tools/ui/mesh.py
|
Python
|
gpl-2.0
| 2,693
| 0.001114
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (C) 2013-2019: SCS Software
import bpy
from bpy.types import Panel
from io_scs_tools.ui import shared as _shared
class _MeshPanelBlDefs(_shared.HeaderIconPanel):
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_ui_units_x = 15
@classmethod
def poll(cls, context):
return hasattr(context, "active_object") and context.active_object and context.active_object.type == "MESH"
def get_layout(self):
"""Returns layout depending where it's drawn into. If popover create extra box to make it distinguisable between different sub-panels."""
if self.is_popover:
layout = self.layout.box().column()
else:
layout = self.layout
return layout
class SCS_TOOLS_PT_Mesh(_MeshPanelBlDefs, Panel):
"""
Creates "SCS Mesh" panel in the Object properties window.
"""
bl_label = "SCS Mesh"
bl_context = "data"
def draw(self, context):
"""UI draw function.
:param context: Blender Context
:type context: bpy.context
"""
if not self.poll(context):
self.layout.label(text="No active mesh object!", icon="INFO")
return
layout = self.get_layout()
mesh = context.active_object.data
layout.use_property_split = True
layout.use_property_decorate = False
classes = (
SCS_TOOLS_PT_Mesh,
)
def register():
# for cls in classes:
# bpy.utils.register_class(cls)
#
# from io_scs_tools import SCS_TOOLS_MT_MainMenu
# SCS_TOOLS_MT_MainMenu.append_props_entry("Mesh Properties", SCS_TOOLS_PT_Mesh.__name__)
# No mesh settings available currently, thus commented out and just passing
pass
def unregister():
# for cls in classes:
# bpy.utils.unregister_class(cls)
# No mesh settings available currently, thus commented out and just passing
pass
|
wdm0006/Flask-Blogging
|
flask_blogging/__init__.py
|
Python
|
mit
| 403
| 0
|
from .engine import BloggingEngine
from .processor import PostProcessor
from .sqlastorage import SQLAStorage
from .storage import Storage
"""
Flask-Blogging is a Flask extension to add blog support to your
web application. This extension uses Markdown to store and then
render the webpage.
Author: Gouthaman Balaraman
Date: June 1, 2015
"""
__author__ = 'Gouthaman Balaraman'
__version__ = '0.4.2'
|
mahak/keystone
|
keystone/common/validation/__init__.py
|
Python
|
apache-2.0
| 2,344
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Request body validating middleware for OpenStack Identity resources."""
from keystone.common.validation import validators
def lazy_validate(request_body_schema, resource_to_validate):
"""A non-decorator way to validate a request, to be used inline.
:param request_body_schema: a schema to validate the resource reference
:param resource_to_validate: dictionary to validate
:raises keystone.exception.ValidationError: if `resource_to_validate` is
None. (see wrapper method below).
:raises TypeError: at decoration time when the expected resource to
validate isn't found in the decorated method's
signature
"""
schema_validator = validators.SchemaValidator(request_body_schema)
schema_validator.validate(resource_to_validate)
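# Editorial sketch (not part of the original source): typical inline use,
# with a hypothetical schema and request body.
#
#     _name_schema = {'type': 'object',
#                     'properties': {'name': {'type': 'string'}}}
#     lazy_validate(_name_schema, {'name': 'example'})  # raises on bad input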
def nullable(property_schema):
"""Clone a property schema into one that is nullable.
:param dict property_schema: schema to clone into a nullable schema
:returns: a new dict schema
"""
# TODO(dstanek): deal with the case where type is already a list; we don't
# do that yet so I'm not wasting time on it
new_schema = property_schema.copy()
new_schema['type'] = [property_schema['type'], 'null']
# NOTE(kmalloc): If enum is specified (such as our boolean case) ensure we
# add null to the enum as well so that null can be passed/validated as
# expected. Without adding to the enum, null will not validate as enum is
# explicitly listing valid values. According to the JSON Schema
# specification, the values must be unique in the enum array.
if 'enum' in new_schema and None not in new_schema['enum']:
# In the enum the 'null' is NoneType
new_schema['enum'].append(None)
return new_schema
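# Editorial sketch (not part of the original source): what nullable() returns
# for a plain property and for an enum-bearing one.
#
#     nullable({'type': 'string'})
#     # -> {'type': ['string', 'null']}
#     nullable({'type': 'boolean', 'enum': [True, False]})
#     # -> {'type': ['boolean', 'null'], 'enum': [True, False, None]}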
|
rtcTo/rtc2git
|
migration.py
|
Python
|
mit
| 6,494
| 0.003388
|
#!/usr/bin/python3
import argparse
import os
import sys
import configuration
import shouter
from gitFunctions import Commiter
from gitFunctions import Initializer, Differ
from rtcFunctions import ImportHandler
from rtcFunctions import RTCInitializer
from rtcFunctions import RTCLogin
from rtcFunctions import WorkspaceHandler
def initialize():
config = configuration.get()
directory = config.workDirectory
if os.path.exists(directory):
sys.exit("Configured directory '" + directory + "' already exists, please make sure to use a "
+ "non-existing directory")
shouter.shout("Migration will take place in " + directory)
os.makedirs(directory)
os.chdir(directory)
config.deletelogfolder()
git = Initializer()
git.initalize()
RTCInitializer.initialize()
if Differ.has_diff():
git.initialcommit()
Commiter.pushmaster()
def resume():
shouter.shout("Found existing git repo in work directory, resuming migration...")
config = configuration.get()
os.chdir(config.workDirectory)
os.chdir(config.clonedGitRepoName)
if Differ.has_diff():
sys.exit("Your git repo has some uncommited changes, please add/remove them manually")
RTCLogin.loginandcollectstreamuuid()
Initializer.preparerepo()
if config.previousstreamname:
prepare()
else:
Commiter.branch(config.streamname)
WorkspaceHandler().load()
def existsrepo():
config = configuration.get()
repodirectory = os.path.join(config.workDirectory, config.gitRepoName)
return os.path.exists(repodirectory)
def migrate():
rtc = ImportHandler()
rtcworkspace = WorkspaceHandler()
git = Commiter
if existsrepo():
resume()
else:
initialize()
config = configuration.get()
streamuuid = config.streamuuid
streamname = config.streamname
branchname = streamname + "_branchpoint"
componentbaselineentries = rtc.getcomponentbaselineentriesfromstream(streamuuid)
rtcworkspace.setnewflowtargets(streamuuid)
history = rtc.readhistory(componentbaselineentries, streamname)
changeentries = rtc.getchangeentriesofstreamcomponents(componentbaselineentries)
if len(changeentries) > 0:
git.branch(branchname)
rtc.acceptchangesintoworkspace(rtc.getchangeentriestoaccept(changeentries, history))
shouter.shout("All changes until creation of stream '%s' accepted" % streamname)
git.pushbranch(branchname)
rtcworkspace.setcomponentstobaseline(componentbaselineentries, streamuuid)
rtcworkspace.load()
git.branch(streamname)
changeentries = rtc.getchangeentriesofstream(streamuuid)
amountofacceptedchanges = rtc.acceptchangesintoworkspace(rtc.getchangeentriestoaccept(changeentries, history))
if amountofacceptedchanges > 0:
git.pushbranch(streamname)
git.promotebranchtomaster(streamname)
RTCLogin.logout()
summary(streamname)
def prepare():
config = configuration.get()
rtc = ImportHandler()
rtcworkspace = WorkspaceHandler()
# git checkout branchpoint
Commiter.checkout(config.previousstreamname + "_branchpoint")
# list baselines of current workspace
componentbaselineentries = rtc.getcomponentbaselineentriesfromstream(config.previousstreamuuid)
# set components to that baselines
rtcworkspace.setcomponentstobaseline(componentbaselineentries, config.previousstreamuuid)
rtcworkspace.load()
def summary(streamname):
config = configuration.get()
shouter.shout("\nAll changes accepted - Migration of stream '%s' is completed."
"\nYou can distribute the git-repo '%s'." % (streamname, config.gitRepoName))
if len(config.ignorefileextensions) > 0:
# determine and log the ignored but still present files
os.chdir(config.workDirectory)
os.chdir(config.clonedGitRepoName)
pathtoclonedgitrepo = config.workDirectory + os.sep + config.clonedGitRepoName
if pathtoclonedgitrepo[-1:] != os.sep:
pathtoclonedgitrepo += os.sep
ignoredbutexist = []
with open('.gitignore', 'r') as gitignore:
for line in gitignore.readlines():
line = line.strip()
if line != ".jazz5" and line != ".metadata" and line != ".jazzShed":
pathtoignored = pathtoclonedgitrepo + line
if os.path.exists(pathtoignored):
ignoredbutexist.append(line)
if len(ignoredbutexist) > 0:
shouter.shout("\nThe following files have been ignored in the new git repository, " +
"but still exist in the actual RTC workspace:")
ignoredbutexist.sort()
for ignored in ignoredbutexist:
shouter.shout("\t" + ignored)
def parsecommandline():
parser = argparse.ArgumentParser()
configfiledefault = 'config.ini'
configfilehelp = 'name of the config file, or full path to the config file; defaults to ' + configfiledefault
parser.add_argument('-c', '--configfile', metavar='file', dest='configfile', help=configfilehelp,
default=configfiledefault)
parser.add_argument('-u', '--user', metavar='user', dest='user', help='RTC user', default=None)
parser.add_argument('-p', '--password', metavar='password', dest='password', help='RTC password', default=None)
parser.add_argument('-s', '--stored', help='Use stored password for the repository connection', action='store_true')
arguments = parser.parse_args()
configuration.setconfigfile(arguments.configfile)
configuration.setUser(arguments.user)
configuration.setPassword(arguments.password)
configuration.setStored(arguments.stored)
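# Editorial example (hypothetical file and user names): a typical invocation
# would be
#
#     python3 migration.py -c myproject.ini -u migration_user -s
#
# which reads myproject.ini, logs in to RTC as migration_user and reuses the
# password stored for that repository connection.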
def validate():
config = configuration.get()
streamname = config.streamname
branchname = streamname + "_branchpoint"
previousstreamname = config.previousstreamname
offendingbranchname = None
if not Commiter.checkbranchname(streamname):
offendingbranchname = streamname
elif not Commiter.checkbranchname(branchname):
offendingbranchname = branchname
elif not Commiter.checkbranchname(previousstreamname):
offendingbranchname = previousstreamname
if offendingbranchname:
sys.exit(offendingbranchname + " is not a valid git branch name - consider renaming the stream")
if __name__ == "__main__":
parsecommandline()
validate()
migrate()
|
fhahn/rpyre
|
tools/cli.py
|
Python
|
mit
| 904
| 0.001106
|
"""
Lua pattern matcher based on a NFA
inspired by
http://swtch.com/~rsc/regexp/regexp1.html
"""
from rpyre.interface.lua import compile_re
from rpyre.matching import find
def main(args):
n = 20
s = args[1]
#s = "(a|b)*a%sa(a|b)*$" % ("(a|b)" * n, )
print s
evilregex = compile_re(s)
import os
chunks = []
# use os.read to be RPython compatible
while True:
s = os.read(0, 4096)
if not s:
break
chunks.append(s)
s = "".join(chunks)
print len(s)
print find(evilregex, s, 0)
"""
for x in find2(evilregex, s, 0):
print x
"""
return 0
# needed for the PyPy translation toolchain
def target(*args):
return main, None
def jitpolicy(*args):
from rpython.jit.codewriter.policy import JitPolicy
return JitPolicy()
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
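# Editorial example (hypothetical input): pass the Lua pattern as the first
# argument and pipe the subject text on stdin, e.g.
#
#     echo "aabba" | python cli.py "(a|b)*a"
#
# main() prints the pattern, the input length, and the result of find().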
|
ntt-sic/neutron
|
neutron/plugins/common/constants.py
|
Python
|
apache-2.0
| 1,913
| 0.000523
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# service type constants:
CORE = "CORE"
DUMMY = "DUMMY"
LOADBALANCER = "LOADBALANCER"
FIREWALL = "FIREWALL"
VPN = "VPN"
METERING = "METERING"
L3_ROUTER_NAT = "L3_ROUTER_NAT"
#maps extension alias to service type
EXT_TO_SERVICE_MAPPING = {
'dummy': DUMMY,
'lbaas': LOADBALANCER,
'fwaas': FIREWALL,
'vpnaas': VPN,
'metering': METERING,
'router': L3_ROUTER_NAT
}
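# For example, EXT_TO_SERVICE_MAPPING['lbaas'] yields LOADBALANCER, which in
# turn maps to the '/lb' URI prefix in COMMON_PREFIXES below.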
# TODO(salvatore-orlando): Move these (or derive them) from conf file
ALLOWED_SERVICES = [CORE, DUMMY, LOADBALANCER, FIREWALL, VPN, METERING,
L3_ROUTER_NAT]
COMMON_PREFIXES = {
CORE: "",
DUMMY: "/dummy_svc",
LOADBALANCER: "/lb",
FIREWALL: "/fw",
VPN: "/vpn",
METERING: "/metering",
L3_ROUTER_NAT: "",
}
# Service operation status constants
ACTIVE = "ACTIVE"
DOWN = "DOWN"
PENDING_CREATE = "PENDING_CREATE"
PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
INACTIVE = "INACTIVE"
ERROR = "ERROR"
# FWaaS firewall rule action
FWAAS_ALLOW = "allow"
FWAAS_DENY = "deny"
# L3 Protocol name constants
TCP = "tcp"
UDP = "udp"
ICMP = "icmp"
# Network Type constants
TYPE_FLAT = 'flat'
TYPE_GRE = 'gre'
TYPE_LOCAL = 'local'
TYPE_VXLAN = 'vxlan'
TYPE_VLAN = 'vlan'
TYPE_NONE = 'none'
|
apophys/ipaqe-provision-hosts
|
ipaqe_provision_hosts/backend/__init__.py
|
Python
|
mit
| 22
| 0
|
# author: Milan Kubik
| |
jacobbendicksen/BlueAPI
|
matchinfo.py
|
Python
|
mit
| 4,831
| 0.012834
|
#####################################################
#
# A library for getting match information for a given team at a given event
# out of the Blue Alliance API
#
# Authors: Andrew Merrill and Jacob Bendicksen (Fall 2014)
#
# Requires the blueapi.py library
######################################################
#this doesn't currently fully work
import blueapi
teamNumber = 1540
eventKey = '2014pncmp'
#returns a list of qualification matches that the team played in
def getTeamQualMatches(teamNumber,eventKey):
matches = []
# The original called the API three times with three different argument
# orders; call it once (order assumed from the first call) and reuse it.
teamMatches = blueapi.getTeamEventMatches(eventKey,teamNumber)
for n in range(0,len(teamMatches)):
if teamMatches[n]['comp_level'] == 'qm':
matches.append(teamMatches[n]['match_number'])
matches.sort()
return matches
#returns a list of all qualification matches at the event
def getQualMatches(eventKey):
matches = []
for n in range(0,len(blueapi.getEventMatches(eventKey))):
if blueapi.getEventMatches(eventKey)[n]['comp_level'] == 'qm':
matches.append(blueapi.getEventMatches(eventKey)[n]['match_number'])
matches.sort()
return matches
#returns a list of quarterfinal matches that the team played in
def getTeamQFMatches(teamNumber, eventKey):
matches = []
# teamNumber was never passed in the original calls; (eventKey, teamNumber) order assumed
teamMatches = blueapi.getTeamEventMatches(eventKey,teamNumber)
for n in range(0,len(teamMatches)):
if teamMatches[n]['comp_level'] == 'qf':
matches.append(teamMatches[n]['match_number'])
matches.sort()
return matches
#returns a list of all quarterfinal matches at the event
def getQFMatches(eventKey):
matches = []
for n in range(0,len(blueapi.getEventMatches(eventKey))):
if blueapi.getEventMatches(eventKey)[n]['comp_level'] == 'qf':
matches.append(blueapi.getEventMatches(eventKey)[n]['match_number'])
matches.sort()
return matches
#returns a list of semifinal matches that the team played in
def getTeamSFMatches(teamNumber, eventKey):
matches = []
teamMatches = blueapi.getTeamEventMatches(eventKey,teamNumber)
for n in range(0,len(teamMatches)):
if teamMatches[n]['comp_level'] == 'sf':
matches.append(teamMatches[n]['match_number'])
matches.sort()
return matches
#returns a list of all semifinal matches at the event
def getSFMatches(eventKey):
matches = []
for n in range(0,len(blueapi.getEventMatches(eventKey))):
if blueapi.getEventMatches(eventKey)[n]['comp_level'] == 'sf':
matches.append(blueapi.getEventMatches(eventKey)[n]['match_number'])
matches.sort()
return matches
#returns a list of finals matches that the team played in
def getTeamFMatches(teamNumber, eventKey):
matches = []
teamMatches = blueapi.getTeamEventMatches(eventKey,teamNumber)
for n in range(0,len(teamMatches)):
if teamMatches[n]['comp_level'] == 'f':
matches.append(teamMatches[n]['match_number'])
matches.sort()
return matches
#returns a list of all finals matches at the event
def getFMatches(eventKey):
matches = []
for n in range(0,len(blueapi.getEventMatches(eventKey))):
if blueapi.getEventMatches(eventKey)[n]['comp_level'] == 'f':
matches.append(blueapi.getEventMatches(eventKey)[n]['match_number'])
matches.sort()
return matches
def getMatchRedScore(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['alliances']['red']['score']
def getMatchBlueScore(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['alliances']['blue']['score']
def getMatchRedTeams(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['alliances']['red']['teams']
def getMatchBlueTeams(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['alliances']['blue']['teams']
def getMatchVideo(matchNumber,eventKey):
videos = blueapi.getEventMatches(eventKey)[matchNumber]['videos']
for n in range(0,5):
if videos[n]['type'] == 'youtube':
return "youtu.be/" + videos[n]['key']
elif videos[n]['type'] == 'tba':
return videos[n]['key']
def getSetNumber(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['set_number']
def getTimeString(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['time_string']
def getMatchKey(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['key']
def getMatchTime(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['time']
def getScoreBreakdown(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['score_breakdown']
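# Editorial example (not part of the original file), using the module-level
# teamNumber/eventKey defaults above; results depend on live Blue Alliance
# data:
#
#     quals = getQualMatches(eventKey)              # e.g. [1, 2, 3, ...]
#     red_score = getMatchRedScore(quals[0], eventKey)
#     video_url = getMatchVideo(quals[0], eventKey)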
|
roopali8/tempest
|
tempest/api/network/test_extra_dhcp_options.py
|
Python
|
apache-2.0
| 4,062
| 0.000246
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest.api.network import base
from tempest import test
class ExtraDHCPOptionsTestJSON(base.BaseNetworkTest):
"""
Tests the following operations with the Extra DHCP Options Neutron API
extension:
port create
port list
port show
port update
v2.0 of the Neutron API is assumed. It is also assumed that the Extra
DHCP Options extension is enabled in the [network-feature-enabled]
section of etc/tempest.conf
"""
@classmethod
def skip_checks(cls):
super(ExtraDHCPOptionsTestJSON, cls).skip_checks()
if not test.is_extension_enabled('extra_dhcp_opt', 'network'):
msg = "Extra DHCP Options extension not enabled."
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(ExtraDHCPOptionsTestJSON, cls).resource_setup()
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.port = cls.create_port(cls.network)
cls.ip_tftp = ('123.123.123.123' if cls._ip_version == 4
else '2015::dead')
cls.ip_server = ('123.123.123.45' if cls._ip_version == 4
else '2015::badd')
cls.extra_dhcp_opts = [
{'opt_value': 'pxelinux.0', 'opt_name': 'bootfile-name'},
{'opt_value': cls.ip_tftp, 'opt_name': 'tftp-server'},
{'opt_value': cls.ip_server, 'opt_name': 'server-ip-address'}
]
@test.idempotent_id('d2c17063-3767-4a24-be4f-a23dbfa133c9')
def test_create_list_port_with_extra_dhcp_options(self):
# Create a port with Extra DHCP Options
body = self.client.create_port(
network_id=self.network['id'],
extra_dhcp_opts=self.extra_dhcp_opts)
port_id = body['port']['id']
self.addCleanup(self.client.delete_port, port_id)
# Confirm port created has Extra DHCP Options
body = self.client.list_ports()
ports = body['ports']
port = [p for p in ports if p['id'] == port_id]
self.assertTrue(port)
self._confirm_extra_dhcp_options(port[0], self.extra_dhcp_opts)
@test.idempotent_id('9a6aebf4-86ee-4f47-b07a-7f7232c55607')
def test_update_show_port_with_extra_dhcp_options(self):
# Update port with extra dhcp options
name = data_utils.rand_name('new-port-name')
body = self.client.update_port(
self.port['id'],
name=name,
extra_dhcp_opts=self.extra_dhcp_opts)
# Confirm extra dhcp options were added to the port
body = self.client.show_port(self.port['id'])
self._confirm_extra_dhcp_options(body['port'], self.extra_dhcp_opts)
def _confirm_extra_dhcp_options(self, port, extra_dhcp_opts):
retrieved = port['extra_dhcp_opts']
self.assertEqual(len(retrieved), len(extra_dhcp_opts))
for retrieved_option in retrieved:
for option in extra_dhcp_opts:
if (retrieved_option['opt_value'] == option['opt_value'] and
retrieved_option['opt_name'] == option['opt_name']):
break
else:
self.fail('Extra DHCP option not found in port %s' %
str(retrieved_option))
class ExtraDHCPOptionsIpV6TestJSON(ExtraDHCPOptionsTestJSON):
_ip_version = 6
|
skuda/client-python
|
kubernetes/test/test_v2alpha1_horizontal_pod_autoscaler_spec.py
|
Python
|
apache-2.0
| 1,023
| 0.00391
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v2alpha1_horizontal_pod_autoscaler_spec import V2alpha1HorizontalPodAutoscalerSpec
class TestV2alpha1HorizontalPodAutoscalerSpec(unittest.TestCase):
""" V2alpha1HorizontalPodAutoscalerSpec unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV2alpha1HorizontalPodAutoscalerSpec(self):
"""
Test V2alpha1HorizontalPodAutoscalerSpec
"""
model = kubernetes.client.models.v2alpha1_horizontal_pod_autoscaler_spec.V2alpha1HorizontalPodAutoscalerSpec()
if __name__ == '__main__':
unittest.main()
|
MiczFlor/Booktype
|
lib/booktype/apps/portal/forms.py
|
Python
|
agpl-3.0
| 2,755
| 0.002178
|
from django import forms
from django.utils.html import escape
from django.forms.utils import ErrorList
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from booktype.utils.misc import booktype_slugify
from booki.editor.models import BookiGroup
from booktype.utils import misc
from booktype.apps.core.forms import BaseBooktypeForm
from widgets import RemovableImageWidget
class SpanErrorList(ErrorList):
def __unicode__(self):
return unicode(self.as_spans())
def as_spans(self):
return "<span style='color: red'>%s</span>" % (
",".join([e for e in self])
)
class BaseGroupForm(BaseBooktypeForm, forms.ModelForm):
name = forms.CharField()
description = forms.CharField(
label=_('Description (250 characters)'),
required=False,
max_length=250,
widget=forms.Textarea(attrs={'rows': '10', 'cols': '40'})
)
group_image = forms.FileField(
label=_('Group image'),
required=False,
widget=RemovableImageWidget(attrs={
'label_class': 'checkbox-inline',
'input_class': 'group-image-removable'
}
)
)
class Meta:
model = BookiGroup
fields = [
'name', 'description'
]
def __init__(self, *args, **kwargs):
kwargs.update({'error_class': SpanErrorList})
super(BaseGroupForm, self).__init__(*args, **kwargs)
def clean_name(self):
new_url_name = booktype_slugify(self.cleaned_data['name'])
group_data_url_name = BookiGroup.objects.filter(url_name=new_url_name).exclude(pk=self.instance.pk)
if len(group_data_url_name) > 0:
raise ValidationError(_('Group name is already in use'))
return self.cleaned_data.get('name', '')
def clean_description(self):
return escape(self.cleaned_data.get('description', ''))
def set_group_image(self, group_id, group_image):
try:
filename = misc.set_group_image(group_id, group_image, 240, 240)
if len(filename) == 0:
raise ValidationError(_('Only JPEG file is allowed for group image.'))
else:
misc.set_group_image("{}_small".format(group_id), group_image, 18, 18)
except Exception as err:
# TODO: we should do something here
print err
class GroupCreateForm(BaseGroupForm):
pass
class GroupUpdateForm(BaseGroupForm):
def clean_group_image(self):
group_image = self.files.get('group_image', None)
group_id = str(self.instance.pk)
if group_image:
self.set_group_image(group_id, group_image)
return group_image
|
shuoli84/gevent_socketio2
|
socketio/engine/handler.py
|
Python
|
mit
| 5,977
| 0.002175
|
# coding=utf-8
from __future__ import absolute_import
import gevent
from gevent.pywsgi import WSGIHandler
import sys
from webob import Request
from .response import Response
from .socket import Socket
from ..event_emitter import EventEmitter
from .transports import WebsocketTransport
import logging
logger = logging.getLogger(__name__)
__all__ = ['EngineHandler']
class EngineHandler(WSGIHandler, EventEmitter):
"""
The WSGIHandler for EngineServer
It filters out the requests it is interested in and processes them, leaving all other requests to the plain WSGIHandler
"""
transports = ('polling', 'websocket')
def __init__(self, server_context, *args, **kwargs):
super(EngineHandler, self).__init__(*args, **kwargs)
EventEmitter.__init__(self)
self.server_context = server_context
if self.server_context.transports:
self.transports = self.server_context.transports
def bind_framework_info(self, socket):
# Run framework's wsgi application to hook up framework specific info eg. request
# This is why we define a /socket.io url in web frameworks and point it to a view
logger.debug("[EngineHandler] Bind the framework specific info to engine socket")
self.environ['engine_socket'] = socket
try:
def start_response(status, headers):
logger.debug("[EngineHandler] [%s] [%s]" % (status, headers))
res = self.application(self.environ, start_response)
logger.debug("[EngineHandler] %s" % res)
except Exception, e:
logger.debug("[EngineHandler] bind framework info met exception %s" % e)
self.handle_error(*sys.exc_info())
def handle_one_response(self):
"""
There are 3 situations in which we get a new request:
1. Handshake.
2. Upgrade.
3. Polling Request.
After the transport has been upgraded, all data transfer is handled by the WebsocketTransport
"""
path = self.environ.get('PATH_INFO')
if not path.lstrip('/').startswith(self.server_context.resource + '/'):
return super(EngineHandler, self).handle_one_response()
# Create a request and a response
request = Request(self.get_environ())
setattr(request, 'handler', self)
setattr(request, 'response', Response())
logger.debug("[EngineHandler] Incoming request with %s" % request.GET)
# Upgrade the websocket if needed
is_websocket = False
if request.GET.get("transport", None) == "websocket":
if 'Upgrade' in request.headers:
logger.debug("[EngineHandler] It is a websocket upgrade request")
# This is the ws upgrade request, here we handles the upgrade
ws_handler = self.server_context.ws_handler_class(self.socket, self.client_address, self.server)
ws_handler.__dict__.update(self.__dict__)
ws_handler.prevent_wsgi_call = True
ws_handler.handle_one_response()
# At this point we should have a websocket connection
setattr(request, 'websocket', ws_handler.websocket)
is_websocket = True
else:
logger.warning("[EngineHandler] Client fired a websocket but the 'Upgrade' Header loose somewhere, maybe your proxy")
return
sid = request.GET.get("sid", None)
b64 = request.GET.get("b64", False)
socket = self.server_context.engine_sockets.get(sid, None)
# FIXME CHECK WHETHER WE NEED THIS?
if socket and not is_websocket:
# We spawn a new gevent here, let socket do its own business.
# In current event loop, we will wait on request.response, which is set in socket.set_request
logger.debug("[EngineHandler] Found existing socket")
self.bind_framework_info(socket)
gevent.spawn(socket.process_request, request)
else:
if socket is None:
logger.debug("[EngineHandler] No existing socket, handshake")
socket = self._do_handshake(b64=b64, request=request)
if not is_websocket:
logger.debug("[EngineHandler] The incoming request not websocket, bind framework info")
self.bind_framework_info(socket)
if is_websocket and socket.transport.name != 'websocket':
logger.debug("[EngineHandler] websocket, proceed as upgrade")
# Here we have a upgrade
ws_transport = WebsocketTransport(self, {})
ws_transport.process_request(request)
socket.maybe_upgrade(ws_transport)
# wait till the response ends
logger.debug("[EngineHandler] Waiting for the response signal")
request.response.join()
# The response object can be used as a wsgi application which will send out the buffer
self.application = request.response
# Call super's handle_one_response() to do timing, logging etc.
super(EngineHandler, self).handle_one_response()
self.emit('cleanup')
def _do_handshake(self, b64, request):
"""
handshake with client to build a socket
:param b64:
:param request:
:return:
"""
transport_name = request.GET.get('transport', None)
if transport_name not in self.transports:
raise ValueError("transport name [%s] not supported" % transport_name)
socket = Socket(request, supports_binary=not bool(b64))
self.server_context.engine_sockets[socket.id] = socket
def remove_socket(*args, **kwargs):
self.server_context.engine_sockets.pop(socket.id)
socket.on('close', remove_socket)
request.response.headers['Set-Cookie'] = 'io=%s' % socket.id
socket.open()
self.emit('connection', socket)
return socket
|
yosshy/nova
|
nova/tests/unit/virt/libvirt/test_driver.py
|
Python
|
apache-2.0
| 667,651
| 0.000443
|
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import datetime
import errno
import glob
import os
import random
import re
import shutil
import signal
import threading
import time
import uuid
import eventlet
from eventlet import greenthread
import fixtures
from lxml import etree
import mock
from mox3 import mox
from os_brick.initiator import connector
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import encodeutils
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from six.moves import builtins
from six.moves import range
from nova.api.metadata import base as instance_metadata
from nova.compute import arch
from nova.compute import cpumodel
from nova.compute import manager
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova.objects import fields
from nova.pci import manager as pci_manager
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_pci_device
from nova.tests.unit.objects import test_vcpu_model
from nova.tests.unit.virt.libvirt import fake_imagebackend
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt.storage import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import volume as volume_drivers
libvirt_driver.libvirt = fakelibvirt
host.libvirt = fakelibvirt
libvirt_guest.libvirt = fakelibvirt
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('instances_path', 'nova.compute.manager')
_fake_network_info = fake_network.fake_get_instance_nw_info
_fake_NodeDevXml = \
{"pci_0000_04_00_3": """
<device>
<name>pci_0000_04_00_3</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igb</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>0</slot>
<function>3</function>
<product id='0x1521'>I350 Gigabit Network Connection</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='virt_functions'>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
</capability>
</capability>
</device>""",
"pci_0000_04_10_7": """
<device>
<name>pci_0000_04_10_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>16</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>""",
"pci_0000_04_11_7": """
<device>
<name>pci_0000_04_11_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>17</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<numa node='0'/>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>"""}
_fake_cpu_info = {
"arch": "test_arch",
"model": "test_model",
"vendor": "test_vendor",
"topology": {
"sockets": 1,
"cores": 8,
"threads": 16
},
"features": ["feature1", "feature2"]
}
def _concurrency(signal, wait, done, target, is_block_dev=False):
signal.send()
wait.wait()
done.send()
class FakeVirDomainSnapshot(object):
def __init__(self, dom=None):
self.dom = dom
def delete(self, flags):
pass
class FakeVirtDomain(object):
def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None):
if uuidstr is None:
uuidstr = str(uuid.uuid4())
self.uuidstr = uuidstr
self.id = id
self.domname = name
self._info = [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi,
None, None]
if fake_xml:
self._fake_dom_xml = fake_xml
else:
self._fake_dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
</devices>
</domain>
"""
def name(self):
if self.domname is None:
return "fake-domain %s" % self
else:
return self.domname
def ID(self):
return self.id
def info(self):
return self._info
def create(self):
pass
def managedSave(self, *args):
pass
def createWithFlags(self, launch_flags):
pass
def XMLDesc(self, flags):
return self._fake_dom_xml
def UUIDString(self):
return self.uuidstr
def attachDeviceFlags(self, xml, flags):
pass
def attachDevice(self, xml):
pass
def detachDeviceFlags(self, xml, flags):
pass
def snapshotCreateXML(self, xml, flags):
pass
|
cloudera/hue
|
desktop/core/ext-py/prometheus_client-0.7.1/prometheus_client/registry.py
|
Python
|
apache-2.0
| 4,226
| 0.000237
|
import copy
from threading import Lock
from .metrics_core import Metric
class CollectorRegistry(object):
"""Metric collector registry.
Collectors must have a no-argument method 'collect' that returns a list of
Metric objects. The returned metrics should be consistent with the Prometheus
exposition formats.
"""
def __init__(self, auto_describe=False):
self._collector_to_names = {}
self._names_to_collectors = {}
self._auto_describe = auto_describe
self._lock = Lock()
def register(self, collector):
"""Add a collector to the registry."""
with self._lock:
names = self._get_names(collector)
duplicates = set(self._names_to_collectors).intersection(names)
if duplicates:
raise ValueError(
'Duplicated timeseries in CollectorRegistry: {0}'.format(
duplicates))
for name in names:
self._names_to_collectors[name] = collector
self._collector_to_names[collector] = names
def unregister(self, collector):
"""Remove a collector from the registry."""
with self._lock:
for name in self._collector_to_names[collector]:
del self._names_to_collectors[name]
del self._collector_to_names[collector]
def _get_names(self, collector):
"""Get names of timeseries the collector produces."""
desc_func = None
# If there's a describe function, use it.
try:
desc_func = collector.describe
except AttributeError:
pass
# Otherwise, if auto describe is enabled use the collect function.
if not desc_func and self._auto_describe:
desc_func = collector.collect
if not desc_func:
return []
result = []
type_suffixes = {
'counter': ['_total', '_created'],
'summary': ['', '_sum', '_count', '_created'],
'histogram': ['_bucket', '_sum', '_count', '_created'],
'gaugehistogram': ['_bucket', '_gsum', '_gcount'],
'info': ['_info'],
}
for metric in desc_func():
for suffix in type_suffixes.get(metric.type, ['']):
result.append(metric.name + suffix)
return result
def collect(self):
"""Yields metrics from the collectors in the registry."""
collectors = None
with self._lock:
collectors = copy.copy(self._collector_to_names)
for collector in collectors:
for metric in collector.collect():
yield metric
def restricted_registry(self, names):
"""Returns object that only collects some metrics.
Returns an object which upon collect() will return
only samples with the given names.
Intended usage is:
generate_latest(REGISTRY.restricted_registry(['a_timeseries']))
Experimental."""
names = set(names)
collectors = set()
with self._lock:
for name in names:
if name in self._names_to_collectors:
collectors.add(self._names_to_collectors[name])
metrics = []
for collector in collectors:
for metric in collector.collect():
samples = [s for s in metric.samples if s[0] in names]
if samples:
m = Metric(metric.name, metric.documentation, metric.type)
m.samples = samples
metrics.append(m)
class RestrictedRegistry(object):
def collect(self):
return metrics
return RestrictedRegistry()
def get_sample_value(self, name, labels=None):
"""Returns the sample value, or None if not found.
This is inefficient, and intended only for use in unittests.
"""
if labels is None:
labels = {}
for metric in self.collect():
for s in metric.samples:
if s.name == name and s.labels == labels:
return s.value
return None
REGISTRY = CollectorRegistry(auto_describe=True)
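# Editorial sketch (not part of the original file): registering a metric and
# reading a sample back through get_sample_value(), using the public
# prometheus_client API.
#
#     from prometheus_client import Counter
#     registry = CollectorRegistry()
#     c = Counter('requests_total', 'Total requests', registry=registry)
#     c.inc()
#     registry.get_sample_value('requests_total')   # -> 1.0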
|
RathinakumarVisweswaran/MeetCI
|
python/meetCI_sub.py
|
Python
|
mit
| 14,507
| 0.004067
|
#!/usr/bin/env python
#
# Generated Sat Jul 18 17:11:29 2015 by generateDS.py version 2.16a.
#
# Command line options:
# ('-o', 'meetCI.py')
# ('-s', 'meetCI_sub.py')
#
# Command line arguments:
# meetCI.xsd
#
# Command line:
# generateDS.py -o "meetCI.py" -s "meetCI_sub.py" meetCI.xsd
#
# Current working directory (os.getcwd()):
# generateDS-2.16a0
#
import sys
from lxml import etree as etree_
import ??? as supermod
def parsexml_(infile, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
parser = etree_.ETCompatXMLParser()
doc = etree_.parse(infile, parser=parser, **kwargs)
return doc
#
# Globals
#
ExternalEncoding = 'ascii'
#
# Data representation classes
#
class MeetCISub(supermod.MeetCI):
def __init__(self, MachineLearning=None, ExpertSystem=None):
super(MeetCISub, self).__init__(MachineLearning, ExpertSystem, )
supermod.MeetCI.subclass = MeetCISub
# end class MeetCISub
class MachineLearningSub(supermod.MachineLearning):
def __init__(self, classification=None, prediction=None):
super(MachineLearningSub, self).__init__(classification, prediction, )
supermod.MachineLearning.subclass = MachineLearningSub
# end class MachineLearningSub
class MultiLayerPerceptronSub(supermod.MultiLayerPerceptron):
def __init__(self, inputLayerActivation='Linear', hiddenLayerActivation=None, hiddenLayers=None, outputLayerActivation=None, momentum=None, epochs=None, learningRate=None):
super(MultiLayerPerceptronSub, self).__init__(inputLayerActivation, hiddenLayerActivation, hiddenLayers, outputLayerActivation, momentum, epochs, learningRate, )
supermod.MultiLayerPerceptron.subclass = MultiLayerPerceptronSub
# end class MultiLayerPerceptronSub
class RadialBasisFunctionNetworkSub(supermod.RadialBasisFunctionNetwork):
def __init__(self, hiddenNeurons=None, outputLayerActivation=None, momentum=None, epochs=None, learningRate=None):
super(RadialBasisFunctionNetworkSub, self).__init__(hiddenNeurons, outputLayerActivation, momentum, epochs, learningRate, )
supermod.RadialBasisFunctionNetwork.subclass = RadialBasisFunctionNetworkSub
# end class RadialBasisFunctionNetworkSub
class RecurrentNeuralNetworkSub(supermod.RecurrentNeuralNetwork):
def __init__(self, RNN_Type=None, hiddenLayerActivation=None, hiddenNeurons=None, outputLayerActivation=None, epochs=None, momentum=None, learningRate=None):
super(RecurrentNeuralNetworkSub, self).__init__(RNN_Type, hiddenLayerActivation, hiddenNeurons, outputLayerActivation, epochs, momentum, learningRate, )
supermod.RecurrentNeuralNetwork.subclass = RecurrentNeuralNetworkSub
# end class RecurrentNeuralNetworkSub
class RandomForestSub(supermod.RandomForest):
def __init__(self, nTrees=None, maxDepth=None, maxLeafNodes=None, minSamplesSplit=None, minSamplesLeaf=None, minFractionLeaf=None):
super(RandomForestSub, self).__init__(nTrees, maxDepth, maxLeafNodes, minSamplesSplit, minSamplesLeaf, minFractionLeaf, )
supermod.RandomForest.subclass = RandomForestSub
# end class RandomForestSub
class SupportVectorMachineSub(supermod.SupportVectorMachine):
def __init__(self, kernel=None, degree=None, gamma=None, coef=None, tol=None, maxIter=None):
super(SupportVectorMachineSub, self).__init__(kernel, degree, gamma, coef, tol, maxIter, )
supermod.SupportVectorMachine.subclass = SupportVectorMachineSub
# end class SupportVectorMachineSub
class classificationSub(supermod.classification):
def __init__(self, datafile=None, input=None, output=None, classes=None, split=None, delimiter=None, algorithm=None):
super(classificationSub, self).__init__(datafile, input, output, classes, split, delimiter, algorithm, )
supermod.classification.subclass = classificationSub
# end class classificationSub
class predictionSub(supermod.prediction):
def __init__(self, datafile=None, input=None, output=None, classes=None, split=None, delimiter=None, algorithm=None):
super(predictionSub, self).__init__(datafile, input, output, classes, split, delimiter, algorithm, )
supermod.prediction.subclass = predictionSub
# end class predictionSub
class clauseTypeSub(supermod.clauseType):
def __init__(self, extensiontype_=None):
super(clauseTypeSub, self).__init__(extensiontype_, )
supermod.clauseType.subclass = clauseTypeSub
# end class clauseTypeSub
class greaterThanTypeSub(supermod.greaterThanType):
def __init__(self, value2=None, value1=None):
super(greaterThanTypeSub, self).__init__(value2, value1, )
supermod.greaterThanType.subclass = greaterThanTypeSub
# end class greaterThanTypeSub
class greaterThanOrEqualTypeSub(supermod.greaterThanOrEqualType):
def __init__(self, value2=None, value1=None):
super(greaterThanOrEqualTypeSub, self).__init__(value2, value1, )
supermod.greaterThanOrEqualType.subclass = greaterThanOrEqualTypeSub
# end class greaterThanOrEqualTypeSub
class lessThanTypeSub(supermod.lessThanType):
def __init__(self, value2=None, value1=None):
super(lessThanTypeSub, self).__init__(value2, value1, )
supermod.lessThanType.subclass = lessThanTypeSub
# end class lessThanTypeSub
class lessThanOrEqualTypeSub(supermod.lessThanOrEqualType):
def __init__(self, value2=None, value1=None):
super(lessThanOrEqualTypeSub, self).__init__(value2, value1, )
supermod.lessThanOrEqualType.subclass = lessThanOrEqualTypeSub
# end class lessThanOrEqualTypeSub
class equalTypeSub(supermod.equalType):
def __init__(self, value2=None, value1=None):
super(equalTypeSub, self).__init__(value2, value1, )
supermod.equalType.subclass = equalTypeSub
# end class equalTypeSub
class notEqualTypeSub(supermod.notEqualType):
def __init__(self, value2=None, value1=None):
super(notEqualTypeSub, self).__init__(value2, value1, )
supermod.notEqualType.subclass = notEqualTypeSub
# end class notEqualTypeSub
class betweenTypeSub(supermod.betweenType):
def __init__(self, max=None, value=None, min=None):
super(betweenTypeSub, self).__init__(max, value, min, )
supermod.betweenType.subclass = betweenTypeSub
# end class betweenTypeSub
class notBetweenTypeSub(supermod.notBetweenType):
def __init__(self, max=None, value=None, min=None):
super(notBetweenTypeSub, self).__init__(max, value, min, )
supermod.notBetweenType.subclass = notBetweenTypeSub
# end class notBetweenTypeSub
class orTypeSub(supermod.orType):
def __init__(self, clause=None):
super(orTypeSub, self).__init__(clause, )
supermod.orType.subclass = orTypeSub
# end class orTypeSub
class andTypeSub(supermod.andType):
def __init__(self, clause=None):
super(andTypeSub, self).__init__(clause, )
supermod.andType.subclass = andTypeSub
# end class andTypeSub
class factTypeSub(supermod.factType):
def __init__(self, name=None, extensiontype_=None):
super(factTypeSub, self).__init__(name, extensiontype_, )
supermod.factType.subclass = factTypeSub
# end class factTypeSub
class predicateTypeSub(supermod.predicateType):
def __init__(self, name=None, value=None):
super(predicateTypeSub, self).__init__(name, value, )
supermod.predicateType.subclass = predicateTypeSub
# end class predicateTypeSub
class structTypeSub(supermod.structType):
def __init__(self, name=None, comment=None, field=None):
super(structTypeSub, self).__init__(name, comment, field, )
supermod.structType.subclass = structTypeSub
# end class structTypeSub
class instanceTypeSub(supermod.instanceType):
def __init__(self, name=None, type_=None, comment=None, field=None):
super(instanceTypeSub, self).__init__(name, type_, comment, field, )
supermod.instanceType.subclass = instanceTypeSub
# end class instanceTypeSub
class actionTypeSub(supermod.actionType):
def __init__(self, extensiontype_=None):
super(actionTypeSub, self).__init__(extensiontype_, )
supermod.actionType.subclass = actionTypeSub
# end class actionTypeSub
class setTypeSub(supermod.setType):
def __init__(self, name=None, value=None):
super(setTypeSub, self).__init__(name, value, )
supermod.setType.subclass = setTypeSub
# end class setTypeSub
|
iocoop/beancount
|
src/python/beancount/reports/holdings_reports.py
|
Python
|
gpl-2.0
| 13,561
| 0.00118
|
"""Generate reports no holdings.
"""
__author__ = "Martin Blais <blais@furius.ca>"
import csv
from beancount.core.number import D
from beancount.core.number import ZERO
from beancount.core import account
from beancount.core import data
from beancount.core import flags
from beancount.parser import options
from beancount.parser import printer
from beancount.ops import prices
from beancount.ops import holdings
from beancount.ops import summarize
from beancount.reports import table
from beancount.reports import report
def get_assets_holdings(entries, options_map, currency=None):
"""Return holdings for all assets and liabilities.
Args:
entries: A list of directives.
options_map: A dict of parsed options.
currency: If specified, a string, the target currency to convert all
holding values to.
Returns:
A list of Holding instances and a price-map.
"""
# Compute a price map, to perform conversions.
price_map = prices.build_price_map(entries)
# Get the list of holdings.
account_types = options.get_account_types(options_map)
holdings_list = holdings.get_final_holdings(entries,
(account_types.assets,
account_types.liabilities),
price_map)
# Convert holdings to a unified currency.
if currency:
holdings_list = holdings.convert_to_currency(price_map, currency, holdings_list)
return holdings_list, price_map
# A field spec that renders all fields.
FIELD_SPEC = [
('account', ),
('number', "Units", '{:,.2f}'.format),
('currency', ),
('cost_currency', ),
('cost_number', 'Average Cost', '{:,.2f}'.format),
('price_number', 'Price', '{:,.2f}'.format),
('book_value', 'Book Value', '{:,.2f}'.format),
('market_value', 'Market Value', '{:,.2f}'.format),
]
# A field spec for relative reports. Skipping the book value here because by
# combining it with market value % and price one could theoretically determine
# the total value of the portfolio.
RELATIVE_FIELD_SPEC = [
field_desc
for field_desc in FIELD_SPEC
if field_desc[0] not in ('account', 'number', 'book_value', 'market_value')
] + [
('market_value', 'Frac Folio', '{:,.2%}'.format),
]
def get_holdings_entries(entries, options_map):
"""Summarizes the entries to list of entries representing the final holdings..
This list includes the latest prices entries as well. This can be used to
load a full snapshot of holdings without including the entire history. This
is a way of summarizing a balance sheet in a way that filters away history.
Args:
entries: A list of directives.
options_map: A dict of parsed options.
Returns:
A string, the entries to print out.
"""
# The entries will be created at the latest date, against an equity account.
latest_date = entries[-1].date
_, equity_account, _ = options.get_previous_accounts(options_map)
# Get all the assets.
holdings_list, _ = get_assets_holdings(entries, options_map)
# Create synthetic entries for them.
holdings_entries = []
for index, holding in enumerate(holdings_list):
meta = data.new_metadata('report_holdings_print', index)
entry = data.Transaction(meta, latest_date, flags.FLAG_SUMMARIZE,
None, "", None, None, [])
# Convert the holding to a position.
position_ = holdings.holding_to_position(holding)
entry.postings.append(
data.Posting(holding.account, position_, None, None, None))
entry.postings.append(
data.Posting(equity_account, -position_.cost(), None, None, None))
holdings_entries.append(entry)
# Get opening directives for all the accounts.
used_accounts = {holding.account for holding in holdings_list}
open_entries = summarize.get_open_entries(entries, latest_date)
used_open_entries = [open_entry
for open_entry in open_entries
if open_entry.account in used_accounts]
# Add an entry for the equity account we're using.
meta = data.new_metadata('report_holdings_print', -1)
used_open_entries.insert(0, data.Open(meta, latest_date, equity_account,
None, None))
# Get the latest price entries.
price_entries = prices.get_last_price_entries(entries, None)
return used_open_entries + holdings_entries + price_entries
def report_holdings(currency, relative, entries, options_map,
aggregation_key=None,
sort_key=None):
"""Generate a detailed list of all holdings.
Args:
currency: A string, a currency to convert to. If left to None, no
conversion is carried out.
relative: A boolean, true if we should reduce this to a relative value.
entries: A list of directives.
options_map: A dict of parsed options.
aggregation_key: A callable use to generate aggregations.
sort_key: A function to use to sort the holdings, if specified.
Returns:
A Table instance.
"""
holdings_list, _ = get_assets_holdings(entries, options_map, currency)
if aggregation_key:
holdings_list = holdings.aggregate_holdings_by(holdings_list, aggregation_key)
if relative:
holdings_list = holdings.reduce_relative(holdings_list)
field_spec = RELATIVE_FIELD_SPEC
else:
field_spec = FIELD_SPEC
if sort_key:
holdings_list.sort(key=sort_key, reverse=True)
return table.create_table(holdings_list, field_spec)
def load_from_csv(fileobj):
"""Load a list of holdings from a CSV file.
Args:
fileobj: A file object.
Yields:
Instances of Holding, as read from the file.
"""
column_spec = [
('Account', 'account', None),
('Units', 'number', D),
('Currency', 'currency', None),
('Cost Currency', 'cost_currency', None),
('Average Cost', 'cost_number', D),
('Price', 'price_number', D),
('Book Value', 'book_value', D),
('Market Value', 'market_value', D),
('Price Date', 'price_date', None),
]
column_dict = {name: (attr, converter)
for name, attr, converter in column_spec}
klass = holdings.Holding
# Create a set of default values for the namedtuple.
defaults_dict = {attr: None for attr in klass._fields}
# Start reading the file.
reader = csv.reader(fileobj)
# Check that the header is readable.
header = next(reader)
attr_converters = []
for header_name in header:
try:
attr_converter = column_dict[header_name]
attr_converters.append(attr_converter)
except KeyError:
raise IOError("Invalid file contents for holdings")
for line in reader:
value_dict = defaults_dict.copy()
for (attr, converter), value in zip(attr_converters, line):
if converter:
value = converter(value)
value_dict[attr] = value
yield holdings.Holding(**value_dict)
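# Editorial sketch (not part of the original file): round-tripping a minimal
# CSV through load_from_csv(); only headers named in column_spec are accepted.
#
#     import io
#     text = "Account,Units,Currency\nAssets:Cash,100.00,USD\n"
#     for h in load_from_csv(io.StringIO(text)):
#         print(h.account, h.number, h.currency)   # Assets:Cash 100.00 USD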
class HoldingsReport(report.TableReport):
"""The full list of holdings for Asset and Liabilities accounts."""
names = ['holdings']
aggregations = {
'commodity': dict(aggregation_key=lambda holding: holding.currency),
'account': dict(aggregation_key=lambda holding: holding.account),
'root-account': dict(
aggregation_key=lambda holding: account.root(3, holding.account),
sort_key=lambda holding: holding.market_value or ZERO),
'currency': dict(aggregation_key=lambda holding: holding.cost_currency),
}
def __init__(self, *rest, **kwds):
super().__init__(*rest, **kwds)
if self.args.relative and not self.args.currency:
self.parser.error("--relative needs to have --currency set")
@classmethod
def add_args(cls, parser):
parser.add_argument('-c', '--currency',
action='store', default=None
|
Jayin/Lotus
|
Lotus/controller/user.py
|
Python
|
apache-2.0
| 2,550
| 0.001994
|
# -*- coding: utf-8 -*-
from . import app, db
from flask import request, g, session, redirect
from Lotus.model.user import User
from hashlib import md5
from Lotus.lib.msg_code import Msg
import json
@app.route('/user/login', methods=['POST'])
def user_login():
email = request.form.get('email', None)
psw = request.form.get('psw', None)
if email is not None and psw is not None:
# Hash the password the same way user_register() stores it, and use
# .first() - a Query object is always truthy, so the original check could
# never detect a missing user. Also return a response on success, since a
# Flask view must not return None.
m = md5()
m.update(psw)
user = User.query.filter_by(email=email, psw=m.hexdigest()).first()
if user:
g.user = user
session['userid'] = user.userid
return '{{"code": {}, "msg": "{}"}}'.format(Msg['success'], 'login success')
else:
return '{{"code": {}, "msg": "{}"}}'.format(Msg['faild'], 'user not exist')
else:
return '{"code":%d,"msg":$s}'.format(Msg['faild'], 'params not enougth')
@app.route('/user/register', methods=['POST'])
def user_register():
# todo: what if an insert raises an exception (e.g. missing parameters)?
# todo: forgot-password flow...
try:
u = User()
u.username = request.form.get('username', None)
u.description = request.form.get('description', None)
u.type = request.form.get('type', User.CONST_TYPE_USER)
u.email = request.form.get('email', None)
m = md5()
m.update(request.form.get('psw', User.CONST_DEFAULT_PASSWORD))  # default password
u.psw = m.hexdigest()
db.session.add(u)
db.session.commit()
except Exception as e:
return '{"code":%d,"msg":$s}'.format(Msg['faild'], 'register faild')
return '{"code":%d,"msg":$s}'.format(Msg['success'], 'register success')
@app.route('/user/<int:userid>/avatar', methods=['GET', 'POST'])
def user_avatar(userid):
#upload
#TODO support uploading avatars
if request.method == 'POST':
pass
else:
pass
@app.route('/user/<int:userid>/profile', methods=['GET'])
def user_profile(userid):
if session.get('userid'):
# g is request-scoped in Flask, so reload the user from the session id
# rather than relying on g.user having been set by an earlier request.
user = User.query.get(session['userid'])
result = {
'userid': user.userid,
'username': user.username,
'avatar': user.avatar,
'description': user.description,
'type': user.type,
'email': user.email
return json.dumps(result)
else:
return redirect('/user/login')
@app.route('/user/<int:userid>/issue/sends/page/<int:page>', methods=['GET'])
def user_issues_send(userid, page):
pass
@app.route('/user/<int:userid>/issue/favours/page/<int:page>', methods=['GET'])
def user_issues_favour(userid, page):
pass
@app.route('/user/<int:userid>/issue/favours/page/<int:page>', methods=['GET'])
def user_messages(userid, page):
pass
|
Zolertia/openthread
|
tests/scripts/thread-cert/test_coap.py
|
Python
|
bsd-3-clause
| 16,471
| 0.002064
|
#!/usr/bin/python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import io
import random
import string
import unittest
import coap
import config
def any_delta():
return random.getrandbits(4)
def any_coap_option_type():
return random.getrandbits(4)
def any_value():
return random.getrandbits(8)
def any_4bits_value_different_than_13_and_14():
value = None
while value is None:
value = random.getrandbits(4)
if value == 13 or value == 14:
value = None
return value
def any_4bits_value_lower_or_equal_than_12():
value = None
while value is None:
value = random.getrandbits(4)
if value > 12:
value = None
return value
def any_bytearray(length):
return bytearray([random.getrandbits(8) for _ in range(length)])
def any_version():
return random.getrandbits(2)
def any_type():
return random.getrandbits(2)
def any_code():
return random.getrandbits(8)
def any_message_id():
return random.getrandbits(16)
def any_token():
length = random.randint(0, 8)
return bytearray([random.getrandbits(8) for _ in range(length)])
def any_options():
return []
def any_payload(length=None):
length = length if length is not None else random.randint(0, 64)
return bytearray([random.getrandbits(8) for _ in range(length)])
def any_uri_path():
return "/" + random.choice(string.ascii_lowercase)
class TestCoapMessageOptionHeader(unittest.TestCase):
def test_should_return_passed_on_value_when_read_extended_value_is_called_with_value_different_than_13_and_14(self):
# GIVEN
value = any_4bits_value_different_than_13_and_14()
# WHEN
actual_value = coap.CoapOptionHeader._read_extended_value(None, value)
# THEN
self.assertEqual(value, actual_value)
def test_should_return_value_stored_in_first_byte_plus_13_when_read_extended_value_is_called_with_value_equal_13(self):
# GIVEN
value = 13
        extended_value = any_value()
data = io.BytesIO(bytearray([extended_value]))
# WHEN
actual_value = coap.CoapOptionHeader._read_extended_value(data, value)
# THEN
self.assertEqual(extended_value + 13, actual_value)
    def test_should_return_value_stored_in_first_byte_plus_269_when_read_extended_value_is_called_with_value_equal_14(self):
# GIVEN
value = 14
extended_value = any_value()
data = io.BytesIO(bytearray([any_value(), extended_value]))
# WHEN
actual_value = coap.CoapOptionHeader._read_extended_value(data, value)
# THEN
self.assertEqual(extended_value + 269, actual_value)
def test_should_create_CoapOptionHeader_when_from_bytes_classmethod_is_called(self):
# GIVEN
delta = any_4bits_value_different_than_13_and_14()
length = any_4bits_value_different_than_13_and_14()
data = bytearray([delta << 4 | length])
# WHEN
option_header = coap.CoapOptionHeader.from_bytes(io.BytesIO(data))
# THEN
self.assertEqual(delta, option_header.delta)
self.assertEqual(length, option_header.length)
def test_should_return_True_when_is_payload_marker_property_called_with_delta_and_length_equal_15(self):
# GIVEN
delta = 15
length = 15
data = bytearray([delta << 4 | length])
# WHEN
option_header = coap.CoapOptionHeader.from_bytes(io.BytesIO(data))
# THEN
self.assertTrue(option_header.is_payload_marker)
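# Worked example of the RFC 7252 extended-value rule the tests above exercise:
# a 4-bit field of 0..12 stands for itself; 13 means "read one extra byte and
# add 13" (so raw byte 0x07 decodes to 20); 14 means "read two extra bytes and
# add 269"; and 15 in both nibbles is the 0xFF payload marker.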
class TestCoapOption(unittest.TestCase):
def test_should_return_type_value_when_type_property_is_called(self):
# GIVEN
_type = any_coap_option_type()
coap_opt = coap.CoapOption(_type, any_value())
# WHEN
actual_type = coap_opt.type
# THEN
self.assertEqual(_type, actual_type)
def test_should_return_value_value_when_value_property_is_called(self):
# GIVEN
value = any_value()
coap_opt = coap.CoapOption(any_coap_option_type(), value)
# WHEN
actual_value = coap_opt.value
# THEN
self.assertEqual(value, actual_value)
class TestCoapOptionsFactory(unittest.TestCase):
def test_should_create_list_of_CoapOption_from_bytearray_when_parse_method_is_called(self):
# GIVEN
delta = any_4bits_value_lower_or_equal_than_12()
length = any_4bits_value_lower_or_equal_than_12()
value = any_bytearray(length)
data = bytearray([delta << 4 | length]) + value
factory = coap.CoapOptionsFactory()
# WHEN
coap_options = factory.parse(io.BytesIO(data), None)
# THEN
self.assertEqual(1, len(coap_options))
self.assertEqual(delta, coap_options[0].type)
self.assertEqual(value, coap_options[0].value)
class TestCoapCode(unittest.TestCase):
def test_should_return_code_value_when_code_property_is_called(self):
# GIVEN
code = any_code()
code_obj = coap.CoapCode(code)
# WHEN
actual_code = code_obj.code
# THEN
self.assertEqual(code, actual_code)
def test_should_return_class_value_when_class_property_is_called(self):
# GIVEN
code = any_code()
code_obj = coap.CoapCode(code)
# WHEN
actual_class = code_obj._class
# THEN
self.assertEqual((code >> 5) & 0x7, actual_class)
def test_should_return_detail_value_when_detail_property_is_called(self):
# GIVEN
code = any_code()
code_obj = coap.CoapCode(code)
# WHEN
actual_detail = code_obj.detail
# THEN
self.assertEqual(code & 0x1f, actual_detail)
def test_should_return_dotted_value_when_dotted_property_is_called(self):
# GIVEN
code = any_code()
code_obj = coap.CoapCode(code)
# WHEN
actual_dotted = code_obj.dotted
# THEN
_class, detail = actual_dotted.split(".")
self.assertEqual(code, (int(_class) << 5) | int(detail))
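    # Worked example: code 69 (0b010_00101) splits into class 2 and detail 5,
    # written "2.05" in the usual CoAP dotted notation (a Content response).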
def test_should_create_CoapCode_when_from_class_and_detail_classmethod_is_called(self):
# GIVEN
code = any_code()
_class = (code >> 5) & 0x7
detail = code & 0x1f
# WHEN
actual_coap_obj = coap.CoapCode.from_class_and_detail(_class, detail)
# THEN
self.assertEqual(code, actual_coap_obj.code)
def test_should_create_CoapCode_when_from_dotted_string_classmethod_is_called(self):
# GIVEN
code = any_code()
        code_obj = coap.CoapCode(code)
|
Telestream/telestream-cloud-python-sdk
|
telestream_cloud_qc_sdk/telestream_cloud_qc/models/partition_status_test.py
|
Python
|
mit
| 8,207
| 0
|
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class PartitionStatusTest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'closed_complete': 'bool',
'open_incomplete': 'bool',
'closed_incomplete': 'bool',
'open_complete': 'bool',
'not_present': 'bool',
'reject_on_error': 'bool',
'checked': 'bool'
}
attribute_map = {
'closed_complete': 'closed_complete',
'open_incomplete': 'open_incomplete',
'closed_incomplete': 'closed_incomplete',
'open_complete': 'open_complete',
'not_present': 'not_present',
'reject_on_error': 'reject_on_error',
'checked': 'checked'
}
def __init__(self, closed_complete=None, open_incomplete=None, closed_incomplete=None, open_complete=None, not_present=None, reject_on_error=None, checked=None, local_vars_configuration=None): # noqa: E501
"""PartitionStatusTest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._closed_complete = None
self._open_incomplete = None
self._closed_incomplete = None
self._open_complete = None
self._not_present = None
self._reject_on_error = None
self._checked = None
self.discriminator = None
if closed_complete is not None:
self.closed_complete = closed_complete
if open_incomplete is not None:
self.open_incomplete = open_incomplete
if closed_incomplete is not None:
self.closed_incomplete = closed_incomplete
if open_complete is not None:
self.open_complete = open_complete
if not_present is not None:
self.not_present = not_present
if reject_on_error is not None:
self.reject_on_error = reject_on_error
if checked is not None:
self.checked = checked
@property
def closed_complete(self):
"""Gets the closed_complete of this PartitionStatusTest. # noqa: E501
:return: The closed_complete of this PartitionStatusTest. # noqa: E501
:rtype: bool
"""
return self._closed_complete
@closed_complete.setter
def closed_complete(self, closed_complete):
"""Sets the closed_complete of this PartitionStatusTest.
:param closed_complete: The closed_complete of this PartitionStatusTest. # noqa: E501
:type: bool
"""
self._closed_complete = closed_complete
@property
def open_incomplete(self):
"""Gets the open_incomplete of this PartitionStatusTest. # noqa: E501
:return: The open_incomplete of this PartitionStatusTest. # noqa: E501
:rtype: bool
"""
return self._open_incomplete
@open_incomplete.setter
def open_incomplete(self, open_incomplete):
"""Sets the open_incomplete of this PartitionStatusTest.
:param open_incomplete: The open_incomplete of this PartitionStatusTest. # noqa: E501
:type: bool
"""
self._open_incomplete = open_incomplete
@property
def closed_incomplete(self):
"""Gets the closed_incomplete of this PartitionStatusTest. # noqa: E501
:return: The closed_incomplete of this PartitionStatusTest. # noqa: E501
:rtype: bool
"""
return self._closed_incomplete
@closed_incomplete.setter
def closed_incomplete(self, closed_incomplete):
"""Sets the closed_incomplete of this PartitionStatusTest.
:param closed_incomplete: The closed_incomplete of this PartitionStatusTest. # noqa: E501
:type: bool
"""
self._closed_incomplete = closed_incomplete
@property
def open_complete(self):
"""Gets the open_complete of this PartitionStatusTest. # noqa: E501
:return: The open_complete of this PartitionStatusTest. # noqa: E501
:rtype: bool
"""
return self._open_complete
@open_complete.setter
def open_complete(self, open_complete):
"""Sets the open_complete of this PartitionStatusTest.
:param open_complete: The open_complete of this PartitionStatusTest. # noqa: E501
:type: bool
"""
self._open_complete = open_complete
@property
def not_present(self):
"""Gets the not_present of this PartitionStatusTest. # noqa: E501
:return: The not_present of this PartitionStatusTest. # noqa: E501
:rtype: bool
"""
return self._not_present
@not_present.setter
def not_present(self, not_present):
"""Sets the not_present of this PartitionStatusTest.
        :param not_present: The not_present of this PartitionStatusTest. # noqa: E501
:type: bool
"""
self._not_present = not_present
@property
def reject_on_error(self):
"""Gets the reject_on_error of this PartitionStatusTest. # noqa: E501
:return: The reject_on_error of this PartitionStatusTest. # noqa: E501
:rtype: bool
"""
return self._reject_on_error
@reject_on_error.setter
    def reject_on_error(self, reject_on_error):
"""Sets the reject_on_error of this PartitionStatusTest.
:param reject_on_error: The reject_on_error of this PartitionStatusTest. # noqa: E501
:type: bool
"""
self._reject_on_error = reject_on_error
@property
def checked(self):
"""Gets the checked of this PartitionStatusTest. # noqa: E501
:return: The checked of this PartitionStatusTest. # noqa: E501
:rtype: bool
"""
return self._checked
@checked.setter
def checked(self, checked):
"""Sets the checked of this PartitionStatusTest.
:param checked: The checked of this PartitionStatusTest. # noqa: E501
:type: bool
"""
self._checked = checked
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PartitionStatusTest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PartitionStatusTest):
return True
        return self.to_dict() != other.to_dict()
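# Illustrative round trip (field values are arbitrary):
#   t = PartitionStatusTest(closed_complete=True, checked=False)
#   t.to_dict()  # -> dict with all seven flags; unset ones are None
#   t == PartitionStatusTest(closed_complete=True, checked=False)  # True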
|
nosedjango/nosedjango
|
nosedjangotests/settings_legacy.py
|
Python
|
lgpl-3.0
| 1,056
| 0
|
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = 'nosedjango'
DATABASE_USER = 'root'
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
MEDIA_ROOT = ''
MEDIA_URL = ''
ADMIN_MEDIA_PREFIX = '/media/'
SECRET_KEY = 'w9*+(qevfn*j2959ikv-_7kj7ivptt#8&n*gy0o&ktisx@%rzt'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'nosedjangotests.urls'
TEMPLATE_DIRS = (
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'nosedjangotests.polls',
)
|
bowen0701/algorithms_data_structures
|
lc0100_same_tree.py
|
Python
|
bsd-2-clause
| 4,044
| 0.001731
|
"""Leetcode 100. Same Tree
Easy
URL: https://leetcode.com/problems/same-tree/
Given two binary trees, write a function to check if they are the same or not.
Two binary trees are considered the same if they are structurally identical and
the nodes have the same value.
Example 1:
Input: 1 1
/ \ / \
2 3 2 3
[1,2,3], [1,2,3]
Output: true
Example 2:
Input: 1 1
/ \
2 2
[1,2], [1,null,2]
Output: false
Example 3:
Input: 1 1
/ \ / \
2 1 1 2
[1,2,1], [1,1,2]
Output: false
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class SolutionPreorderRecur(object):
def isSameTree(self, p, q):
"""
:type p: TreeNode
:type q: TreeNode
:rtype: bool
Apply recursive preorder traversal to check same tree.
Time complexity: O(n).
Space complexity: O(n).
"""
        # Check if both roots don't exist.
if not p and not q:
return True
        # Check if just one of the roots exists.
if not p or not q:
return False
# If both exist, check their values are the same.
if p.val != q.val:
return False
# Recursively check left/right subtrees.
return (self.isSameTree(p.left, q.left) and
self.isSameTree(p.right, q.right))
class SolutionPreorderIter(object):
def isSameTree(self, p, q):
"""
:type p: TreeNode
:type q: TreeNode
:rtype: bool
Apply iterative preorder traversal to check same tree.
Time complexity: O(n).
Space complexity: O(n).
"""
stack = [(p, q)]
while stack:
cur_p, cur_q = stack.pop()
            # If both roots don't exist, continue, since there may be
            # other node pairs left to check.
if not cur_p and not cur_q:
continue
            # Check if just one of the roots exists.
if not cur_p or not cur_q:
return False
# If both exist, check their values are the same.
if cur_p.val != cur_q.val:
return False
# Add root's right and then left to stack, since stack is FILO.
stack.append((cur_p.right, cur_q.right))
stack.append((cur_p.left, cur_q.left))
return True
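# An equivalent breadth-first variant (an illustrative alternative, not one of
# the original solutions): swap the stack for a FIFO queue.
import collections
class SolutionBFS(object):
    def isSameTree(self, p, q):
        queue = collections.deque([(p, q)])
        while queue:
            cur_p, cur_q = queue.popleft()
            # Both missing: this pair matches, keep checking the rest.
            if not cur_p and not cur_q:
                continue
            # Exactly one missing, or values differ: not the same tree.
            if not cur_p or not cur_q:
                return False
            if cur_p.val != cur_q.val:
                return False
            queue.append((cur_p.left, cur_q.left))
            queue.append((cur_p.right, cur_q.right))
        return True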
def main():
# Input: 1 1
# / \ / \
# 2 3 2 3
# [1,2,3], [1,2,3]
# Output: true
p = TreeNode(1)
p.left = TreeNode(2)
p.right = TreeNode(3)
q = TreeNode(1)
q.left = TreeNode(2)
q.right = TreeNode(3)
print SolutionPreorderRecur().isSameTree(p, q)
print SolutionPreorderIter().isSameTree(p, q)
# Input: 1 1
# / \
# 2 2
# [1,2], [1,null,2]
# Output: false
p = TreeNode(1)
p.left = TreeNode(2)
q = TreeNode(1)
q.right = TreeNode(2)
print SolutionPreorderRecur().isSameTree(p, q)
print SolutionPreorderIter().isSameTree(p, q)
# Input: 1 1
# / \ / \
# 2 1 1 2
# [1,2,1], [1,1,2]
# Output: false
p = TreeNode(1)
p.left = TreeNode(2)
p.right = TreeNode(1)
q = TreeNode(1)
q.left = TreeNode(1)
q.right = TreeNode(2)
print SolutionPreorderRecur().isSameTree(p, q)
print SolutionPreorderIter().isSameTree(p, q)
# Input: [10,5,15], [10,5,null,null,15]
p = TreeNode(10)
p.left = TreeNode(5)
    p.right = TreeNode(15)
q = TreeNode(10)
q.left = TreeNode(5)
q.left.right = TreeNode(15)
print SolutionPreorderRecur().isSameTree(p, q)
print SolutionPreorderIter().isSameTree(p, q)
if __name__ == '__main__':
main()
|
WuShichao/computational-physics
|
2/2_5/2_5.py
|
Python
|
gpl-3.0
| 1,353
| 0.026667
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 17:55:48 2016
Euler's method for the speed of a bicycle accelerating from rest
@author: nightwing
"""
import matplotlib.pyplot as plt
DENSITY = 1.29  # air density (kg/m3)
C = 1.0         # drag coefficient
A = 0.33        # frontal area (m2)
M = 70.0        # mass of rider plus bicycle (kg)
v = 7.0         # crossover speed (m/s)
v1 = 0.0        # speed without air resistance (m/s)
v2 = 0.0        # speed with air resistance (m/s)
P = 400.0       # power (W)
t = 0           # initial time
t_max = 200     # end time (s)
dt = 0.1        # time step
time = []       # stores the time values
velocity1 = []  # stores the speeds without air resistance
velocity2 = []  # stores the speeds with air resistance
#--- Euler's method for the bicycle's speed ---
while t <= t_max:
velocity1.append(v1)
velocity2.append(v2)
time.append(t)
    if v1 <= v:
        v1 += P/(M*v)*dt
    if v2 <= v:
v2 += P/(M*v)*dt-C*DENSITY*A*v2**2/(2*M)*dt
if v1 > v:
v1 += P/(M*v1)*dt
if v2 > v:
v2 += P/(M*v2)*dt-C*DENSITY*A*v2**2/(2*M)*dt
t += dt
#------------ plotting ---------------
plt.title("Bicycling simulation: velocity vs. time")
plt.xlabel("time (s)")
plt.ylabel("velocity (m/s)")
plt.plot(time,velocity1,"k-",label="No air resistance")
plt.plot(time,velocity2,"k--",label="With air resistance")
plt.legend(loc=2)
plt.show()
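# For reference, the update rule above is forward Euler applied to
#   dv/dt = P/(M*v)                            (no air resistance)
#   dv/dt = P/(M*v) - C*DENSITY*A*v**2/(2*M)   (with air resistance)
# where, below the crossover speed, the constant v replaces the instantaneous
# speed in the denominator to avoid the singularity at v = 0.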
|
jbedorf/tensorflow
|
tensorflow/python/ops/distributions/util.py
|
Python
|
apache-2.0
| 54,998
| 0.005364
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import hashlib
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util import tf_inspect
def assert_integer_form(
x, data=None, summarize=None, message=None,
int_dtype=None, name="assert_integer_form"):
"""Assert that x has integer components (or floats equal to integers).
Args:
x: Floating-point `Tensor`
data: The tensors to print out if the condition is `False`. Defaults to
error message and first few entries of `x` and `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
int_dtype: A `tf.dtype` used to cast the float to. The default (`None`)
      implies the smallest possible signed int will be used for casting.
name: A name for this operation (optional).
Returns:
Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.
"""
with ops.name_scope(name, values=[x, data]):
x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_integer:
      return control_flow_ops.no_op()
message = message or "{} has non-integer components".format(x)
if int_dtype is None:
try:
int_dtype = {
dtypes.float16: dtypes.int16,
dtypes.float32: dtypes.int32,
dtypes.float64: dtypes.int64,
}[x.dtype.base_dtype]
except KeyError:
raise TypeError("Unrecognized type {}".format(x.dtype.name))
return check_ops.assert_equal(
x, math_ops.cast(math_ops.cast(x, int_dtype), x.dtype),
data=data, summarize=summarize, message=message, name=name)
def assert_symmetric(matrix):
matrix_t = array_ops.matrix_transpose(matrix)
return control_flow_ops.with_dependencies(
[check_ops.assert_equal(matrix, matrix_t)], matrix)
def embed_check_nonnegative_integer_form(
x, name="embed_check_nonnegative_integer_form"):
"""Assert x is a non-negative tensor, and optionally of integers."""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
assertions = [
check_ops.assert_non_negative(
x, message="'{}' must be non-negative.".format(x)),
]
if not x.dtype.is_integer:
assertions += [
assert_integer_form(
x, message="'{}' cannot contain fractional components.".format(
x)),
]
return control_flow_ops.with_dependencies(assertions, x)
def same_dynamic_shape(a, b):
"""Returns whether a and b have the same dynamic shape.
Args:
a: `Tensor`
b: `Tensor`
Returns:
`bool` `Tensor` representing if both tensors have the same shape.
"""
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
# Here we can't just do math_ops.equal(a.shape, b.shape), since
# static shape inference may break the equality comparison between
# shape(a) and shape(b) in math_ops.equal.
def all_shapes_equal():
return math_ops.reduce_all(math_ops.equal(
array_ops.concat([array_ops.shape(a), array_ops.shape(b)], 0),
array_ops.concat([array_ops.shape(b), array_ops.shape(a)], 0)))
# One of the shapes isn't fully defined, so we need to use the dynamic
# shape.
return control_flow_ops.cond(
math_ops.equal(array_ops.rank(a), array_ops.rank(b)),
all_shapes_equal,
lambda: constant_op.constant(False))
def maybe_get_static_value(x, dtype=None):
"""Helper which tries to return a static value.
  Given `x`, extract its value statically, optionally casting to a specific
dtype. If this is not possible, None is returned.
Args:
x: `Tensor` for which to extract a value statically.
dtype: Optional dtype to cast to.
Returns:
Statically inferred value if possible, otherwise None.
"""
if x is None:
return x
try:
# This returns an np.ndarray.
x_ = tensor_util.constant_value(x)
except TypeError:
x_ = x
if x_ is None or dtype is None:
return x_
return np.array(x_, dtype)
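# For intuition, the conversion implemented by `get_logits_and_probs` below is,
# in the binary case, probs = sigmoid(logits) = 1 / (1 + exp(-logits)) and,
# inversely, logits = log(probs / (1 - probs)); softmax replaces sigmoid in the
# multidimensional case. E.g. logits = 0. gives probs = 0.5, and probs of 0.
# or 1. map back to -inf and +inf respectively.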
def get_logits_and_probs(logits=None,
probs=None,
multidimensional=False,
validate_args=False,
name="get_logits_and_probs",
dtype=None):
"""Converts logit to probabilities (or vice-versa), and returns both.
Args:
logits: Floating-point `Tensor` representing log-odds.
probs: Floating-point `Tensor` representing probabilities.
multidimensional: Python `bool`, default `False`.
If `True`, represents whether the last dimension of `logits` or `probs`,
a `[N1, N2, ... k]` dimensional tensor, representing the
logit or probability of `shape[-1]` classes.
validate_args: Python `bool`, default `False`. When `True`, either assert
`0 <= probs <= 1` (if not `multidimensional`) or that the last dimension
of `probs` sums to one.
name: A name for this operation (optional).
dtype: `tf.DType` to prefer when converting args to `Tensor`s.
Returns:
logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or
`1`, then the corresponding entry in the returned logit will be `-Inf` and
`Inf` respectively.
Raises:
ValueError: if neither `probs` nor `logits` were passed in, or both were.
"""
with ops.name_scope(name, values=[probs, logits]):
if (probs is None) == (logits is None):
raise ValueError("Must pass probs or logits, but not both.")
if probs is None:
logits = ops.convert_to_tensor(logits, name="logits", dtype=dtype)
if not logits.dtype.is_floating:
raise TypeError("logits must having floating type.")
# We can early return since we constructed probs and therefore know
# they're valid.
if multidimensional:
if validate_args:
logits = embed_check_categorical_event_shape(logits)
return logits, nn.softmax(logits, name="probs")
return logits, math_ops.sigmoid(logits, name="probs")
probs = ops.convert_to_tensor(probs, name="probs", dtype=dtype)
if not probs.dtype.is_floating:
raise TypeError("probs must having floating type.")
if validate_args:
with ops.name_scope("validate_probs"):
one = constant_op.constant(1., probs.dtype)
dependencies = [check_ops.assert_non_negative(probs)]
if multidimensional:
probs = embed_check_categorical_event_shape(probs)
dependencies += [
check_ops.assert_near(
math_ops.reduce_sum(probs, -1),
one,
message="probs does not sum to 1.")
]
else:
dependencies += [check_ops.assert_less_equal(
probs, one, message="probs has components greater than 1.")]
probs = control_flow_ops.with_dependencies(dependencies, probs)
with ops.name_scope("
|
taejoonlab/taejoonlab-toolbox
|
KEGG/make-pathway2list.py
|
Python
|
gpl-3.0
| 2,013
| 0.027322
|
#!/usr/bin/env python
import os
import sys
## The name of a directory containing 'path:...' files
## You can download them using 'make-wget_pathway.sh' script
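## Each 'path:...' file is a KEGG flat file whose first 11 columns carry the
## record tag, e.g. (an illustrative, abridged excerpt):
##   ENTRY       hsa00010                    Pathway
##   NAME        Glycolysis / Gluconeogenesis - Homo sapiens (human)
##   GENE        3101  HK3; hexokinase 3
##   COMPOUND    C00022  Pyruvate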
dir_name = sys.argv[1]
f_summary = open('%s.summary'%dir_name,'w')
f_genes = open('%s.genes'%dir_name,'w')
f_compounds = open('%s.compounds'%dir_name,'w')
gene_total = []
for filename in os.listdir( dir_name ):
if( not filename.startswith('path:') ):
continue
#sys.stderr.write('Read %s ... '%filename)
path_id = ''
path_name = ''
gene_list = []
comp_list = []
prev_tag = ''
f = open(os.path.join(dir_name,filename),'r')
for line in f:
tmp_tag = line[:11].strip()
if( tmp_tag == 'ENTRY' ):
path_id = line.strip().split()[1]
if( tmp_tag == 'NAME' ):
path_name = line[11:].split(' - ')[0].strip()
if( tmp_tag == 'COMPOUND' ):
comp_list.append( line[11:].strip().split()[0] )
f_compounds.write('path:%s\t%s\n'%(path_id,line[11:].strip()))
elif( tmp_tag == '' and prev_tag == 'COMPOUND' ):
            comp_list.append( line[11:].strip().split()[0] )
f_compounds.write('path:%s\t%s\n'%(path_id,line[11:].strip()))
elif( tmp_tag == 'GENE' ):
gene_list.append( line[11:].strip().split()[0] )
f_genes.write('path:%s\t%s\n'%(path_id,line[11:].strip()))
            #print line[11:].strip()
elif( tmp_tag == '' and prev_tag == 'GENE' ):
gene_list.append( line[11:].strip().split()[0] )
f_genes.write('path:%s\t%s\n'%(path_id,line[11:].strip()))
#print line[11:].strip()
if( tmp_tag != '' ):
prev_tag = tmp_tag
f.close()
if( len(gene_list) == 0 ):
sys.stderr.write('//SKIP// %s(%d) %s\n'%(path_id, len(gene_list), path_name))
continue
f_summary.write('path:%s\t%s\t%d\t%d\n'%(path_id, path_name, len(gene_list), len(comp_list)))
f_summary.close()
f_genes.close()
f_compounds.close()
|
SymbiFlow/prjuray
|
fuzzers/007-timing/bel/tim2json.py
|
Python
|
isc
| 10,507
| 0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import re
import argparse
import json
import functools
from utils import parse_raw_timing, merged_dict
# =============================================================================
NUMBER_RE = re.compile(r'\d+$')
def get_sdf_type(type):
"""
Returns a SDF timing type for the given type plus information whether it
is sequential or not. Returns None, None if the type is unknown
"""
# Known keywords and their SDF counterparts
seq_keywords = {
"Setup": "setup",
"Hold": "hold",
"Recov": "recovery",
"Remov": "removal",
}
comb_keywords = {
"Prop": "iopath",
}
# Sequential
if type in seq_keywords:
return seq_keywords[type], True
# Combinational
if type in comb_keywords:
return comb_keywords[type], False
# Unknown
return None, None
def parse_logical_names(phy_type, log_names, cell_pins):
"""
Parses logical cell names. Extracts the cell name, input pin name and
    output pin name. Uses dumped library cell definitions to achieve that since
a logical name string uses "_" as a separator and "_" can also occur in
cell/pin name.
Returns a list of tuples with (cell_name, src_pin, dst_pin)
"""
log_cells = []
# Process logical names that should correspond to bel timings
for log_name in log_names.split(","):
# Since both cell and pin names may also contain "_" the
# logical name is split iteratively.
# The timing type is the first, it has to equal the timing
# type of the speed model
if not log_name.startswith(phy_type):
continue
log_name = log_name[len(phy_type) + 1:]
# Find the cell name in the library and strip it
for c in cell_pins:
if log_name.startswith(c):
log_cell = c
log_name = log_name[len(c) + 1:]
break
else:
continue
log_pins = cell_pins[log_cell]
# Find the input pin in the library and strip it
for p in log_pins:
if log_name.startswith(p):
log_src = p
log_name = log_name[len(p) + 1:]
break
else:
continue
# Find the output pin in the library and strip it
for p in log_pins:
if log_name == p:
log_dst = p
break
else:
continue
# Append
log_cells.append((
log_cell,
log_src,
log_dst,
))
return log_cells
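# For example (hypothetical names): with phy_type = "Prop" and a library entry
# cell_pins = {"FDRE": ["C", "Q"]}, the logical name "Prop_FDRE_C_Q" is
# stripped left to right into ("FDRE", "C", "Q"): first the timing type, then
# the cell, then the input and output pins.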
def read_raw_timings(fin, cell_pins):
"""
    Reads and parses raw timings, converts them into data used for SDF
generation.
"""
REGEX_CFG = re.compile(r".*__CFG([0-9]+)$")
def inner():
raw = list(parse_raw_timing(fin))
for slice, site_name, bel_name, speed_model, properties in raw:
# Check if we have a bel timing
# TODO: There are other naming conventions for eg. BRAM and DSP
if not speed_model.startswith("bel_"):
continue
# Get timings from properties
timings = [(k, properties[k]) for k in [
"DELAY",
"FAST_MAX",
"FAST_MIN",
"SLOW_MAX",
"SLOW_MIN",
]]
# Get edge from the model name
if "RISING" in speed_model:
edge = "rising"
elif "FALLING" in speed_model:
edge = "falling"
else:
# Supposedly means either edge
edge = None
# Get configuration. Look for "__CFG<n>"
# FIXME: How to correlate that with a configuration name ?
match = REGEX_CFG.match(speed_model)
if match is not None:
cfg = match.group(1)
else:
cfg = None
# Process physical names for the timing model. These should
# correspond to site timings
phy_names = properties["NAME"].split(",")
for phy_name in phy_names:
                # Extract data from the name. Each name field should have the
                # format: "<type>_<bel>_<site>_<src_pin>_<dst_pin>". The split
                # has to be done in a complex way as the bel name may have "_"
# within.
phy_type, phy_name = phy_name.split("_", maxsplit=1)
if 'URAM288' in phy_name:
uram_info = speed_model.split("__")
site = 'URAM288'
bel = uram_info[1]
phy_src = uram_info[2]
phy_dst = uram_info[3]
else:
phy_name, phy_src, phy_dst = phy_name.rsplit(
"_", maxsplit=2)
bel_site = phy_name.rsplit("_", maxsplit=1)
if len(bel_site) == 2:
bel, site = bel_site
else:
continue
sdf_type, is_seq = get_sdf_type(phy_type)
if sdf_type is None:
continue
# Process logical names that should correspond to bel timings
log_cells = parse_logical_names(
phy_type, properties["NAME_LOGICAL"], cell_pins)
# If we have log cells then yield them
for log_cell, log_src, log_dst in log_cells:
# Format cell type
cell_type = log_cell
if edge is not None:
cell_type += "_{}_{}".format(log_src, edge)
# Format cell location
location = "{}/{}".format(site, bel)
# Yield stuff
key = (site_name, location, cell_type, speed_model)
yield (*key, "type"), cell_type.upper()
yield (*key, "location"), location.upper()
yield (*key, "model"), speed_model
if is_seq:
yield (*key, "clock"), log_src.upper()
yield (*key, "input"), log_dst.upper()
else:
yield (*key, "input"), log_src.upper()
yield (*key, "output"), log_dst.upper()
if is_seq:
yield (*key, "sequential"), sdf_type
for t, v in timings:
yield (*key, t), v
# We don't have any logical cells, stick to the bel
#
# TODO: This can be modified so we always dump timing for the
# bel regardless of if we can decode logical cells. This way
# we may have SDFs with both bels and logical cells.
if not len(log_cells):
# Format cell type
cell_type = bel
if cfg is not None:
cell_type += "_CFG{}".format(cfg)
if edge is not None:
cell_type += "_{}_{}".format(phy_src, edge)
# Format cell location
location = "{}/{}".format(site, bel)
# Yield stuff
key = (site_name, location, cell_type, speed_model)
yield (*key, "type"), cell_type.upper()
|
dke-knu/i2am
|
i2am-app/AlgorithmSelectionEngine/SamplingAccuracyEvaluation/SamplingAccuracyEvaluation.py
|
Python
|
apache-2.0
| 3,665
| 0.006276
|
from SamplingAccuracyEvaluation import SamplingAlgorithm as SA
from SamplingAccuracyEvaluation import AccuracyEvaluation as AE
from SamplingAccuracyEvaluation import PrintGraph as PG
from SamplingAccuracyEvaluation import StatisticalCalculation as SC
import operator
def populationListGenerate(filePath, target):
print('Generate Population List')
    populationList = []
populationFile = open(filePath, 'r')
while True:
line = populationFile.readline()
if not line: break
line_data = line.split(',')
populationList.append(line_data[target])
populationFile.close()
return populationList
def calculateScore(evalList):
score = 0
for i in range(len(evalList)):
if i == 0:
            score = score + abs(evalList[i])/4
else:
score = score + abs(evalList[i])/3
return score
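# Example: for evalList = [0.4, 0.3, 0.3] (hypothetical accuracy deltas) the
# score is 0.4/4 + 0.3/3 + 0.3/3 = 0.3; the first measure is down-weighted
# relative to the others, and the algorithm with the lowest score is selected.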
def run(windowSize, sampleSize, filePath, target=0):
print('############## Sampling Accuracy Evaluation ##############')
count = 1
numOfTrials = 1
jSDPieceCount = 20
pAAPieceCount = 20
print('Window Size: ' ,windowSize)
print('Sample Size: ' ,sampleSize)
print('JSD Piece Count: ' ,jSDPieceCount)
print('PAA Piece Count: ' ,pAAPieceCount)
populationList = populationListGenerate(filePath, target)
windowList = []
accuracyMeasureCount = 3
evalDic = {}
reservoirEvalList = [0.0 for _ in range(accuracyMeasureCount)]
hashEvalList = [0.0 for _ in range(accuracyMeasureCount)]
priorityEvalList = [0.0 for _ in range(accuracyMeasureCount)]
print()
for data in populationList:
windowList.append(data)
if count == windowSize:
print('################## ' + str(numOfTrials) + ' Evaluation Start ####################')
# if numOfTrials == 1: PG.printGraph(windowList, 'Population', numOfTrials)
print()
print(str(numOfTrials)+'_ReservoirSampling')
sampleList = SA.sortedReservoirSam(sampleSize, windowList)
tempEvalList = AE.run(windowList, sampleList, jSDPieceCount, pAAPieceCount)
SC.sumPerIndex(reservoirEvalList, tempEvalList)
# if numOfTrials == 1: PG.printGraph(sampleList, 'Reservoir', numOfTrials)
print()
print(str(numOfTrials)+'_HashSampling')
sampleList = SA.hashSam(sampleSize, windowList)
tempEvalList = AE.run(windowList, sampleList, jSDPieceCount, pAAPieceCount)
SC.sumPerIndex(hashEvalList, tempEvalList)
# if numOfTrials == 1: PG.printGraph(sampleList, 'Hash', numOfTrials)
print()
print(str(numOfTrials)+'_PrioritySampling')
sampleList = SA.sortedPrioritySam(sampleSize, windowList)
tempEvalList = AE.run(windowList, sampleList, jSDPieceCount, pAAPieceCount)
SC.sumPerIndex(priorityEvalList, tempEvalList)
# if numOfTrials == 1: PG.printGraph(sampleList, 'Priority', numOfTrials)
print()
numOfTrials = numOfTrials + 1
count = 0
windowList = []
count = count + 1
    numOfTrials = max(numOfTrials - 1, 1)  # the counter ends one past the last completed trial
    for i in range(accuracyMeasureCount):
        reservoirEvalList[i] = reservoirEvalList[i] / numOfTrials
        hashEvalList[i] = hashEvalList[i] / numOfTrials
        priorityEvalList[i] = priorityEvalList[i] / numOfTrials
evalDic['RESERVOIR_SAMPLING'] = calculateScore(reservoirEvalList)
evalDic['HASH_SAMPLING'] = calculateScore(hashEvalList)
evalDic['PRIORITY_SAMPLING'] = calculateScore(priorityEvalList)
sortedEvalList = sorted(evalDic.items(), key = operator.itemgetter(1))
return sortedEvalList[0][0]
|
XXLRay/libreshot
|
libreshot/blender/scripts/blinds.py
|
Python
|
gpl-3.0
| 5,718
| 0.019237
|
# OpenShot Video Editor is a program that creates, modifies, and edits video files.
# Copyright (C) 2009 Jonathan Thomas
#
# This file is part of OpenShot Video Editor (http://launchpad.net/openshot/).
#
# OpenShot Video Editor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenShot Video Editor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenShot Video Editor. If not, see <http://www.gnu.org/licenses/>.
# Import Blender's python API. This only works when the script is being
# run from the context of Blender. Blender contains its own version of Python
# with this library pre-installed.
import bpy
# Load a font
def load_font(font_path):
""" Load a new TTF font into Blender, and return the font object """
# get the original list of fonts (before we add a new one)
original_fonts = bpy.data.fonts.keys()
# load new font
bpy.ops.font.open(filepath=font_path)
# get the new list of fonts (after we added a new one)
for font_name in bpy.data.fonts.keys():
if font_name not in original_fonts:
return bpy.data.fonts[font_name]
# no new font was added
return None
# Debug Info:
# ./blender -b test.blend -P demo.py
# -b = background mode
# -P = run a Python script within the context of the project file
# Init all of the variables needed by this script. Because Blender executes
# this script, OpenShot will inject a dictionary of the required parameters
# before this script is executed.
params = {
'title' : 'Oh Yeah! OpenShot!',
'extrude' : 0.1,
'bevel_depth' : 0.02,
'spacemode' : 'CENTER',
'text_size' : 1.5,
'width' : 1.0,
'fontname' : 'Bfont',
'color' : [0.8,0.8,0.8],
'alpha' : 1.0,
'alpha_mode' : 'TRANSPARENT',
'output_path' : '/tmp/',
'fps' : 24,
'quality' : 90,
'file_format' : 'PNG',
'color_mode' : 'RGBA',
'horizon_color' : [0.57, 0.57, 0.57],
'resolution_x' : 1920,
'resolution_y' : 1080,
'resolution_percentage' : 100,
'start_frame' : 20,
'end_frame' : 25,
'animation' : True,
}
#INJECT_PARAMS_HERE
# The remainder of this script will modify the current Blender .blend project
# file, and adjust the settings. The .blend file is specified in the XML file
# that defines this template in OpenShot.
#----------------------------------------------------------------------------
# Modify Text / Curve settings
#print (bpy.data.curves.keys())
text_object = bpy.data.curves["Title"]
text_object.extrude = params["extrude"]
text_object.bevel_depth = params["bevel_depth"]
text_object.body = params["title"]
text_object.align = params["spacemode"]
text_object.size = params["text_size"]
text_object.space_character = params["width"]
# Get font object
font = None
if params["fontname"] != "Bfont":
# Add font so it's available to Blender
font = load_font(params["fontname"])
else:
# Get default font
font = bpy.data.fonts["Bfont"]
text_object.font = font
text_object = bpy.data.curves["Subtitle"]
text_object.extrude = params["extrude"]
text_object.bevel_depth = params["bevel_depth"]
text_object.body = params["sub_title"]
text_object.align = params["spacemode"]
text_object.size = params["text_size"]
text_object.space_character = params["width"]
# set the font
text_object.font = font
# Change the material settings (color, alpha, etc...)
material_object = bpy.data.materials["Text"]
material_object.diffuse_color = params["diffuse_color"]
material_object.specular_color = params["specular_color"]
material_object.specular_intensity = params["specular_intensity"]
material_object.alpha = params["alpha"]
# Set the render options. It is important that these are set
# to the same values as the current OpenShot project. These
# params are automatically set by OpenShot
bpy.context.scene.render.filepath = params["output_path"]
bpy.context.scene.render.fps = params["fps"]
#bpy.context.scene.render.quality = params["quality"]
try:
bpy.context.scene.render.file_format = params["file_format"]
bpy.context.scene.render.color_mode = params["color_mode"]
except:
bpy.context.scene.render.image_settings.file_format = params["file_format"]
bpy.context.scene.render.image_settings.color_mode = params["color_mode"]
try:
bpy.context.scene.render.alpha_mode = params["alpha_mode"]
except:
pass
bpy.data.worlds[0].horizon_color = params["horizon_color"]
bpy.context.scene.render.resolution_x = params["resolution_x"]
bpy.context.scene.render.resolution_y = params["resolution_y"]
bpy.context.scene.render.resolution_percentage = params["resolution_percentage"]
bpy.context.scene.frame_start = params["start_frame"]
bpy.context.scene.frame_end = params["end_frame"]
# Animation Speed (use Blender's time remapping to slow or speed up animation)
animation_speed = int(params["animation_speed"]) # time remapping multiplier
new_length = int(params["end_frame"]) * animation_speed # new length (in frames)
bpy.context.scene.frame_end = new_length
bpy.context.scene.render.frame_map_old = 1
bpy.context.scene.render.frame_map_new = animation_speed
if params["start_frame"] == params["end_frame"]:
bpy.context.scene.frame_start = params["end_frame"]
bpy.context.scene.frame_end = params["end_frame"]
# Render the current animation to the params["output_path"] folder
bpy.ops.render.render(animation=params["animation"])
|
nanchenchen/script-analysis
|
pyanalysis/settings/common.py
|
Python
|
mit
| 12,572
| 0.00517
|
"""
Django settings for pyanalysis project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import sys
import os
from path import path
import dj_database_url
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
def get_env_setting(setting, default=None):
""" Get the environment setting or return exception """
if default is not None:
return os.environ.get(setting, default)
else:
try:
return os.environ[setting]
except KeyError:
error_msg = "Set the %s env variable" % setting
raise ImproperlyConfigured(error_msg)
########## PATH CONFIGURATION
# Absolute filesystem path to the django site folder
SITE_ROOT = path(__file__).abspath().realpath().dirname().parent
# Absolute path to the top-level project folder
PROJECT_ROOT = SITE_ROOT.parent
# Site name:
SITE_NAME = SITE_ROOT.basename()
# Id for the Sites framework
SITE_ID = 1
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = bool(get_env_setting('DEBUG', False))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# Is this a development instance? Set this to True on development/master
# instances and False on stage/prod.
DEV = False
DEBUG_DB = bool(get_env_setting('DEBUG_DB', False))
########## END DEBUG CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(default='sqlite:///%s' % (PROJECT_ROOT / 'development.sqlite'))
}
# enable utf8mb4 on mysql
if DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
# enable utf8mb4 on mysql
DATABASES['default']['OPTIONS'] = {
'charset': 'utf8mb4',
'init_command': 'SET storage_engine=INNODB',
}
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
if get_env_setting('MEMCACHED_LOCATION', '') != '':
CACHES['default'] = {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': get_env_setting('MEMCACHED_LOCATION'),
'PREFIX': SITE_NAME + ':',
}
########## END CACHE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-LOGIN_REDIRECT_URL
LOGIN_REDIRECT_URL = "/"
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = (SITE_ROOT / 'media').normpath()
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = get_env_setting('STATIC_ROOT', (PROJECT_ROOT / 'assets').normpath())
if not isinstance(STATIC_ROOT, path):
STATIC_ROOT = path(STATIC_ROOT)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
(SITE_ROOT / 'static').normpath(),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
########## END STATIC FILE CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
(SITE_ROOT / 'fixtures').normpath(),
)
########## END FIXTURE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = get_env_setting('SECRET_KEY', 'secret')
########## END SECRET CONFIGURATION
########## TEST CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
########## END TEST CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.core.context_processors.i18n',
'django.core.context_processors.static',
'django.core.context_processors.csrf',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'pyanalysis.apps.base.context_processors.google_analytics',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
(SITE_ROOT / 'templates').normpath(),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Utilities
'django_extensions',
'widget_tweaks',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'pyanalysis.apps.base',
'pyanalysis.apps.api',
'pyanalysis.apps.corpus',
'pyanalysis.apps.importer',
'pyanalysis.apps.enhance',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGS_ROOT = get_env_setting('LOGS_ROOT', PROJECT_ROOT / 'logs')
if not isinstance(LOGS_ROOT, path):
LOGS_ROOT = path(LOGS_ROOT)
if not LOGS_ROOT.exists():
LOGS_ROOT.mkdir()
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'fi
|
SMALLplayer/smallplayer-image-creator
|
storage/.xbmc/addons/script.artwork.downloader/lib/common.py
|
Python
|
gpl-2.0
| 1,305
| 0.008429
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2013 Martijn Kaijser
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import xbmc
import xbmcaddon
### get addon info
__addon__ = xbmcaddon.Addon(id='script.artwork.downloader')
__addonid__ = __addon__.getAddonInfo('id')
__addonname__ = __addon__.getAddonInfo('name')
__author__ = __addon__.getAddonInfo('author')
__version__ = __addon__.getAddonInfo('version')
__addonpath__ = __addon__.getAddonInfo('path')
__addonprofile__= xbmc.translatePath(__addon__.getAddonInfo('profile')).decode('utf-8')
__icon__ = __addon__.getAddonInfo('icon')
__localize__ = __addon__.getLocalizedString
|
ddico/odoo
|
addons/note/controllers/__init__.py
|
Python
|
agpl-3.0
| 118
| 0.008475
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import note
|
kingosticks/mopidy-tunein
|
tests/test_extension.py
|
Python
|
apache-2.0
| 483
| 0
|
import unittest
from mopidy_tunein import Extension
class ExtensionTest(unittest.TestCase):
    def test_get_default_config(self):
ext = Extension()
config = ext.get_default_config()
self.assertIn("[tunein]", config)
self.assertIn("enabled = true", config)
def test_get_config_schema(self):
ext = Extension()
schema = ext.get_config_schema()
self.assertIn("timeout", schem
|
a)
self.assertIn("filter", schema)
|
leanton/cianParser
|
appartment.py
|
Python
|
mit
| 5,447
| 0.035596
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
COST_AUTO = 5
class Appartment(object):
"""Appartment class consists of features that have all appartments"""
def __init__(self, address, metro, transportation, rooms, space, price, floor, addInfo):
super(Appartment, self).__init__()
self.address = self.setAddress(address)
self.metro = self.setMetro(metro)
self.transportation = self.setTransportation(transportation)
self.rooms = self.setRooms(rooms)
self.space = self.setSpace(space)
self.price = self.setPrice(price)
self.floor = self.setFloor(floor)
self.addInfo = self.setAddInfo(addInfo)
# Getter methods
def getAddress(self):
return self.address
def getMetro(self):
return self.metro
def getTransportation(self):
return self.transportation
def getRooms(self):
return self.rooms
def getSpace(self):
return self.space
def getPrice(self):
return self.price
def getFloor(self):
return self.floor
def getAddInfo(self):
return self.addInfo
# Setter methods
def setAddress(self, address):
if (type(address) is str) or (type(address) is unicode):
self.address = address
else:
print "Can't set proper address, type is not string"
self.address = None
return self.address
def setMetro(self, metro):
if (type(metro) is str) or (type(metro) is unicode):
self.metro = metro
else:
print "Can't set metro, type is not string"
self.metro = None
return self.metro
def setTransportation(self, transportation):
if type(transportation) is dict:
self.transportation = transportation
elif (type(transportation) is str) or (type(transportation) is unicode):
time = re.search(u'\d+', transportation)
auto = re.search(u'авто', transportation)
foot = re.search(u'пешком', transportation)
if time and auto:
time = int(time.group())
d = {}
d['auto'] = time
self.transportation = d
elif time and foot:
time = int(time.group())
d = {}
d['foot'] = time
self.transportation = d
else:
self.transportation = None
return self.transportation
def setRooms(self, rooms):
if type(rooms) is int:
self.rooms = rooms
elif (type(rooms) is str) or (type(rooms) is unicode):
room = re.search(u'\d', rooms)
if room:
room = int(room.group())
self.rooms = room
else:
print "error, no match"
self.rooms = None
else:
print "type error, current type is " + type(rooms)
self.rooms = None
return self.rooms
def setSpace(self, space):
if type(space) is dict:
self.space = space
elif type(space) is list:
d = {}
for typo in space:
if re.search(u'кухня', typo):
area = re.search(u'\d+', typo)
area = int(area.group())
d['kitchen'] = area
elif re.search(u'жилая', typo):
area = re.search(u'\d+', typo)
area = int(area.group())
d['dwelling'] = area
elif re.search(u'общая', typo):
area = re.search(u'\d+', typo)
area = int(area.group())
d['full'] = area
elif typo == "NULL":
pass
else:
print "Error, no matching typo's. Current typo is " + typo
self.space = d
else:
print "Error with setting space"
self.space = None
return self.space
def setPrice(self, price):
        if (type(price) is int) or (type(price) is float):
            print "type is " + str(type(price))
self.price = int(price)
elif (type(price) is str) or (type(price) is unicode):
price = price.replace(u',', '')
price = re.search(u'^\d+', price)
if price:
self.price = int(price.group())
else:
print "No match of price in string"
self.price = None
else:
print "Type error, current type is " + str(type(price))
self.price = None
return self.price
def setFloor(self, floor):
if type(floor) is tuple:
self.floor = floor
elif (type(floor) is str) or (type(floor) is unicode):
floor = floor.split("/")
if len(floor) == 2:
floor = (int(floor[0]), int(floor[1]))
self.floor = floor
else:
print "length of floor array is not 2, len = " + len(floor)
self.floor = None
else:
print "Type error, current type is " + type(floor)
self.floor = None
return self.floor
def setAddInfo(self, addInfo):
if type(addInfo) is list:
self.addInfo = addInfo
elif (type(addInfo) is str) or (type(addInfo) is unicode):
addInfo = addInfo.split('|')
self.addInfo = addInfo
else:
print "Type error, current type is " + type(addInfo)
self.addInfo = None
return self.addInfo
# Helper methods to preprocess data
def preprocessData1(self):
line = []
address = self.address
if address:
line.append(address)
metro = self.metro
if metro:
line.append(metro)
transportation = self.transportation
if transportation:
if 'auto' in transportation:
line.append(str(COST_AUTO*transportation['auto']))
elif 'foot' in transportation:
line.append(str(transportation['foot']))
else:
print "no line about transportation"
rooms = self.rooms
if rooms:
line.append(str(rooms))
space = self.space
if space:
if 'kitchen' in space:
line.append(str(space['kitchen']))
if 'dwelling' in space:
line.append(str(space['dwelling']))
if 'full' in space:
line.append(str(space['full']))
price = self.price
if price:
line.append(str(price))
floor = self.floor
if floor and floor[1]!=0:
num = round(float(floor[0])/float(floor[1]), 2)
line.append(str(num))
return line
|
XeryusTC/projman
|
functional_tests/management/commands/create_project.py
|
Python
|
mit
| 728
| 0.001374
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from projects.models import Project
User = get_user_model()
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('user')
        parser.add_argument('name', nargs='+')
parser.add_argument('--description', nargs='*', default='')
def handle(self, *args, **options):
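        # nargs='+' hands the name over as a list of words; rejoin it below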
name = ' '.join(options['name'])
description = ' '.join(options['description'])
        u = User.objects.get(username=options['user'])
p = Project.objects.create(user=u, name=name, description=description)
p.save()
self.stdout.write(str(p.pk))
|
pugpe/pugpe
|
apps/cert/management/commands/send_certificates.py
|
Python
|
mit
| 2,215
| 0.000904
|
# -*- coding: utf-8 -*-
import traceback
from datetime import timedelta
from django.core import mail
from django.core.mail import EmailMultiAlternatives, mail_admins
from django.core.management.base import BaseCommand
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.contrib.sites.models import Site
from django.conf import settings
from django.utils import translation
from django.utils import timezone
from cert.models import Attendee
class Command(BaseCommand):
help = u'Send certificate e-mails'
def get_email(self, attendee):
translation.activate(settings.LANGUAGE_CODE)
subject = _(u'Certificado de participação | PUG-PE')
from_email = settings.DEFAULT_FROM_EMAIL
ctx = {
'site': Site.objects.get_current().domain,
'event': attendee.event,
'attendee': attendee,
}
text_content = render_to_string('cert/cert_email.txt', ctx)
html_content = render_to_string('cert/cert_email.html', ctx)
msg = EmailMultiAlternatives(
subject, text_content, from_email, [attendee.email],
)
msg.attach_alternative(html_content, "text/html")
return msg
def handle(self, *args, **options):
connection = mail.get_connection()
num_emails = 0
        attendees = Attendee.objects.filter(sent_date__isnull=True)
        # Avoid sending for very old events
attendees = attendees.filter(
pub_date__gte=timezone.now() - timedelta(days=10),
)
for attendee in attendees:
msg = self.get_email(attendee)
try:
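                # send_messages() returns how many messages went out over
                # this connection, so successes accumulate in num_emails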
num_emails += connection.send_messages([msg])
except Exception as exc:
subject = _(u'PUG-PE: Problema envio certificado')
                body = 'except: {0}'.format(exc)
body += traceback.format_exc()
mail_admins(subject, body)
else:
attendee.sent_date = timezone.now()
attendee.save()
self.stdout.write(
unicode(_(u'Foram enviados {0} emails\n'.format(num_emails))),
)
|
pouyana/teireader
|
webui/scripts/fixws.py
|
Python
|
mit
| 646
| 0.001548
|
import sys
import glob
def read_fileb(filename, mode='rb'):
f = open(filename, mode)
try:
return f.read()
finally:
f.close()
def write_fileb(filename, value, mode='wb'):
f = open(filename, mode)
try:
f.write(value)
finally:
f.close()
for filename in glob.glob(sys.argv[1]):
    data1 = read_fileb(filename)
write_fileb(filename + '.bak2', data1)
data2lines = read_fileb(filename).strip().split('\n')
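    # normalize whitespace: strip trailing blanks and expand tabs to two spaces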
    data2 = '\n'.join([line.rstrip().replace('\t', ' ' * 2) for line in data2lines]) + '\n'
write_fileb(filename, data2)
print filename, len(data1) - len(data2)
|
keybar/keybar
|
.hypothesis/eval_source/hypothesis_temporary_module_f921b2988402df61d5f91622b67a9e50eddd05fc.py
|
Python
|
bsd-3-clause
| 138
| 0.007246
|
from hypothesis.utils.conventions import not_set
def accept(f):
def complex_numbers():
return f()
    return complex_numbers
|
Barrog/C4-Datapack
|
data/jscript/village_master/7026_bitz_occupation_change/__init__.py
|
Python
|
gpl-2.0
| 2,377
| 0.03618
|
#
# Created by DraX on 2005.08.08
#
# Updated by ElgarL on 28.09.2005
#
print "importing village master data: Talking Island Village ...done"
import sys
from net.sf.l2j.gameserver.model.quest import State
from net.sf.l2j.gameserver.model.quest import QuestState
from net.sf.l2j.gameserver.model.quest.jython import QuestJython as JQuest
GRAND_MASTER_BITZ = 7026
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st):
htmltext = "No Quest"
if event == "7026-01.htm":
            htmltext = event
if event == "7026-02.htm":
htmltext = event
if event == "7026-03.htm":
htmltext = event
if event == "7026-04.htm":
htmltext = event
if event == "7026-05.htm":
htmltext = event
if event == "7026-06.htm":
htmltext = event
if event == "7026-07.htm":
htmltext = event
return htmltext
def onTalk (Self,npc,st):
npcId = npc.getNpcId()
Race = st.getPlayer().getRace()
pcId = st.getPlayer().getClassId().getId()
# Humans got accepted
if npcId == GRAND_MASTER_BITZ and Race in [Race.human]:
#fighter
if pcId == 0x00:
htmltext = "7026-01.htm"
#warrior, knight, rogue
if pcId == 0x01 or pcId == 0x04 or pcId == 0x07:
htmltext = "7026-08.htm"
#warlord, paladin, treasureHunter
if pcId == 0x03 or pcId == 0x05 or pcId == 0x08:
htmltext = "7026-09.htm"
#gladiator, darkAvenger, hawkeye
if pcId == 0x02 or pcId == 0x06 or pcId == 0x09:
htmltext = "7026-09.htm"
        #mage, wizard, cleric
if pcId == 0x0a or pcId == 0x0b or pcId == 0x0f:
htmltext = "7026-10.htm"
#sorceror, necromancer, warlock, bishop, prophet
if pcId == 0x0c or pcId == 0x0d or pcId == 0x0e or pcId == 0x10 or pcId == 0x11:
htmltext = "7026-10.htm"
st.setState(STARTED)
return htmltext
# All other Races must be out
if npcId == GRAND_MASTER_BITZ and Race in [Race.dwarf, Race.darkelf, Race.elf, Race.orc]:
st.setState(COMPLETED)
st.exitQuest(1)
return "7026-10.htm"
QUEST = Quest(7026,"7026_bitz_occupation_change","village_master")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(7026)
STARTED.addTalkId(7026)
|
sproutsocial/botanist
|
packages/github_backup.py
|
Python
|
mit
| 8,574
| 0.003032
|
#!/usr/bin/env python
"""
Backup all your organization's repositories, private or otherwise.
"""
import argparse
import base64
import contextlib
import json
import os
import sys
import urllib2
from collections import namedtuple
from urllib import urlencode
from urllib import quote
API_BASE = 'https://api.github.com/'
REPO_TYPE_CHOICES = ('all', 'public', 'private', 'forks', 'sources', 'member')
@contextlib.contextmanager
def chdir(dirname=None):
curdir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
yield
finally:
os.chdir(curdir)
class Helpers(object):
def __init__(self, args):
self.args = args
def exec_cmd(self, command):
"""
Executes an external command taking into account errors and logging.
"""
print "Executing command: %s" % self.redact(command)
resp = os.system(command)
if resp != 0:
raise Exception(self.redact("Command [%s] failed (%s)" % (command, resp)))
def https_url_with_auth(self, base_url):
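        # rewrite https://host/... as https://user:pass@host/...;
        # quote() keeps odd characters in the credentials URL-safe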
_, suffix = base_url.split('https://')
return 'https://%s:%s@%s' % (quote(self.args.username), quote(self.args.password), suffix)
def redact(self, s):
if hasattr(self.args, 'password'):
s = s.replace(self.args.password, 'REDACTED')
if hasattr(self.args, 'username'):
s = s.replace(self.args.username, 'REDACTED')
return s
Pagination = namedtuple('Pagination', 'first prev next last')
def get_pagination(raw_link_header):
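    # A raw Link header looks roughly like this (illustrative, not captured
    # from a real response):
    #   <https://api.github.com/...&page=2>; rel="next", <...&page=5>; rel="last"
    # Split entries on ',' and each entry on ';' to map rel -> URL.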
link_map = {}
for link, rel in (lh.split(';') for lh in raw_link_header.split(',')):
link_map[rel.split('=')[1].strip('"')] = link.strip(' <>')
return Pagination(*(link_map.get(f) for f in Pagination._fields))
def add_https_basic_auth(request, username, password):
base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
def get_repos(org, repo_type, access_token=None, username=None, password=None, per_page=25):
"""
Paginates through all of the repositories using github's Link header.
https://developer.github.com/v3/#link-header
"""
url = API_BASE + 'orgs/%s/repos?' % org
qs_params = {'type': repo_type, 'per_page': per_page}
if access_token:
        qs_params.update({'access_token': access_token})
url += urlencode(qs_params)
request = urllib2.Request(url)
elif username and password:
url += urlencode(qs_params)
request = urllib2.Request(url)
add_https_basic_auth(request, username, password)
else:
raise ValueError('unworkable combination of authentication inputs')
response = urllib2.urlopen(request)
try:
pagination = get_pagination(response.headers['Link'])
except KeyError:
print 'no Link header, nothing to paginate through.'
pagination = Pagination(None, None, None, None)
repos = json.loads(response.read())
for r in repos:
if not r.get('archived'):
yield r
# so, this isn't the DRYest code ;-)
while pagination.next:
request = urllib2.Request(pagination.next)
if username and password:
            add_https_basic_auth(request, username, password)
response = urllib2.urlopen(request)
pagination = get_pagination(response.headers['Link'])
repos = json.loads(response.read())
for r in repos:
if not r.get('archived'):
yield r
# Github API call, can authenticate via access token, or username and password
# git cloning/pulling, can authenticate via ssh key, or username & password via https
def repocsv(string):
"""
>>> repocsv('org1/repo1, org2/repo2,org3/repo3 ,org4/repo4')
['org1/repo1', 'org2/repo2', 'org3/repo3', 'org4/repo4']
"""
try:
repos = [r.strip() for r in string.split(',')]
return set(repos)
except Exception as exc:
raise argparse.ArgumentTypeError(exc.message)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='backup github repositories for an organization')
subparsers = parser.add_subparsers(dest='authtype')
# uses an access token to fetch repositories names from github's API,
# but then assumes you have SSH keys setup for cloning/pulling
ssh_parser = subparsers.add_parser('ssh', help='use ssh for cloning/pulling, and use access token for github api access')
ssh_parser.add_argument('-d', '--dir', type=str, dest='directory', required=True, help='full or relative path to store backed up repositories')
ssh_parser.add_argument('-o', '--org', type=str, required=True, help='github organization name')
ssh_parser.add_argument('-t', '--type', type=str, dest='rtype', nargs='?', default='all', choices=REPO_TYPE_CHOICES, help='repository types to backup')
ssh_parser.add_argument('-a', '--access-token', type=str, help='personal access token or oauth access token')
ssh_parser.add_argument('-f', '--forks', action='store_true', help='add this arg if you want to backup fork repositories also')
ssh_parser.add_argument('-i', '--ignore-list', type=repocsv, default=set(), help='add repos you dont want to fetch/index, e.g. --ignore-list org1/repo1,org2/repo2')
# uses a username and password for fetching repositories names from
# github's API, and uses same username and password for
# cloning/updating via HTTPS as well.
#
# note: you can also use your personal access token as a password for https
# basic auth when talking to github's api or cloning
https_parser = subparsers.add_parser('https', help='use https for cloning/pulling, and use username and password (https basic auth) for github api access. note that github also allows using a personal access token as a password via this method')
https_parser.add_argument('-d', '--dir', type=str, dest='directory', required=True, help='full or relative path to store backed up repositories')
https_parser.add_argument('-o', '--org', type=str, required=True, help='github organization name')
https_parser.add_argument('-t', '--type', type=str, dest='rtype', nargs='?', default='all', choices=REPO_TYPE_CHOICES, help='repository types to backup')
https_parser.add_argument('-u', '--username', dest='username', type=str, required=True, help='github username')
https_parser.add_argument('-p', '--password', dest='password', type=str, required=True, help='github password or github personal access token')
https_parser.add_argument('-f', '--forks', action='store_true', help='add this arg if you want to backup fork repositories also')
https_parser.add_argument('-i', '--ignore-list', type=repocsv, default=set(), help='add repos you dont want to fetch/index, e.g. --ignore-list org1/repo1,org2/repo2')
args = parser.parse_args()
if not os.path.exists(args.directory):
os.makedirs(args.directory)
if args.authtype == 'ssh':
org_repos = get_repos(args.org, args.rtype, args.access_token)
else:
org_repos = get_repos(args.org, args.rtype, username=args.username, password=args.password)
h = Helpers(args)
for repo in org_repos:
# skip ignored repos
if repo['full_name'] in args.ignore_list:
print 'skipping ignored repository %s' % repo['full_name']
continue
# skip forks unless asked not to
if not args.forks and repo['fork']:
print 'skipping fork repository %s' % repo['full_name']
continue
destdir = os.path.join(args.directory, repo['name'])
if args.authtype == 'ssh':
repo_path = repo['ssh_url']
else:
repo_path = h.https_url_with_auth(repo['clone_url'])
if os.path.exists(destdir):
# pull in new commits to an already tracked repository
print '*** updating %s... ***' % h.redact(repo_path)
with chdir(destdir):
try:
h.exec_cmd('git pull origin %s' % repo['default_branch'])
continue
            except Exception as exc:
                print h.redact('git pull failed (%s)' % exc)
|
jddixon/upax_py
|
src/upax/ftlog.py
|
Python
|
mit
| 16,036
| 0
|
# dev/py/upax3/upax3/ftlog.py
""" Fault-tolerant log for a Upax node. """
import os
import re
# import sys
from collections import Container, Sized
from xlattice import (HashTypes, check_hashtype, # u,
SHA1_HEX_NONE, SHA2_HEX_NONE, SHA3_HEX_NONE,
BLAKE2B_HEX_NONE)
from upax import UpaxError
from upax.node import check_hex_node_id_160, check_hex_node_id_256
__all__ = ['ATEXT', 'AT_FREE',
'PATH_RE',
'BODY_LINE_1_RE', 'BODY_LINE_256_RE',
'IGNORABLE_RE',
# classes
'Log', 'BoundLog', 'LogEntry',
'Reader', 'FileReader', 'StringReader', ]
# -------------------------------------------------------------------
# CLASS LOG AND SUBCLASSES
# -------------------------------------------------------------------
# Take care: this pattern is used in xlmfilter, possibly elsewhere
# this is RFC2822's atext; *,+,?,- are escaped; needs to be enclosed in []+
ATEXT = r"[a-z0-9!#$%&'\*\+/=\?^_`{|}~\-]+"
AT_FREE = ATEXT + r'(?:\.' + ATEXT + r')*'
# this permits an RFC2822 message ID but is a little less restrictive
PATH_PAT = AT_FREE + r'(?:@' + AT_FREE + ')?'
PATH_RE = re.compile(PATH_PAT, re.I)
BODY_LINE_1_PAT =\
r'^(\d+) ([0-9a-f]{40}) ([0-9a-f]{40}) "([^"]*)" (%s)$' % PATH_PAT
BODY_LINE_1_RE = re.compile(BODY_LINE_1_PAT, re.I)
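# A body line in the SHA-1 case has the shape (illustrative sketch):
#   <timestamp> <40-hex content key> <40-hex node id> "<source>" <path>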
BODY_LINE_256_PAT =\
r'^(\d+) ([0-9a-f]{64}) ([0-9a-f]{64}) "([^"]*)" (%s)$' % PATH_PAT
BODY_LINE_256_RE = re.compile(BODY_LINE_256_PAT, re.I)
IGNORABLE_PAT = '(^ *$)|^ *#'
IGNORABLE_RE = re.compile(IGNORABLE_PAT)
class Log(Container, Sized):
"""a fault-tolerant log"""
def __init__(self, reader, hashtype):
self._hashtype = hashtype
(timestamp, prev_log_hash, prev_master, entries, index) = reader.read()
self._timestamp = timestamp # seconds from epoch
self._prev_hash = prev_log_hash # SHA1/3 hash of previous Log
if hashtype == HashTypes.SHA1:
check_hex_node_id_160(self._prev_hash)
else:
check_hex_node_id_256(self._prev_hash)
self._prev_master = prev_master # nodeID of master writing prev log
if hashtype == HashTypes.SHA1:
check_hex_node_id_160(self._prev_master)
else:
check_hex_node_id_256(self._prev_master)
self._entries = entries # a list
self._index = index # a map, hash => entry
def __contains__(self, key):
""" Return whether this key is in the Log. """
return key in self._index
def __len__(self):
""" Return the length of this Log. """
return len(self._entries)
def __str__(self):
"""used for serialization, so includes newline"""
# first line
if self._hashtype == HashTypes.SHA1:
fmt = "%013u %40s %40s\n"
else:
fmt = "%013u %64s %64s\n"
ret = fmt % (self._timestamp, self._prev_hash, self._prev_master)
# list of entries
for entry in self._entries:
ret += str(entry) # woefully inefficient :-)
return ret
def add_entry(self, tstamp, key, node_id, src, path):
"""
Create a LogEntry with the given timestamp, key, nodeID, src, and path.
If the LogEntry is already present in the Log, return a reference to
the existing LogEntry. Otherwise, add the LogEntry to the list and
index it by key.
"""
entry = LogEntry(tstamp, key, node_id, src, path)
if key in self._index:
existing = self._index[key]
if entry == existing:
return existing # silently ignore duplicates
self._entries.append(entry) # increases size of list
self._index[key] = entry # overwrites any earlier duplicates
return entry
def get_entry(self, key):
""" Given a key, return the correspondi
|
ng LogEntry or None. """
if key not in self._index:
return None
return self._index[key]
@property
def entries(self):
""" Return the list of LogEntries. """
return self._entries
@property
def index(self):
""" Return the index by key into the list of LogEntries. """
return self._index
@property
def prev_hash(self):
""" Return the content hash of the previous Log. """
return self._prev_hash
@property
def prev_master(self):
"""
Return the ID of the master of the previous Log.
"""
return self._prev_master
@property
def timestamp(self):
""" Return the timestamp for this Log. """
return self._timestamp
class BoundLog(Log):
""" A fult tolerant log bound to a file. """
def __init__(self, reader, hashtype=HashTypes.SHA2,
u_path=None, base_name='L'):
        super().__init__(reader, hashtype)
self.fd_ = None
self.is_open = False # for appending
overwriting = False
if u_path:
self.u_path = u_path
self.base_name = base_name
overwriting = True
else:
if isinstance(reader, FileReader):
self.u_path = reader.u_path
self.base_name = reader.base_name
overwriting = False
else:
msg = "no target uPath/baseName specified"
raise UpaxError(msg)
self.path_to_log = "%s/%s" % (self.u_path, self.base_name)
if overwriting:
with open(self.path_to_log, 'w') as file:
log_contents = super(BoundLog, self).__str__()
file.write(log_contents)
file.close()
self.fd_ = open(self.path_to_log, 'a')
self.is_open = True
def add_entry(self, tstamp, key, node_id, src, path):
if not self.is_open:
msg = "log file %s is not open for appending" % self.path_to_log
raise UpaxError(msg)
# XXX NEED TO THINK ABOUT THE ORDER OF OPERATIONS HERE
entry = super(
BoundLog,
self).add_entry(tstamp, key, node_id, src, path)
stringified = str(entry)
self.fd_.write(stringified)
return entry
def flush(self):
"""
Flush the log.
This should write the contents of any internal buffers to disk,
but no particular behavior is guaranteed.
"""
self.fd_.flush()
def close(self):
""" Close the log. """
self.fd_.close()
self.is_open = False
# -------------------------------------------------------------------
class LogEntry():
"""
The entry made upon adding a file to the Upax content-keyed data store.
    This consists of a timestamp; an SHA content key (the hash of the
    contents of the file); the NodeID identifying the contributor;
    its source (which may be a program name); and a UNIX/POSIX path
associated with the file. The path will normally be relative.
"""
__slots__ = ['_timestamp', '_key', '_node_id', '_src', '_path', ]
def __init__(self,
timestamp, key, node_id, source, pathToDoc):
self._timestamp = timestamp # seconds from epoch
if key is None:
raise UpaxError('LogEntry key may not be None')
        # infer the hash type from key length: 40 hex digits means SHA1
        hashtype = HashTypes.SHA1 if len(key) == 40 else HashTypes.SHA2
self._key = key # 40 or 64 hex digits, content hash
if hashtype == HashTypes.SHA1:
check_hex_node_id_160(self._key)
else:
check_hex_node_id_256(self._key)
if node_id is None:
raise UpaxError('LogEntry nodeID may not be None')
self._node_id = node_id # 40/64 digits, node providing entry
# XXX This is questionable. Why can't a node with a SHA1 id store
# a datum with a SHA3 key?
if hashtype == HashTypes.SHA1:
check_hex_node_id_160(self._node_id)
else:
check_hex_node_id_256(self._node_id)
self._src = source # tool or person responsible
self._path = pathToDoc # file name
@property
def key(self):
"
|
bkahlert/seqan-research
|
raw/workshop13/workshop2013-data-20130926/trunk/misc/renaming/invalid_identifiers.py
|
Python
|
mit
| 5,253
| 0.002094
|
#!/usr/bin/env python
import sys
import re
from helpers import *
PROGRAM_USAGE = """
SeqAn invalid identifiers detection script.
USAGE: invalid_identifiers.py BASE_PATH
BASE_PATH is the root path of all the folders to be searched.
This script generates a list of invalid identifiers found in the code base,
paired with their suggested replacement string in the format ``"old: new"``,
one identifier per line.
The result is written to the standard output.
""".strip()
INVALID_IDENTIFIER = re.compile(r'\b_[A-Z_]\w*\b')
REPLACEMENT_ID = re.compile(r'\b(__?)(\w*)\b')
# The following IDs are exempted from replacement since they are either defined
# by some compiler (-specific library) or are solely used within a string.
VALID_IDENTIFIERS = map(
lambda rx: re.compile(rx),
[ '___+',
'^__$',
'_N',
'_L',
'_H',
'__u?int64(_t)?',
'_A123456',
'__OPTIMIZE__',
'__gnu_cxx',
'_Resize_String', # will be done manually
'_Fill_String', #
'_Transcript_',
'_Confidence_99',
'_PARSER_H',
'_POSIX_TIMERS',
'__GNUC_MINOR__',
'_S_IREAD',
'_S_IWRITE',
'_O_BINARY',
'_O_CREAT',
'_O_RDONLY',
'_O_RDWR',
'_O_TEMPORARY',
'_O_TRUNC',
'_O_WRONLY',
'_KMER_H',
'_MSC_EXTENSIONS',
'_GLIBCXX_PARALLEL',
'_FILE_OFFSET_BITS',
'_POSIX_SYNCHRONIZED_IO',
'__cplusplus',
'__(force)?inline(__)?',
'__alignof(__)?',
'__attribute__',
'__GLOBAL__',
'_DELETIONS____',
'_INSERTS______',
'_REPLACEMENTS_',
'__int128',
'__SSE2__',
'__m128i',
'__VA_ARGS__',
'__FILE__',
'__LINE__',
'__GET_OPT_H__',
'_OPENMP',
'__SINIX__',
'__sgi',
'__BEOS__',
'__aix__',
'__ICC',
'__WATCOMC__',
'__ADSPBLACKFIN__',
'_BEOS',
'__SUNPRO_CC?',
'__tru64',
'__FreeBSD__',
'__ultrix',
'__OPENBSD',
'_MPRAS',
'_HAIKU',
'_SGI_COMPILER_VERSION',
'_POSIX_C_SOURCE',
'_XOPEN_SOURCE',
'__OpenBSD__',
'__AIX__',
'__ADSP21000__',
'__HAIKU__',
'__riscos__',
'__hpux',
'__HP_aCC',
'__riscos',
'__hpua',
'__GNUC__',
'_ULTRIX',
'_SCO_SV',
'__DECCXX',
'_XENIX',
'__sgi__',
'_WIN32',
'__PGI',
'__QNX__',
'__APPLE__',
'__AIX',
'_SGI',
'_AIX',
'__XENIX__',
'__INTEL_COMPILER',
'__osf',
'__linux__',
'__sinix__',
'__bsdos__',
'__ADSPTS__',
'__sun',
'__sinix',
'__NetBSD',
'__FreeBSD',
'__osf__',
'__ultrix__',
'__COMPILER_VER__',
'__COMO__',
'__linux',
'__UNIX_SV__',
'__HAIKU',
'__WIN32__',
'__NetBSD__',
'__CYGWIN__',
'_COMPILER_VERSION',
'__BORLANDC__',
'__TRU64__',
'__MINGW32__',
'__aix',
'__BeOS',
'__QNXNTO__',
'__hpux__',
'__IBMCPP__',
'__IAR_SYSTEMS_ICC__',
'__18CXX',
'__HP_cc',
'__SUNPRO_C',
'__DECC',
'__IBMC__',
'_MSC_VER' ])
def valid(id):
"""
Returns whether the given ``id`` is in fact valid and shouldn't be replaced.
"""
return any(VALID_ID.match(id) for VALID_ID in VALID_IDENTIFIERS)
def find_all(file):
"""
Returns all invalid identifiers found in a given ``file``.
"""
    f = open(file, 'r')
result = []
for line in f:
matches = INVALID_IDENTIFIER.findall(line)
invalids = [match for match in matches if not valid(match)]
result += invalids
return result
def replacement(orig):
"""
Returns the replacement string for a given invalid identifier.
"""
return REPLACEMENT_ID.sub(r'\2\1', orig)
def generate_replacements(ids):
"""
Generates a dictionary of replacement strings for a list of invalid
identifiers.
"""
return dict([(original, replacement(original)) for original in ids])
def main():
if len(sys.argv) != 2:
print >>sys.stderr, 'ERROR: Invalid number of arguments.'
print >>sys.stderr, PROGRAM_USAGE
return 1
results = {}
project_path = sys.argv[1]
for file in all_files(project_path):
results[file] = set(find_all(file))
all_ids = set()
for ids in results.values():
all_ids |= ids
replacements = generate_replacements(all_ids)
for id in sorted(all_ids):
print '%s: %s' % (id, replacements[id])
#for file in sorted(results.keys()):
# for id in results[file]:
# print '%s: %s' % (file, id)
return 0
if __name__ == '__main__':
sys.exit(main())
|
CZ-NIC/foris
|
foris/config_handlers/__init__.py
|
Python
|
gpl-3.0
| 833
| 0
|
# Foris - web administration interface for OpenWrt based on NETCONF
# Copyright (C) 2017 CZ.NIC, z.s.p.o. <http://www.nic.cz>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .base import BaseConfigHandler
__all__ = ["BaseConfigHandler"]
|