| repo_name (string, 5-100) | path (string, 4-231) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k) | middle (string, 3-512) | suffix (string, 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
| rfmcpherson/killerbee | killerbee/GoodFETAVR.py | Python | bsd-3-clause | 5,235 | 0.025788 |
#!/usr/bin/env python
# GoodFET SPI and SPIFlash Client Library
#
# (C) 2009 Travis Goodspeed <travis at radiantmachines.com>
#
# This code is being rewritten and refactored. You've been warned!
import sys, time, string, cStringIO, struct, glob, os;
from GoodFET import GoodFET;
class GoodFETAVR(GoodFET):
AVRAPP=0x32;
APP=AVRAPP;
AVRVendors={0x1E: "Atmel",
0x00: "Locked",
};
#List imported from http://avr.fenceline.de/device_data.html
AVRDevices={
0x9003: "ATtiny10",
0x9004: "ATtiny11",
0x9005: "ATtiny12",
0x9007: "ATtiny13",
0x9006: "ATtiny15",
0x9106: "ATtiny22",
0x910A: "ATtiny2313",
0x9108: "ATtiny25",
0x9109: "ATtiny26",
0x9107: "ATtiny28",
0x9206: "ATtiny45",
0x930B: "ATtiny85",
0x9304: "AT90C8534",
0x9001: "AT90S1200",
0x9101: "AT90S2313",
0x9102: "AT90S2323",
0x9105: "AT90S2333",
0x9103: "AT90S2343",
0x9201: "AT90S4414",
0x9203: "AT90S4433",
0x9202: "AT90S4434",
0x9301: "AT90S8515",
0x9303: "AT90S8535",
#Note: some parts share one signature (e.g. AT90PWM2/3); for duplicate
#keys the later entry wins, so only the last name is reported.
0x9381: "AT90PWM2",
0x9381: "AT90PWM3",
0x9781: "AT90CAN128",
0x9205: "ATmega48",
0x9306: "ATmega8515",
0x9308: "ATmega8535",
0x9307: "ATmega8",
0x930A: "ATmega88",
0x9403: "ATmega16",
0x9401: "ATmega161",
0x9404: "ATmega162",
0x9402: "ATmega163",
0x9407: "ATmega165",
0x9406: "ATmega168",
0x9405: "ATmega169",
0x9502: "ATmega32",
0x958a: "ATmega32U2", #TODO add the other U series.
0x9501: "ATmega323",
0x9503: "ATmega325",
0x9504: "ATmega3250",
0x9503: "ATmega329",
0x9504: "ATmega3290",
0x9507: "ATmega406",
0x9602: "ATmega64",
0x9607: "ATmega640",
0x9603: "ATmega645",
0x9604: "ATmega6450",
0x9603: "ATmega649",
0x9604: "ATmega6490",
0x0101: "ATmega103",
0x9701: "ATmega103",
0x9702: "ATmega128",
0x9703: "ATmega1280",
0x9704: "ATmega1281",
0x9801: "ATmega2560",
0x9802: "ATmega2561",
0x9002: "ATtiny19",
0x9302: "ATmega85",
0x9305: "ATmega83",
0x9601: "ATmega603",
#These are missing from the Fenceline DB.
0x960a: "ATmega644P",
};
def setup(self):
"""Move the FET into the AVR application."""
self.writecmd(self.AVRAPP,0x10,0,self.data); #SPI/SETUP
def trans(self,data):
"""Exchange data by AVR.
Input should probably be 4 bytes."""
self.data=data;
self.writecmd(self.AVRAPP,0x00,len(data),data);
return self.data;
def start(self):
"""Start the connection."""
self.writecmd(self.AVRAPP,0x20,0,None);
def forcestart(self):
"""Forcibly start a connection."""
for i in range(0x880,0xfff):
#self.glitchVoltages(0x880, i);
self.start();
bits=self.lockbits();
print "At %04x, Lockbits: %02x" % (i,bits);
if(bits==0xFF): return;
def erase(self):
"""Erase the target chip."""
self.writecmd(self.AVRAPP,0xF0,0,None);
def lockbits(self):
"""Read the target's lockbits."""
self.writecmd(self.AVRAPP,0x82,0,None);
return ord(self.data[0]);
def setlockbits(self,bits=0x00):
"""Read the target's lockbits."""
self.writecmd(self.AVRAPP,0x92,1,[bits]);
return self.lockbits();
def lock(self):
self.setlockbits(0xFC);
def eeprompeek(self, adr):
"""Read a byte of the target's EEPROM."""
self.writecmd(self.AVRAPP,0x81 ,2,
[ (adr&0xFF), (adr>>8)]
);#little-endian address
return ord(self.data[0]);
def flashpeek(self, adr):
"""Read a byte of the target's Flash memory."""
self.writecmd(self.AVRAPP,0x02 ,2,
[ (adr&0xFF), (adr>>8)]
);#little-endian address
return ord(self.data[0]);
def flashpeekblock(self, adr):
"""Read a byte of the target's Flash memory."""
self.writecmd(self.AVRAPP,0x02 ,4,
[ (adr&0xFF), (adr>>8) &0xFF, 0x80, 0x00]
);
return self.data;
def eeprompoke(self, adr, val):
"""Write a byte of the target's EEPROM."""
self.writecmd(self.AVRAPP,0x91 ,3,
[ (adr&0xFF), (adr>>8), val]
);#little-endian address
return ord(self.data[0]);
def identstr(self):
"""Return an identifying string."""
self.writecmd(self.AVRAPP,0x83,0, None);
vendor=self.AVRVendors.get(ord(self.data[0]));
deviceid=(ord(self.data[1])<<8)+ord(self.data[2]);
device=self.AVRDevices.get(deviceid);
#Return hex if device is unknown.
#They are similar enough that it needn't be known.
if device==None:
device=("0x%04x" % deviceid);
return "%s %s" % (vendor,device);
| zinderud/ysa | python/first/inheritance.py | Python | apache-2.0 | 468 | 0.027778 |
# Hayvan ("Animal") base class: yuru() reports that it started walking,
# ye() that it started eating.
class Hayvan:
def __init__(self,isim, renk):
self.isim=isim
self.renk=renk
def yuru(self):
print(self.isim+" yurumeye basladi")
def ye(self):
print(self.isim+" yemeye basladi")
# Fare ("Mouse") inherits from Hayvan, overriding yuru() to report a quick walk.
class Fare(Hayvan):
def __init__(self,isim,renk):
super().__init__(isim,renk)
def yuru(self):
print(self.isim+" hizlica yurudu")
my_fare=Fare("siyah Avrasya sert sicani ","mavi")
my_fare.yuru()
my_fare.ye()
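# Expected output (Fare overrides yuru(); ye() is inherited from Hayvan):
#   siyah Avrasya sert sicani  hizlica yurudu
#   siyah Avrasya sert sicani  yemeye basladi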
| franciscogarate/pyliferisk | Examples/Example_2_2_3b.py | Python | gpl-3.0 | 608 | 0.021382 |
#!/usr/bin/python
from pyliferisk import MortalityTable
from pyliferisk.mortalitytables import GKM95
import numpy as np
mt = MortalityTable(nt=GKM95)
x = 40 #age
n = 20 #horizon
C = 10000 #capital
i = 0.03 #interest rate
payments = []
for t in range(0,n):
payments.append((mt.lx[x+t] - mt.lx[x+t+1]) / mt.lx[x] * C)
discount_factor = []
for y in range(0,n):
discount_factor.append(1 / (1 + i) ** (y + 0.5))
print('{0:5} {1:10} {2:10}'.format(' t', 'factor', 'payment'))
for t in range(0,n):
print('{0:2} {1:10} {2:10}'.format(t, np.around(discount_factor[t], 5), np.around(payments[t], 4)))
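# A hedged extension of the example (not in the original file): the single
# premium of this term-insurance benefit is its expected present value, i.e.
# each death payment weighted by its mid-year discount factor.
premium = sum(p * d for p, d in zip(payments, discount_factor))
print('single premium: {}'.format(np.around(premium, 2)))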
| 0xMF/pelican | pelican/urlwrappers.py | Python | agpl-3.0 | 2,731 | 0 |
import os
import functools
import logging
import six
from pelican.utils import (slugify, python_2_unicode_compatible)
logger = logging.getLogger(__name__)
@python_2_unicode_compatible
@functools.total_ordering
class URLWrapper(object):
def __init__(self, name, settings):
# next 2 lines are redundant with the setter of the name property
# but are here for clarity
self.settings = settings
self._name = name
self.slug = slugify(name, self.settings.get('SLUG_SUBSTITUTIONS', ()))
self.name = name
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
self.slug = slugify(name, self.settings.get('SLUG_SUBSTITUTIONS', ()))
def as_dict(self):
d = self.__dict__
d['name'] = self.name
return d
def __hash__(self):
return hash(self.slug)
def _key(self):
return self.slug
def _normalize_key(self, key):
subs = self.settings.get('SLUG_SUBSTITUTIONS', ())
return six.text_type(slugify(key, subs))
def __eq__(self, other):
return self._key() == self._normalize_key(other)
def __ne__(self, other):
return self._key() != self._normalize_key(other)
def __lt__(self, other):
return self._key() < self._normalize_key(other)
def __str__(self):
return self.name
def __repr__(self):
return '<{} {}>'.format(type(self).__name__, str(self))
def _from_settings(self, key, get_page_name=False):
"""Returns URL information as defined in settings.
When get_page_name=True returns URL without anything after {slug} e.g.
if in settings: CATEGORY_URL="cat/{slug}.html" this returns
"cat/{slug}" Useful for pagination.
"""
setting = "%s_%s" % (self.__class__.__name__.upper(), key)
value = self.settings[setting]
if not isinstance(value, six.string_types):
logger.warning('%s is set to %s', setting, value)
return value
else:
if get_page_name:
return os.path.splitext(value)[0].format(**self.as_dict())
else:
return value.format(**self.as_dict())
page_name = property(functools.partial(_from_settings, key='URL',
get_page_name=True))
url = property(functools.partial(_from_settings, key='URL'))
save_as = property(functools.partial(_from_settings, key='SAVE_AS'))
class Category(URLWrapper):
pass
class Tag(URLWrapper):
def __init__(self, name, *args, **kwargs):
super(Tag, self).__init__(name.strip(), *args, **kwargs)
class Author(URLWrapper):
pass
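# A minimal sketch (assuming an empty settings dict is acceptable, since the
# class only ever calls settings.get): URLWrapper hashes and compares by
# normalized slug, so a wrapper matches any string that slugifies the same way.
#   w = Category('My Category', settings={})
#   w == 'my category'   # True: both sides normalize to the slug 'my-category'
#   str(w)               # 'My Category'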
| nuncjo/Delver | delver/parser.py | Python | mit | 2,824 | 0.001416 |
# -*- coding:utf-8 -*-
from urllib.parse import urlparse
from lxml import html
from lxml.html.clean import Cleaner
from .forms import FormWrapper
from .helpers import (
match_form,
filter_element
)
class HtmlParser:
""" Parses response content string to valid html using `lxml.html`
"""
def __init__(self, response, session=None, use_cleaner=None, cleaner_params=None):
self._html_tree = html.fromstring(response.content)
self.links = {}
self._forms = []
self._cleaner = Cleaner(**cleaner_params) if use_cleaner else None
self._session = session
self._url = response.url
def make_links_absolute(self):
"""Makes absolute links http://domain.com/index.html from the relative ones /index.html
"""
parsed_url = urlparse(self._url)
self._html_tree.make_links_absolute(
'{url.scheme}://{url.netloc}/'.format(url=parsed_url),
resolve_base_href=True
)
def find_links(self, tags=None, filters=None, match='EQUAL'):
""" Find links and iterate through them checking if they are matching given filters and
tags
usage::
>>> import requests
>>> response = requests.get('https://httpbin.org/links/10/0')
>>> tags = ['style', 'link', 'script', 'a']
>>> parser = HtmlParser(response)
>>> links = parser.find_links(tags)
>>> len(links)
9
"""
filters = filters or {}
tags = tags or ['a']
for link, _, url, _ in self._html_tree.iterlinks():
matched = filter_element(
link,
tags=tags,
filters=filters,
match=match
)
if matched:
self.links[url] = matched
return self.links
def find_forms(self, filters=None):
""" Find forms and wraps them with class::`<FormWrapper>` object
usage::
>>> import requests
>>> response = requests.get('https://httpbin.org/forms/post')
>>> parser = HtmlParser(response)
>>> forms = parser.find_forms()
>>> len(forms)
1
"""
filters = filters or {}
self._forms = []
for form in self._html_tree.forms:
wrapped_form = FormWrapper(form, session=self._session, url=self._url)
if match_form(wrapped_form, filters):
self._forms.append(wrapped_form)
return self._forms
def xpath(self, path):
"""Select elements using xpath selectors"""
return self._html_tree.xpath(path)
def css(self, selector):
"""Select elements by css selectors"""
return self._html_tree.cssselect(selector)
if __name__ == '__main__':
import doctest
doctest.testmod()
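# A hedged usage sketch mirroring the doctests above; css() additionally
# assumes the cssselect package is available, which lxml's cssselect() needs.
#   parser = HtmlParser(requests.get('https://httpbin.org/html'))
#   headings = parser.xpath('//h1/text()')
#   paragraphs = parser.css('p')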
| Nitrate/Nitrate | src/tcms/issuetracker/migrations/0005_adjust_issue_report_fields.py | Python | gpl-2.0 | 1,490 | 0.002013 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-29 13:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("issuetracker", "0004_data_migration_set_bugzilla_to_allow_add_case_to_issue"),
]
operations = [
migrations.RemoveField(
model_name="issuetracker",
name="issue_report_fmt",
),
migrations.AddField(
model_name="issuetracker",
name="issue_report_templ",
field=models.CharField(
blank=True,
default="",
help_text="The issue content template, which could be arbitrary text with format arguments. Nitrate provides these format arguments: <code>TestBuild.name</code>, <code>setup</code>, <code>action</code> and <code>effect</code>. The text is formatted with keyward arguments.",
max_length=255,
),
),
migrations.AlterField(
model_name="issuetracker",
name="issue_report_params",
field=models.CharField(
blank=True,
default="",
help_text="Parameters used to format URL for reporting issue. Each line is a <code>key:value</code> pair of parameters. Nitrate provides a few parameters to format URL and additional parameters could be provided by system administrator as well. ",
max_length=255,
),
),
]
| kienpham2000/stockdelta-api | stockdelta/api/alerts.py | Python | mit | 717 | 0 |
from firebase import firebase
from flask import request
from flask.ext.restful import Resource
class BaseAlert(Resource):
db = firebase.FirebaseApplication(
dsn='https://stockdelta.firebaseio.com/',
authentication=None)
class Alerts(BaseAlert):
def get(self):
result = self.db.get(url='/alerts', name=None)
print result
return result
def post(self):
# save alert to db:
print request.json
result = self.db.post('alerts',
'{"email": "shop@kienpham.com"
|
,"active": Tru
|
e}')
print result
return result
class Alert(BaseAlert):
def get(self, alert_id):
return {"an alert": alert_id}
| wireservice/agate-excel | docs/conf.py | Python | mit | 7,310 | 0.006156 |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
autodoc_member_order = 'bysource'
intersphinx_mapping = {
'python': ('http://docs.python.org/3.5/', None),
'agate': ('http://agate.readthedocs.org/en/latest/', None)
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'agate-excel'
copyright = u'2017, Christopher Groskopf'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.5'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'agateexceldoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'agate-excel.tex', u'agate-excel Documentation',
u'Christopher Groskopf', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
]
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/papyon/sip/call_manager.py | Python | gpl-3.0 | 56 | 0.017857 |
../../../../../share/pyshared/papyon/sip/call_manager.py
| michath/ConMonkey | testing/mozbase/mozdevice/tests/sut_mkdir.py | Python | mpl-2.0 | 2,854 | 0.001051 |
# Any copyright is dedicated to the Public Domain.
# http://creativecommons.org/publicdomain/zero/1.0/
import mozdevice
import mozlog
import unittest
from sut import MockAgent
class MkDirsTest(unittest.TestCase):
def test_mkdirs(self):
subTests = [{'cmds': [('isdir /mnt/sdcard/baz/boop', 'FALSE'),
('isdir /mnt', 'TRUE'),
('isdir /mnt/sdcard', 'TRUE'),
('isdir /mnt/sdcard/baz', 'FALSE'),
('mkdr /mnt/sdcard/baz',
'/mnt/sdcard/baz successfully created'),
('isdir /mnt/sdcard/baz/boop', 'FALSE'),
('mkdr /mnt/sdcard/baz/boop',
'/mnt/sdcard/baz/boop successfully created')],
'expectException': False},
{'cmds': [('isdir /mnt/sdcard/baz/boop', 'FALSE'),
('isdir /mnt', 'TRUE'),
('isdir /mnt/sdcard', 'TRUE'),
('isdir /mnt/sdcard/baz', 'FALSE'),
('mkdr /mnt/sdcard/baz',
'##AGENT-WARNING## Could not create the directory /mnt/sdcard/baz')],
'expectException': True},
]
for subTest in subTests:
a = MockAgent(self, commands=subTest['cmds'])
exceptionThrown = False
try:
d = mozdevice.DroidSUT('127.0.0.1', port=a.port,
logLevel=mozlog.DEBUG)
d.mkDirs('/mnt/sdcard/baz/boop/bip')
except mozdevice.DMError:
exceptionThrown = True
self.assertEqual(exceptionThrown, subTest['expectException'])
a.wait()
def test_repeated_path_part(self):
"""
Ensure that all dirs are created when last path part also found
earlier in the path (bug 826492).
"""
cmds = [('isdir /mnt/sdcard/foo', 'FALSE'),
('isdir /mnt', 'TRUE'),
('isdir /mnt/sdcard', 'TRUE'),
('isdir /mnt/sdcard/foo', 'FALSE'),
('mkdr /mnt/sdcard/foo',
'/mnt/sdcard/foo successfully created')]
a = MockAgent(self, commands=cmds)
d = mozdevice.DroidSUT('127.0.0.1', port=a.port,
logLevel=mozlog.DEBUG)
d.mkDirs('/mnt/sdcard/foo/foo')
a.wait()
def test_mkdirs_on_root(self):
cmds = [('isdir /', 'TRUE')]
a = MockAgent(self, commands=cmds)
d = mozdevice.DroidSUT('127.0.0.1', port=a.port,
logLevel=mozlog.DEBUG)
d.mkDirs('/foo')
a.wait()
if __name__ == '__main__':
unittest.main()
| proxama/zorp | zorp/serialiser.py | Python | mit | 566 | 0.001767 |
"""
Zorp serialiser
"""
from bson import BSON
class Serialiser(object):
"""
This is simply a wrapper
for the bson encoder/decoder
that can deal with non-dict types
"""
WRAPPER = "data"
@staticmethod
def encode(obj):
"""
Wrap the object in a dict and bson encode it
"""
return BSON.encode({
Serialiser.WRAPPER: obj
})
@staticmethod
def decode(obj):
"""
bson decode the object and unwrap it
"""
return BSON(obj).decode()[Serialiser.WRAPPER]
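# A minimal round-trip sketch (not in the original module): encode() wraps any
# value in a dict under the "data" key, so non-dict types such as lists
# survive BSON serialisation.
if __name__ == "__main__":
    payload = Serialiser.encode([1, 2, "three"])
    assert Serialiser.decode(payload) == [1, 2, "three"]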
| tulsluper/sanscript | apps/da/scripts/form_capacity.py | Python | gpl-3.0 | 6,110 | 0.002782 |
#!/usr/bin/env python3
import os
from settings import JSONDIR, logging
from defs import load_data, dump_data
def sort_storage_records(records, sorted_systems):
try:
records.sort(key=lambda x: sorted_systems.index(x['Storage']))
except:
pass
return records
def sum_3par():
capD = {}
filepath = os.path.join(JSONDIR, '3par', 'sys')
data = load_data(filepath, [])
for record in data:
xdict = {
'RawTotal': record['TotalCap'],
'RawData': record['TotalCap'],
'RawSpare': 0,
'RawAllocated': record['AllocCap'],
'RawFree': record['FreeCap'],
}
for key, val in xdict.items():
xdict[key] = float(val)/1024/1024
capD[record['Storage']] = xdict
return capD
def sum_eva():
capD = {}
filepath = os.path.join(JSONDIR, 'eva', 'system')
data = load_data(filepath, [])
for record in data:
xdict = {
'RawTotal': record['totalstoragespace'],
'RawData': record['totalstoragespace'],
'RawSpare': 0,
'RawAllocated': record['usedstoragespace'],
'RawFree': float(record['totalstoragespace']) - float(record['usedstoragespace']),
'FormattedAvailable': (float(record['totalstoragespace']) - float(record['usedstoragespace']))/2
}
for key, val in xdict.items():
xdict[key] = float(val)/1024
capD[record['Storage']] = xdict
return capD
def sum_hds():
capD = {}
filepath = os.path.join(JSONDIR, 'hds', 'drive_status')
data = load_data(filepath, [])
filepath = os.path.join(JSONDIR, 'hds', 'drive_vendor')
data2 = load_data(filepath, [])
for record in data:
for record2 in data2:
if record['Storage'] == record2['Storage'] and record['HDU'] == record2['HDU']:
record.update(record2)
for record in data:
storage = record['Storage']
#0.9313 = 1000*1000*1000/1024/1024/1024/1000
drive_size = int(record['Capacity'].replace('GB', ''))*0.9313/1024
Type = record['Type']
Status = record['Status']
if not storage in capD:
capD[storage] = {'RawTotal': 0, 'RawData': 0, 'RawSpare': 0, 'RawAllocated': 0, 'RawFree': 0}
capD[storage]['RawTotal'] += drive_size
if Type == 'Data':
capD[storage]['RawData'] += drive_size
if Type == 'Spare' and Status == 'Standby':
capD[storage]['RawSpare'] += drive_size
return capD
def sum_form_cap():
filepath = os.path.join(JSONDIR, 'volumes')
data = load_data(filepath, [])
xdict = {}
for record in data:
storage = record['Storage']
size = record['Size']
if not storage in xdict:
xdict[storage] = {
'FormattedTotal': 0,
'FormattedUsed': 0,
'FormattedPresented': 0,
'FormattedNotPresented': 0,
'FormattedAvailable': 0,
}
xdict[storage]['FormattedUsed'] += size
if record['Hosts']:
xdict[storage]['FormattedPresented'] += size
else:
xdict[storage]['FormattedNotPresented'] += size
for storage, size in xdict.items():
xdict[storage]['FormattedUsed'] = round(size['FormattedUsed']/1024, 2)
xdict[storage]['FormattedPresented'] = round(size['FormattedPresented']/1024, 2)
xdict[storage]['FormattedNotPresented'] = round(size['FormattedNotPresented']/1024, 2)
return xdict
def sum_form_3par_avail():
filepath = os.path.join(JSONDIR, 'capacity_3par')
data = load_data(filepath, [])
xdict = {}
for record in data:
xdict[record['Storage']] = {
'FormattedAvailable': record['FREE'],
}
return xdict
def sum_form_hds_cap():
filepath = os.path.join(JSONDIR, 'hds/dppool')
data1 = load_data(filepath, [])
filepath = os.path.join(JSONDIR, 'hds/rgref')
data2 = load_data(filepath, [])
xdict = {}
for record in data1:
storage = record['Storage']
if not storage in xdict:
xdict[storage] = {'FormattedAvailable': 0}
FREE = float(record['Total_Capacity']) - float(record['Consumed_Capacity'])
xdict[storage]['FormattedAvailable'] += FREE
for record in data2:
storage = record['Storage']
if not storage in xdict:
xdict[storage] = {'FormattedAvailable': 0}
FREE = float(record['Free_Capacity'])
xdict[storage]['FormattedAvailable'] += FREE
for storage, stordict in xdict.items():
for key, val in stordict.items():
xdict[storage][key] = float(val)/1024
return xdict
def main():
filepath = os.path.join(JSONDIR, 'models')
models = load_data(filepath, [])
RawCapD = {}
RawCapD.update(sum_3par())
RawCapD.update(sum_eva())
RawCapD.update(sum_hds())
FormCapD = sum_form_cap()
Form3parAvailD = sum_form_3par_avail()
FormHdsAvailD = sum_form_hds_cap()
records = []
for storage in RawCapD:
record = {'Storage': storage}
record.update(RawCapD[storage])
record.update(FormCapD[storage])
record.update(Form3parAvailD.get(storage, {}))
record.update(FormHdsAvailD.get(storage, {}))
if storage in models.get('eva', []):
record['FormattedAvailable'] = record['RawFree']/2
elif storage in models.get('hds', []):
rate = record['RawData']/(record['FormattedUsed'] + record['FormattedAvailable'])
record['RawAllocated'] = record['FormattedUsed']*rate
record['RawFree'] = record['FormattedAvailable']*rate
record['FormattedTotal'] = record['FormattedUsed'] + record['FormattedAvailable']
records.append(record)
sorted_systems = load_data(os.path.join(JSONDIR, 'sorted_systems'), [])
records = sort_storage_records(records, sorted_systems)
filepath = os.path.join(JSONDIR, 'capacity')
dump_data(filepath, records)
return
if __name__ == '__main__':
main()
| openthread/openthread | tools/harness-automation/cases_R140/router_5_5_2.py | Python | bsd-3-clause | 1,876 | 0 |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from autothreadharness.harness_case import HarnessCase
import unittest
class Router_5_5_2(HarnessCase):
role = HarnessCase.ROLE_ROUTER
case = '5 5 2'
golden_devices_required = 2
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| jantman/pymonitoringapi | examples/get_service_status.py | Python | lgpl-3.0 | 2,213 | 0.003615 |
#!/usr/bin/env python
"""
pymonitoringapi example script to find the server type
and version of our monitoring server.
"""
import optparse
import sys
import os
import pprint
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../")))
from pymonitoringapi import MonitoringAPI
VERBOSE = False
def show_service_status(host, service, user, password, url, cgibin):
mon = MonitoringAPI(url, user, password, cgibin)
client = mon.get_client()
status = client.get_service_information(host, service)
pprint.pprint(status)
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option('-u', '--user', dest='user',
help='username to authenticate with (HTTP Basic Auth)')
parser.add_option('-p', '--password', dest='password',
help='password to authenticate with (HTTP Basic Auth)')
parser.add_option('-U', '--url', dest='url',
help='url to the top-level nagios web directory (i.e. http://host.example.com/nagios/)')
parser.add_option('-c', '--cgibin', dest='cgibin', default='cgi-bin/',
help='path to nagios cgi-bin directory, relative to url (default: cgi-bin/)')
parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true',
help='verbose (debug-level) output')
parser.add_option('-H', '--host', dest='host',
help='hostname to check status for')
parser.add_option('-s', '--service', dest='service',
help='(optional) service to check status for')
options, args = parser.parse_args()
if not options.user or not options.password or not options.url:
print "ERROR: you must specify -u/--user, -p/--password and -U/--url"
sys.exit(1)
if options.verbose:
VERBOSE = True
if not options.host:
print "ERROR: you must specify -h/--host to check status for"
sys.exit(1)
if not options.service:
print "host status not implemented yet"
else:
show_service_status(options.host, options.service, options.user, options.password, options.url, options.cgibin)
| blundeln/pylens | scripts/generate_docs.py | Python | bsd-3-clause | 5,581 | 0.023114 |
#
# Copyright (c) 2010-2011, Nick Blundell
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Nick Blundell nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
#
# Author: Nick Blundell <blundeln [AT] gmail [DOT] com>
# Organisation: www.nickblundell.org.uk
#
# Description:
#
#
import os
import sys
import re
import glob
SOURCE_DIR = os.getcwd()
try :
from nbdebug import d
except ImportError:
def d(*args, **kargs):pass
def run(command) :
d(command)
return os.system(command)
def generate_pages_from_example_code(python_file) :
d(python_file)
content = open(python_file).read()
matches = []
multiline_comment_matches = []
for match in re.finditer("^\s\s\"\"\"(.+?)\"\"\".*?\n", content, re.DOTALL + re.MULTILINE) :
if match:
multiline_comment_matches.append(match)
hash_comment_matches = []
for match in re.finditer("^\s\s#(.*?)\n", content, re.DOTALL + re.MULTILINE) :
if match:
hash_comment_matches.append(match)
test_function_matches = []
for match in re.finditer("def\s+(.+?)_test.+?\n", content, re.DOTALL) :
if match:
test_function_matches.append(match)
all_matches = []
all_matches.extend(multiline_comment_matches)
all_matches.extend(hash_comment_matches)
all_matches.extend(test_function_matches)
# Sort matches by their source positions.
all_matches = sorted(all_matches, key=lambda x: x.start())
output_blocks = []
prev_match = None
for match in all_matches:
if prev_match :
code_block = content[prev_match.end(0):match.start(0)].strip()
if code_block :
# Convert our assertions to showing expected outputs.
def f(match) :
args = match.group(1)
if ", " in args :
# Since ',' may appear inside two args, we use double space to
# indicate docs should be transformed in this way.
return args.replace(", ", " ---> ")
else :
return match.group(0)
if "assert_equal" in code_block :
code_block = re.sub("assert_equal\((.+?)\)\s*$", f, code_block, flags=re.MULTILINE)
# TODO: Need to handle multilines.
#code_block = re.sub("assert_equal\((.+?)\)\s*\n", f, code_block, flags=re.DOTALL)
code_block = "\n\n::\n\n %s\n\n" % code_block
output_blocks.append([prev_match.end(0), code_block])
prev_match = match
match = None
for match in all_matches:
text = match.group(1)
if match in test_function_matches:
text = "\n\n" + " ".join([s.capitalize() for s in text.split("_")]) + "\n" + "-"*80 + "\n\n"
elif match in multiline_comment_matches:
text = text.replace("\n ","\n")+"\n\n"
elif match in hash_comment_matches:
text = text[1:]+"\n"
output_blocks.append((match.start(0), text))
output_blocks = sorted(output_blocks, key=lambda x: x[0])
output = ""
for x in output_blocks :
output += x[1]
return output
def generate_docs_from_example_tests():
examples_path = os.path.join(SOURCE_DIR, "examples")
try :
os.makedirs("docs/source/examples")
except :
run("rm docs/source/examples/*.rst")
for source_file in glob.glob(examples_path+"/*.py") :
name = os.path.basename(source_file).replace(".py","")
# TODO: Improve handling of fully-left justified code.
if name != "basic" :
continue
if name.startswith("_") :
continue
output = generate_pages_from_example_code(source_file)
# Add page title
output = name.capitalize() + "\n" + "="*80 + "\n\n" + output
if output.strip() :
open("docs/source/examples/%s.rst" % name,"w").write(output)
def main():
# Generate tutorials from source.
generate_docs_from_example_tests()
# Generate index.rst from our README file.
index_content = open("README.rst").read()
index_content = index_content.replace(".. TOC", "\n\n\n" + open("docs/source/master_toc").read())
open("docs/source/index.rst", "w").write(index_content)
exit_code = run("sphinx-build -W -b html docs/source docs/build/html")
if exit_code :
raise Exception("Sphinx build failed")
if __name__ == "__main__" :
main()
| googleapis/google-resumable-media-python | google/_async_resumable_media/_upload.py | Python | apache-2.0 | 37,337 | 0.000214 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Virtual bases classes for uploading media via Google APIs.
Supported here are:
* simple (media) uploads
* multipart uploads that contain both metadata and a small file as payload
* resumable uploads (with metadata as well)
"""
import http.client
import json
import os
import random
import sys
from google import _async_resumable_media
from google._async_resumable_media import _helpers
from google.resumable_media import _helpers as sync_helpers
from google.resumable_media import _upload as sync_upload
from google.resumable_media import common
from google.resumable_media._upload import (
_CONTENT_TYPE_HEADER,
_CONTENT_RANGE_TEMPLATE,
_RANGE_UNKNOWN_TEMPLATE,
_EMPTY_RANGE_TEMPLATE,
_BOUNDARY_FORMAT,
_MULTIPART_SEP,
_CRLF,
_MULTIPART_BEGIN,
_RELATED_HEADER,
_BYTES_RANGE_RE,
_STREAM_ERROR_TEMPLATE,
_POST,
_PUT,
_UPLOAD_CHECKSUM_MISMATCH_MESSAGE,
_UPLOAD_METADATA_NO_APPROPRIATE_CHECKSUM_MESSAGE,
)
class UploadBase(object):
"""Base class for upload helpers.
Defines core shared behavior across different upload types.
Args:
upload_url (str): The URL where the content will be uploaded.
headers (Optional[Mapping[str, str]]): Extra headers that should
be sent with the request, e.g. headers for encrypted data.
Attributes:
upload_url (str): The URL where the content will be uploaded.
"""
def __init__(self, upload_url, headers=None):
self.upload_url = upload_url
if headers is None:
headers = {}
self._headers = headers
self._finished = False
self._retry_strategy = common.RetryStrategy()
@property
def finished(self):
"""bool: Flag indicating if the upload has completed."""
return self._finished
def _process_response(self, response):
"""Process the response from an HTTP request.
This is everything that must be done after a request that doesn't
require network I/O (or other I/O). This is based on the `sans-I/O`_
philosophy.
Args:
response (object): The HTTP response object.
Raises:
~google.resumable_media.common.InvalidResponse: If the status
code is not 200.
.. _sans-I/O: https://sans-io.readthedocs.io/
"""
# Tombstone the current upload so it cannot be used again (in either
# failure or success).
self._finished = True
_helpers.require_status_code(response, (http.client.OK,), self._get_status_code)
@staticmethod
def _get_status_code(response):
"""Access the status code from an HTTP response.
Args:
response (object): The HTTP response object.
Raises:
NotImplementedError: Always, since virtual.
"""
raise NotImplementedError("This implementation is virtual.")
@staticmethod
def _get_headers(response):
"""Access the headers
|
from an HTTP response.
|
Args:
response (object): The HTTP response object.
Raises:
NotImplementedError: Always, since virtual.
"""
raise NotImplementedError("This implementation is virtual.")
@staticmethod
def _get_body(response):
"""Access the response body from an HTTP response.
Args:
response (object): The HTTP response object.
Raises:
NotImplementedError: Always, since virtual.
"""
raise NotImplementedError("This implementation is virtual.")
class SimpleUpload(UploadBase):
"""Upload a resource to a Google API.
A **simple** media upload sends no metadata and completes the upload
in a single request.
Args:
upload_url (str): The URL where the content will be uploaded.
headers (Optional[Mapping[str, str]]): Extra headers that should
be sent with the request, e.g. headers for encrypted data.
Attributes:
upload_url (str): The URL where the content will be uploaded.
"""
def _prepare_request(self, data, content_type):
"""Prepare the contents of an HTTP request.
This is everything that must be done before a request that doesn't
require network I/O (or other I/O). This is based on the `sans-I/O`_
philosophy.
.. note:
This method will be used only once, so ``headers`` will be
mutated by having a new key added to it.
Args:
data (bytes): The resource content to be uploaded.
content_type (str): The content type for the request.
Returns:
Tuple[str, str, bytes, Mapping[str, str]]: The quadruple
* HTTP verb for the request (always POST)
* the URL for the request
* the body of the request
* headers for the request
Raises:
ValueError: If the current upload has already finished.
TypeError: If ``data`` isn't bytes.
.. _sans-I/O: https://sans-io.readthedocs.io/
"""
if self.finished:
raise ValueError("An upload can only be used once.")
if not isinstance(data, bytes):
raise TypeError("`data` must be bytes, received", type(data))
self._headers[_CONTENT_TYPE_HEADER] = content_type
return _POST, self.upload_url, data, self._headers
def transmit(self, transport, data, content_type, timeout=None):
"""Transmit the resource to be uploaded.
Args:
transport (object): An object which can make authenticated
requests.
data (bytes): The resource content to be uploaded.
content_type (str): The content type of the resource, e.g. a JPEG
image has content type ``image/jpeg``.
timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
The number of seconds to wait for the server response.
Depending on the retry strategy, a request may be repeated
several times using the same timeout each time.
Can also be passed as an `aiohttp.ClientTimeout` object.
Raises:
NotImplementedError: Always, since virtual.
"""
raise NotImplementedError("This implementation is virtual.")
class MultipartUpload(UploadBase):
"""Upload a resource with metadata to a Google API.
A **multipart** upload sends both metadata and the resource in a single
(multipart) request.
Args:
upload_url (str): The URL where the content will be uploaded.
headers (Optional[Mapping[str, str]]): Extra headers that should
be sent with the request, e.g. headers for encrypted data.
checksum Optional([str]): The type of checksum to compute to verify
the integrity of the object. The request metadata will be amended
to include the computed value. Using this option will override a
manually-set checksum value. Supported values are "md5", "crc32c"
and None. The default is None.
Attributes:
upload_url (str): The URL where the content will be uploaded.
"""
def __init__(self, upload_url, headers=None, checksum=None):
super(MultipartUpload, self).__init__(upload_url, headers=headers)
self._checksum_type = checksum
def _prepare_request(self, data, metadata, content_type):
"""Prepare the contents of an HTTP request.
This is everything that must be done before a request that doesn't
r
| GoogleCloudPlatform/training-data-analyst | quests/sparktobq/spark_analysis.py | Python | apache-2.0 | 2,849 | 0.004563 |
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--bucket", help="bucket for input and output")
args = parser.parse_args()
BUCKET = args.bucket
from pyspark.sql import SparkSession, SQLContext, Row
spark = SparkSession.builder.appName("kdd").getOrCreate()
sc = spark.sparkContext
data_file = "gs://{}/kddcup.data_10_percent.gz".format(BUCKET)
raw_rdd = sc.textFile(data_file).cache()
#raw_rdd.take(5)
csv_rdd = raw_rdd.map(lambda row: row.split(","))
parsed_rdd = csv_rdd.map(lambda r: Row(
duration=int(r[0]),
protocol_type=r[1],
service=r[2],
flag=r[3],
src_bytes=int(r[4]),
dst_bytes=int(r[5]),
wrong_fragment=int(r[7]),
urgent=int(r[8]),
hot=int(r[9]),
num_failed_logins=int(r[10]),
num_compromised=int(r[12]),
su_attempted=r[14],
num_root=int(r[15]),
num_file_creations=int(r[16]),
label=r[-1]
)
)
#parsed_rdd.take(5)
sqlContext = SQLContext(sc)
df = sqlContext.createDataFrame(parsed_rdd)
connections_by_protocol = df.groupBy('protocol_type').count().orderBy('count', ascending=False)
connections_by_protocol.show()
df.registerTempTable("connections")
attack_stats = sqlContext.sql("""
SELECT
protocol_type,
CASE label
WHEN 'normal.' THEN 'no attack'
ELSE 'attack'
END AS state,
COUNT(*) as total_freq,
ROUND(AVG(src_bytes), 2) as mean_src_bytes,
ROUND(AVG(dst_bytes), 2) as mean_dst_bytes,
ROUND(AVG(duration), 2) as mean_duration,
SUM(num_failed_logins) as total_failed_logins,
SUM(num_compromised) as total_compromised,
SUM(num_file_creations) as total_file_creations,
SUM(su_attempted) as total_root_attempts,
SUM(num_root) as total_root_acceses
FROM connections
GROUP BY protocol_type, state
ORDER BY 3 DESC
""")
attack_stats.show()
ax = attack_stats.toPandas().plot.bar(x='protocol_type', subplots=True, figsize=(10,25))
ax[0].get_figure().savefig('report.png');
#!gsutil rm -rf gs://$BUCKET/sparktobq/
#!gsutil cp report.png gs://$BUCKET/sparktobq/
import google.cloud.storage as gcs
bucket = gcs.Client().get_bucket(BUCKET)
for blob in bucket.list_blobs(prefix='sparktobq/'):
blob.delete()
bucket.blob('sparktobq/report.png').upload_from_filename('report.png')
connections_by_protocol.write.format("csv").mode("overwrite").save(
"gs://{}/sparktobq/connections_by_protocol".format(BUCKET))
| MungoRae/home-assistant | homeassistant/components/sensor/zoneminder.py | Python | apache-2.0 | 3,361 | 0 |
"""
Support for ZoneMinder Sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.zoneminder/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import STATE_UNKNOWN
from homeassistant.helpers.entity import Entity
import homeassistant.components.zoneminder as zoneminder
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['zoneminder']
CONF_INCLUDE_ARCHIVED = "include_archived"
DEFAULT_INCLUDE_ARCHIVED = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_INCLUDE_ARCHIVED, default=DEFAULT_INCLUDE_ARCHIVED):
cv.boolean,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the ZoneMinder sensor platform."""
include_archived = config.get(CONF_INCLUDE_ARCHIVED)
sensors = []
monitors = zoneminder.get_state('api/monitors.json')
for i in monitors['monitors']:
sensors.append(
ZMSensorMonitors(int(i['Monitor']['Id']), i['Monitor']['Name'])
)
sensors.append(
ZMSensorEvents(int(i['Monitor']['Id']), i['Monitor']['Name'],
include_archived)
)
add_devices(sensors)
class ZMSensorMonitors(Entity):
"""Get the status of each ZoneMinder monitor."""
def __init__(self, monitor_id, monitor_name):
"""Initialize monitor sensor."""
self._monitor_id = monitor_id
self._monitor_name = monitor_name
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return '{} Status'.format(self._monitor_name)
@property
def state(self):
"""Return the state of the sensor."""
return self._state
def update(self):
"""Update the sensor."""
monitor = zoneminder.get_state(
'api/monitors/%i.json' % self._monitor_id
)
if monitor['monitor']['Monitor']['Function'] is None:
self._state = STATE_UNKNOWN
else:
self._state = monitor['monitor']['Monitor']['Function']
class ZMSensorEvents(Entity):
"""Get the number of events for each monitor."""
def __init__(self, monitor_id, monitor_name, include_archived):
"""Initialize event sensor."""
self._monitor_id = monitor_id
self._monitor_name = monitor_name
self._include_archived = include_archived
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return '{} Events'.format(self._monitor_name)
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return 'Events'
@property
def state(self):
"""Return the state of the sensor."""
return self._state
def update(self):
"""Update the sensor."""
archived_filter = '/Archived:0'
if self._include_archived:
archived_filter = ''
event = zoneminder.get_state(
'api/events/index/MonitorId:%i%s.json' % (self._monitor_id,
archived_filter)
)
self._state = event['pagination']['count']
| biotty/rmg | graphics/py/posterize.py | Python | bsd-2-clause | 3,589 | 0.002229 |
#!/usr/bin/env python3
#
# (c) Christian Sommerfeldt OEien
# All rights reserved
from rmg.color import Color, InkColor, white_ink
from rmg.board import Photo, Image, Board
from rmg.plane import XY
from argparse import ArgumentParser
from sys import stderr
from math import sqrt, floor
from copy import copy
def most_central_available(board):
c = XY(board.n_columns, board.n_rows) * 0.5
min_distance = abs(c) + 1
the_most = None
for y in range(board.n_rows):
for x in range(board.n_columns):
if board.int_at(x, y) == 0:
p = XY(x, y)
d = abs(p - c)
if d <= min_distance:
min_distance = d
the_most = p
return the_most
def status(m):
stderr.write(m)
def fill_order(s):
status("calculating %dx%d pattern\n" % (s, s))
board = Board([[0]*s for i in range(s)])
for i in range(s * s):
p = most_central_available(board)
board.set_int(p.x, p.y, 1 + i)
# consider: cache this on /tmp/posterize.X_Y.cache
# so that we don't recalculate at each invokation
return board
def averages(p, s, cmy_index):
q = p.quality
matrix = []
for row in range(0, q.height, s):
status("%d/%d\r" % (row, q.height))
row_bound = min(q.height, row + s)
a = []
for column in range(0, q.width, s):
column_bound = min(q.width, column + s)
color = Color(0, 0, 0)
for y in range(row, row_bound):
for x in range(column, column_bound):
color += p.color_at(x, y)
color *= 1/float((column_bound - column) * (row_bound - row))
a.append(int(s * s * \
InkColor.from_hsv(*color.hsv()).cmy()[cmy_index]))
matrix.append(a)
return Board(matrix)
class Poster:
def __init__(self, p, s, cmy_index):
status("averaging for %s\n" % (["cyan", "magenta", "yellow"][cmy_index],))
self.board = averages(p, s, cmy_index)
status("%dx%d board of fill-counts\n" % \
(self.board.n_columns, self.board.n_rows))
self.pattern = fill_order(s)
self.s = s
self.cmy_index = cmy_index
def ink(self, x, y):
a = [0, 0, 0]
a[self.cmy_index] = 1
component_ink = InkColor(*a)
n = self.board.int_at(x // self.s, y // self.s)
s_x = x % self.s
s_y = y % self.s
if self.pattern.int_at(s_x, s_y) <= n:
return component_ink
else:
return white_ink
def ink_post(x, y):
ink = white_ink
ink += cyan_poster.ink(x, y)
ink += magenta_poster.ink(x, y)
ink += yellow_poster.ink(x, y)
return Color.from_hsv(*ink.hsv())
ap = ArgumentParser()
ap.add_argument("infile", type=str)
ap.add_argument("outfile", type=str, nargs="?", default="-")
ap.add_argument("side_cyan", type=int, nargs="?", default=17)
ap.add_argument("side_magenta", type=int, nargs="?", default=19)
ap.add_argument("side_yellow", type=int, nargs="?", default=23)
args = ap.parse_args()
p = Photo.from_file(args.infile)
q = p.quality
r = Image(q.width, q.height,
None if args.outfile=="-" else args.outfile)
cyan_poster = Poster(p, args.side_cyan, 0)
magenta_poster = Poster(p, args.side_magenta, 1)
yellow_poster = Poster(p, args.side_yellow, 2)
for row in range(q.height):
status("rende
|
ring row %d\r" % (row,))
for column in range(q.width):
r.put(ink_post(column, row))
r.close()
status("rendered %dx%d image\n" % (
|
q.width, q.height))
| Glottotopia/aagd | moin/local/moin/build/lib.linux-x86_64-2.6/MoinMoin/util/diff_html.py | Python | mit | 5,991 | 0.00434 |
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - Side by side diffs
@copyright: 2002 Juergen Hermann <jh@web.de>,
2002 Scott Moonen <smoonen@andstuff.org>
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin.support import difflib
from MoinMoin.wikiutil import escape
def indent(line):
eol = ''
while line and line[0] == '\n':
eol += '\n'
line = line[1:]
stripped = line.lstrip()
if len(line) - len(stripped):
line = " " * (len(line) - len(stripped)) + stripped
#return "%d / %d / %s" % (len(line), len(stripped), line)
return eol + line
# This code originally by Scott Moonen, used with permission.
def diff(request, old, new, old_top='', new_top='', old_bottom='', new_bottom='', old_top_class='', new_top_class='', old_bottom_class='', new_bottom_class=''):
""" Find changes between old and new and return
HTML markup visualising them.
@param old: old text [unicode]
@param new: new text [unicode]
@param old_top: Custom html for adding ontop of old revision column (optional)
@param old_bottom: Custom html for adding at bottom of old revision column (optional)
@param new_top: Custom html for adding ontop of new revision column (optional)
@param new_bottom: Custom html for adding at bottom of new revision column (optional)
@param old_top_class: Custom class for <td> with old_top content (optional)
@param new_top_class: Custom class for <td> with new_top content (optional)
@param old_bottom_class: Custom class for <td> with old_bottom content (optional)
@param new_bottom_class: Custom class for <td> with new_bottom content (optional)
"""
_ = request.getText
t_line = _("Line") + " %d"
seq1 = old.splitlines()
seq2 = new.splitlines()
seqobj = difflib.SequenceMatcher(None, seq1, seq2)
linematch = seqobj.get_matching_blocks()
result = """
<table class="diff">
"""
if old_top or new_top:
result += '<tr><td class="%s">%s</td><td class="%s">%s</td></tr>' % (old_top_class, old_top, new_top_class, new_top)
if len(seq1) == len(seq2) and linematch[0] == (0, 0, len(seq1)):
# No differences.
result += '<tr><td class="diff-same" colspan="2">' + _("No differences found!") + '</td></tr>'
else:
result += """
<tr>
<td class="diff-removed"><span>%s</span></td>
<td class="diff-added"><span>%s</span></td>
</tr>
""" % (_('Deletions are marked like this.'), _('Additions are marked like this.'), )
lastmatch = (0, 0)
# Print all differences
for match in linematch:
# Starts of pages identical?
if lastmatch == match[0:2]:
lastmatch = (match[0] + match[2], match[1] + match[2])
continue
llineno, rlineno = lastmatch[0]+1, lastmatch[1]+1
result += """
<tr class="diff-title">
<td>%s:</td>
<td>%s:</td>
</tr>
""" % (request.formatter.line_anchorlink(1, llineno) + request.formatter.text(t_line % llineno) + request.formatter.line_anchorlink(0),
request.formatter.line_anchorlink(1, rlineno) + request.formatter.text(t_line % rlineno) + request.formatter.line_anchorlink(0))
leftpane = ''
rightpane = ''
linecount = max(match[0] - lastmatch[0], match[1] - lastmatch[1])
for line in range(linecount):
if line < match[0] - lastmatch[0]:
if line > 0:
leftpane += '\n'
leftpane += seq1[lastmatch[0] + line]
if line < match[1] - lastmatch[1]:
if line > 0:
rightpane += '\n'
rightpane += seq2[lastmatch[1] + line]
charobj = difflib.SequenceMatcher(None, leftpane, rightpane)
charmatch = charobj.get_matching_blocks()
if charobj.ratio() < 0.5:
# Insufficient similarity.
if leftpane:
leftresult = """<span>%s</span>""" % indent(escape(leftpane))
else:
leftresult = ''
if rightpane:
rightresult = """<span>%s</span>""" % indent(escape(rightpane))
else:
rightresult = ''
else:
# Some similarities; markup changes.
charlast = (0, 0)
leftresult = ''
rightresult = ''
for thismatch in charmatch:
if thismatch[0] - charlast[0] != 0:
leftresult += """<span>%s</span>""" % indent(
escape(leftpane[charlast[0]:thismatch[0]]))
if thismatch[1] - charlast[1] != 0:
rightresult += """<span>%s</span>""" % indent(
escape(rightpane[charlast[1]:thismatch[1]]))
leftresult += escape(leftpane[thismatch[0]:thismatch[0] + thismatch[2]])
rightresult += escape(rightpane[thismatch[1]:thismatch[1] + thismatch[2]])
charlast = (thismatch[0] + thismatch[2], thismatch[1] + thismatch[2])
leftpane = '<br>'.join([indent(x) for x in leftresult.splitlines()])
rightpane = '<br>'.join([indent(x) for x in rightresult.splitlines()])
# removed width="50%%"
result += """
<tr>
<td class="diff-removed">%s</td>
<td class="diff-added">%s</td>
</tr>
""" % (leftpane, rightpane)
lastmatch = (match[0] + match[2], match[1] + match[2])
if old_bottom or new_bottom:
result += '<tr><td class="%s">%s</td><td class="%s">%s</td></tr>' % (old_bottom_class, old_bottom, new_bottom_class, new_bottom)
result += '</table>\n'
return result
| hail-is/hail | hail/python/hailtop/aiocloud/aiogoogle/client/iam_client.py | Python | mit | 272 | 0.003676 |
from .base_client import GoogleBaseClient
class GoogleIAmClient(GoogleBaseClient):
def __init__(self, project, **kwargs):
super().__init__(f'https://iam.googleapis.com/v1/projects/{project}', **kwargs)
# https://cloud.google.com/iam/docs/reference/rest
|
forfuturellc/peterhellberg
|
peterhellberg/main.py
|
Python
|
mit
| 1,258
| 0.000795
|
'''
The Core of peterhellberg
'''
import requests
from . import metadata
def parse(url, use_ssl=False):
'''Send a http request to the remote server
Return the response, as a dictionary
Raises requests.exceptions.ConnectionError in event of a network error
Raises requests.exceptions.HTTPError in event of invalid HTTP response
Raises requests.exceptions.Timeout in event of a request timeout
Raises requests.exceptions.TooManyRedirects in event of too many redirects'''
params = {}
params['use_ssl'] = use_ssl
res = requests.get('{0}/{1}'.format(metadata.APP_URL, url), params=params)
res.raise_for_status()
return res.json()
def diagnose(exception):
'''Diagnose issues. Useful when errors occur.'''
|
if isinstance(exception, requests.exceptions.ConnectionError):
print("error: network error")
elif isinstance(exception, requests.exceptions.HTTPError):
print("error: invalid HTTP response")
elif isinstance(exception, requests.except
|
ions.Timeout):
print("error: request timeout")
elif isinstance(exception, requests.exceptions.TooManyRedirects):
print("error: too many redirects")
else:
print("error: unknown exception")
        print(exception)
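# Usage sketch (an addition, not in the original file; the 'status' endpoint
# name is hypothetical): call parse() and route any requests failure through
# diagnose(). All four exception classes handled above derive from
# requests.exceptions.RequestException.
if __name__ == '__main__':
    try:
        print(parse('status'))
    except requests.exceptions.RequestException as exc:
        diagnose(exc)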
|
algorythmic/bash-completion
|
test/t/test_java.py
|
Python
|
gpl-2.0
| 1,508
| 0
|
import pytest
from conftest import is_bash_type
@pytest.mark.bashcomp(
pre_cmds=("CLASSPATH=$PWD/java/a:$PWD/java/bashcomp.jar",)
)
class TestJava:
@pytest.fixture(scope="class")
def can_list_jar(self, bash):
return (
is_bash_type(bash, "zipinfo")
or is_bash_type(bash, "unzip")
or is_bash_type(bash, "jar")
)
@pytest.mark.complete("java -", require_cmd=True)
def test_1(self, completion):
assert completion
@pytest.mark.complete("java ")
def test_2(self, completion, can_list_jar):
|
if can_list_jar:
assert completion == "b bashcomp.jarred c. toplevel".split()
else:
assert completion == "b c.".split()
@pytest.mark.
|
complete("java -classpath java/bashcomp.jar ")
def test_3(self, completion, can_list_jar):
if can_list_jar:
assert completion == "bashcomp.jarred toplevel".split()
else:
assert not completion
@pytest.mark.complete("java -cp java/bashcomp.jar:java/a/c ")
def test_4(self, completion, can_list_jar):
if can_list_jar:
assert completion == "bashcomp.jarred d toplevel".split()
else:
assert completion == ["d"]
@pytest.mark.complete("java -cp '' ")
def test_5(self, completion):
assert not completion
@pytest.mark.complete("java -jar java/")
def test_6(self, completion):
assert completion == "a/ bashcomp.jar bashcomp.war".split()
|
cloudera/hue
|
desktop/core/ext-py/pycryptodomex-3.9.7/lib/Cryptodome/SelfTest/Util/test_strxor.py
|
Python
|
apache-2.0
| 10,618
| 0.00292
|
#
# SelfTest/Util/test_strxor.py: Self-test for XORing
#
# ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
import unittest
from binascii import unhexlify, hexlify
from Cryptodome.SelfTest.st_common import list_test_cases
from Cryptodome.Util.strxor import strxor, strxor_c
class StrxorTests(unittest.TestCase):
def test1(self):
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
term2 = unhexlify(b"383d4ba020573314395b")
result = unhexlify(b"c70ed123c59a7fcb6f12")
self.assertEqual(strxor(term1, term2), result)
self.assertEqual(strxor(term2, term1), result)
def test2(self):
es = b""
self.assertEqual(strxor(es, es), es)
def test3(self):
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
all_zeros = b"\x00" * len(term1)
self.assertEqual(strxor(term1, term1), all_zeros)
def test_wrong_length(self):
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
term2 = unhexlify(b"ff339a83e5cd4cdf564990")
self.assertRaises(ValueError, strxor, term1, term2)
def test_bytearray(self):
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
term1_ba = bytearray(term1)
term2 = unhexlify(b"383d4ba020573314395b")
result = unhexlify(b"c70ed123c59a7fcb6f12")
self.assertEqual(strxor(term1_ba, term2), result)
def test_memoryview(self):
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
term1_mv = memoryview(term1)
term2 = unhexlify(b"383d4ba020573314395b")
result = unhexlify(b"c70ed123c59a7fcb6f12")
self.assertEqual(strxor(term1_mv, term2), result)
def test_output_bytearray(self):
"""Verify result can be stored in pre-allocated memory"""
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
term2 = unhexlify(b"383d4ba020573314395b")
original_term1 = term1[:]
original_term2 = term2[:]
expected_xor = unhexlify(b"c70ed123c59a7fcb6f12")
output = bytearray(len(term1))
result = strxor(term1, term2, output=output)
self.assertEqual(result, None)
self.assertEqual(output, expected_xor)
self.assertEqual(term1, original_term1)
self.assertEqual(term2, original_term2)
def test_output_memoryview(self):
"""Verify result can be stored in pre-allocated memory"""
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
term2 = unhexlify(b"383d4ba020573314395b")
original_term1 = term1[:]
original_term2 = term2[:]
expected_xor = unhexlify(b"c70ed123c59a7fcb6f12")
output = memoryview(bytearray(len(term1)))
result = strxor(term1, term2, output=output)
self.assertEqual(result, None)
self.assertEqual(output, expected_xor)
self.assertEqual(term1, original_term1)
self.assertEqual(term2, original_term2)
def test_output_overlapping_bytearray(self):
"""Verify result can be stored in overlapping memory"""
term1 = bytearray(unhexlify(b"ff339a83e5cd4cdf5649"))
term2 = unhexlify(b"383d4ba020573314395b")
original_term2 = term2[:]
expected_xor = unhexlify(b"c70ed123c59a7fcb6f12")
result = strxor(term1, term2, output=term1)
self.assertEqual(result, None)
self.assertEqual(term1, expected_xor)
self.assertEqual(term2, original_term2)
def test_output_overlapping_memoryview(self):
"""Verify result can be stored in overlapping memory"""
term1 = memoryview(bytearray(unhexlify(b"ff339a83e5cd4cdf5649")))
term2 = unhexlify(b"383d4ba020573314395b")
original_term2 = term2[:]
expected_xor = unhexlify(b"c70ed123c59a7fcb6f12")
result = strxor(term1, term2, output=term1)
self.assertEqual(result, None)
self.assertEqual(term1, expected_xor)
self.assertEqual(term2, original_term2)
def test_output_ro_bytes(self):
"""Verify result cannot be stored in read-only memory"""
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
|
term2 = unhexlify(b"383d4ba02057331
|
4395b")
self.assertRaises(TypeError, strxor, term1, term2, output=term1)
def test_output_ro_memoryview(self):
"""Verify result cannot be stored in read-only memory"""
term1 = memoryview(unhexlify(b"ff339a83e5cd4cdf5649"))
term2 = unhexlify(b"383d4ba020573314395b")
self.assertRaises(TypeError, strxor, term1, term2, output=term1)
def test_output_incorrect_length(self):
"""Verify result cannot be stored in memory of incorrect length"""
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
term2 = unhexlify(b"383d4ba020573314395b")
output = bytearray(len(term1) - 1)
self.assertRaises(ValueError, strxor, term1, term2, output=output)
import sys
if sys.version[:3] == "2.6":
del test_memoryview
del test_output_memoryview
del test_output_overlapping_memoryview
del test_output_ro_memoryview
class Strxor_cTests(unittest.TestCase):
def test1(self):
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
result = unhexlify(b"be72dbc2a48c0d9e1708")
self.assertEqual(strxor_c(term1, 65), result)
def test2(self):
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
self.assertEqual(strxor_c(term1, 0), term1)
def test3(self):
self.assertEqual(strxor_c(b"", 90), b"")
def test_wrong_range(self):
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
self.assertRaises(ValueError, strxor_c, term1, -1)
self.assertRaises(ValueError, strxor_c, term1, 256)
def test_bytearray(self):
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
term1_ba = bytearray(term1)
result = unhexlify(b"be72dbc2a48c0d9e1708")
self.assertEqual(strxor_c(term1_ba, 65), result)
def test_memoryview(self):
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
term1_mv = memoryview(term1)
result = unhexlify(b"be72dbc2a48c0d9e1708")
self.assertEqual(strxor_c(term1_mv, 65), result)
def test_output_bytearray(self):
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
original_term1 = term1[:]
expected_result = unhexlify(b"be72dbc2a48c0d9e1708")
output = bytearray(len(term1))
result = strxor_c(term1, 65, output=output)
self.assertEqual(result, None)
self.assertEqual(output, expected_result)
self.assertEqual(term1, original_term1)
def test_output_memoryview(self):
term1 = unhexlify(b"ff339a83e5cd4cdf5649")
origin
|
smly/nips17_adversarial_attack
|
defense/import_from_tf_baseline/dump_ens_adv_inception_resnet_v2.py
|
Python
|
apache-2.0
| 30,348
| 0
|
# -*- coding: utf-8 -*-
import os
import sys
import math
from pathlib import Path
import scipy.misc
import h5py
import numpy as np
import tensorflow as tf
import torch
import torch.nn as nn
import click
from models.slim.nets.inception_resnet_v2 import (
inception_resnet_v2,
inception_resnet_v2_arg_scope)
slim = tf.contrib.slim
FMT_CONV = 'InceptionResnetV2/InceptionResnetV2/{}/convolution'
FMT_RELU = 'InceptionResnetV2/InceptionResnetV2/{}/Relu'
FMT_OTHER = 'InceptionResnetV2/{}/{}'
TEST_THRESHOLD = 1e-2
def _make_padding(padding_name, conv_shape):
padding_name = padding_name.decode("utf-8")
if padding_name == "VALID":
return [0, 0]
elif padding_name == "SAME":
return [
math.floor(int(conv_shape[0])/2),
math.floor(int(conv_shape[1])/2)
]
else:
raise RuntimeError(f"Invalid padding name: {padding_name}")
def get_store_path(outdir, name):
return (
Path(outdir) /
Path('EnsAdvInceptionResnetV2') /
Path(f'{name}.h5'))
def dump_conv2d(sess, name='Conv2d_1a_3x3', outdir='./dump'):
conv_operation = sess.graph.get_operation_by_name(FMT_CONV.format(name))
weights_tensor = sess.graph.get_tensor_by_name(
FMT_OTHER.format(name, 'weights:0'))
weights = weights_tensor.eval()
padding = _make_padding(
conv_operation.get_attr('padding'),
weights_tensor.get_shape())
strides = conv_operation.get_attr('strides')
conv_out = sess.graph.get_operation_by_name(
FMT_CONV.format(name)).outputs[0].eval()
bn_beta = sess.graph.get_tensor_by_name(
FMT_OTHER.format(name, 'BatchNorm/beta:0')).eval()
bn_mean = sess.graph.get_tensor_by_name(
FMT_OTHER.format(name, 'BatchNorm/moving_mean:0')).eval()
bn_var = sess.graph.get_tensor_by_name(
FMT_OTHER.format(name, 'BatchNorm/moving_variance:0')).eval()
relu_out = sess.graph.get_operation_by_name(
FMT_RELU.format(name)).outputs[0].eval()
store_path = get_store_path(outdir, name)
if not store_path.parent.exists():
store_path.parent.mkdir(parents=True)
with h5py.File(str(store_path), 'w') as h5f:
# conv
h5f.create_dataset("weights", data=weights)
h5f.create_dataset("strides", data=strides)
h5f.create_dataset("padding", data=padding)
h5f.create_dataset("conv_out", data=conv_out)
# batch norm
h5f.create_dataset("beta", data=bn_beta)
h5f.create_dataset("mean", data=bn_mean)
h5f.create_dataset("var", data=bn_var)
h5f.create_dataset("relu_out", data=relu_out)
def dump_conv2d_nobn(sess, name='Conv2d_1x1', outdir='./dump'):
conv_operation = sess.graph.get_operation_by_name(FMT_CONV.format(name))
weights_tensor = sess.graph.get_tensor_by_name(
FMT_OTHER.format(name, 'weights:0'))
weights = weights_tensor.eval()
biases_tensor = sess.graph.get_tensor_by_name(
FMT_OTHER.format(name, 'biases:0'))
biases = biases_tensor.eval()
padding = _make_padding(
conv_operation.get_attr('padding'),
weights_tensor.get_shape())
strides = conv_operation.get_attr('strides')
conv_out = sess.graph.get_operation_by_name(
'InceptionResnetV2/InceptionResnetV2/' +
name +
'/BiasAdd').outputs[0].eval()
store_path = get_store_path(outdir, name)
if not store_path.parent.exists():
store_path.parent.mkdir(parents=True)
with h5py.File(str(store_path), 'w') as h5f:
h5f.create_dataset("weights", data=weights)
h5f.create_dataset("biases", data=biases)
h5f.create_dataset("strides", data=strides)
h5f.create_dataset("padding", data=padding)
h5f.create_dataset("conv_out", data=conv_out)
def dump_logits(sess, outdir='./dump'):
operation = sess.graph.get_operation_by_name(
FMT_OTHER.format('Logits', 'Predictions'))
weights_tensor = sess.graph.get_tensor_by_name(
FMT_OTHER.format('Logits', 'Logits/weights:0'))
weights = weights_tensor.eval()
biases_tensor = sess.graph.get_tensor_by_name(
FMT_OTHER.format('Logits', 'Logits/biases:0'))
biases = biases_tensor.eval()
out = operation.outputs[0].eval()
store_path = get_store_path(outdir, 'Logits')
if not store_path.parent.exists():
store_path.parent.mkdir(parents=True)
with h5py.File(str(store_path), 'w') as h5f:
h5f.create_dataset("weights", data=weights)
h5f.create_dataset("biases", data=biases)
h5f.create_dataset("out", data=out)
def dump_mixed_5b(sess, name='Mixed_5b', outdir='./dump'):
dump_conv2d(sess, name=name+'/Branch_0/Conv2d_1x1', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_1/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_1/Conv2d_0b_5x5', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_2/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_2/Conv2d_0b_3x3', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_2/Conv2d_0c_3x3', outdir=outdir)
dump_conv2d(sess, name=name+'/Bra
|
nch_3/Conv2d_0b_1x1', outdir=outdir)
def dump_block35(sess, name='Repeat/block35_1', outdir='./dump'):
dump_conv2d(sess, name=name+'/Branch_0/Conv2d_1x1', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_1/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_1/Conv2d_0b_3x3', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_2/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_2/Conv2d_0b_3x3', outdir=outdir)
dump
|
_conv2d(sess, name=name+'/Branch_2/Conv2d_0c_3x3', outdir=outdir)
dump_conv2d_nobn(sess, name=name+'/Conv2d_1x1', outdir=outdir)
def dump_mixed_6a(sess, name='Mixed_6a', outdir='./dump'):
dump_conv2d(sess, name=name+'/Branch_0/Conv2d_1a_3x3', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_1/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_1/Conv2d_0b_3x3', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_1/Conv2d_1a_3x3', outdir=outdir)
def dump_block17(sess, name='Repeat_1/block17_1', outdir='./dump'):
dump_conv2d(sess, name=name+'/Branch_0/Conv2d_1x1', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_1/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_1/Conv2d_0b_1x7', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_1/Conv2d_0c_7x1', outdir=outdir)
dump_conv2d_nobn(sess, name=name+'/Conv2d_1x1', outdir=outdir)
def dump_mixed_7a(sess, name='Mixed_7a', outdir='./dump'):
dump_conv2d(sess, name=name+'/Branch_0/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_0/Conv2d_1a_3x3', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_1/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_1/Conv2d_1a_3x3', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_2/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_2/Conv2d_0b_3x3', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_2/Conv2d_1a_3x3', outdir=outdir)
def dump_block8(sess, name='Repeat_2/block8_1', outdir='./dump'):
dump_conv2d(sess, name=name+'/Branch_0/Conv2d_1x1', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_1/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_1/Conv2d_0b_1x3', outdir=outdir)
dump_conv2d(sess, name=name+'/Branch_1/Conv2d_0c_3x1', outdir=outdir)
dump_conv2d_nobn(sess, name=name+'/Conv2d_1x1', outdir=outdir)
def _assign_from_checkpoint(sess, checkpoint):
init_fn = slim.assign_from_checkpoint_fn(
checkpoint,
slim.get_model_variables('InceptionResnetV2'))
init_fn(sess)
def show_all_variables():
for v in slim.get_model_variables():
print(v.name, v.get_shape())
def dump_all(sess, logits, outdir):
tf.summary.scalar('logs', logits[0][0])
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter("logs", sess.graph)
# run for comparing output values later
out = sess.run(summary_op)
summary_writer.add_summary(out, 0)
dump_conv2d(sess, name='Conv2d_1a_3x3', outd
|
eamars/webserver
|
site-package/index2.py
|
Python
|
mit
| 3,741
| 0.00294
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
import mysql.connector
import datetime
HEADER_TEMPLATE = \
"HTTP/1.1 200 OK\r\n" \
"Server: webhttpd/2.0\r\n" \
"Cache-Control: no-cache, no-store, must-revalidate\r\n" \
"Connection: keep-alive\r\n" \
"Content-Type: text/html; charset=utf-8\r\n" \
"Date: {}\r\n" \
"\r\n"
# Connection configuration for MySQL Connection
SQL_CONFIG = {
"host": "192.168.2.5",
"user": "eamars",
"password": "931105",
"autocommit": True
}
DB_NAME = "JAV"
TABLE_NAME = "GENRE_INDEX_TABLE"
COL = """
<html>
<head>
<title>JAV Genre</title>
<style>
table {
white-space: nowrap;
font-family: 'Arial';
margin: 25px auto;
border-collapse: collapse;
border: 1px solid #eee;
border-bottom: 2px solid #00cccc;
box-shadow: 0px 0px 20px rgba(0, 0, 0, 0.1), 0px 10px 20px rgba(0, 0, 0, 0.05), 0px 20px 20px rgba(0, 0, 0, 0.05), 0px 30px 20px rgba(0, 0, 0, 0.05);
}
table tr:hover {
background: #f4f4f4;
}
table tr:hover td {
color: #555;
}
table th, table td {
color: #999;
border: 1px solid #eee;
padding: 12px 35px;
|
border-collapse: collapse;
}
table th {
background: #00cccc;
color: #fff;
text-transform: uppercase;
font-size: 12px;
}
table th.last {
border-right: none;
|
}
h3 {
font:1.2em normal Arial,sans-serif;
color:#34495E;
text-align:center;
letter-spacing:-2px;
font-size:2.5em;
margin:20px 0;
}
</style>
</head>
<body>
<h3>JAV Genre</h3>
<table>
<tr>
<th>GENRE_ID</th>
<th>GENRE_NAME_EN</th>
<th>GENRE_NAME_CN</th>
<th>GENRE_NAME_TW</th>
<th>GENRE_NAME_JA</th>
</tr>
"""
def main():
# Connect to server
connection = mysql.connector.connect(**SQL_CONFIG)
# Connect to database
connection.database = DB_NAME
# Create sql
sql = "SELECT * FROM `{}`".format(TABLE_NAME)
# Execute sql
cursor = connection.cursor()
cursor.execute(sql)
sys.stdout.write(HEADER_TEMPLATE.format(datetime.datetime.now().strftime("%a, %d %b %Y %H:%M:%S GMT")))
sys.stdout.write(COL)
for result in cursor:
row = "<tr><td><a href='http://www.javmoo.info/cn/genre/{}'>{}</a></td><td>{}</td><td>{}</td><td>{}</td><td>{}</td></tr>\n".format(
result[0],
result[0],
result[1],
result[2],
result[3],
result[4]
)
sys.stdout.write(row)
sys.stdout.write("</table></body></html>")
sys.stdout.write("\r\n\r\n")
if __name__ == "__main__":
main()
|
entrepidea/projects
|
python/tutorials/algo/leetcode/easy/is_sebsequence.py
|
Python
|
gpl-3.0
| 641
| 0.014041
|
"""
Given two strings s and
|
t, return true if s is a subsequence of t, or false otherwise.
https://leetcode.com/problems/is-subsequence/
Date: 10/29/21
"""
def is_seq(s,t):
ls = list(s)
lt = list(t)
idx = []
for e in ls:
        if e in lt:
            idx.append(lt.index(e))
            if len(idx) > 1 and idx[-1] < idx[-2]:
                return False
        else:
            return False
return True
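# Note (an addition, not in the original file): the index-based check above
# can misjudge repeated characters, because lt.index(e) always returns the
# first occurrence -- e.g. is_seq('aaa', 'aa') comes back True. A standard
# two-pointer sketch for comparison:
def is_seq_two_pointers(s, t):
    it = iter(t)
    # 'c in it' consumes the iterator up to and including the first match,
    # so the characters of s must appear in t in order.
    return all(c in it for c in s)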
if __name__ == '__main__':
s = 'abc'
t =
|
'ahbgdc'
print(is_seq(s,t))
s = 'axc'
t = 'ahbgdc'
print(is_seq(s,t))
s = 'acb'
t = 'ahbgdc'
print(is_seq(s,t))
|
beproud/bpcommons
|
tests/test_forms_field.py
|
Python
|
bsd-2-clause
| 1,310
| 0.00229
|
#:coding=utf-8:
from django.test import TestCase as DjangoTestCase
from django.forms import Form
from beproud.django.commons.forms import EmailField
__all__ = (
'EmailFieldTest',
'JSONFormFieldTest',
'JSONWidgetTest',
)
class EmailTestForm(Form):
email = EmailField(label="email")
class EmailFieldTest(DjangoTestCase):
def test_basic_email(self):
form = EmailTestForm({"email": "spam@eggs.com"})
self.assertTru
|
e(form.is_valid())
def test_keitai_email(self):
form = EmailTestForm({"email": "-spam..eggs-@softbank.ne.jp"})
self.assertTrue(form.is_valid())
form = EmailTestForm({"email": ".*&$.-spam..!!eggs!!-.*.@ezweb.ne.jp"})
self.assertTrue(form.is_valid())
def test_plus_email(self):
form = EmailTestForm({"email": "spam+extra@eggs.com"})
self.assertTrue(form.is_valid())
def test_multi_
|
email(self):
form = EmailTestForm({"email": "aaa spam+extra@eggs.com email@email.com"})
self.assertFalse(form.is_valid())
def test_longtld(self):
form = EmailTestForm({"email": "spam@eggs.engineer"})
self.assertTrue(form.is_valid())
def test_punycode(self):
form = EmailTestForm({"email": "spam@eggs.xn--i1b6b1a6a2e"})
self.assertTrue(form.is_valid())
|
plilja/project-euler
|
problem_16/power_digit_sum.py
|
Python
|
apache-2.0
| 111
| 0.009009
|
def power_digit_sum
|
(exponent):
power_of_2 = str(2 ** exponent)
return sum([int(x) for x in power_of
|
_2])
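# Usage sketch (an addition, not in the original file): Project Euler
# problem 16 asks for the digit sum of 2**1000.
if __name__ == '__main__':
    print(power_digit_sum(1000))  # -> 1366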
|
xalt/xalt
|
proj_mgmt/py_build_tools/build_python_filter_routine.py
|
Python
|
lgpl-2.1
| 3,443
| 0.01946
|
# -*- python -*-
# Git Version: @git@
#-----------------------------------------------------------------------
# XALT: A tool that tracks users jobs and environments on a cluster.
# Copyright (C) 2013-2014 University of Texas at Austin
# Copyright (C) 2013-2014 University of Tennessee
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
#-----------------------------------------------------------------------
from __future__ import print_function
import os, sys, re, argparse
class CmdLineOptions(object):
""" Command line Options class """
def __init__(self):
""" Empty Ctor """
pass
def execute(self):
""" Specify command line arguments and parse the command line"""
parser = argparse.ArgumentParser()
parser.add_argu
|
ment("--confFn", dest='confFn', action="store", help="python config file")
parser.add_argument("--xalt_cfg", dest='xaltCFG', action="store", help="XALT std config")
parser.add_argument("--input", dest='input', action="store", help="input template file")
parser.add_argument("--output", dest='output',
|
action="store", help="output file")
args = parser.parse_args()
return args
def convert_template(pattern, replaceA ,inputFn, outputFn):
try:
f = open(inputFn,"r")
except IOError as e:
print("Unable to open \"%s\", aborting!" % (inputFn))
sys.exit(-1)
outA = []
for line in f:
idx = line.find(pattern)
if (idx == -1):
outA.append(line)
else:
outA.append("python_pkg_patternA = [\n")
for entry in replaceA:
kind = entry['kind'].lower()
patt = entry['patt'].replace("\\/","/")
k_s = entry['k_s'].upper()
if ( k_s != "KEEP" and k_s != "SKIP"):
print("Bad k_s value for patt: ",patt)
sys.exit(-1)
if ( kind != "name" and kind != "path"):
print("Bad k_s value for patt: ",patt)
sys.exit(-1)
line = " { 'k_s' : '" + k_s +"', 'kind' : '" + kind + "', 'patt' : re.compile( r'" + patt + "'), 'bare_patt' : r'"+patt+"' },\n"
outA.append(line)
outA.append("]\n")
f.close()
try:
of = open(outputFn,"w")
except IOError as e:
print("Unable to open \"%s\", aborting!" % (outputFn))
sys.exit(-1)
of.write("".join(outA))
of.close()
def main():
my_replacement = "python_pkg_patterns"
args = CmdLineOptions().execute()
namespace = {}
exec(open(args.confFn).read(), namespace)
replaceA = namespace.get(my_replacement, [])
namespace = {}
exec(open(args.xaltCFG).read(), namespace)
replaceA.extend(namespace.get(my_replacement, []))
convert_template("@"+my_replacement+"@", replaceA, args.input, args.output)
if ( __name__ == '__main__'): main()
|
lnielsen/invenio
|
invenio/ext/babel/selectors.py
|
Python
|
gpl-2.0
| 2,593
| 0.00887
|
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
invenio.ext.babel.selectors
---------------------------
Implements various selectors.
"""
from flask import request, session, current_app
from flask.ext.login import current_user
def get_locale():
"""
Computes the language needed to return the answer to the client.
|
This information will then be available in the session['ln'].
"""
from invenio.base.i18n import wash_language
required_ln = None
passed_ln = request.values.get('ln', type=str)
if passed_ln:
        ## If ln is specified explicitly as a GET or POST argument
## let's take it!
required_ln = wash_language(passed_ln)
if passed_ln != re
|
quired_ln:
## But only if it was a valid language
required_ln = None
if required_ln:
## Ok it was. We store it in the session.
session["ln"] = required_ln
if not "ln" in session:
## If there is no language saved into the session...
user_language = current_user.get("language")
if user_language:
## ... and the user is logged in, we try to take it from its
## settings.
session["ln"] = user_language
else:
## Otherwise we try to guess it from its request headers
for value, quality in request.accept_languages:
value = str(value)
ln = wash_language(value)
if ln == value or ln[:2] == value[:2]:
session["ln"] = ln
break
else:
## Too bad! We stick to the default :-)
session["ln"] = current_app.config.get('CFG_SITE_LANG')
return session["ln"]
def get_timezone():
"""Returns timezone from user settings."""
return current_user.get('timezone')
|
OpusVL/odoo
|
addons/stock/wizard/make_procurement_product.py
|
Python
|
agpl-3.0
| 5,960
| 0.003859
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
class make_procurement(osv.osv_memory):
_name = 'make.procurement'
_description = 'Make Procurements'
def onchange_product_id(self, cr, uid, ids, prod_id):
""" On Change of Product ID getting the value of related UoM.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param prod_id: Changed ID of Product
@return: A dictionary which gives the UoM of the changed Product
"""
product = self.pool.get('product.product').browse(cr, uid, prod_id)
return {'value': {'uom_id': product.uom_id.id}}
_columns = {
'qty': fields.float('Quantity', digits=(16,2), required=True),
'product_id': fields.many2one('product.product', 'Product', required=True, readonly=1),
'uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True),
'date_planned': fields.date('Planned Date', required=True),
}
_defaults = {
'date_planned': fields.date.context_today,
'qty': lambda *args: 1.0,
}
def make_procurement(self, cr, uid, ids, context=None):
""" Creates procurement order for selected product.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary which loads Procurement form view.
"""
user = self.pool.get('res.users').browse(cr, uid, uid, context=context).login
wh_obj = self.pool.get('stock.warehouse')
procurement_obj = self.pool.get('procurement.order')
data_obj = self.pool.get('ir.model.data')
for proc in self.browse(cr, uid, ids, context=context):
wh = wh_obj.browse(cr, uid, proc.warehouse_id.id, context=context)
procure_id = procurement_obj.create(cr, uid, {
'name':'INT: '+str(user),
'date_planned': proc.date_planned,
'product_id': proc.product_id.id,
'product_qty': proc.qty,
'product_uom': proc.uom_id.id,
'location_id': wh.lot_stock_id.id,
'company_id': wh.company_id.id,
})
procurement_obj.signal_workflow(cr, uid, [procure_id], 'button_confirm')
id2 = data_obj._get_id(cr, uid, 'procurement', 'procurement_tree_view')
id3 = data_obj._get_id(cr, uid, 'procurement', 'procurement_form_view')
if id2:
id2 = data_obj.browse(cr, uid, id2, context=context).res_id
if id3:
id3 = data_obj.browse(cr, uid, id3, context=context).res_id
return {
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'procurement.order',
'res_id' : procure_id,
'views': [(id3,'form'),(id2,'tree')],
'type': 'ir.actions.act_window',
}
def default_get(self, cr, uid, fields, context=None):
""" To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID
|
of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
@return: A dictionary which of fields with values.
"""
if context is None:
context = {}
record_id = context.get('active_id')
if context.get(
|
'active_model') == 'product.template':
product_ids = self.pool.get('product.product').search(cr, uid, [('product_tmpl_id', '=', context.get('active_id'))], context=context)
if len(product_ids) == 1:
record_id = product_ids[0]
else:
                raise orm.except_orm(_('Warning'), _('Please use the Product Variant view to request a procurement.'))
res = super(make_procurement, self).default_get(cr, uid, fields, context=context)
if record_id and 'product_id' in fields:
proxy = self.pool.get('product.product')
product_ids = proxy.search(cr, uid, [('id', '=', record_id)], context=context, limit=1)
if product_ids:
product_id = product_ids[0]
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
res['product_id'] = product.id
res['uom_id'] = product.uom_id.id
if 'warehouse_id' in fields:
warehouse_id = self.pool.get('stock.warehouse').search(cr, uid, [], context=context)
res['warehouse_id'] = warehouse_id[0] if warehouse_id else False
return res
|
dantebarba/docker-media-server
|
plex/Sub-Zero.bundle/Contents/Libraries/Shared/subliminal_patch/refiners/omdb.py
|
Python
|
gpl-3.0
| 2,505
| 0.001597
|
# coding=utf-8
import os
import subliminal
import base64
import zlib
from subliminal import __short_version__
from subliminal.refiners.omdb import OMDBClient, refine as refine_orig, Episode, Movie
from subliminal_patch.http import TimeoutSession
class SZOMDBClient(OMDBClient):
def __init__(self, version=1, session=None, headers=None, timeout=10):
if not session:
session = TimeoutSession(timeout=timeout)
super(SZOMDBClient, self).__init__(version=version, session=session, headers=headers, timeout=timeout)
def get_params(self, params):
self.session.params['apikey'] = \
zlib.decompress(base64.b16decode(os.environ['U1pfT01EQl9LRVk']))\
.decode('cm90MTM=\n'.decode("base64")) \
.decode('YmFzZTY0\n'.decode("base64")).split("x")[0]
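        # (Added note, not part of the original: the env var name decodes
        # from base64 to 'SZ_OMDB_KEY'; its value is unwrapped as
        # base16 -> zlib -> rot13 -> base64, keeping the part before 'x'.)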
return dict(self.session.params, **params)
def get(self, id=None, title=None, type=None, year=None, plot='short', tomatoes=False):
# build the params
params = {}
if id:
params['i'] = id
if title:
params['t'] = title
if not params:
raise ValueError('At least id or title is required')
params['type'] = type
params['y'] = year
params['plot'] = plot
params['tomatoes'] = tomatoes
# perform the request
r = self.session.get(self.base_url, params=self.get_params(params))
r.raise_for_status()
# get the response as json
j = r.json()
# check response status
if j['Response'] == 'False':
return None
return j
def search(self, title, type=None, year=None, page=1):
# build the params
params = {'s': title, 'type': type, 'y': year, 'page': page}
# perform the request
r = self.session.get(self.base_url, params=self.get_params(params))
r.raise_for_status()
|
# get the response as json
j = r.json()
# check response status
if j['Response'] == 'False':
return N
|
one
return j
def refine(video, **kwargs):
refine_orig(video, **kwargs)
if isinstance(video, Episode) and video.series_imdb_id:
video.series_imdb_id = video.series_imdb_id.strip()
elif isinstance(video, Movie) and video.imdb_id:
video.imdb_id = video.imdb_id.strip()
omdb_client = SZOMDBClient(headers={'User-Agent': 'Subliminal/%s' % __short_version__})
subliminal.refiners.omdb.omdb_client = omdb_client
|
dataplumber/nexus
|
analysis/tests/algorithms/StandardDeviationSearch_test.py
|
Python
|
apache-2.0
| 9,139
| 0.002954
|
"""
Copyright (c) 2016 Jet Propulsion Laboratory,
California Institute of Technology. All rights reserved
"""
import json
import unittest
import urllib
from multiprocessing.pool import ThreadPool
from unittest import skip
import numpy as np
from mock import Mock
from nexustiles.model.nexusmodel import Tile, BBox
from nexustiles.nexustiles import NexusTileService
from tornado.testing import AsyncHTTPTestCase, bind_unused_port
from tornado.web import Application
from webservice.NexusHandler import AlgorithmModuleWrapper
from webservice.algorithms import StandardDeviationSearch
from webservice.webapp import ModularNexusHandlerWrapper
class HttpParametersTest(AsyncHTTPTestCase):
def get_app(self):
path = StandardDeviationSearch.StandardDeviationSearchHandlerImpl.path
algorithm = AlgorithmModuleWrapper(StandardDeviationSearch.StandardDeviationSearchHandlerImpl)
thread_pool = ThreadPool(processes=1)
return Application(
[(path, ModularNexusHandlerWrapper, dict(clazz=algorithm, algorithm_config=None, thread_pool=thread_pool))],
default_host=bind_unused_port()
)
def test_no_ds_400(self):
response = self.fetch(StandardDeviationSearch.StandardDeviationSearchHandlerImpl.path)
self.assertEqual(400, response.code)
body = json.loads(response.body)
self.assertEqual("'ds' argument is required", body['error'])
def test_no_longitude_400(self):
params = {
"ds": "dataset"
}
path = StandardDeviationSearch.StandardDeviationSearchHandlerImpl.path + '?' + urllib.urlencode(params)
response = self.fetch(path)
self.assertEqual(400, response.code)
body = json.loads(response.body)
self.assertEqual("'longitude' argument is required", body['error'])
def test_no_latitude_400(self):
params = {
"ds": "dataset",
"longitude": "22.4"
}
path = StandardDeviationSearch.StandardDeviationSearchHandlerImpl.path + '?' + urllib.urlencode(params)
response = self.fetch(path)
self.assertEqual(400, response.code)
body = json.loads(response.body)
self.assertEqual("'latitude' argument is required", body['error'])
def test_no_day_or_date_400(self):
params = {
"ds": "dataset",
"longitude": "22.4",
"latitude": "-84.32"
}
path = StandardDeviationSearch.StandardDeviationSearchHandlerImpl.path + '?' + urllib.urlencode(params)
response = self.fetch(path)
self.assertEqual(400, response.code)
body = json.loads(response.body)
self.assertEqual("At least one of 'day' or 'date' arguments are required but not both.", body['error'])
def test_no_day_not_int_400(self):
params = {
"ds": "dataset",
"longitude": "22.4",
"latitude": "-84.32",
"day": "yayday"
}
path = StandardDeviationSearch.StandardDeviationSearchHandlerImpl.path + '?' + urllib.urlencode(params)
response = self.fetch(path)
self.assertEqual(400, response.code)
body = json.loads(response.body)
self.assertEqual("At least one of 'day' or 'date' arguments are required but not both.", body['error'])
def test_day_and_date_400(self):
params = {
"ds": "dataset",
"longitude": "22.4",
"latitude": "-84.32",
"day": "35",
"date": "1992-01-01T00:00:00Z"
}
path = StandardDeviationSearch.StandardDeviationSearchHandlerImpl.path + '?' + urllib.urlencode(params)
response = self.fetch(path)
self.assertEqual(400, response.code)
body = json.loads(response.body)
self.assertEqual("At least one of 'day' or 'date' arguments are required but not both.", body['error'])
def test_no_allInTile_200(self):
params = {
"ds": "dataset",
"longitude": "22.4",
"latitude": "-84.32",
"day": "35"
}
path = StandardDeviationSearch.StandardDeviationSearchHandlerImpl.path + '?' + urllib.urlencode(params)
response = self.fetch(path)
self.assertEqual(200, response.code)
def test_allInTile_false_200(self):
params = {
"ds": "dataset",
"longitude": "22.4",
"latitude": "-84.32",
"date": "1992-01-01T00:00:00Z",
"allInTile": "false"
}
path = StandardDeviationSearch.StandardDeviationSearchHandlerImpl.path + '?' + urllib.urlencode(params)
|
response = self.fetch(path)
self.assertEqual(200, response.code)
@skip("Integration test only. Works only if you have Solr and Cassandra running locally with data ingested")
def test_integration_all_in_tile(self):
params = {
"ds": "AVHRR_OI_L4_GHRSST_NCEI_CLIM",
"
|
longitude": "-177.775",
"latitude": "-78.225",
"day": "1"
}
path = StandardDeviationSearch.StandardDeviationSearchHandlerImpl.path + '?' + urllib.urlencode(params)
response = self.fetch(path)
self.assertEqual(200, response.code)
print response.body
body = json.loads(response.body)
self.assertEqual(560, len(body['data']))
@skip("Integration test only. Works only if you have Solr and Cassandra running locally with data ingested")
def test_integration_all_in_tile_false(self):
params = {
"ds": "AVHRR_OI_L4_GHRSST_NCEI_CLIM",
"longitude": "-177.875",
"latitude": "-78.125",
"date": "2016-01-01T00:00:00Z",
"allInTile": "false"
}
path = StandardDeviationSearch.StandardDeviationSearchHandlerImpl.path + '?' + urllib.urlencode(params)
# Increase timeouts when debugging
# self.http_client.fetch(self.get_url(path), self.stop, connect_timeout=99999999, request_timeout=999999999)
# response = self.wait(timeout=9999999999)
response = self.fetch(path)
self.assertEqual(200, response.code)
print response.body
body = json.loads(response.body)
self.assertAlmostEqual(-177.875, body['data'][0]['longitude'], 3)
self.assertAlmostEqual(-78.125, body['data'][0]['latitude'], 3)
self.assertAlmostEqual(0.4956, body['data'][0]['standard_deviation'], 4)
class TestStandardDeviationSearch(unittest.TestCase):
def setUp(self):
tile = Tile()
tile.bbox = BBox(-1.0, 1.0, -2.0, 2.0)
tile.latitudes = np.ma.array([-1.0, -0.5, 0, .5, 1.0])
tile.longitudes = np.ma.array([-2.0, -1.0, 0, 1.0, 2.0])
tile.times = np.ma.array([0L])
tile.data = np.ma.arange(25.0).reshape((1, 5, 5))
tile.meta_data = {"std": np.ma.arange(25.0).reshape((1, 5, 5))}
attrs = {'find_tile_by_polygon_and_most_recent_day_of_year.return_value': [tile]}
self.tile_service = Mock(spec=NexusTileService, **attrs)
def test_get_single_exact_std_dev(self):
result = StandardDeviationSearch.get_single_std_dev(self.tile_service, "fake dataset", 1.0, .5, 83)
self.assertEqual(1, len(result))
self.assertEqual((1.0, 0.5, 18.0), result[0])
def test_get_single_close_std_dev(self):
result = StandardDeviationSearch.get_single_std_dev(self.tile_service, "fake dataset", 1.3, .25, 83)
self.assertEqual(1, len(result))
self.assertEqual((1.0, 0.0, 13.0), result[0])
def test_get_all_std_dev(self):
result = StandardDeviationSearch.get_all_std_dev(self.tile_service, "fake dataset", 1.3, .25, 83)
self.assertEqual(25, len(result))
@skip("Integration test only. Works only if you have Solr and Cassandra running locally with data ingested")
class IntegrationTestStandardDeviationSearch(unittest.TestCase):
def setUp(self):
self.tile_service = NexusTileService()
def test_get_single_exact_std_dev(self):
result = StandardDeviationSearch.get_single_std_dev(self.tile_service, "AVHRR_OI_L4_GHRSST_NCEI_CLIM", -177.625,
|
mozilla/webowonder
|
bin/generate_l10n_css_overides.py
|
Python
|
bsd-3-clause
| 1,931
| 0.00725
|
#!/usr/bin/env python
"""
{'friendly_name': '',
'string_id': '',
'css_selector': ''},
"""
elements = [
{'friendly_name': 'Golden Tickets',
'string_id': 'magic-tickets',
'css_selector': '#magic-tickets'},
{'friendly_name': 'Marvels Await Poster',
'string_id': 'marvels-await-poster',
'css_selector': '#marvels-await'},
{'friendly_name': 'Marvels Await Title',
'string_id': 'marvels-await-title',
'css_selector': '#marvels-await h3'},
{'friendly_name': 'Marvels Await Title Line',
'string_id': 'marvels-await-title-line',
'css_selector': '#marvels-await h3 span'},
{'friendly_name': 'Submit Demo!',
'string_id':
|
'submit-demo',
'css_selector': '#submit-demo a'},
{'friendly_name'
|
: 'Mozilla Presents',
'string_id': 'mozilla-presents',
'css_selector': '#home-link'},
{'friendly_name': 'This Way',
'string_id': 'this-way',
'css_selector': '#this-way'},
{'friendly_name': 'That Way',
'string_id': 'that-way',
'css_selector': '#that-way'},
{'friendly_name': 'Experience it now button',
'string_id': 'experience-it',
'css_selector': '#experience-it'},
]
t = """
%(css_selector)s {
{# L10n: DO NOT LOCALIZE! Controls size of %(friendly_name)s. Value is a number. Examples: 0.8 or 1.0 or 1.3 #}
{%% if _('custom-css-%(string_id)s-size') and _('custom-css-%(string_id)s-size') != 'custom-css-%(string_id)s-size' %%}
font-size: {{ _('custom-css-%(string_id)s-size') }}em;
{%% endif %%}
{# L10n: DO NOT LOCALIZE! Controls font family for %(friendly_name)s. Values: Arial, sans-serif #}
{%% if _('custom-css-%(string_id)s-font') and _('custom-css-%(string_id)s-font') != 'custom-css-%(string_id)s-font' %%}
font-family: {{ _('custom-css-%(string_id)s-font') }};
{%% endif %%}
}
"""
for el in elements:
print t % el
|
mrdon/flask
|
werkzeug/wrappers.py
|
Python
|
bsd-3-clause
| 76,207
| 0.000276
|
# -*- coding: utf-8 -*-
"""
werkzeug.wrappers
~~~~~~~~~~~~~~~~~
The wrappers are simple request and response objects which you can
subclass to do whatever you want them to do. The request object contains
the information transmitted by the client (webbrowser) and the response
object contains all the information sent back to the browser.
An important detail is that the request object is created with the WSGI
environ and will act as high-level proxy whereas the response object is an
actual WSGI application.
Like everything else in Werkzeug these objects will work correctly with
    unicode data. Incoming form data parsed by the request object will be
    decoded into a unicode object if possible and if it makes sense.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from datetime import datetime, timedelta
from werkzeug.http import HTTP_STATUS_CODES, \
parse_accept_header, parse_cache_control_header, parse_etags, \
parse_date, generate_etag, is_resource_modified, unquote_etag, \
quote_etag, parse_set_header, parse_authorization_header, \
parse_www_authenticate_header, remove_entity_headers, \
parse_options_header, dump_options_header, http_date, \
parse_if_range_header, parse_cookie, dump_cookie, \
parse_range_header, parse_content_range_header, dump_header
from werkzeug.urls import url_decode, iri_to_uri, url_join
from werkzeug.formparser import FormDataParser, default_stream_factory
from werkzeug.utils import cached_property, environ_property, \
header_property, get_content_type, yields, call_maybe_yield
from werkzeug.wsgi import get_current_url, get_host, \
ClosingIterator, get_input_stream, get_content_length
from werkzeug.datastructures import MultiDict, CombinedMultiDict, Headers, \
EnvironHeaders, ImmutableMultiDict, ImmutableTypeConversionDict, \
ImmutableList, MIMEAccept, CharsetAccept, LanguageAccept, \
ResponseCacheControl, RequestCacheControl, CallbackDict, \
ContentRange, iter_multi_items
from werkzeug._internal import _get_environ
from werkzeug._compat import to_bytes, string_types, text_type, \
integer_types, wsgi_decoding_dance, wsgi_get_bytes, \
to_unicode, to_native, BytesIO
def _run_wsgi_app(*args):
"""This function replaces itself to ensure that the test module is not
imported unless required. DO NOT USE!
"""
global _run_wsgi_app
from werkzeug.test import run_wsgi_app as _run_wsgi_app
return _run_wsgi_app(*args)
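# (Added note, not part of Werkzeug: the rebinding above is a lazy-import
# trick -- the first call pays the cost of importing werkzeug.test, replaces
# the module-level name, and every later call dispatches straight to the
# real implementation.)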
def _warn_if_string(iterable):
"""Helper for the response objects to check if the iterable returned
to the WSGI server is not a string.
"""
if isinstance(iterable, string_types):
from warnings import warn
warn(Warning('response iterable was set to a string. This appears '
'to work but means that the server will send the '
'data to the client char, by char. This is almost '
'never intended behavior, use response.data to assign '
'strings to the response object.'), stacklevel=2)
def _assert_not_shallow(request):
if request.shallow:
raise RuntimeError('A shallow request tried to consume '
'form data. If you really want to do '
'that, set `shallow` to False.')
def _iter_encoded(iterable, charset):
for item in iterable:
if isinstance(item, text_type):
yield item.encode(charset)
else:
yield item
class BaseRequest(object):
"""Very basic request object. This does not implement advanced stuff like
entity tag parsing or cache controls. The request object is created with
the WSGI environment as first argument and will add itself to the WSGI
environment as ``'werkzeug.request'`` unless it's created with
`populate_request` set to False.
There are a couple of mixins available that add additional functionality
|
to the request object, there is also a class called `Request` which
subclasses `BaseRequest` and all the important mixins.
It's a good idea to create a custom subclass of the :class:`BaseRequest`
and add missing functionality either via mixins o
|
r direct implementation.
Here an example for such subclasses::
from werkzeug.wrappers import BaseRequest, ETagRequestMixin
class Request(BaseRequest, ETagRequestMixin):
pass
Request objects are **read only**. As of 0.5 modifications are not
allowed in any place. Unlike the lower level parsing functions the
request object will use immutable objects everywhere possible.
Per default the request object will assume all the text data is `utf-8`
encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
details about customizing the behavior.
Per default the request object will be added to the WSGI
environment as `werkzeug.request` to support the debugging system.
If you don't want that, set `populate_request` to `False`.
If `shallow` is `True` the environment is initialized as shallow
object around the environ. Every operation that would modify the
environ in any way (such as consuming form data) raises an exception
unless the `shallow` attribute is explicitly set to `False`. This
is useful for middlewares where you don't want to consume the form
data by accident. A shallow request is not populated to the WSGI
environment.
.. versionchanged:: 0.5
read-only mode was enforced by using immutables classes for all
data.
"""
#: the charset for the request, defaults to utf-8
charset = 'utf-8'
#: the error handling procedure for errors, defaults to 'replace'
encoding_errors = 'replace'
#: the maximum content length. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: parsing fails because more than the specified value is transmitted
#: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_content_length = None
#: the maximum form field size. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: data in memory for post data is longer than the specified value a
#: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_form_memory_size = None
#: the class to use for `args` and `form`. The default is an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
    #: multiple values per key. Alternatively it makes sense to use an
#: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
#: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
#: which is the fastest but only remembers the last key. It is also
#: possible to use mutable structures, but this is not recommended.
#:
#: .. versionadded:: 0.6
parameter_storage_class = ImmutableMultiDict
#: the type to be used for list values from the incoming WSGI environment.
#: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
#: (for example for :attr:`access_list`).
#:
#: .. versionadded:: 0.6
list_storage_class = ImmutableList
#: the type to be used for dict values from the incoming WSGI environment.
#: By default an
#: :class:`~werkzeug.datastructures.ImmutableTypeConversionDict` is used
#: (for example for :attr:`cookies`).
#:
#: .. versionadded:: 0.6
dict_storage_class = ImmutableTypeConversionDict
    #: The form data parser that should be use
|
dials/dials
|
tests/algorithms/indexing/test_max_cell.py
|
Python
|
bsd-3-clause
| 2,598
| 0.001155
|
from __future__ import annotations
import random
import pytest
import scitbx.matrix
from cctbx import sgtbx
from cctbx.sgtbx import bravais_types
from dials.algorithms.indexing.max_cell import find_max_cell
from dials.array_family import flex
@pytest.fixture(params=bravais_types.acentric)
def setup(request):
space_group_symbol = request.param
sgi = sgtbx.space_group_info(sp
|
ace_group_symbol)
cs = sgi.any_compatible_crystal_symmetry(volume=random.randint(1e4, 1e6))
ms = cs.build_miller_set(anomalous_flag=True, d_min=3).expand_to_p1()
# the reciprocal matrix
B = scitbx.matrix.sqr(cs.unit_cell().fractionalization_matrix()).transpose()
# randomly select 25% of reflections
ms = ms.select(flex.random_permutation(ms.size())[: int(0.25 * ms.size())])
refl = flex.refle
|
ction_table()
refl["rlp"] = B.elems * ms.indices().as_vec3_double()
refl["imageset_id"] = flex.int(len(refl))
refl["xyzobs.mm.value"] = flex.vec3_double(len(refl))
d = {}
d["crystal_symmetry"] = cs
d["reflections"] = refl
return d
@pytest.mark.parametrize(
"histogram_binning,nearest_neighbor_percentile", [("linear", None), ("log", 0.99)]
)
def test_max_cell(setup, histogram_binning, nearest_neighbor_percentile):
reflections = setup["reflections"]
crystal_symmetry = setup["crystal_symmetry"]
max_cell_multiplier = 1.3
max_cell = find_max_cell(
reflections,
max_cell_multiplier=max_cell_multiplier,
histogram_binning=histogram_binning,
nearest_neighbor_percentile=nearest_neighbor_percentile,
)
known_max_cell = max(
crystal_symmetry.primitive_setting().unit_cell().parameters()[:3]
)
assert max_cell.max_cell > known_max_cell
def test_max_cell_low_res_with_high_res_noise(setup):
reflections = setup["reflections"]
crystal_symmetry = setup["crystal_symmetry"]
rlp = reflections["rlp"]
# select only low resolution reflections
reflections = reflections.select(1 / rlp.norms() > 4)
n = int(0.1 * reflections.size())
rlp_noise = flex.vec3_double(*(flex.random_double(n) for i in range(3)))
reflections["rlp"].extend(rlp_noise)
reflections["imageset_id"].extend(flex.int(rlp_noise.size()))
reflections["xyzobs.mm.value"].extend(flex.vec3_double(rlp_noise.size()))
max_cell_multiplier = 1.3
max_cell = find_max_cell(reflections, max_cell_multiplier=max_cell_multiplier)
known_max_cell = max(
crystal_symmetry.primitive_setting().unit_cell().parameters()[:3]
)
assert max_cell.max_cell > known_max_cell
|
stefanseefeld/qmtest
|
qm/label.py
|
Python
|
gpl-2.0
| 6,102
| 0.003933
|
########################################################################
#
# File: label.py
# Author: Alex Samuel
# Date: 2001-03-17
#
# Contents:
# Label
#
# Copyright (c) 2001, 2002 by CodeSourcery, LLC. All rights reserved.
#
# For license terms see the file COPYING.
#
########################################################################
########################################################################
# Imports
########################################################################
from __future__ import nested_scopes
import os
import re
import string
import types
########################################################################
# Classes
########################################################################
class Label:
"""A 'Label' identifies an entity.
A 'Label' is a generalization of a filename. Like filenames, labels
consist of one or more directories followed by a basename. However,
the format used for a label need not be the same as that used by
filenames.
Each label class defines a separator character to take the place of
the '/' character used by many file systems.
All labels are relative labels; no label may begin with a separator
character."""
def __init__(self, label):
"""Construct a new 'Label'.
'label' -- A string giving the value of the label."""
assert type(label) in (types.StringType, types.UnicodeType)
self._label = label
def Join(self, *labels):
"""Combine this label and the 'labels' into a single label.
'labels' -- A sequence of strings giving the components of the
new label. All but the last are taken as directory names; the
last is treated as a basename."""
result = self._label
for l in
|
labels:
if not result:
# If the label is empty so far, l is the first component.
result = l
elif result and result[-1] == self._sep:
# If the label thus far ends with a sepa
|
rator, we do not
# want to add another one.
result += l
else:
result = result + self._sep + l
return self.__class__(result)
def Split(self):
"""Split the label into a pair '(directory, basename)'.
returns -- A pair '(directory, basename)', each of which is
a label.
It is always true that 'directory.join(basename)' will return a
label equivalent to the original label."""
last_sep = self._label.rfind(self._sep)
if last_sep != -1:
return (self.__class__(self._label[:last_sep]),
self.__class__(self._label[last_sep + 1:]))
else:
return (self.__class__(""),
self.__class__(self._label))
def SplitLeft(self):
"""Split the label into a pair '(parent, subpath)'. This is
the same operation as Split, except the split occurs at the
leftmost separator, not the rightmost.
returns -- A pair '(directory, basename)', each of which is
a label.
It is always true that 'directory.join(basename)' will return a
label equivalent to the original label."""
first_sep = self._label.find(self._sep)
if first_sep != -1:
return (self.__class__(self._label[:first_sep]),
self.__class__(self._label[first_sep + 1:]))
else:
return (self.__class__(self._label),
self.__class__(""))
def Basename(self):
"""Return the basename for the label.
returns -- A string giving the basename for the label. The
value returned for 'l.basename()' is always the same as
'l.split()[1]'."""
return self.Split()[1]
def Dirname(self):
"""Return the directory name for the 'label'.
returns -- A string giving the directory name for the 'label'.
The value returned for 'l.dirname()' is always the same as
'l.split()[0]'."""
return self.Split()[0]
def IsValid(self, label, is_component):
"""Returns true if 'label' is valid.
'label' -- The string being tested for validity.
'is_component' -- True if the string being tested is just a
single component of a label path.
        returns -- True if 'label' is valid."""
if label and label[0] == self._sep:
# All labels are relative; a valid label cannot begin with a
# separator.
return 0
elif is_component and self._sep in label:
# A component label cannot contain a separator.
return 0
elif label.find(self._sep + self._sep) != -1:
# It is invalid to have two separators in a row.
return 0
return 1
def __str__(self):
"""Return the string form of this label."""
return self._label
########################################################################
# Functions
########################################################################
__thunk_regex = re.compile("[^a-z0-9_]")
def thunk(label):
"""Sanitize and convert 'label' to a valid label.
Makes a best-effort attempt to keep 'label' recognizable during
the conversion.
returns -- A valid label."""
# Strip leading and trailing whitespace.
label = string.strip(label)
# Lower capital letters.
label = string.lower(label)
# Replace all invalid characters with underscores.
label = string.replace(label, "+", "x")
label = __thunk_regex.sub("_", label)
# Trim leading underscores.
while len(label) > 0 and label[0] == "_":
label = label[1:]
# Make sure the label isn't empty.
if label == "":
raise ValueError, "Empty label"
return label
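
# Illustrative sketch (not part of the original module): a minimal concrete
# subclass with a hypothetical '.' separator shows how Join, Split and thunk
# compose.
#
#   class _DotLabel(Label):
#       _sep = "."
#
#   l = _DotLabel("a.b").Join("c")    # str(l) == "a.b.c"
#   d, b = l.Split()                  # (str(d), str(b)) == ("a.b", "c")
#   thunk("Hello, World!")            # == "hello__world_"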
########################################################################
# Local Variables:
# mode: python
# indent-tabs-mode: nil
# fill-column: 72
# End:
|
phelmig/outside_
|
mvp/outside/frontend_auth/__init__.py
|
Python
|
mit
| 1,006
| 0.006958
|
"""
frontend_auth is a wrapper around the `django.contrib.auth.view` Views.
* **templates/** Contains the templates for login / pw change / pw reset views and the password_reset emails
* **auth_mixins.py** Provides a simple mixin that tests if `instance.agency == request.user.agencyemployee.agency`, more to come
* **forms.py** Contains a simple wrapper around `django.contrib.auth.forms` to use placeholders instead of labels
* **urls.py** Creates `django.contrib.auth.views` with the following names:
* login - Login
* logout - Logout -> Redirect to 'login'
* change_password - unused view to change the password
* password_change_done - unused as well
* password_reset - Shows a form to request a pw reset
 * password_reset_done - The view after the requested pw reset
* password_reset_confirm - The view after the user clicked on the reset link in the mail
* password_reset_complete - The view after the manual password reset
"""
|
HewlettPackard/python-proliant-sdk
|
examples/Redfish/ex31_set_license_key.py
|
Python
|
apache-2.0
| 2,431
| 0.014809
|
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from _redfishobject import RedfishObject
from redfish.rest.v1 import ServerDownOrUnreachableError
def ex31_set_license_key(redfishobj, iLO_Key):
sys.stdout.write("\nEXAMPLE 31: Set iLO License Key\n")
instances = redfishobj.search_for_type("Manager.")
for instance in instances:
rsp = redfishobj.redfish_get(instance["@odata.id"])
body = dict()
body["LicenseKey"] = iLO_Key
if redfishobj.typepath.defs.isgen9:
oemhpdict = rsp.dict["Oem"]["Hp"]
else:
oemhpdict = rsp.dict["Oem"]["Hpe"]
response = redfishobj.redfish_post(oemhpdict["Links"]\
["LicenseService"]["@odata.id"], body)
redfishobj.error_handler(response)
if __name__ == "__main__":
# When running on the server locally use the following commented values
# iLO_https_url = "blobstore://."
    # iLO_account = "None"
# iLO_password = "None"
# When running remotely connect using the iLO secured (https://) address,
# iLO account name, and password to send https requests
    # iLO_https_url acceptable examples:
# "https://10.0.0.100"
# "https://f250asha.americas.hpqcorp.net"
iLO_https_url = "https://10.0.0.100"
iLO_account = "admin"
iLO_password = "password"
# Create a REDFISH object
try:
REDFISH_OBJ = RedfishObject(iLO_https_url, iLO_account, iLO_password)
except ServerDownOrUnreachableError, excp:
sys.stderr.write("ERROR: server not reachable or doesn't support " \
"RedFish.\n")
sys.exit()
except Exception, excp:
raise excp
ex31_set_license_key(REDFISH_OBJ, "test_iLO_Key")
|
jasonstack/cassandra
|
pylib/cqlshlib/cqlshhandling.py
|
Python
|
apache-2.0
| 10,355
| 0.001545
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from cqlshlib import cqlhandling
# we want the cql parser to understand our cqlsh-specific commands too
my_commands_ending_with_newline = (
'help',
'?',
'consistency',
'serial',
'describe',
'desc',
'show',
'source',
'capture',
'login',
'debug',
'tracing',
'expand',
'paging',
'exit',
'quit',
'clear',
'cls'
)
cqlsh_syntax_completers = []
def cqlsh_syntax_completer(rulename, termname):
def registrator(f):
cqlsh_syntax_completers.append((rulename, termname, f))
return f
return registrator
cqlsh_cmd_syntax_rules = r'''
<cqlshCommand> ::= <CQL_Statement>
| <specialCommand> ( ";" | "\n" )
;
'''
cqlsh_special_cmd_command_syntax_rules = r'''
<specialCommand> ::= <describeCommand>
| <consistencyCommand>
| <serialConsistencyCommand>
| <showCommand>
| <sourceCommand>
| <captureCommand>
| <copyCommand>
| <loginCommand>
| <debugCommand>
| <helpCommand>
| <tracingCommand>
| <expandCommand>
| <exitCommand>
| <pagingCommand>
| <clearCommand>
;
'''
cqlsh_describe_cmd_syntax_rules = r'''
<describeCommand> ::= ( "DESCRIBE" | "DESC" )
( "FUNCTIONS"
| "FUNCTION" udf=<anyFunctionName>
| "AGGREGATES"
| "AGGREGATE" uda=<userAggregateName>
| "KEYSPACES"
| "KEYSPACE" ksname=<keyspaceName>?
| ( "COLUMNFAMILY" | "TABLE" ) cf=<columnFamilyName>
| "INDEX" idx=<indexName>
| "MATERIALIZED" "VIEW" mv=<materializedViewName>
| ( "COLUMNFAMILIES" | "TABLES" )
| "FULL"? "SCHEMA"
| "CLUSTER"
| "TYPES"
| "TYPE" ut=<userTypeName>
| (ksname=<keyspaceName> | cf=<columnFamilyName> | idx=<indexName> | mv=<materializedViewName>))
;
'''
cqlsh_consistency_cmd_syntax_rules = r'''
<consistencyCommand> ::= "CONSISTENCY" ( level=<consistencyLevel> )?
;
'''
cqlsh_consistency_level_syntax_rules = r'''
<consistencyLevel> ::= "ANY"
| "ONE"
| "TWO"
| "THREE"
| "QUORUM"
| "ALL"
| "LOCAL_QUORUM"
| "EACH_QUORUM"
| "SERIAL"
| "LOCAL_SERIAL"
| "LOCAL_ONE"
| "NODE_LOCAL"
;
'''
cqlsh_serial_consistency_cmd_syntax_rules = r'''
<serialConsistencyCommand> ::= "SERIAL" "CONSISTENCY" ( level=<serialConsistencyLevel> )?
;
'''
cqlsh_serial_consistency_level_syntax_rules = r'''
<serialConsistencyLevel> ::= "SERIAL"
| "LOCAL_SERIAL"
;
'''
cqlsh_show_cmd_syntax_rules = r'''
<showCommand> ::= "SHOW" what=( "VERSION" | "HOST" | "SESSION" sessionid=<uuid> )
;
'''
cqlsh_source_cmd_syntax_rules = r'''
<sourceCommand> ::= "SOURCE" fname=<stringLiteral>
;
'''
cqlsh_capture_cmd_syntax_rules = r'''
<captureCommand> ::= "CAPTURE" ( fname=( <stringLiteral> | "OFF" ) )?
;
'''
cqlsh_copy_cmd_syntax_rules = r'''
<copyCommand> ::= "COPY" cf=<columnFamilyName>
( "(" [colnames]=<colname> ( "," [colnames]=<colname> )* ")" )?
( dir="FROM" ( fname=<stringLiteral> | "STDIN" )
| dir="TO" ( fname=<stringLiteral> | "STDOUT" ) )
( "WITH" <copyOption> ( "AND" <copyOption> )* )?
;
'''
cqlsh_copy_option_syntax_rules = r'''
<copyOption> ::= [optnames]=(<identifier>|<reserved_identifier>) "=" [optvals]=<copyOptionVal>
;
'''
cqlsh_copy_option_val_syntax_rules = r'''
<copyOptionVal> ::= <identifier>
| <reserved_identifier>
| <term>
;
'''
cqlsh_debug_cmd_syntax_rules = r'''
# avoiding just "DEBUG" so that this rule doesn't get treated as a terminal
<debugCommand> ::= "DEBUG" "THINGS"?
;
'''
cqlsh_help_cmd_syntax_rules = r'''
<helpCommand> ::= ( "HELP" | "?" ) [topic]=( /[a-z_]*/ )*
;
'''
cqlsh_tracing_cmd_syntax_rules = r'''
<tracingCommand> ::= "TRACING" ( switch=( "ON" | "OFF" ) )?
;
'''
cqlsh_expand_cmd_syntax_rules = r'''
<expandCommand> ::= "EXPAND" ( switch=( "ON" | "OFF" ) )?
;
'''
cqlsh_paging_cmd_syntax_rules = r'''
<pagingCommand> ::= "PAGING" ( switch=( "ON" | "OFF" | /[0-9]+/) )?
                  ;
'''
cqlsh_login_cmd_syntax_rules = r'''
<loginCommand> ::= "LOGIN" username=<username> (password=<stringLiteral>)?
;
'''
cqlsh_exit_cmd_syntax_rules = r'''
<exitCommand> ::= "exit" | "quit"
;
'''
cqlsh_clear_cmd_syntax_rules = r'''
<clearCommand> ::= "CLEAR" | "CLS"
;
'''
cqlsh_question_mark = r'''
<qmark> ::= "?" ;
'''
cqlsh_extra_syntax_rules = cqlsh_cmd_syntax_rules + \
cqlsh_special_cmd_command_syntax_rules + \
cqlsh_describe_cmd_syntax_rules + \
cqlsh_consistency_cmd_syntax_rules + \
cqlsh_consistency_level_syntax_rules + \
cqlsh_serial_consistency_cmd_syntax_rules + \
cqlsh_serial_consistency_level_syntax_rules + \
cqlsh_show_cmd_syntax_rules + \
cqlsh_source_cmd_syntax_rules + \
cqlsh_capture_cmd_syntax_rules + \
cqlsh_copy_cmd_syntax_rules + \
cqlsh_copy_option_syntax_rules + \
cqlsh_copy_option_val_syntax_rules + \
cqlsh_debug_cmd_syntax_rules + \
cqlsh_help_cmd_syntax_rules + \
cqlsh_tracing_cmd_syntax_rules + \
cqlsh_expand_cmd_syntax_rules + \
cqlsh_paging_cmd_syntax_rules + \
cqlsh_login_cmd_syntax_rules + \
cqlsh_exit_cmd_syntax_rules + \
cqlsh_clear_cmd_syntax_rules + \
cqlsh_question_mark
def complete_source_quoted_filename(ctxt, cqlsh):
partial_path = ctxt.get_binding('partial', '')
head, tail = os.path.split(partial_path)
exhead = os.path.expanduser(head)
try:
contents = os.listdir(exhead or '.')
except OSError:
return ()
matches = [f for f in contents if f.startswith(tail)]
annotated = []
for f in matches:
match = os.path.join(head, f)
if os.path.isdir(os.path.join(exhead, f)):
match += '/'
annotated.append(match)
return annotated
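
# Illustrative behaviour (assumed filenames, not from the original file): with
# 'schema.cql' and a directory 'scripts' in the working directory, a partial
# binding of 'sc' would complete to ['schema.cql', 'scripts/'].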
cqlsh_syntax_completer('sourceCommand', 'fname')(complete_source_quoted_filename)
cqlsh_syntax_completer('captureCommand', 'fname')(complete_source_quoted_filename)
@cqlsh_syntax_completer('copyCommand', 'fname')
def copy_fname_completer(ctxt, cqlsh):
lasttype = ctxt.get_binding('*LASTTYPE*')
if lasttype == 'unclosedString':
        return complete_source_quoted_filename(ctxt, cqlsh)
|
vrutkovs/dogtail
|
dogtail/i18n.py
|
Python
|
gpl-2.0
| 10,076
| 0.002084
|
# -*- coding: utf-8 -*-
"""
Internationalization facilities
Authors: David Malcolm <dmalcolm@redhat.com>
"""
__author__ = """David Malcolm <dmalcolm@redhat.com>, Zack Cerza <zcerza@redhat.com>"""
import config
import os
import re
import gettext
from logging import debugLogger as logger
from __builtin__ import unicode
def safeDecode(string):
if not isinstance(string, unicode):
try:
string = string.decode('utf-8')
except UnicodeDecodeError:
# logger.log(traceback.format_exc())
#logger.log("The following string is invalid and caused the above error: '%s'" % string)
string = string.decode('utf-8', 'replace')
return string
def safeEncode(string):
pass
"""
Singleton list of TranslationDb instances, to be initialized by the script with
whatever translation databases it wants.
"""
translationDbs = []
class TranslationDb(object):
"""
Abstract base class representing a database of translations
"""
def getTranslationsOf(self, srcName):
"""
Pure virtual method to look up the translation of a string.
Returns a list of candidate strings (the translation), empty if not found.
Note that a source string can map to multiple translated strings. For
example, in the French translation of Evolution, the string "Forward" can
translate to both
(i) "Faire suivre" for forwarding an email, and
(ii) "Suivant" for the next page in a wizard.
"""
raise NotImplementedError
class GettextTranslationDb(TranslationDb):
"""
Implementation of TranslationDb which leverages gettext, using a single
translation mo-file.
"""
def __init__(self, moFile):
self.__moFile = moFile
self.__gnutranslations = gettext.GNUTranslations(open(moFile))
def getTranslationsOf(self, srcName):
srcName = safeDecode(srcName)
# print "searching for translations of %s"%srcName
# Use a dict to get uniqueness:
results = {}
result = self.__gnutranslations.ugettext(srcName)
if result != srcName:
results[result] = None
# Hack alert:
#
# Note that typical UI definition in GTK etc contains strings with
# underscores to denote accelerators.
# For example, the stock GTK "Add" item has text "_Add" which e.g.
# translates to "A_jouter" in French
#
# Since these underscores have been stripped out before we see these strings,
# we are looking for a translation of "Add" into "Ajouter" in this case, so
# we need to fake it, by looking up the string multiple times, with underscores
# inserted in all possible positions, stripping underscores out of the result.
# Ugly, but it works.
for index in range(len(srcName)):
candidate = srcName[:index] + "_" + srcName[index:]
result = self.__gnutranslations.ugettext(candidate)
if result != candidate:
# Strip out the underscore, and add to the result:
results[result.replace('_', '')] = True
return results.keys()
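
# Illustrative walk-through of the accelerator hack above (assumed catalog
# entries, not from the original file): looking up "Add" also tries "_Add",
# "A_dd" and "Ad_d"; if the catalog maps "_Add" to "A_jouter", the candidate
# returned is "Ajouter", with the underscore stripped out.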
def translate(srcString):
"""
Look up srcString in the various translation databases (if any), returning
a list of all matches found (potentially the empty list)
"""
# Use a dict to get uniqueness:
results = {}
# Try to translate the string:
for translationDb in translationDbs:
for result in translationDb.getTranslationsOf(srcString):
result = safeDecode(result)
results[result] = True
# No translations found:
if len(results) == 0:
if config.config.debugTranslation:
logger.log('Translation not found for "%s"' % srcString)
return results.keys()
class TranslatableString(object):
"""
Class representing a string that we want to match strings against, handling
translation for us, by looking it up once at construction time.
"""
def __init__(self, untranslatedString):
"""
Constructor looks up the string in all of the translation databases, storing
the various translations it finds.
"""
        untranslatedString = safeDecode(untranslatedString)
self.untranslatedString = untranslatedString
self.translatedStrings = translate(untranslatedString)
def matchedBy(self, string):
"""
Compare the test string against either the translation of the original
string (or simply the original string, if no translation was found).
"""
# print "comparing %s against %s"%(string, self)
def stringsMatch(inS, outS):
"""
Compares a regular expression to a string
inS: the regular expression (or normal string)
outS: the normal string to be compared against
"""
inString = str(inS)
outString = outS
if inString == outString:
return True
inString = inString + '$'
inString = safeDecode(inString)
outString = safeDecode(outString)
if inString[0] == '*':
inString = "\\" + inString
# Escape all parentheses, since grouping will never be needed here
inString = re.sub('([\(\)])', r'\\\1', inString)
match = re.match(inString, outString)
matched = match is not None
return matched
matched = False
# the 'ts' variable keeps track of whether we're working with
# translated strings. it's only used for debugging purposes.
#ts = 0
# print string, str(self)
for translatedString in self.translatedStrings:
#ts = ts + 1
            matched = stringsMatch(translatedString, string)
if not matched:
matched = translatedString == string
if matched:
return matched
# ts=0
return stringsMatch(self.untranslatedString, string)
def __str__(self):
"""
Provide a meaningful debug version of the string (and the translation in
use)
"""
if len(self.translatedStrings) > 0:
# build an output string, with commas in the correct places
translations = ""
for tString in self.translatedStrings:
translations += u'"%s", ' % safeDecode(tString)
result = u'"%s" (%s)' % (
safeDecode(self.untranslatedString), translations)
return safeDecode(result)
else:
return '"%s"' % (self.untranslatedString)
def isMoFile(filename, language=''):
"""
Does the given filename look like a gettext mo file?
Optionally: Does the file also contain translations for a certain language,
for example 'ja'?
"""
if re.match('(.*)\\.mo$', filename):
if not language:
return True
elif re.match('/usr/share/locale(.*)/%s(.*)/LC_MESSAGES/(.*)\\.mo$' %
language, filename):
return True
else:
return False
else:
return False
def loadAllTranslationsForLanguage(language):
import distro
for moFile in distro.packageDb.getMoFiles(language):
translationDbs.append(GettextTranslationDb(moFile))
def getMoFilesForPackage(packageName, language='', getDependencies=True):
"""
Look up the named package and find all gettext mo files within it and its
dependencies. It is possible to restrict the results to those of a certain
language, for example 'ja'.
"""
import distro
result = []
for filename in distro.packageDb.getFiles(packageName):
if isMoFile(filename, language):
result.append(filename)
if getDependencies:
# Recurse:
for dep in distro.packageDb.getDependencies(packageName):
# We pass False to the inner call because getDependencies has alrea
|
DarrenBellew/CloudCompDT228-3
|
Lab3/1000FibNumber.py
|
Python
|
mit
| 158
| 0.006329
|
var = 2
start = 1
last = 1
while(len(str(start)) < 1000):
var=var+1
temp = start
    start = start+last
    last = temp
print("Final: " + str(var))
|
walchko/pygecko
|
dev/services/test.py
|
Python
|
mit
| 1,384
| 0.001445
|
#!/usr/bin/env python3
#
#
# copyright Kevin Walchko
#
# Basically a rostopic
from __future__ import print_function
import argparse
import time
# from pygecko import TopicSub
from pygecko.transport import zmqTCP, GeckoCore
from pygecko.multiprocessing import GeckoPy
from pygecko.test import GeckoSimpleProcess
# from pygecko.transport.zmqclass import
# def publisher(**kwargs):
# geckopy = GeckoPy()
#
# p = geckopy.Publisher()
#
# hertz = kwargs.get('rate', 10)
# rate = geckopy.Rate(hertz)
#
# topic = kwargs.get('topic')
# msg = kwargs.get('msg')
#
# cnt = 0
# start = time.time()
# while not geckopy.is_shutdown():
# p.pub(topic, msg) # topic msg
# if cnt % hertz == 0:
# print(">> {}[{:.1f}]: published {} msgs".format(topic, time.time()-start, hertz))
# cnt += 1
# rate.sleep()
def subscriber(**kwargs):
geckopy = GeckoPy(**kwargs)
def f(topic, msg):
print(">> {}: {}".format(topic, msg))
topic = kwargs.get('topic')
    s = geckopy.Subscriber([topic], f)
geckopy.spin()
if __name__ == '__main__':
p = GeckoSimpleProcess()
    # NOTE: `args` is undefined in this snippet; the original presumably built
    # it elsewhere (e.g. via argparse). A minimal stand-in keeps it runnable:
    args = {'topic': 'hello'}
    p.start(func=subscriber, name='subscriber', kwargs=args)
# while True:
# try:
# time.sleep(1)
# except KeyboardInterrupt:
# break
#
# # shutdown the processes
    # p.join(0.1)
|
alcides/rdflib
|
rdflib/sparql/sql/RelationalOperators.py
|
Python
|
bsd-3-clause
| 4,107
| 0.012661
|
class NoResultsException(Exception):
def __init__(self):
Exception("No Results.")
class RelationalOperator(object):
def __init__(self, parent):
self.parent = parent
def GenSql(self, sqlBuilder):
"""
Main external interface for SQL generation.
The client code should construct and pass an RdfSqlBuilder instance.
"""
self.BuildSql(sqlBuilder, True) # treat outermost query as 'nested' since it must return variables
    def BuildSql(self, sqlBuilder, isNested):
"""
Main (internal) interface for SQL generation. The logic required to
implement the operator should be implemented
using the provided SqlBuilder class.
If isNested=True, then output (SELECT clause)
variables must be set.
"""
raise Exception("BuildSql must be overridden in child classes.")
def AddNestedOp(self, sqlBuilder, relOp, tuplePrefix):
childBuilder = sqlBuilder.GetNestedSQLBuilder()
#childBuilder.SetComment(repr(relOp)) # useful for SQL debugging
relOp.BuildSql(childBuilder, True)
#sqlBuilder.SetVariablesFromChild(childBuilder)
tt = sqlBuilder.AddNestedTableTuple(childBuilder.Sql(),tuplePrefix)
sqlBuilder.AddChildVariables(childBuilder, tt)
return (tt, childBuilder)
def GetUsedVariables(self, sqlBuilder, includeBNodes=True):
"""
The operator should return a set of
variable name strings
of the variables used in this operator and
all child operators.
"""
raise Exception("GetUsedVariables must be overridden in child classes.")
def GetChildren(self):
"""
Returns all child operators.
"""
raise Exception("GetChildren must be overridden in child classes.")
def GetDescendantOperators(self, returnType, excludeSubtreeTypes, includeSelf=False):
"""
Returns all child operators of a given type, recursively
        (including the operator itself, if specified).
        If an operator belonging to one of the excludeSubtreeTypes is found,
        its children are skipped (the whole subtree below it is pruned).
"""
results = []
queue = []
queue.extend(self.GetChildren())
if includeSelf:
queue.append(self)
while len(queue) > 0:
q = queue.pop()
if isinstance(q,returnType):
results.append(q)
exclude = False
for e in excludeSubtreeTypes:
if isinstance(q,e):
exclude = True
break
if exclude:
continue
queue.extend(q.GetChildren())
return results
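
    # Illustrative walk-through (assumed operator classes, not from the
    # original file): for a tree Join(Filter(Scan), Union(Scan, Scan)),
    # GetDescendantOperators(Scan, excludeSubtreeTypes=[Union]) returns only
    # the Scan under Filter; the Union node is pruned before its children
    # (the two remaining Scans) are ever queued.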
class RelationalExpOperator(object):
def GetDecendentLexicalComparators(self):
pass
def BuildSqlExpression(self,sqlBuilder,tupleTable):
raise Exception("BuildSqlExpression must be overridden in
|
child classes.")
def AdjustCostEstimate(self,cost,colDist):
# Returns (cost, colDist, varSet)
raise Exception("AdjustCostEstimate must be overridden in child classes.")
class RelationalTerminalExpOperator(object):
def BuildTerminalExpression(self,sqlBuilder, tupleTable):
raise Exception("BuildTerminalExpression must be overridden in child classes.")
def BuildHash(self, sqlBuilder):
raise Exception("BuildHash m
|
ust be overridden in child classes.")
    def BuildSqlExpression(self, sqlBuilder, tupleTable):
raise Exception("BuildSqlExpression must be overridden in child classes.")
def GetDataTypeExp(self,sqlBuilder):
raise Exception("GetDataTypeExp must be overridden in child classes.")
def GetLanguageExp(self,sqlBuilder):
raise Exception("GetLanguageExp must be overridden in child classes.")
def GetTermExp(self,sqlBuilder):
raise Exception("GetTermExp must be overridden in child classes.")
|
fraoustin/flask-monitor
|
flask_monitor/util.py
|
Python
|
gpl-2.0
| 634
| 0.015773
|
# -*- coding: utf-8 -*-
def toflat(obj, ns=""):
res = {}
for key in obj:
if type(obj[key]) is dict:
            subdict = toflat(obj[key], "%s%s" % (ns, key[0].upper()+key[1:]))
for k in subdict:
res[k[0].upper()+k[1:]] = subdict[k]
else:
res["%s%s" % (ns, key[0].upper()+key[1:])] = str
|
(obj[key])
return res
def todict(obj):
res = {}
for key in obj:
if type(obj[key]) is dict:
subdict = todict(obj[key])
for k in subdict:
res[k] = subdict[k]
else:
res[key] = obj[key]
return res
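
# Illustrative usage (not part of the original module):
#   toflat({"db": {"host": "x", "port": 5432}})
#       -> {"DbHost": "x", "DbPort": "5432"}    (keys namespaced, values str()'d)
#   todict({"db": {"host": "x", "port": 5432}})
#       -> {"host": "x", "port": 5432}          (nesting flattened, keys kept)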
|
lucastanure/GoGo-Real-Software
|
core/communication/serial/serialposix.py
|
Python
|
gpl-3.0
| 25,395
| 0.005828
|
#!/usr/bin/env python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# module for serial IO for POSIX compatible systems, like Linux
# see __init__.py
#
# (C) 2001-2010 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
#
# parts based on code from Grant B. Edwards <grante@visi.com>:
# ftp://ftp.visi.com/users/grante/python/PosixSerial.py
#
# references: http://www.easysw.com/~mike/serial/serial.html
import sys, os, fcntl, termios, struct, select, errno, time
from serialutil import *
# Do check the Python version as some constants have moved.
if (sys.hexversion < 0x020100f0):
import TERMIOS #@UnresolvedImport
else:
TERMIOS = termios
if (sys.hexversion < 0x020200f0):
import FCNTL#@UnresolvedImport
else:
FCNTL = fcntl
# try to detect the OS so that a device can be selected...
# this code block should supply a device() and set_special_baudrate() function
# for the platform
plat = sys.platform.lower()
if plat[:5] == 'linux': # Linux (confirmed)
def device(port):
return '/dev/ttyS%d' % port
ASYNC_SPD_MASK = 0x1030
ASYNC_SPD_CUST = 0x0030
def set_special_baudrate(port, baudrate):
import array
buf = array.array('i', [0] * 32)
# get serial_struct
FCNTL.ioctl(port.fd, TERMIOS.TIOCGSERIAL, buf)
# set custom divisor
buf[6] = buf[7] / baudrate
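        # Illustrative numbers (an assumption, not from the original file):
        # with a base clock buf[7] of 24000000 and a requested baudrate of
        # 250000, the custom divisor buf[6] becomes 96.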
# update flags
buf[4] &= ~ASYNC_SPD_MASK
buf[4] |= ASYNC_SPD_CUST
# set serial_struct
try:
res = FCNTL.ioctl(port.fd, TERMIOS.TIOCSSERIAL, buf)
except IOError:
raise ValueError('Failed to set custom baud rate: %r' % baudrate)
baudrate_constants = {
0: 0000000, # hang up
50: 0000001,
75: 0000002,
110: 0000003,
134: 0000004,
150: 0000005,
200: 0000006,
300: 0000007,
600: 0000010,
1200: 0000011,
1800: 0000012,
2400: 0000013,
4800: 0000014,
9600: 0000015,
19200: 0000016,
38400: 0000017,
57600: 0010001,
115200: 0010002,
230400: 0010003,
460800: 0010004,
500000: 0010005,
576000: 0010006,
921600: 0010007,
1000000: 0010010,
1152000: 0010011,
1500000: 0010012,
2000000: 0010013,
2500000: 0010014,
3000000: 0010015,
3500000: 0010016,
4000000: 0010017
}
elif plat == 'cygwin': # cygwin/win32 (confirmed)
def device(port):
return '/dev/com%d' % (port + 1)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:7] == 'openbsd': # OpenBSD
def device(port):
return '/dev/cua%02d' % port
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:3] == 'bsd' or \
plat[:7] == 'freebsd':
def device(port):
return '/dev/cuad%d' % port
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:6] == 'darwin': # OS X
version = os.uname()[2].split('.')
# Tiger or above can support arbitrary serial speeds
if int(version[0]) >= 8:
def set_special_baudrate(port, baudrate):
# use IOKit-specific call to set up high speeds
import array, fcntl
buf = array.array('i', [baudrate])
IOSSIOSPEED = 0x80045402 #_IOW('T', 2, speed_t)
fcntl.ioctl(port.fd, IOSSIOSPEED, buf, 1)
else: # version < 8
def set_special_baudrate(port, baudrate):
raise ValueError("baud rate not supported")
def device(port):
return '/dev/cuad%d' % port
baudrate_constants = {}
elif plat[:6] == 'netbsd': # NetBSD 1.6 testing by Erk
def device(port):
return '/dev/dty%02d' % port
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:4] == 'irix': # IRIX (partially tested)
def device(port):
return '/dev/ttyf%d' % (port+1) #XXX different device names depending on flow control
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:2] == 'hp': # HP-UX (not tested)
def device(port):
return '/dev/tty%dp0' % (port+1)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:5] == 'sunos': # Solaris/SunOS (confirmed)
def device(port):
return '/dev/tty%c' % (ord('a')+port)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:3] == 'aix': # AIX
def device(port):
return '/dev/tty%d' % (port)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
else:
# platform detection has failed...
sys.stderr.write("""\
don't know how to number ttys on this system.
! Use an explicit path (eg /dev/ttyS1) or send this information to
! the author of this module:
sys.platform = %r
os.name = %r
serialposix.py version = %s
also add the device name of the serial port and where the
counting starts for the first serial port.
e.g. 'first serial port: /dev/ttyS0'
and with a bit luck you can get this module running...
""" % (sys.platform, os.name, VERSION))
# no exception, just continue with a brave attempt to build a device name
# even if the device name is not correct for the platform it has chances
# to work using a string with the real device name as port parameter.
    def device(portnum):
        return '/dev/ttyS%d' % portnum
def set_special_baudrate(port, baudrate):
raise SerialException("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
#~ raise Exception, "this module does not run on this platform, sorry."
# whats up with "aix", "beos", ....
# they should work, just need to know the device names.
# load some constants for later use.
# try to use values from TERMIOS, use defaults from linux otherwise
TIOCMGET = hasattr(TERMIOS, 'TIOCMGET') and TERMIOS.TIOCMGET or 0x5415
TIOCMBIS = hasattr(TERMIOS, 'TIOCMBIS') and TERMIOS.TIOCMBIS or 0x5416
TIOCMBIC = hasattr(TERMIOS, 'TIOCMBIC') and TERMIOS.TIOCMBIC or 0x5417
TIOCMSET = hasattr(TERMIOS, 'TIOCMSET') and TERMIOS.TIOCMSET or 0x5418
#TIOCM_LE = hasattr(TERMIOS, 'TIOCM_LE') and TERMIOS.TIOCM_LE or 0x001
TIOCM_DTR = hasattr(TERMIOS, 'TIOCM_DTR') and TERMIOS.TIOCM_DTR or 0x002
TIOCM_RTS = hasattr(TERMIOS, 'TIOCM_RTS') and TERMIOS.TIOCM_RTS or 0x004
#TIOCM_ST = hasattr(TERMIOS, 'TIOCM_ST') and TERMIOS.TIOCM_ST or 0x008
#TIOCM_SR = hasattr(TERMIOS, 'TIOCM_SR') and TERMIOS.TIOCM_SR or 0x010
TIOCM_CTS = hasattr(TERMIOS, 'TIOCM_CTS') and TERMIOS.TIOCM_CTS or 0x020
TIOCM_CAR = hasattr(TERMIOS, 'TIOCM_CAR') and TERMIOS.TIOCM_CAR or 0x040
TIOCM_RNG = hasattr(TERMIOS, 'TIOCM_RNG') and TERMIOS.TIOCM_RNG or 0x080
TIOCM_DSR = hasattr(TERMIOS, 'TIOCM_DSR') and TERMIOS.TIOCM_DSR or 0x100
TIOCM_CD = hasattr(TERMIOS, 'TIOCM_CD') and TERMIOS.TIOCM_CD or TIOCM_CAR
TIOCM_RI = hasattr(TERMIOS, 'TIOCM_RI') and TERMIOS.TIOCM_RI or TIOCM_RNG
#TIOCM_OUT1 = hasattr(TERMIOS, 'TIOCM_OUT1') and TERMIOS.TIOCM_OUT1 or 0x2000
#TIOCM_OUT2 = hasattr(TERMIOS, 'TIOCM_OUT2') and TERMIOS.TIOCM_OUT2 or 0x4000
if hasattr(TERMIOS, 'TIOCINQ'):
    TIOCINQ = TERMIOS.TIOCINQ
|
markflyhigh/incubator-beam
|
sdks/python/apache_beam/options/pipeline_options_validator_test.py
|
Python
|
apache-2.0
| 12,764
| 0.005719
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the pipeline options validator module."""
from __future__ import absolute_import
import logging
import unittest
from builtins import object
from hamcrest import assert_that
from hamcrest import contains_string
from hamcrest import only_contains
from hamcrest.core.base_matcher import BaseMatcher
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options_validator import PipelineOptionsValidator
# Mock runners to use for validations.
class MockRunners(object):
class DataflowRunner(object):
pass
class TestDataflowRunner(object):
pass
class OtherRunner(object):
pass
# Matcher that always passes for testing on_success_matcher option
class AlwaysPassMatcher(BaseMatcher):
def _matches(self, item):
return True
class SetupTest(unittest.TestCase):
def check_errors_for_arguments(self, errors, args):
"""Checks that there is exactly one error for each given argument."""
missing = []
remaining = list(errors)
for arg in args:
found = False
for error in remaining:
if arg in error:
remaining.remove(error)
found = True
break
if not found:
missing.append('Missing error for: ' + arg)
# Return missing and remaining (not matched) errors.
return missing + remaining
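
  # Illustrative example (assumed messages, not from the original file): with
  # errors=['Missing required option: project'] and args=['project'], the
  # helper returns []; adding 'region' to args would instead return
  # ['Missing error for: region'].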
def test_local_runner(self):
runner = MockRunners.OtherRunner()
options = PipelineOptions([])
validator = PipelineOptionsValidator(options, runner)
errors = validator.validate()
self.assertEqual(len(errors), 0)
def test_missing_required_options(self):
options = PipelineOptions([''])
runner = MockRunners.DataflowRunner()
validator = PipelineOptionsValidator(options, runner)
errors = validator.validate()
self.assertEqual(
self.check_errors_for_arguments(
errors,
['project', 'staging_location', 'temp_location']),
[])
def test_gcs_path(self):
def get_validator(temp_location, staging_location):
options = ['--project=example:example', '--job_name=job']
if temp_location is not None:
options.append('--temp_location=' + temp_location)
if staging_location is not None:
options.append('--staging_location=' + staging_location)
pipeline_options = PipelineOptions(options)
runner = MockRunners.DataflowRunner()
validator = PipelineOptionsValidator(pipeline_options, runner)
return validator
test_cases = [
{'temp_location': None,
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': None,
'staging_location': None,
'errors': ['staging_location', 'temp_location']},
{'temp_location': 'gs://foo/bar',
'staging_location': None,
'errors': []},
{'temp_location': 'gs://foo/bar',
'staging_location': 'gs://ABC/bar',
'errors': ['staging_location']},
{'temp_location': 'gcs:/foo/bar',
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': 'gs:/foo/bar',
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': 'gs://ABC/bar',
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': 'gs://ABC/bar',
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': 'gs://foo',
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': 'gs://foo/',
'staging_location': 'gs://foo/bar',
'errors': []},
{'temp_location': 'gs://foo/bar',
'staging_location': 'gs://foo/bar',
         'errors': []},
]
for case in test_cases:
errors = get_validator(case['temp_location'],
case['staging_location']).validate()
self.assertEqual(
self.check_errors_for_arguments(errors, case['errors']), [])
def test_project(self):
def get_validator(project):
options = ['--job_name=job', '--staging_location=gs://foo/bar',
'--temp_location=gs://foo/bar']
if project is not None:
options.append('--project=' + project)
pipeline_options = PipelineOptions(options)
runner = MockRunners.DataflowRunner()
validator = PipelineOptionsValidator(pipeline_options, runner)
return validator
test_cases = [
{'project': None, 'errors': ['project']},
{'project': '12345', 'errors': ['project']},
{'project': 'FOO', 'errors': ['project']},
{'project': 'foo:BAR', 'errors': ['project']},
{'project': 'fo', 'errors': ['project']},
{'project': 'foo', 'errors': []},
{'project': 'foo:bar', 'errors': []},
]
for case in test_cases:
errors = get_validator(case['project']).validate()
self.assertEqual(
self.check_errors_for_arguments(errors, case['errors']), [])
def test_job_name(self):
def get_validator(job_name):
options = ['--project=example:example', '--staging_location=gs://foo/bar',
'--temp_location=gs://foo/bar']
if job_name is not None:
options.append('--job_name=' + job_name)
pipeline_options = PipelineOptions(options)
runner = MockRunners.DataflowRunner()
validator = PipelineOptionsValidator(pipeline_options, runner)
return validator
test_cases = [
{'job_name': None, 'errors': []},
{'job_name': '12345', 'errors': ['job_name']},
{'job_name': 'FOO', 'errors': ['job_name']},
{'job_name': 'foo:bar', 'errors': ['job_name']},
{'job_name': 'fo', 'errors': []},
{'job_name': 'foo', 'errors': []},
]
for case in test_cases:
errors = get_validator(case['job_name']).validate()
self.assertEqual(
self.check_errors_for_arguments(errors, case['errors']), [])
def test_num_workers(self):
def get_validator(num_workers):
options = ['--project=example:example', '--job_name=job',
'--staging_location=gs://foo/bar',
'--temp_location=gs://foo/bar']
if num_workers is not None:
options.append('--num_workers=' + num_workers)
pipeline_options = PipelineOptions(options)
runner = MockRunners.DataflowRunner()
validator = PipelineOptionsValidator(pipeline_options, runner)
return validator
test_cases = [
{'num_workers': None, 'errors': []},
{'num_workers': '1', 'errors': []},
{'num_workers': '0', 'errors': ['num_workers']},
{'num_workers': '-1', 'errors': ['num_workers']},
]
for case in test_cases:
errors = get_validator(case['num_workers']).validate()
self.assertEqual(
self.check_errors_for_arguments(errors, case['errors']), [])
def test_is_service_runner(self):
test_cases = [
{
'runner': MockRunners.OtherRunner(),
'options': [],
'expected': False,
},
{
'runner': MockRunners.OtherRunner(),
'options': ['--dataflow_endpoint=https://dataflow.googleapis.com'],
'expected': False,
},
{
'r
|
toobaz/pandas
|
pandas/tests/reshape/test_qcut.py
|
Python
|
bsd-3-clause
| 6,328
| 0.000474
|
import os
import numpy as np
import pytest
from pandas import (
Categorical,
DatetimeIndex,
Interval,
IntervalIndex,
NaT,
Series,
TimedeltaIndex,
Timestamp,
cut,
date_range,
isna,
qcut,
timedelta_range,
)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.algorithms import quantile
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, Nano
def test_qcut():
arr = np.random.randn(1000)
    # We store the bins as an Index that has been
    # rounded, so comparisons are a bit tricky.
    labels, bins = qcut(arr, 4, retbins=True)
ex_bins = quantile(arr, [0, 0.25, 0.5, 0.75, 1.0])
result = labels.categories.left.values
assert np.allclose(result, ex_bins[:-1], atol=1e-2)
result = labels.categories.right.values
assert np.allclose(result, ex_bins[1:], atol=1e-2)
    ex_levels = cut(arr, ex_bins, include_lowest=True)
tm.assert_categorical_equal(labels, ex_levels)
def test_qcut_bounds():
arr = np.random.randn(1000)
factor = qcut(arr, 10, labels=False)
assert len(np.unique(factor)) == 10
def test_qcut_specify_quantiles():
arr = np.random.randn(100)
factor = qcut(arr, [0, 0.25, 0.5, 0.75, 1.0])
expected = qcut(arr, 4)
tm.assert_categorical_equal(factor, expected)
def test_qcut_all_bins_same():
with pytest.raises(ValueError, match="edges.*unique"):
qcut([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)
def test_qcut_include_lowest():
values = np.arange(10)
ii = qcut(values, 4)
ex_levels = IntervalIndex(
[
Interval(-0.001, 2.25),
Interval(2.25, 4.5),
Interval(4.5, 6.75),
Interval(6.75, 9),
]
)
tm.assert_index_equal(ii.categories, ex_levels)
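
# Illustrative semantics (not from the original file): qcut(np.arange(10), 4)
# cuts at the data's quartiles, so the four intervals above hold roughly a
# quarter of the ten points each (3, 2, 2 and 3 of them, respectively).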
def test_qcut_nas():
arr = np.random.randn(100)
arr[:20] = np.nan
result = qcut(arr, 4)
assert isna(result[:20]).all()
def test_qcut_index():
result = qcut([0, 2], 2)
intervals = [Interval(-0.001, 1), Interval(1, 2)]
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
def test_qcut_binning_issues(datapath):
# see gh-1978, gh-1979
cut_file = datapath(os.path.join("reshape", "data", "cut_data.csv"))
arr = np.loadtxt(cut_file)
result = qcut(arr, 20)
starts = []
ends = []
for lev in np.unique(result):
s = lev.left
e = lev.right
assert s != e
starts.append(float(s))
ends.append(float(e))
for (sp, sn), (ep, en) in zip(
zip(starts[:-1], starts[1:]), zip(ends[:-1], ends[1:])
):
assert sp < sn
assert ep < en
assert ep <= sn
def test_qcut_return_intervals():
ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
res = qcut(ser, [0, 0.333, 0.666, 1])
exp_levels = np.array(
[Interval(-0.001, 2.664), Interval(2.664, 5.328), Interval(5.328, 8)]
)
exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize(
"kwargs,msg",
[
(dict(duplicates="drop"), None),
(dict(), "Bin edges must be unique"),
(dict(duplicates="raise"), "Bin edges must be unique"),
(dict(duplicates="foo"), "invalid value for 'duplicates' parameter"),
],
)
def test_qcut_duplicates_bin(kwargs, msg):
# see gh-7751
values = [0, 0, 0, 0, 1, 2, 3]
if msg is not None:
with pytest.raises(ValueError, match=msg):
qcut(values, 3, **kwargs)
else:
result = qcut(values, 3, **kwargs)
expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)])
tm.assert_index_equal(result.categories, expected)
@pytest.mark.parametrize(
"data,start,end", [(9.0, 8.999, 9.0), (0.0, -0.001, 0.0), (-9.0, -9.001, -9.0)]
)
@pytest.mark.parametrize("length", [1, 2])
@pytest.mark.parametrize("labels", [None, False])
def test_single_quantile(data, start, end, length, labels):
# see gh-15431
ser = Series([data] * length)
result = qcut(ser, 1, labels=labels)
if labels is None:
intervals = IntervalIndex([Interval(start, end)] * length, closed="right")
expected = Series(intervals).astype(CDT(ordered=True))
else:
expected = Series([0] * length)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser",
[
Series(DatetimeIndex(["20180101", NaT, "20180103"])),
Series(TimedeltaIndex(["0 days", NaT, "2 days"])),
],
ids=lambda x: str(x.dtype),
)
def test_qcut_nat(ser):
# see gh-19768
intervals = IntervalIndex.from_tuples(
[(ser[0] - Nano(), ser[2] - Day()), np.nan, (ser[2] - Day(), ser[2])]
)
expected = Series(Categorical(intervals, ordered=True))
result = qcut(ser, 2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("bins", [3, np.linspace(0, 1, 4)])
def test_datetime_tz_qcut(bins):
# see gh-19872
tz = "US/Eastern"
ser = Series(date_range("20130101", periods=3, tz=tz))
result = qcut(ser, bins)
expected = Series(
IntervalIndex(
[
Interval(
Timestamp("2012-12-31 23:59:59.999999999", tz=tz),
Timestamp("2013-01-01 16:00:00", tz=tz),
),
Interval(
Timestamp("2013-01-01 16:00:00", tz=tz),
Timestamp("2013-01-02 08:00:00", tz=tz),
),
Interval(
Timestamp("2013-01-02 08:00:00", tz=tz),
Timestamp("2013-01-03 00:00:00", tz=tz),
),
]
)
).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"arg,expected_bins",
[
[
timedelta_range("1day", periods=3),
TimedeltaIndex(["1 days", "2 days", "3 days"]),
],
[
date_range("20180101", periods=3),
DatetimeIndex(["2018-01-01", "2018-01-02", "2018-01-03"]),
],
],
)
def test_date_like_qcut_bins(arg, expected_bins):
# see gh-19891
ser = Series(arg)
result, result_bins = qcut(ser, 2, retbins=True)
tm.assert_index_equal(result_bins, expected_bins)
|
jarretraim/euler_py
|
1-10/03.py
|
Python
|
apache-2.0
| 1,446
| 0.040111
|
#!/usr/bin/env python
# Python 3 required
# THIS TAKES WAAAAY TOO LONG. ONLY SOLVED IT BY LETTING IT RUN
# OVERNIGHT ACCIDENTALLY :/
import locale
import sys
import math
import timeit
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
PRIMES = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
127, 131, 137, 139, 149, 151, 157, 163, 167, 173,
          179, 181, 191, 193, 197, 199]
def is_prime(num):
for i in PRIMES:
if i >= num:
break
if num % i == 0:
#print ("%d is divisible by %d" % (num, i))
return False
for i in range(PRIMES[-1], int(math.sqrt(num)), 2):
if num % i == 0:
return False
return True
def is_prime2(num):
    k = 3
while k*k <= num:
if num % k == 0:
return False
k += 2
return True
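# Quick sanity checks for is_prime2 (illustrative, not in the original):
#   is_prime2(9)  -> False   (3*3 <= 9 and 9 % 3 == 0)
#   is_prime2(97) -> True    (no odd divisor k with k*k <= 97)
# Note it only checks odd divisors, which is fine here since main() only
# passes in odd candidate factors.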
def main():
num = int(sys.argv[1])
print("Factoring %s" % locale.format("%d", num, grouping=True))
if num % 2 == 0:
prime_factors = [2]
else:
prime_factors = []
pager = 0
for i in range(3, num, 2):
if num % i == 0:
print ("Factor: %s" % locale.format("%d", i, grouping=True))
if is_prime2(i):
prime_factors.append(i)
print ("Prime Factor: %s" % locale.format("%d", i, grouping=True))
# if pager % 1000000 == 0:
# print("%.2f%%" % ((i / num) * 100))
# pager = 0
# pager += 1
print("Prime Factors for %s" % locale.format("%d", num, grouping=True))
print(prime_factors)
main()
|
opesci/devito
|
benchmarks/user/make-pbs.py
|
Python
|
mit
| 2,496
| 0.002404
|
import os
import click
from benchmark import option_simulation
import devito
@click.group()
def menu():
pass
@menu.command(name='generate')
@option_simulation
@click.option('-nn', multiple=True, default=[1], help='Number of nodes')
@click.option('-ncpus', default=1, help='Number of cores *per node*') # Should be ncores
@click.option('-mem', default=120, help='Requested DRAM *per node*')
@click.option('-np', default=1, help='Number of MPI processes *per node*')
@click.option('-nt', default=1, help='Number of OpenMP threads *per MPI process*')
@click.option('--mpi', multiple=True, default=['basic'], help='Devito MPI mode(s)')
@click.option('--arch', default='unknown', help='Test-bed architecture')
@click.option('-r', '--resultsdir', default='results', help='Results directory')
@click.option('--load', multiple=True, default=[], help='Modules to be loaded')
@click.option('--export', multiple=True, default=[], help='Env vars to be exported')
def generate(**kwargs):
join = lambda l: ' '.join('%d' % i for i in l)
args = dict(kwargs)
args['shape'] = join(args['shape'])
args['space_order'] = join(args['space_order'])
args['home'] = os.path.dirname(os.path.dirname(devito.__file__))
args['load'] = '\n'.join('module load %s' % i for i in args['load'])
args['export'] = '\n'.join('export %s' % i for i in args['export'])
template_header = """\
#!/bin/bash
#PBS -lselect=%(nn)s:ncpus=%(ncpus)s:mem=120gb:mpiprocs=%(np)s:ompthreads=%(nt)s
#PBS -lwalltime=02:00:00
lscpu
%(load)s
cd %(home)s
source activate devito
export DEVITO_HOME=%(home)s
export DEVITO_ARCH=intel
export DEVITO_LANGUAGE=openmp
export DEVITO_LOGGING=DEBUG
%(export)s
cd benchmarks/user
""" # noqa
template_cmd = """\
DEVITO_MPI=%(mpi)s mpiexec python benchmark.py bench -P %(problem)s -bm O2 -d %(shape)s -so %(space_order)s --tn %(tn)s -x 1 --arch %(arch)s -r %(resultsdir)s\
""" # noqa
# Generate one PBS file for each `np` value
for nn in kwargs['nn']:
args['nn'] = nn
cmds = []
for i in kwargs['mpi']:
args['mpi'] = i
cmds.append(template_cmd % args)
cmds = ' \n'.join(cmds)
body = ' \n'.join([template_header % args, cmds])
with open('pbs_nn%d.gen.sh' % int(nn), 'w') as f:
f.write(body)
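# Example invocation (illustrative; the simulation flags come from
# benchmark.py's option_simulation and are assumptions here):
#   python make-pbs.py generate -P acoustic -d 512 512 512 -so 4 --tn 100 \
#       -nn 2 -nn 4 --mpi basic --mpi full
# would write pbs_nn2.gen.sh and pbs_nn4.gen.sh, each containing one
# benchmark command per requested MPI mode.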
@menu.command(name='cleanup')
def cleanup():
for f in os.listdir():
if f.endswith('.gen.sh'):
os.remove(f)
if __name__ == "__main__":
menu()
|
isolationism/django-cleaver
|
django_cleaver/imagecreator.py
|
Python
|
bsd-3-clause
| 2,540
| 0.001575
|
# Python standard library
from os import path, listdir
from ConfigParser import ConfigParser
# Third-party libraries
from imagecraft import ImageGenerator
# Django settings
from django.conf import settings
# This module
from cleaver import ini_to_context, flatten_context
# Retrieve execution params
MEDIA_ROOT = getattr(settings, 'MEDIA_ROOT', False)
if not MEDIA_ROOT:
raise ValueError("You must define a MEDIA_ROOT in settings.py!")
CLEVERCSS_SOURCE = getattr(settings, 'CLEVERCSS_SOURCE', \
path.join(MEDIA_ROOT, 'clevercss'))
CLEVERCSS_AUTOGENERATE = getattr(settings, 'CLEVERCSS_AUTOGENERATE', False)
CLEVERCSS_CONTEXTFILES = getattr(settings, 'CLEVERCSS_CONTEXTFILES', False)
if not CLEVERCSS_CONTEXTFILES:
raise ValueError("You must define CLEVERCSS_CONTEXTFILES in settings.py!")
CLEVERCSS_IMAGE_JOBS = getattr(settings, "CLEVERCSS_IMAGE_JOBS", None)
CLEVERCSS_IMAGE_SOURCE = getattr(settings, "CLEVERCSS_IMAGE_SOURCE", None)
CLEVERCSS_IMAGE_OUTPUT = getattr(settings, "CLEVERCSS_IMAGE_OUTPUT", None)
# Throw errors if information is missing
if not CLEVERCSS_CONTEXTFILES:
raise ValueError("You must define CLEVERCSS_CONTEXTFILES")
if not CLEVERCSS_IMAGE_SOURCE:
raise ValueError("You must define CLEVERCSS_IMAGE_SOU
|
RCE")
if not CLEVERCSS_IMAGE_OUTPUT:
raise ValueError("You must define CLEVERCSS_IMAGE_OUTPUT")
class DynamicImageGenerator(ImageGenerator):
"""Dynamically generates images by using arguments instead of constants"""
#layers = None
_default_source_path = CLEVERCSS_IMAGE_SOURCE
_default_output_path = CLEVERCSS_IMAGE_OUTPUT
image_format = 'PNG'
def __init__(self, color_dict, source_path=None, output_path=None,
layers=(), output_filename=None):
"
|
""Constructor"""
self.layers = layers
self.output_filename = output_filename
super(DynamicImageGenerator, self).__init__(color_dict, source_path,
output_path)
def generate_images():
"""Reads the context file and uses it to execute all CLEVERCSS_IMAGE_JOBS
specified in a settings file"""
# If there are no jobs, die
if not CLEVERCSS_IMAGE_JOBS:
return
context = flatten_context(ini_to_context())
# Unpack SortedDict with tuple for values
for filename, values in CLEVERCSS_IMAGE_JOBS.items():
layers = values.values()
DynamicImageGenerator(context, layers=layers,
output_filename=filename).render()
|
chtyim/infrastructure-puppet
|
modules/buildbot_asf/files/configscanner.py
|
Python
|
apache-2.0
| 12,920
| 0.016718
|
#!/usr/bin/env python
############################################################
# ConfigScanner - A buildbot config scanner and updater #
# Also does ReviewBoard (and at some point Bugzilla?) #
# Built for Python 3, works with 2.7 with a few tweaks #
############################################################
buildbotDir = "/x1/buildmaster/master1"
blamelist = ["infrastructure@apache.org"]
# SMTP Lib
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from smtplib import SMTPException
# Threading
from threading import Thread
from datetime import datetime
# Rest
import sys, os
import argparse, grp, pwd, shutil
version = 2
if sys.hexversion < 0x03000000:
print("Using Python 2...")
import json, httplib, urllib, urllib2, re, base64, sys, os, time, atexit, signal, logging, socket, subprocess
socket._fileobject.default_bufsize = 0
else:
print("Using Python 3")
version = 3
import json, httplib2, http.client, urllib.request, urllib.parse, re, base64, sys, os, time, atexit, signal, logging, subprocess
############################################
# Get path, set up logging and read config #
############################################
debug = False
logging.basicConfig(filename='configscanner.log', format='[%(asctime)s]: %(message)s', level=logging.INFO)
path = os.path.dirname(sys.argv[0])
if len(path) == 0:
path = "."
def sendEmail(rcpt, subject, message):
sender = "<buildbot@buildbot-vm.apache.org>"
receivers = [rcpt]
msg = """From: %s
To: %s
Subject: %s
%s
With regards,
BuildBot
""" % (sender, rcpt, subject, message)
try:
smtpObj = smtplib.SMTP("localhost")
smtpObj.sendmail(sender, receivers, msg)
except SMTPException:
raise Exception("Could not send email - SMTP server down??")
###########################################################
# Daemon class, courtesy of an anonymous good-hearted soul #
###########################################################
class daemon:
"""A generic daemon class.
Usage: subclass the daemon class and override the run() method."""
def __init__(self, pidfile): self.pidfile = pidfile
def daemonize(self):
"""Deamonize class. UNIX double fork mechanism."""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError as err:
sys.stderr.write('fork #1 failed: {0}\n'.format(err))
sys.exit(1)
# decouple from parent environment
os.chdir('/')
os.setsid()
os.umask(0)
# do second fork
try:
            pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError as err:
sys.stderr.write('fork #2 failed: {0}\n'.format(err))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = open(os.devnull, 'r')
so = open(os.devnull, 'a+')
se = open(os.devnull, 'a+')
        os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
with open(self.pidfile,'w+') as f:
f.write(pid + '\n')
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""Start the daemon."""
# Check for a pidfile to see if the daemon already runs
try:
with open(self.pidfile,'r') as pf:
pid = int(pf.read().strip())
except IOError:
pid = None
if pid:
message = "pidfile {0} already exist. " + \
"Daemon already running?\n"
sys.stderr.write(message.format(self.pidfile))
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""Stop the daemon."""
# Get the pid from the pidfile
try:
with open(self.pidfile,'r') as pf:
pid = int(pf.read().strip())
except IOError:
pid = None
if not pid:
message = "pidfile {0} does not exist. " + \
"Daemon not running?\n"
sys.stderr.write(message.format(self.pidfile))
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, signal.SIGTERM)
time.sleep(0.1)
except OSError as err:
e = str(err.args)
if e.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print (str(err.args))
sys.exit(1)
def restart(self):
"""Restart the daemon."""
self.stop()
self.start()
def run(self):
"""You should override this method when you subclass Daemon.
It will be called after the process has been daemonized by
start() or restart()."""
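# Typical use of the daemon class (illustrative, not from the original file):
#
#   class MyDaemon(daemon):
#       def run(self):
#           while True:
#               time.sleep(1)
#
#   MyDaemon('/tmp/mydaemon.pid').start()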
####################
# Helper functions #
####################
# read_chunk: iterator for reading chunks from the stream
# since this is all handled via urllib now, this is quite rudimentary
def read_chunk(req):
while True:
try:
line = req.readline().strip()
if line:
yield line
else:
print("No more lines?")
break
except Exception as info:
logging.warning("Error reading from stream: %s", info)
break
return
#########################
# Main listener program #
#########################
# PubSub class: handles connecting to a pubsub service and checking commits
class PubSubClient(Thread):
def run(self):
while True:
logging.info("Connecting to " + self.url + "...")
self.req = None
while not self.req:
try:
if version == 3:
self.req = urllib.request.urlopen(self.url, None, 30)
else:
self.req = urllib2.urlopen(self.url, None, 30)
logging.info("Connected to " + self.url + ", reading stream")
except:
logging.warning("Could not connect to %s, retrying in 30 seconds..." % self.url)
time.sleep(30)
continue
for line in read_chunk(self.req):
if version == 3:
line = str( line, encoding='ascii' ).rstrip('\r\n,').replace('\x00','') # strip away any old pre-0.9 commas from gitpubsub chunks and \0 in svnpubsub chunks
else:
line = str( line ).rstrip('\r\n,').replace('\x00','') # strip away any old pre-0.9 commas from gitpubsub chunks and \0 in svnpubsub chunks
try:
obj = json.loads(line)
if "commit" in obj and "repository" in obj['commit']:
if debug:
logging.info("Found a commit in %s", obj['commit']['repository'])
if obj['commit']['repository'] == "git":
# grab some vars
commit = obj['commit']
project = commit['project']
body = commit['body']
sha = commit['sha']
ssha = commit['hash']
author = commit['author']
email = commit['email']
ref = commit['ref']
# If it's not git (and not JIRA), it must be subversion
elif obj['commit']['repository']:
#Grab some vars
commit = obj['commit']
body = commit['log']
svnuser = commit['committer']
revision = commit['id']
email = svnuser + "@apache.org"
for path in commit['changed']:
m = re.match(r"infrastructure/buildbot/aegis/buildmaster/master1/projects/(.+\.conf)", path)
if m:
buildbotFile = m.group(1)
time.sle
|
DudLab/nanshe
|
tests/test_nanshe/test_converter.py
|
Python
|
bsd-3-clause
| 2,908
| 0.005158
|
__author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Mar 30, 2015 08:25:33 EDT$"
import collections
import json
import os
import os.path
import shutil
import tempfile
import numpy
import h5py
import vigra
import vigra.impex
import nanshe.util.iters
import nanshe.util.xnumpy
import nanshe.io.xtiff
import nanshe.converter
class TestConverter(object):
def setup(self):
self.temp_dir = ""
self.filedata = collections.OrderedDict()
self.data = None
self.data = numpy.random.random_integers(0, 255, (1000, 1, 102, 101, 1)).astype(numpy.uint8)
self.temp_dir = tempfile.mkdtemp()
for i, i_str, (a_b, a_e) in nanshe.util.iters.filled_stringify_enumerate(
nanshe.util.iters.izip(
*nanshe.util.iters.lagged_generators(
nanshe.util.iters.irange(
0,
self.data.shape[0] + 100 - 1,
100
)
)
)
):
each_filename = os.path.join(self.temp_dir, "test_tiff_" + str(i) + ".tif")
each_data = self.data[a_b:a_e]
self.filedata[each_filename] = each_data
            vigra.impex.writeVolume(nanshe.util.xnumpy.tagging_reorder_array(each_data, to_axis_order="czyxt")[0, 0],
os.path.join(self.temp_dir, "test_tiff_" + str(i) + ".tif"), "")
def test_main(self):
params = {
"axis" : 0,
"channel" : 0,
"z_index" : 0,
"pages_to_channel" : 1
}
config_filename = os.path.join(self.temp_dir, "config.json")
hdf5_filename = os.path.join(self.temp_dir, "test.h5")
hdf5_filepath = hdf5_filename + "/data"
with open(config_filename, "w") as fid:
json.dump(params, fid)
fid.write("\n")
main_args = ["./converter.py"] + ["tiff"] + [config_filename] + list(self.filedata.keys()) + [hdf5_filepath]
assert (nanshe.converter.main(*main_args) == 0)
assert os.path.exists(hdf5_filename)
data = None
with h5py.File(hdf5_filename, "r") as hdf5_handle:
data = hdf5_handle["data"].value
self_data_h5 = nanshe.util.xnumpy.tagging_reorder_array(self.data, to_axis_order="cztyx")[0, 0]
assert (data == self_data_h5).all()
os.remove(hdf5_filename)
def teardown(self):
shutil.rmtree(self.temp_dir)
self.temp_dir = ""
self.filedata = collections.OrderedDict()
self.data = None
|
alexandrucoman/vbox-neutron-agent
|
neutron/tests/unit/plugins/sriovnicagent/test_pci_lib.py
|
Python
|
apache-2.0
| 4,204
| 0
|
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.plugins.sriovnicagent.common import exceptions as exc
from neutron.plugins.sriovnicagent import pci_lib
from neutron.tests import base
class TestPciLib(base.BaseTestCase):
DEV_NAME = "p7p1"
VF_INDEX = 1
VF_INDEX_DISABLE = 0
PF_LINK_SHOW = ('122: p7p1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop'
' state DOWN mode DEFAULT group default qlen 1000')
PF_MAC = ' link/ether f4:52:14:2a:3e:c0 brd ff:ff:ff:ff:ff:ff'
VF_0_LINK_SHOW = (' vf 0 MAC fa:16:3e:b4:81:ac, vlan 4095, spoof'
' checking off, link-state disable')
VF_1_LINK_SHOW = (' vf 1 MAC 00:00:00:00:00:11, vlan 4095, spoof'
' checking off, link-state enable')
VF_2_LINK_SHOW = (' vf 2 MAC fa:16:3e:68:4e:79, vlan 4095, spoof'
' checking off, link-state enable')
VF_LINK_SHOW = '\n'.join((PF_LINK_SHOW, PF_MAC, VF_0_LINK_SHOW,
VF_1_LINK_SHOW, VF_2_LINK_SHOW))
MAC_MAPPING = {
0: "fa:16:3e:b4:81:ac",
1: "00:00:00:00:00:11",
2: "fa:16:3e:68:4e:79",
}
def setUp(self):
super(TestPciLib, self).setUp()
self.pci_wrapper = pci_lib.PciDeviceIPWrapper(self.DEV_NAME)
def test_get_assigned_macs(self):
with mock.patch.object(self.pci_wrapper,
"_as_root") as mock_as_root:
mock_as_root.return_value = self.VF_LINK_SHOW
result = self.pci_wrapper.get_assigned_macs([self.VF_INDEX])
self.assertEqual([self.MAC_MAPPING[self.VF_INDEX]], result)
def test_get_assigned_macs_fail(self):
with mock.patch.object(self.pci_wrapper,
"_as_root") as mock_as_root:
mock_as_root.side_effect = Exception()
            self.assertRaises(exc.IpCommandError,
self.pci_wrapper.get_assigned_macs,
[self.VF_INDEX])
def test_get_vf_state_enable(self):
with mock.patch.object(self.pci_wrapper,
"_as_root") as mock_as_root:
mock_as_root.return_value = self.VF_LINK_SHOW
result = self.pci_wrapper.get_vf_state(self.VF_INDEX)
self.assertTrue(result)
def test_get_vf_state_disable(self):
with mock.patch.object(self.pci_wrapper,
"_as_root") as mock_as_root:
mock_as_root.return_value = self.VF_LINK_SHOW
result = self.pci_wrapper.get_vf_state(self.VF_INDEX_DISABLE)
self.assertFalse(result)
def test_get_vf_state_fail(self):
with mock.patch.object(self.pci_wrapper,
"_as_root") as mock_as_root:
mock_as_root.side_effect = Exception()
self.assertRaises(exc.IpCommandError,
self.pci_wrapper.get_vf_state,
self.VF_INDEX)
def test_set_vf_state(self):
with mock.patch.object(self.pci_wrapper, "_as_root"):
result = self.pci_wrapper.set_vf_state(self.VF_INDEX,
True)
self.assertIsNone(result)
def test_set_vf_state_fail(self):
with mock.patch.object(self.pci_wrapper,
"_as_root") as mock_as_root:
mock_as_root.side_effect = Exception()
self.assertRaises(exc.IpCommandError,
self.pci_wrapper.set_vf_state,
self.VF_INDEX,
True)
|
dseredyn/velma_scripts
|
scripts/test_hierarchy_control.py
|
Python
|
bsd-3-clause
| 18,738
| 0.007792
|
#!/usr/bin/env python
# Copyright (c) 2015, Robot Control and Pattern Recognition Group,
# Institute of Control and Computation Engineering
# Warsaw University of Technology
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Warsaw University of Technology nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYright HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Dawid Seredynski
#
import roslib
roslib.load_manifest('velma_scripts')
import rospy
import tf
from std_msgs.msg import *
from sensor_msgs.msg import *
from geometry_msgs.msg import *
from visualization_msgs.msg import *
import tf
from tf import *
from tf.transformations import *
import tf_conversions.posemath as pm
from tf2_msgs.msg import *
import PyKDL
import math
import numpy as np
import copy
import matplotlib.pyplot as plt
import thread
import random
import openravepy
from openravepy import *
from optparse import OptionParser
from openravepy.misc import OpenRAVEGlobalArguments
import itertools
import rospkg
import multiprocessing
import velmautils
from velma import Velma
import openraveinstance
import conversions as conv
#import rrt_star_planner_ee
import rrt_star_connect_planner
import tree
import rosparam
import tasks
def identityMatrix(size):
I = np.matrix(numpy.zeros( (size, size) ))
for idx in range(size):
I[idx,idx] = 1.0
return I
class TestHierarchyControl:
"""
"""
def __init__(self):
        self.pub_marker = velmautils.MarkerPublisher()
def spin(self):
simulation = True
rospack = rospkg.RosPack()
env_file=rospack.get_path('velma_scripts') + '/data/jar/cabinet_test.env.xml'
srdf_path=rospack.get_path('velma_description') + '/robots/'
print "creating interface for Velma..."
# create the interface for Velma robot
velma = Velma()
print "done."
#
# Initialise Openrave
#
        openrave = openraveinstance.OpenraveInstance()
openrave.startOpenraveURDF(env_file=env_file, viewer=True)
openrave.readRobot(srdf_path=srdf_path)
openrave.setCamera(PyKDL.Vector(2.0, 0.0, 2.0), PyKDL.Vector(0.60, 0.0, 1.10))
velma.waitForInit()
openrave.updateRobotConfigurationRos(velma.js_pos)
non_adj_links_ids = openrave.robot_rave.GetNonAdjacentLinks()
velma.switchToJoint()
lim_bo_soft, lim_up_soft = velma.getJointSoftLimitsVectors()
lim_bo, lim_up = velma.getJointLimitsVectors()
velma.fk_ik_solver.createJacobianFkSolvers('torso_base', 'right_HandPalmLink', velma.getJointStatesVectorNames())
velma.fk_ik_solver.createJacobianFkSolvers('torso_base', 'left_HandPalmLink', velma.getJointStatesVectorNames())
velma.fk_ik_solver.createSegmentToJointMap(velma.getJointStatesVectorNames(), velma.getInactiveJointStatesVector())
print velma.getJointStatesVectorNames()
r_HAND_targets = [
# PyKDL.Frame(PyKDL.Vector(0.5,0,1.8)),
# PyKDL.Frame(PyKDL.Rotation.RotY(170.0/180.0*math.pi), PyKDL.Vector(0.5,0,1.6)),
PyKDL.Frame(PyKDL.Rotation.RotY(90.0/180.0*math.pi), PyKDL.Vector(0.2,0.0,1.0)),
PyKDL.Frame(PyKDL.Rotation.RotY(90.0/180.0*math.pi), PyKDL.Vector(0.2,-0.5,1.0)),
]
l_HAND_targets = [
# PyKDL.Frame(PyKDL.Vector(0.5,0,1.8)),
# PyKDL.Frame(PyKDL.Rotation.RotY(170.0/180.0*math.pi), PyKDL.Vector(0.5,0,1.6)),
PyKDL.Frame(PyKDL.Rotation.RotY(90.0/180.0*math.pi) * PyKDL.Rotation.RotZ(180.0/180.0*math.pi), PyKDL.Vector(0.2,0.0,1.0)),
PyKDL.Frame(PyKDL.Rotation.RotY(90.0/180.0*math.pi) * PyKDL.Rotation.RotZ(180.0/180.0*math.pi), PyKDL.Vector(0.2,0.5,1.0)),
]
target_idx = 0
r_HAND_target = r_HAND_targets[target_idx]
l_HAND_target = l_HAND_targets[target_idx]
target_idx += 1
last_time = rospy.Time.now()
q = velma.getJointStatesVector()
q_names = velma.getJointStatesVectorNames()
iq = velma.getInactiveJointStatesVector()
counter = 0
while not rospy.is_shutdown():
if counter > 300:
r_HAND_target = r_HAND_targets[target_idx]
l_HAND_target = l_HAND_targets[target_idx]
target_idx = (target_idx + 1)%len(r_HAND_targets)
counter = 0
counter += 1
time_elapsed = rospy.Time.now() - last_time
J_JLC = np.matrix(numpy.zeros( (len(q), len(q)) ))
delta_V_JLC = np.empty(len(q))
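            # Joint-limit-avoidance task: the loop below builds a diagonal
            # activation matrix J_JLC whose entries ramp towards 1 as a joint
            # moves from its soft limit towards its hard limit, together with
            # the corresponding error vector delta_V_JLC.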
for q_idx in range(len(q)):
if q[q_idx] < lim_bo_soft[q_idx]:
delta_V_JLC[q_idx] = q[q_idx] - lim_bo_soft[q_idx]
J_JLC[q_idx,q_idx] = min(1.0, 10*abs(q[q_idx] - lim_bo_soft[q_idx]) / abs(lim_bo[q_idx] - lim_bo_soft[q_idx]))
elif q[q_idx] > lim_up_soft[q_idx]:
delta_V_JLC[q_idx] = q[q_idx] - lim_up_soft[q_idx]
J_JLC[q_idx,q_idx] = min(1.0, 10*abs(q[q_idx] - lim_up_soft[q_idx]) / abs(lim_up[q_idx] - lim_up_soft[q_idx]))
else:
delta_V_JLC[q_idx] = 0.0
J_JLC[q_idx,q_idx] = 0.0
J_JLC_inv = np.linalg.pinv(J_JLC)
N_JLC = identityMatrix(len(q)) - (J_JLC_inv * J_JLC)
N_JLC_inv = np.linalg.pinv(N_JLC)
v_max_JLC = 20.0/180.0*math.pi
kp_JLC = 1.0
dx_JLC_des = kp_JLC * delta_V_JLC
# min(1.0, v_max_JLC/np.linalg.norm(dx_JLC_des))
if v_max_JLC > np.linalg.norm(dx_JLC_des):
vv_JLC = 1.0
else:
vv_JLC = v_max_JLC/np.linalg.norm(dx_JLC_des)
dx_JLC_ref = - vv_JLC * dx_JLC_des
# right hand
J_r_HAND = velma.fk_ik_solver.getJacobian('torso_base', 'right_HandPalmLink', q)
J_r_HAND_inv = np.linalg.pinv(J_r_HAND)
N_r_HAND = identityMatrix(len(q)) - (J_r_HAND_inv * J_r_HAND)
T_B_E = velma.fk_ik_solver.calculateFk2('torso_base', 'right_HandPalmLink', q)
r_HAND_current = T_B_E
r_HAND_diff = PyKDL.diff(r_HAND_target, r_HAND_current)
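            # PyKDL.diff yields a Twist (translational and rotational
            # components) between the two frames; the lines below unpack it
            # into a 6-element task-space error vector.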
delta_V_HAND = np.empty(6)
delta_V_HAND[0] = r_HAND_diff.vel[0]
delta_V_HAND[1] = r_HAND_diff.vel[1]
delta_V_HAND[2] = r_HAND_diff.vel[2]
delta_V_HAND[3] = r_HAND_diff.rot[0]
delta_V_HAND[4] = r_HAND_diff.rot[1]
delta_V_HAND[5] = r_HAND_diff.rot[2]
v_max_HAND = 2.0
kp_HAND = 2.0
dx_HAND_des = kp_HAND * delta_V_HAND
if v_max_HAND > np.linalg.norm(dx_HAND_des):
vv_HAND = 1.0
else:
vv_HAND = v_max_HAND/np.linalg.norm(dx_HAND_des)
dx_r_HAND_ref = - vv_HAND * dx_HAND_des
            # left hand
|
libvirt/autotest
|
client/virt/tests/clock_getres.py
|
Python
|
gpl-2.0
| 1,155
| 0.000866
|
import logging, os
from autotest_lib.client.common_lib import error
from autotest_lib.client.bin import utils
def run_clock_getres(test, params, env):
"""
Verify if guests using kvm-clock as the time source have a sane clock
resolution.
@param test: kvm test object.
@param params: Dictionary with test parameters.
@param env: Dictionary with the test environment.
"
|
""
source_name = "test_clock_getres/test_clock_getres.c"
source_name = os.path.join(test.virtdir, "deps", source_name)
dest_name = "/tmp/test_clock_ge
|
tres.c"
bin_name = "/tmp/test_clock_getres"
if not os.path.isfile(source_name):
raise error.TestError("Could not find %s" % source_name)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
vm.copy_files_to(source_name, dest_name)
session.cmd("gcc -lrt -o %s %s" % (bin_name, dest_name))
session.cmd(bin_name)
logging.info("PASS: Guest reported appropriate clock resolution")
logging.info("Guest's dmesg:\n%s", session.cmd_output("dmesg").strip())
|
amitsaha/learning
|
python/collapse_range.py
|
Python
|
unlicense
| 1,067
| 0.004686
|
'''
Given a number of integer intervals, collapse them:
Input: [(0, 1), (3, 5), (4, 8), (10, 12), (9, 10)]
Output: [(0, 1), (3, 8), (9, 12)]
'''
def collapse_range(slots):
# order based on the first slot
ordered_slots = sorted(slots, key=lambda range: range[0])
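    # After sorting by lower bound, grow the current interval's upper bound
    # while subsequent slots overlap it, then emit the merged interval and
    # resume scanning from the first non-overlapping slot.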
merged_slots = []
i = 0
j = 0
while i < len(ordered_slots):
j = i+1
l, u = ordered_slots[i]
while j < len(ordered_slots):
            # extend the running upper bound while the next slot overlaps it
            # (using u rather than ordered_slots[i][1] also handles nested
            # intervals such as (0, 10), (2, 3))
            if u >= ordered_slots[j][0]:
                u = max(u, ordered_slots[j][1])
i += 1
j += 1
continue
else:
break
i = j
merged_slots.append((l, u))
return merged_slots
assert collapse_range([(0, 1), (3, 5), (4, 8), (10, 12), (12, 14), (9, 10)]) == \
[(0, 1), (3, 8), (9, 14)]
assert collapse_range([(0, 1), (3, 5), (6, 8), (10, 12), (8, 10)]) == \
[(0, 1), (3, 5), (6, 12)]
assert collapse_range([(0, 1), (3, 5)]) == \
[(0, 1), (3, 5)]
assert collapse_range([(0, 2), (2, 5)]) == \
[(0, 5)]
|
smartybit/stepic_webtech1
|
web/hello.py
|
Python
|
mit
| 221
| 0.027149
|
#!/usr/bin/python
def app(environ, start_response):
request = environ['QUERY_STRING']
start_response("200 OK", [
("Content-Type", "text/plain"),
])
return [request.replace('&','\n') ]
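# Illustrative sketch (not part of the original file): the app can be served
# locally with the stdlib reference WSGI server for manual testing.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    make_server('localhost', 8080, app).serve_forever()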
|
RLovelett/qt
|
doc/src/diagrams/contentspropagation/customwidget.py
|
Python
|
lgpl-2.1
| 6,694
| 0.007619
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of the test suite of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://qt.digia.com/licensing. For further information
## use the contact form at http://qt.digia.com/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
##
## $QT_END_LICENSE$
##
#############################################################################
import os, sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class CustomWidget(QWidget):
def __init__(self, parent, fake = False):
QWidget.__init__(self, parent)
gradient = QLinearGradient(QPointF(0, 0), QPointF(100.0, 100.0))
baseColor = QColor(0xa6, 0xce, 0x39, 0x7f)
gradient.setColorAt(0.0, baseColor.light(150))
gradient.setColorAt(0.75, baseColor.light(75))
self.brush = QBrush(gradient)
self.fake = fake
self.fakeBrush = QBrush(Qt.red, Qt.DiagCrossPattern)
qtPath = QPainterPath()
qtPath.setFillRule(Qt.OddEvenFill)
qtPath.moveTo(-45.0, -20.0)
qtPath.lineTo(0.0, -45.0)
qtPath.lineTo(45.0, -20.0)
qtPath.lineTo(45.0, 45.0)
qtPath.lineTo(-45.0, 45.0)
qtPath.lineTo(-45.0, -20.0)
qtPath.closeSubpath()
qtPath.moveTo(15.0, 5.0)
qtPath.lineTo(35.0, 5.0)
qtPath.lineTo(35.0, 40.0)
qtPath.lineTo(15.0, 40.0)
qtPath.lineTo(15.0, 5.0)
        qtPath.closeSubpath()
        qtPath.moveTo(-35.0, -15.0)
qtPath.lineTo(-10.0, -15.0)
qtPath.lineTo(-10.0, 10.0)
qtPath.lineTo(-35.0, 10.0)
qtPath.lineTo(-35.0, -15.0)
qtPath.closeSubpath()
self.path = qtPath
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
if self.fake:
painter.fillRect(event.rect(), QBrush(Qt.white))
painter.fillRect(event.rect(), self.fakeBrush)
painter.setBrush(self.brush)
painter.translate(60, 60)
painter.drawPath(self.path)
painter.end()
def sizeHint(self):
return QSize(120, 120)
def minimumSizeHint(self):
return QSize(120, 120)
if __name__ == "__main__":
try:
qt = sys.argv[1]
except IndexError:
qt = "4.1"
if qt != "4.0" and qt != "4.1":
sys.stderr.write("Usage: %s [4.0|4.1]\n" % sys.argv[0])
sys.exit(1)
app = QApplication(sys.argv)
exec_dir = os.path.split(os.path.abspath(sys.argv[0]))[0]
label = QLabel()
label.setPixmap(QPixmap(os.path.join(exec_dir, "background.png")))
layout = QGridLayout()
label.setLayout(layout)
if qt == "4.0":
layout.addWidget(CustomWidget(label), 0, 0, Qt.AlignCenter)
caption = QLabel("Opaque (Default)", label)
caption.setMargin(2)
layout.addWidget(caption, 1, 0, Qt.AlignCenter | Qt.AlignTop)
elif qt == "4.1":
layout.addWidget(CustomWidget(label), 0, 0, Qt.AlignCenter)
caption = QLabel("Contents Propagated (Default)", label)
caption.setAutoFillBackground(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 0, Qt.AlignCenter | Qt.AlignTop)
if qt == "4.0":
contentsWidget = CustomWidget(label)
contentsWidget.setAttribute(Qt.WA_ContentsPropagated, True)
layout.addWidget(contentsWidget, 0, 1, Qt.AlignCenter)
caption = QLabel("With WA_ContentsPropagated set", label)
caption.setMargin(2)
layout.addWidget(caption, 1, 1, Qt.AlignCenter | Qt.AlignTop)
elif qt == "4.1":
autoFillWidget = CustomWidget(label)
autoFillWidget.setAutoFillBackground(True)
layout.addWidget(autoFillWidget, 0, 1, Qt.AlignCenter)
caption = QLabel("With autoFillBackground set", label)
caption.setAutoFillBackground(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 1, Qt.AlignCenter | Qt.AlignTop)
if qt == "4.0":
noBackgroundWidget = CustomWidget(label, fake = True)
noBackgroundWidget.setAttribute(Qt.WA_NoBackground, True)
layout.addWidget(noBackgroundWidget, 0, 2, Qt.AlignCenter)
caption = QLabel("With WA_NoBackground set", label)
caption.setWordWrap(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 2, Qt.AlignCenter | Qt.AlignTop)
elif qt == "4.1":
opaqueWidget = CustomWidget(label, fake = True)
opaqueWidget.setAttribute(Qt.WA_OpaquePaintEvent, True)
layout.addWidget(opaqueWidget, 0, 2, Qt.AlignCenter)
caption = QLabel("With WA_OpaquePaintEvent set", label)
caption.setAutoFillBackground(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 2, Qt.AlignCenter | Qt.AlignTop)
if qt == "4.0":
label.setWindowTitle("Qt 4.0: Painting Custom Widgets")
elif qt == "4.1":
label.setWindowTitle("Qt 4.1: Painting Custom Widgets")
label.resize(404, 160)
label.show()
sys.exit(app.exec_())
|
eldarion/formly
|
formly/tests/urls.py
|
Python
|
bsd-3-clause
| 249
| 0.004016
|
from django.conf.urls import include, url
from django.views.generic import TemplateView
urlpatterns = [
url(r"^home/", TemplateView.as_view(template_name="no-ie.html"), name="home"),
url(r"^",
|
include("formly.urls", namespace="formly")),
]
|
bl8/bockbuild
|
packages/glib.py
|
Python
|
mit
| 1,520
| 0.046711
|
class GlibPackage (GnomeXzPackage):
	def __init__ (self):
GnomePackage.__init__ (self,
'glib',
version_major = '2.30',
version_minor = '3')
self.darwin = Package.profile.name == 'darwin'
if Package.profile.name == 'darwin':
#link to specific revisions for glib 2.30.x
self.sources.extend ([
'https://trac.macports.org/export/62644/trunk/dports/devel/glib2/files/config.h.ed',
'https://trac.macports.org/export/87503/trunk/dports/devel/glib2/files/patch-configure.diff',
'https://trac.macports.org/export/92347/trunk/dports/devel/glib2/files/patch-glib_gunicollate.c.diff',
'https://trac.macports.org/export/92347/trunk/dports/devel/glib2/files/patch-gi18n.h.diff',
'https://trac.macports.org/export/92347/trunk/dports/devel/glib2/files/patch-gio_xdgmime_xdgmime.c.diff',
'https://trac.macports.org/export/87469/trunk/dports/devel/glib2/files/patch-glib-2.0.pc.in.diff',
'https://trac.macports.org/export/87469/trunk/dports/devel/glib2/files/patch-gio_gdbusprivate.c.diff',
])
def prep (self):
Package.prep (self)
if self.darwin:
for p in range (2, len (self.sources)):
self.sh ('patch -p0 < %{sources[' + str (p) + ']}')
def build (self):
if not self.darwin:
Package.build (self)
return
self.sh (
# 'autoconf',
'%{configure}',
'ed - config.h < %{sources[1]}',
'%{make}'
)
def install (self):
Package.install (self)
if self.darwin:
# FIXME: necessary?
self.sh ('rm -f %{prefix}/lib/charset.alias')
GlibPackage ()
|
nestof/domoCore
|
com/nestof/domocore/domain/Parameter.py
|
Python
|
gpl-3.0
| 482
| 0.006224
|
# -*- coding: utf-8 -*-
'''
Created on 23 mars 2014
@author: nestof
'''
class Parameter(object):
'''
classdocs
'''
tableName = 'parametrage'
colCodeName = 'code'
colTypeName = 'type'
colValueName = 'valeur'
colCommentName = 'commentaire'
def __init__(self):
'''
Constructor
'''
self._code = None
self._type = None
        self._value = None
self._comment = None
|
liushu2000/django-crispy-formwizard
|
contact/views.py
|
Python
|
gpl-2.0
| 9,658
| 0.003935
|
from django.shortcuts import redirect, render_to_response, render
from django.contrib.formtools.wizard.views import SessionWizardView
from django import forms, http
from models import Contact
from forms import ContactForm2
import collections
from datatableview.views import XEditableDatatableView, DatatableView
from datatableview import helpers
from django_datatables_view.base_datatable_view import BaseDatatableView
from django.db.models import Q
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.translation import ugettext_lazy as _
from datetime import datetime
from formwizard import settings
from django.http import HttpResponse
from django.template import RequestContext
from django.utils import translation
from django.utils.translation import check_for_language
def set_language(request):
next = request.REQUEST.get('next', None)
if not next:
next = request.META.get('HTTP_REFERER', None)
if not next:
next = '/'
response = http.HttpResponseRedirect(next)
if request.method == 'GET':
lang_code = request.GET.get('language', None)
if lang_code and check_for_language(lang_code):
if hasattr(request, 'session'):
request.session['django_language'] = lang_code
else:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
translation.activate(lang_code)
return response
@csrf_exempt
def users_plain(request):
#users = Contact.objects.all()
if request.method == 'GET':
users = Contact.objects.all()
paginator = Paginator(users, 1) # Show 25 contacts per page
page = request.GET.get('page')
try:
users = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
users = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
users = paginator.page(paginator.num_pages)
else:
query = request.POST.get('q')
if query:
users = Contact.objects.filter(name=query)
return render_to_response('user_grid4.html', {"users": users,},)
#return render_to_response('user_grid4.html', {'users': users})
def users_bitbucket(request):
# from autofixture import AutoFixture
# fixture = AutoFixture(Contact)
# fixture.create(220)
#language= _("Chinese")
context = RequestContext(request)
if request.method == 'GET':
language_code = request.LANGUAGE_CODE
#translation.activate(language)
# if settings.LANGUAGE_CODE:
# translation.activate(settings.LANGUAGE_CODE)
return render_to_response('user_grid_bitbucket.html',
{"language_code": language_code},
context_instance=RequestContext(request))
def users_jqueryui(request):
return render_to_response('users_jqueryui.html',
context_instance=RequestContext(request))
def users_bootstrap3(request):
return render_to_response('user_bootstrap3.html',
context_instance=RequestContext(request))
class UserListJson(BaseDatatableView):
# The model we're going to show
model = Contact
# define the columns that will be returned
columns = ['name', 'email', 'phone', 'location']
    # define column names that will be used in sorting;
    # order is important and should match the order of the columns
    # displayed by datatables. For non-sortable columns use an empty
    # value like ''
order_columns = [ 'name', 'email', 'phone', 'location']
# set max limit of records returned, this is used to protect our site if someone tries to attack our site
# and make it return huge amount of data
max_display_length = 500
def render_column(self, row, column):
# We want to render user as a custom column
if column == 'name':
return '%s %s' % (row.name, row.email)
else:
return super(UserListJson, self).render_column(row, column)
def filter_queryset(self, qs):
# use request parameters to filter queryset
# simple example:
sSearch = self.request.GET.get('sSearch', None)
if sSearch:
qs = qs.filter(name__istartswith=sSearch)
# more advanced example
filter_customer = self.request.POST.get('customer', None)
if filter_customer:
customer_parts = filter_customer.split(' ')
qs_params = None
for part in customer_parts:
q = Q(customer_firstname__istartswith=part)|Q(customer_lastname__istartswith=part)
qs_params = qs_params | q if qs_params else q
qs = qs.filter(qs_params)
return qs
class XEditableColumnsDatatableView(XEditableDatatableView):
def get_queryset(self):
return Contact.objects.filter(name='Sean')
def get_template_names(self):
""" Try the view's snake_case name, or else use default simple template. """
# name = self.__class__.__name__.replace("DatatableView", "")
# name = re.sub(r'([a-z]|[A-Z]+)(?=[A-Z])', r'\1_', name)
return ["user_grid.html", "example_base.html"]
#model = Contact
datatable_options = {
'columns': [
'id',
("Name", 'name', helpers.make_xeditable),
("Email", 'email', helpers.make_xeditable),
("Phone", 'phone', helpers.make_xeditable),
]
}
class User_display(XEditableDatatableView):
#model = Contact
def get_queryset(self):
return Contact.objects.filter(name='Sean')
def get_template_names(self):
""" Try the view's snake_case name, or else use default simple template. """
# name = self.__class__.__name__.replace("DatatableView", "")
# name = re.sub(r'([a-z]|[A-Z]+)(?=[A-Z])', r'\1_', name)
return ["user_grid3.html", "example_base.html"]
#model = Contact
datatable_options = {
'columns': [
'id',
("Name", 'name', ),
("Email", 'email', ),
("Phone", 'phone',helpers.make_xeditable(placeholder="Enter a valid date")),
("Edit",)
]
}
def get_column_Edit_data(self, instance, *args, **kwargs):
# Simplest use, "text" will be the unicode of instance.blog
#return helpers.link_to_model(instance)
# Specify a custom text
return helpers.link_to_model(instance, text="Edit")
from django.shortcuts import render
import django_tables2 as tables
class User_Table(tables.Table):
selection = tables.CheckBoxColumn()
class Meta:
model = Contact
def user_list(request):
table = User_Table(Contact.objects.all())
return render(request, 'user_grid2.html', {'table': table})
def user_test(request):
users = Contact.objects.filter(name="Sean")
print users
users2 = users.filter(email="sean@msn.com")
return render_to_response('user_grid.html', {
'users2': users2,
})
class ContactWizard(SessionWizardView):
template_name = 'contact_wizard_form.html'
def process_step(self, form):
current_step = self.steps.current
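        # Dynamically reshape the wizard: if step '0' was submitted with the
        # name 'Sean', drop step '1' from form_list and graft an optional
        # 'location' field onto step '2' instead.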
if current_step == '0' and self.request.POST['0-name'] == 'Sean':
if '1' in self.form_list:
del self.form_list['1']
location = forms.CharField(required=False)
self.form_list['2'].base_fields['location'] = location
return self.get_form_step_data(form)
def done(self, form_list, **kwargs):
new = Contact()
#new.user = self.request.user
for form in form_list:
for k, v in form.cleaned_data.iteritems():
setattr(new, k, v)
new.save()
return redirect('/users/')
# return rende
|
MrYsLab/PyMata
|
examples/digital_analog_io/callback_buttonLed_toggle.py
|
Python
|
agpl-3.0
| 2,453
| 0.000408
|
#!/usr/bin/env python
"""
Copyright (c) 2015-2017 Alan Yorinks All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
Version 3 as published by the Free Software Foundation; either
or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU AFFERO GENERAL PUBLIC LICENSE
along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
This example illustrates using callbacks to toggle an LED. Each time the
button switch is pressed, the LED state toggles to the opposite state.
The latch is rearmed within the callback routine.
"""
import time
import signal
import sys
from PyMata.pymata import PyMata
# Digital pins
GREEN_LED = 6
PUSH_BUTTON = 12
# Switch states
ON = 1
OFF = 0
# Default state of the LED
led_state = OFF
def get_led_state():
global led_state
return led_state
def set_led_state(state):
global led_state
led_state = state
# Callback function
# Set the LED to current state of the pushbutton switch
def cb_push_button(data):
print(data)
if get_led_state() == OFF:
board.digital_write(GREEN_LED, ON)
set_led_state(ON)
else:
board.digital_write(GREEN_LED, OFF)
set_led_state(OFF)
# Re-arm the latch to fire on the next transition to high
board.set_digital_latch(PUSH_BUTTON, board.DIGITAL_LATCH_HIGH, cb_push_button)
def signal_handler(sig, frame):
print('You pressed Ctrl+C')
if board is not None:
board.reset()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# Create a PyMata instance
board = PyMata("/dev/ttyACM0", verbose=True)
# Set pin modes
# Set the pin to digital output to light the green LED
board.set_pin_mode(GREEN_LED, board.OUTPUT, board.DIGITAL)
# Set the pin to digital input to receive button presses
board.set_pin_mode(PUSH_BUTTON, board.INPUT, board.DIGITAL)
# Arm the digital latch to detect when the button is pressed
board.set_digital_latch(PUSH_BUTTON, board.DIGITAL_LATCH_HIGH, cb_push_button)
# A forever loop until user presses Ctrl+C
while 1:
pass
|
netsoc/admin_utils
|
freshers_signup.py
|
Python
|
gpl-3.0
| 4,811
| 0.002702
|
#!/usr/bin/env python3
import datetime
import json
import os
import time
import signal
# attempt to ignore ctrl-z
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
def blinking(string):
return '\33[5m' + string + '\33[0m'
BANNER = """ W e l c o m e t o
______ __
/ ____ \ ___ / /__________ _____
/ / __ `// _ \/ __/ ___/ __ \/ ___/
/ / / / // __/ /_(__ ) /_/ / /__
\/_/ /_/ \___/\__/____/\____/\___/
\____/
F r e s h e r s ' """
BANNER += ' '.join(list(str(datetime.datetime.now().year))) + '\n'
def yes_no(prompt):
answers = {
'yes': True, 'y': True,
'no': False, 'n': False
}
text = ''
print(prompt)
while not text.lower() in answers:
text = input(' [y/n] >> ')
return answers[text.lower()]
def get_email(prompt):
text = input(prompt)
while '@' not in text:
print(' An email address typically has an `@` in it...')
text = input(' >> ')
return text
def get_username(prompt):
while True:
username = input(prompt)
if username == "":
break
confirmed = input(" And again, just to confirm.\n >> ")
if (confirmed != username):
print(" Entries do not match!\n")
else:
break
return username
def get_name(prompt):
while True:
name = input(prompt)
if not name:
print(" We need something, anything!")
elif name.lower() in ['hugh mungus', 'hugh mongus']:
print(blinking(" Now that's one SPICY MEME"))
else:
return name
def get_user_details():
user = dict()
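    # Each entry below is a (prompt text, result key, input function) triple;
    # the username prompt is only shown to renewing members.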
prompts = (
("What's your full name?\n"
" >> ",
'name',
get_name),
("What's your email address?\n"
" >> ",
'email',
get_email),
("Are you already a member?",
'renewing',
yes_no),
("What's your username? "
" (if you remember it, leave blank if you don't!)"
"\n >> ",
'username',
get_username),
("Want to subscribe to our jobs & internships mailing list?",
'jobseeker',
yes_no),
)
for prompt_text, key, input_func in prompts:
if key == 'username' and not user['renewing']:
continue
response = input_func(prompt_text)
if key != 'username' or response != "":
user[key] = response
return user
def fmt_details(user):
l = 60
details = (' Name: {name}\n'
' Email address: {email}'
.format(name=user['name'], email=user['email']))
if 'username' in user:
username = ('\n Username: {username}'
.format(username=user['username']))
else:
username = ''
jobseeker_text = (" You{}want to receive job & internship emails"
.format(' ' if user['jobseeker'] else " don't "))
renewing_text = (" You are renewing your membership\n" if user['renewing']
else " You are not already a member\n")
return ("So, to confirm:\n\n" +
("#" * l) + '\n' +
details +
username + '\n\n' +
jobseeker_text + '\n' +
renewing_text +
("#" * l) + '\n')
def register_user(users_file, user):
with open(users_file, 'r') as f:
users = json.load(f)
users.append(user)
with open(users_file, 'w') as f:
json.dump(users, f)
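# Note: register_user does a full read-modify-write of the JSON file with no
# locking, so two simultaneous signups against the same file could race.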
def prep_screen():
os.system('clear')
print(BANNER)
def signups(users_file):
while True:
try:
prep_screen()
user = get_user_details()
print('\n')
print(fmt_details(user))
valid = yes_no("Are **all** of these details correct?")
if valid:
register_user(users_file, user)
print("Welcome, {}!".format(user['name'].split(' ')[0]))
time.sleep(2)
for _ in range(25):
print()
time.sleep(0.05)
else:
continue
except (KeyboardInterrupt, EOFError):
try:
kill = input('<')
if kill == 'kill':
break
except (KeyboardInterrupt, EOFError):
continue
continue
def main():
timestamp = datetime.datetime.now().strftime("%Y-%m-%d")
filename = "freshers_{}.json".format(timestamp)
    if not os.path.isfile(filename):
with open(filename, 'w') as f:
json.dump([], f)
signups(filename)
if __name__ == '__main__':
main()
|
rcuza/init
|
devops/templates/python/python_class.py
|
Python
|
isc
| 501
| 0
|
#!/usr/bin/env python
# encoding: utf-8
"""
${TM_NEW_FILE_BASENAME}.py
Created by ${TM_FULLNAME} on ${TM_DATE}.
Copyright (c) ${TM_YEAR} ${TM_ORGANIZATION_NAME}. All rights reserved.
"""
from __future__ import absolute_import, division, print_function
import sys
import os
import unittest
class ${TM_NEW_FILE_BASENAME}:
def __init__(self):
pass
class ${TM_NEW_FILE_BASENAME}Tests(unittest.TestCase):
def setUp(self):
pass
if __name__ == '__main__':
unittest.main()
|
uclouvain/OSIS-Louvain
|
assessments/tests/business/test_score_encoding_progress.py
|
Python
|
agpl-3.0
| 14,789
| 0.004328
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from random import randint
from django.test import TestCase
from assessments.business import score_encoding_progress
from attribution.tests.factories.attribution import AttributionFactory
from attribution.tests.models import test_attribution
from base.models.enums import number_session
from base.tests.factories.academic_calendar import AcademicCalendarExamSubmissionFactory
from base.tests.factories.academic_year import AcademicYearFactory
from base.tests.factories.education_group_year import EducationGroupYearFactory
from base.tests.factories.learning_unit_year import LearningUnitYearFactory
from base.tests.factories.offer_year_calendar import OfferYearCalendarFactory
from base.tests.factories.person import PersonFactory
from base.tests.factories.program_manager import ProgramManagerFactory
from base.tests.factories.session_exam_calendar import SessionExamCalendarFactory
from base.tests.factories.session_examen import SessionExamFactory
from base.tests.factories.student import StudentFactory
from base.tests.factories.student_specific_profile import StudentSpecificProfileFactory
from base.tests.factories.tutor import TutorFactory
from base.tests.models import test_exam_enrollment, test_offer_enrollment, test_learning_unit_enrollment
class ScoreEncodingProgressTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.academic_year = AcademicYearFactory(current=True)
cls.academic_calendar = AcademicCalendarExamSubmissionFactory(title="Submission of score encoding - 1",
data_year=cls.academic_year)
SessionExamCalendarFactory(academic_calendar=cls.academic_calendar, number_session=number_session.ONE)
# Offer year CHIM1BA
cls.educ_group_year = EducationGroupYearFactory(acronym="CHIM1BA", academic_year=cls.academic_year)
cls.learning_unit_year = LearningUnitYearFactory(acronym="LBIR1210", academic_year=cls.academic_year)
cls._create_context_exam_enrollments(cls.learning_unit_year, cls.educ_group_year, 10, 3)
cls.learning_unit_year_2 = LearningUnitYearFactory(acronym="LBIR1211", academic_year=cls.academic_year)
learning_unit_year_2_enrollments = cls._create_context_exam_enrollments(
cls.learning_unit_year_2,
cls.educ_group_year,
5
)
peps_student = StudentSpecificProfileFactory()
learning_unit_year_2_enrollments[0].offer_enrollment.student = peps_student.student
learning_unit_year_2_enrollments[0].offer_enrollment.save()
# Offer year DIR2BA
cls.educ_group_year_2 = EducationGroupYearFactory(acronym="DIR2BA", academic_year=cls.academic_year)
cls._create_context_exam_enrollments(cls.learning_unit_year, cls.educ_group_year_2, 8, 5)
cls.program_manager = ProgramManagerFactory(education_group=cls.educ_group_year.education_group)
ProgramManagerFactory(education_group=cls.educ_group_year_2.education_group, person=cls.program_manager.person)
# Tutor [Tom Dupont] have an attribution to LBIR1210
cls.tutor = TutorFactory(person=PersonFactory(last_name="Dupont", first_name="Thierry"))
AttributionFactory(tutor=cls.tutor, learning_unit_year=cls.learning_unit_year, score_responsible=True)
def test_get_scores_encoding_progress_program_manager(self):
progress_list = score_encoding_progress.get_scores_encoding_progress(
user=self.program_manager.person.user,
education_group_year_id=None,
number_session=number_session.ONE,
academic_year=self.academic_year
)
# Group by learning unit year
progress_list = score_encoding_progress.group_by_learning_unit_year(progress_list)
self.assertEqual(len(progress_list), 2)
# Check if sort by learning unit acronym
self.assertEqual(progress_list[0].learning_unit_year_acronym, self.learning_unit_year.acronym)
self.assertEqual(progress_list[1].learning_unit_year_acronym, self.learning_unit_year_2.acronym)
# Check total enrollment
self.assertEqual(progress_list[0].total_exam_enrollments, 18)
self.assertEqual(progress_list[1].total_exam_enrollments, 5)
# Check progress
self.assertEqual(progress_list[1].progress_int, 100)
self.assertFalse(progress_list[0].has_student_specific_profile)
self.assertTrue(progress_list[1].has_student_specific_profile)
def test_get_scores_encoding_progress_program_manager_with_filter_offer_year(self):
progress_list = score_encoding_progress.get_scores_encoding_progress(
user=self.program_manager.person.user,
education_group_year_id=self.educ_group_year_2,
number_session=number_session.ONE,
academic_year=self.academic_year
)
progress_list = score_encoding_progress.group_by_learning_unit_year(progress_list)
self.assertEqual(len(progress_list), 1)
self.assertEqual(progress_list[0].learning_unit_year_acronym, self.learning_unit_year.acronym)
self.assertEqual(progress_list[0].total_exam_enrollments, 8)
def test_get_scores_encoding_progress_with_tutors_and_score_responsible(self):
# Create tutors
test_attribution.create_attribution(tutor=TutorFactory(), learning_unit_year=self.learning_unit_year)
test_attribution.create_attribution(tutor=TutorFactory(), learning_unit_year=self.learning_unit_year)
progress_list = score_encoding_progress.get_scores_encoding_progress(
user=self.program_manager.person.user,
education_group_year_id=self.educ_group_year_2,
number_session=number_session.ONE,
academic_year=self.academic_year
)
progress_list = score_encoding_progress.append_related_tutors_and_score_responsibles(progress_list)
progress_list = score_encoding_progress.group_by_learning_unit_year(progress_list)
self.assertEqual(len(progress_list), 1)
self.assertEqual(len(progress_list[0].tutors), 3)
self.assertEqual(len(progress_list[0].score_responsibles), 1)
self.assertFalse(progress_list[0].has_student_specific_profile)
def test_get_scores_encoding_progress_filter_only_incomplete(self):
progress_list = score_encoding_progress.get_scores_encoding_progress(
user=self.program_manager.person.user,
education_group_year_id=None,
number_session=number_session.ONE,
academic_year=self.academic_year
)
progress_list = score_encoding_progress.group_by_learning_unit_year(progress_list)
self.assertEqual(len(progress_list), 2)
progress_list = score_encoding_progress.filter_only_incomplete(progress_list)
self.assertEqual(len(progress_list), 1)
def test_get_scores_encoding_progres
|
ibbad/dna-lceb-web
|
app/api_v1_0/__init__.py
|
Python
|
apache-2.0
| 331
| 0.003021
|
"""
Initialization script for the REST API of the application.
"""
from flask import Blueprint
from app.common.logging import setup_logging
api = Blueprint('api', __name__)
# Setup logger
# api_log = setup_logging(__name__, 'logs/api.log', maxFilesize=1000000,
# backup_count=5)
from . import views, errors
|
YeoLab/gscripts
|
gscripts/general/submit_fastqc.py
|
Python
|
mit
| 403
| 0.009926
|
from gscripts import qtools
import sys, os
if not os.path.exists("fastqc/"):
os.mkdir("fastqc")
cmds = []
Sub = qtools.Submitter()
for fileName in sys.argv[1:]:
fastqc_command = "fastqc -o fastqc %s"
|
%fileName
cmds.append(fastqc_command)
Sub.job(command_list=cmds, sh_file="runFastqc.sh", job_name="Fastqc", array=True, queue="home", nodes=1, ppn=1, submit=True, max_running=1000)
|
devdelay/home-assistant
|
tests/helpers/test_entity_component.py
|
Python
|
mit
| 10,909
| 0
|
"""The tests for the Entity component helper."""
# pylint: disable=protected-access,too-many-public-methods
from collections import OrderedDict
import logging
import unittest
from unittest.mock import patch, Mock
import homeassistant.core as ha
import homeassistant.loader as loader
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers import discovery
import homeassistant.util.dt as dt_util
from tests.common import (
get_test_home_assistant, MockPlatform, MockModule, fire_time_changed)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "test_domain"
class EntityTest(Entity):
"""Test for the Entity component."""
def __init__(self, **values):
"""Initialize an entity."""
self._values = values
if 'entity_id' in values:
self.entity_id = values['entity_id']
@property
def name(self):
"""Return the name of the entity."""
return self._handle('name')
@property
def should_poll(self):
"""Return the ste of the polling."""
return self._handle('should_poll')
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._handle('unique_id')
def _handle(self, attr):
"""Helper for the attributes."""
if attr in self._values:
return self._values[attr]
return getattr(super(), attr)
class TestHelpersEntityComponent(unittest.TestCase):
"""Test homeassistant.helpers.entity_component module."""
def setUp(self): # pylint: disable=invalid-name
"""Initialize a test Home Assistant instance."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Clean up the test Home Assistant instance."""
self.hass.stop()
def test_setting_up_group(self):
"""Setup the setting of a group."""
component = EntityComponent(_LOGGER, DOMAIN, self.hass,
group_name='everyone')
# No group after setup
assert 0 == len(self.hass.states.entity_ids())
component.add_entities([EntityTest(name='hello')])
# group exists
assert 2 == len(self.hass.states.entity_ids())
assert ['group.everyone'] == self.hass.states.entity_ids('group')
group = self.hass.states.get('group.everyone')
assert ('test_domain.hello',) == group.attributes.get('entity_id')
# group extended
component.add_entities([EntityTest(name='hello2')])
assert 3 == len(self.hass.states.entity_ids())
group = self.hass.states.get('group.everyone')
assert ['test_domain.hello', 'test_domain.hello2'] == \
sorted(group.attributes.get('entity_id'))
def test_polling_only_updates_entities_it_should_poll(self):
"""Test the polling of only updated entities."""
component = EntityComponent(_LOGGER, DOMAIN, self.hass, 20)
no_poll_ent = EntityTest(should_poll=False)
no_poll_ent.update_ha_state = Mock()
poll_ent = EntityTest(should_poll=True)
poll_ent.update_ha_state = Mock()
component.add_entities([no_poll_ent, poll_ent])
no_poll_ent.update_ha_state.reset_mock()
poll_ent.update_ha_state.reset_mock()
fire_time_changed(self.hass, dt_util.utcnow().replace(second=0))
self.hass.pool.block_till_done()
assert not no_poll_ent.update_ha_state.called
assert poll_ent.update_ha_state.called
def test_update_state_adds_entities(self):
"""Test if updating poll entities cause an entity to be added works."""
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
ent1 = EntityTest()
ent2 = EntityTest(should_poll=True)
component.add_entities([ent2])
assert 1 == len(self.hass.states.entity_ids())
ent2.update_ha_state = lambda *_: component.add_entities([ent1])
fire_time_changed(self.hass, dt_util.utcnow().replace(second=0))
self.hass.pool.block_till_done()
assert 2 == len(self.hass.states.entity_ids())
def test_not_adding_duplicate_entities(self):
"""Test for not adding duplicate entities."""
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
assert 0 == len(self.hass.states.entity_ids())
component.add_entities([None, EntityTest(unique_id='not_very_unique')])
assert 1 == len(self.hass.states.entity_ids())
component.add_entities([EntityTest(unique_id='not_very_unique')])
assert 1 == len(self.hass.states.entity_ids())
def test_not_assigning_entity_id_if_prescribes_one(self):
"""Test for not assigning an entity ID."""
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
assert 'hello.world' not in self.hass.states.entity_ids()
component.add_entities([EntityTest(entity_id='hello.world')])
assert 'hello.world' in self.hass.states.entity_ids()
def test_extract_from_service_returns_all_if_no_entity_id(self):
"""Test the extraction of everything from service."""
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.add_entities([
EntityTest(name='test_1'),
EntityTest(name='test_2'),
])
call = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
component.extract_from_service(call))
def test_extract_from_service_filter_out_non_existing_entities(self):
"""Test the extraction of non existing entities from service."""
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.add_entities([
EntityTest(name='test_1'),
EntityTest(name='test_2'),
])
call = ha.ServiceCall('test', 'service', {
'entity_id': ['test_domain.test_2', 'test_domain.non_exist']
})
assert ['test_domain.test_2'] == \
[ent.entity_id for ent in component.extract_from_service(call)]
def test_setup_loads_platforms(self):
"""Test the loading of the platforms."""
component_setup = Mock(return_value=True)
platform_setup = Mock(return_value=None)
loader.set_component(
'test_component',
MockModule('test_component', setup=component_setup))
loader.set_component('test_domain.mod2',
MockPlatform(platform_setup, ['test_component']))
        component = EntityComponent(_LOGGER, DOMAIN, self.hass)
assert not component_setup.called
assert not platform_setup.called
component.setup({
DOMAIN: {
'platform': 'mod2',
}
})
assert component_setup.called
assert platform_setup.called
def test_setup_recovers_when_setup_raises(self):
"""Test the setup if exceptions are happening."""
platform1_setup = Mock(side_effect=Exception('Broken'))
platform2_setup = Mock(return_value=None)
loader.set_component('test_domain.mod1', MockPlatform(platform1_setup))
loader.set_component('test_domain.mod2', MockPlatform(platform2_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
assert not platform1_setup.called
assert not platform2_setup.called
component.setup(OrderedDict([
(DOMAIN, {'platform': 'mod1'}),
("{} 2".format(DOMAIN), {'platform': 'non_exist'}),
("{} 3".format(DOMAIN), {'platform': 'mod2'}),
]))
assert platform1_setup.called
assert platform2_setup.called
@patch('homeassistant.helpers.entity_component.EntityComponent'
'._setup_platform')
@patch('homeassistant.bootstrap.setup_component', return_value=True)
def test_setup_does_discovery(self, mock_setup_component, mock_setup):
"""Test setup for discovery."""
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({})
di
|
bronikkk/tirpan
|
tests/accept_mir09.py
|
Python
|
gpl-3.0
| 1,313
| 0.007616
|
#!/usr/bin/env python
"""
Created on 28.10.2014
@author: evg-zhabotinsky
"""
from test_mir_common import *
mir = tirpan_get_mir('test_mir09.py')
n = find_mir_nodes(mir,
x_call = func_checker('x'),
y_call = func_checker('y'),
z_call = func_checker('z'),
a_call = func_checker('a'),
b_call = func_checker('b'))
n.x_if = find_node_down_mir_nojoin(n.x_call, if_cond_checker(n.x_call.left))
n.y_if = find_node_down_mir_nojoin(n.y_call, if_cond_checker(n.y_call.left))
n.z_if = find_node_down_mir_nojoin(n.z_call, if_cond_checker(n.z_call.left))
n.join = find_node_down_mir(n.a_call, isinstance_checker(ti.mir.JoinMirNode))
find_node_down_mir_nojoin(mir, same_node_checker(n.x_call))
find_node_down_mir_nojoin(n.x_if.true, same_node_checker(n.z_call))
find_node_down_mir_nojoin(n.x_if.false, same_node_checker(n.y_call))
find_node_down_mir_nojoin(n.y_if.true, same_node_checker(n.z_call))
find_node_down_mir_nojoin(n.y_if.false, same_node_checker(n.b_call))
find_node_down_mir_nojoin(n.z_if.true, same_node_checker(n.a_call))
find_node_down_mir_nojoin(n.z_if.false, same_node_checker(n.b_call))
find_node_down_mir_nojoin(n.b_call, same_node_checker(n.join))
find_node_down_mir_nojoin(n.join, same_node_checker(None))
|
skilledindia/pyprimes
|
src/pyprimes/speed.py
|
Python
|
mit
| 4,872
| 0.00349
|
# -*- coding: utf-8 -*-
## Part of the pyprimes.py package.
##
## Copyright © 2014 Steven D'Aprano.
## See the file __init__.py for the licence terms for this software.
"""\
=====================================
Timing the speed of primes algorithms
=====================================
"""
from __future__ import division
import sys
from itertools import islice
# Conditionally hack the PYTHONPATH.
if __name__ == '__main__':
import os
path = os.path.dirname(__file__)
parent, here = os.path.split(path)
sys.path.append(parent)
from pyprimes.compat23 import next
import pyprimes.awful as awful
import pyprimes.probabilistic as probabilistic
import pyprimes.sieves as sieves
YEAR100 = 100*365*24*60*60 # One hundred years, in seconds.
class Stopwatch(object):
def __init__(self, timer=None):
if timer is None:
from timeit import default_timer as timer
self.timer = timer
self.reset()
def reset(self):
"""Reset all the collected timer results."""
try:
del self._start
except AttributeError:
pass
self._elapsed = 0.0
def start(self):
"""Start the timer."""
self._start = self.timer()
def stop(self):
"""Stop the timer."""
t = self.timer()
self._elapsed = t - self._start
del self._start
@property
def elapsed(self):
return self._elapsed
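# Illustrative usage sketch (not part of the original module): time one block.
def _demo_stopwatch():
    sw = Stopwatch()
    sw.start()
    sum(range(10**6))  # arbitrary workload
    sw.stop()
    return sw.elapsed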
def trial(generator, count, repeat=1):
timer = Stopwatch()
best = YEAR100
for i in range(repeat):
it = generator()
timer.reset()
timer.start()
# Go to the count-th prime as fast as possible.
p = next(islice(it, count-1, count))
timer.stop()
best = min(best, timer.elapsed)
return best
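# Illustrative call (not in the original): best-of-three time to reach the
# 1000th prime using the croft sieve:
#     trial(sieves.croft, 1000, repeat=3)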
def run(generators, number, repeat=1):
print ("Calculating speeds for first %d primes..." % number)
template = "\r ...%d of %d %s"
heading = """\
Generator Elapsed Speed
(sec) (primes/sec)
=============================================================="""
records = []
timer = Stopwatch() # For measuring the total elapsed time.
timer.start()
N = len(generators)
for i, generator in enumerate(generators):
name = generator.__module__ + '.' + generator.__name__
sys.stdout.write((template % (i+1, N, name)).ljust(69))
sys.stdout.flush()
t = trial(generator, number, repeat)
records.append((number/t, t, name))
timer.stop()
sys.stdout.write("\r%-69s\n" % "Done!")
print ('Total elapsed time: %.1f seconds' % timer.elapsed)
print ('')
records.sort()
print (heading)
for speed, elapsed, name in records:
print ("%-36s %4.2f %8.1f" % (name, elapsed, speed))
    print ('==============================================================\n')
VERY_SLOW = [awful.primes0, awful.primes1, awful.primes2, awful.turner]
SLOW = [awful.primes3, awful.primes4, probabilistic.primes]
FAST = [sieves.cookbook, sieves.croft, sieves.sieve, sieves.wheel]
MOST = SLOW + FAST
ALL = VERY_SLOW + MOST
run(VERY_SLOW + SLOW, 1000)
run([awful.primes3, awful.trial_division], 5000)
#run([awful.primes3, awful.trial_division], 50000)
#run([awful.primes3, awful.trial_division], 100000)
#run([awful.primes3, awful.trial_division], 200000)
exit()
run(ALL, 500, 3)
run(MOST, 10000)
run(FAST, 1000000)
"""
Python 2.6 or better
import multiprocessing
import time
# bar
def bar():
for i in range(100):
print "Tick"
time.sleep(1)
if __name__ == '__main__':
# Start bar as a process
p = multiprocessing.Process(target=bar)
p.start()
# Wait for 10 seconds or until process finishes
p.join(10)
# If thread is still active
if p.is_alive():
print "running... let's kill it..."
# Terminate
p.terminate()
p.join()
"""
"""
Unix only, Python 2.5 or better.
In [1]: import signal
# Register an handler for the timeout
In [2]: def handler(signum, frame):
...: print "Forever is over!"
...: raise Exception("end of time")
...:
# This function *may* run for an indetermined time...
In [3]: def loop_forever():
...: import time
...: while 1:
...: print "sec"
...: time.sleep(1)
...:
...:
# Register the signal function handler
In [4]: signal.signal(signal.SIGALRM, handler)
Out[4]: 0
# Define a timeout for your function
In [5]: signal.alarm(10)
Out[5]: 0
In [6]: try:
...: loop_forever()
...: except Exception, exc:
...: print exc
....:
sec
sec
sec
sec
sec
sec
sec
sec
Forever is over!
end of time
# Cancel the timer if the function returned before timeout
# (ok, mine won't but yours maybe will :)
In [7]: signal.alarm(0)
Out[7]: 0
"""
|
itskewpie/tempest
|
tempest/api/compute/volumes/test_volumes_negative.py
|
Python
|
apache-2.0
| 4,352
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest.test import attr
class VolumesNegativeTest(base.BaseComputeTest):
_interface = 'json'
    @classmethod
def setUpClass(cls):
super(VolumesNegativeTest, cls).setUpClass()
cls.client = cls.volumes_extensions_client
if not cls.config.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@attr(type=['negative', 'gate'])
def test_volume_get_nonexistant_volume_id(self):
        # Negative: Should not be able to get details of a nonexistent volume
        # Creating a nonexistent volume id
        # Trying to GET a nonexistent volume
self.assertRaises(exceptions.NotFound, self.client.get_volume,
str(uuid.uuid4()))
@attr(type=['negative', 'gate'])
def test_volume_delete_nonexistant_volume_id(self):
        # Negative: Should not be able to delete a nonexistent volume
        # Creating a nonexistent volume id
        # Trying to DELETE a nonexistent volume
self.assertRaises(exceptions.NotFound, self.client.delete_volume,
str(uuid.uuid4()))
@attr(type=['negative', 'gate'])
def test_create_volume_with_invalid_size(self):
# Negative: Should not be able to create volume with invalid size
# in request
v_name = data_utils.rand_name('Volume-')
metadata = {'Type': 'work'}
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='#$%', display_name=v_name, metadata=metadata)
@attr(type=['negative', 'gate'])
def test_create_volume_with_out_passing_size(self):
# Negative: Should not be able to create volume without passing size
# in request
v_name = data_utils.rand_name('Volume-')
metadata = {'Type': 'work'}
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='', display_name=v_name, metadata=metadata)
@attr(type=['negative', 'gate'])
def test_create_volume_with_size_zero(self):
# Negative: Should not be able to create volume with size zero
v_name = data_utils.rand_name('Volume-')
metadata = {'Type': 'work'}
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='0', display_name=v_name, metadata=metadata)
@attr(type=['negative', 'gate'])
def test_get_invalid_volume_id(self):
# Negative: Should not be able to get volume with invalid id
self.assertRaises(exceptions.NotFound,
self.client.get_volume, '#$%%&^&^')
@attr(type=['negative', 'gate'])
def test_get_volume_without_passing_volume_id(self):
# Negative: Should not be able to get volume when empty ID is passed
self.assertRaises(exceptions.NotFound, self.client.get_volume, '')
@attr(type=['negative', 'gate'])
def test_delete_invalid_volume_id(self):
# Negative: Should not be able to delete volume when invalid ID is
# passed
self.assertRaises(exceptions.NotFound,
self.client.delete_volume, '!@#$%^&*()')
@attr(type=['negative', 'gate'])
def test_delete_volume_without_passing_volume_id(self):
# Negative: Should not be able to delete volume when empty ID is passed
self.assertRaises(exceptions.NotFound, self.client.delete_volume, '')
class VolumesNegativeTestXML(VolumesNegativeTest):
_interface = "xml"
|
shenson/cobbler
|
cobbler/modules/authz_ownership.py
|
Python
|
gpl-2.0
| 7,391
| 0.000541
|
"""
Authorization module that allow users listed in
/etc/cobbler/users.conf to be permitted to access resources, with
the further restriction that cobbler objects can be edited to only
allow certain users/groups to access those specific objects.
Copyright 2008-2009, Red Hat, Inc and Others
Michael DeHaan <michael.dehaan AT gmail>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import ConfigParser
import os
from cobbler.cexceptions import CX
from cobbler.utils import _
def register():
"""
The mandatory cobbler module registration hook.
"""
return "authz"
def __parse_config():
etcfile = '/etc/cobbler/users.conf'
if not os.path.exists(etcfile):
raise CX(_("/etc/cobbler/users.conf does not exist"))
config = ConfigParser.ConfigParser()
# Make users case sensitive to handle kerberos
config.optionxform = str
config.read(etcfile)
alldata = {}
sections = config.sections()
for g in sections:
alldata[str(g)] = {}
opts = config.options(g)
for o in opts:
alldata[g][o] = 1
return alldata
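# Illustrative sketch (not from the original source): with a users.conf along
# these lines,
#
#   [admins]
#   alice = 1
#   [devs]
#   bob = 1
#   carol = 1
#
# __parse_config() would return
#   {'admins': {'alice': 1}, 'devs': {'bob': 1, 'carol': 1}}
# i.e. each section becomes a group and every option in it maps to 1.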
def __authorize_autoinst(api_handle, groups, user, autoinst):
# the authorization rules for automatic installation file editing are a bit
    # of a special case. Non-admin users can edit an automatic installation file
# only if all objects that depend on that automatic installation file are
# editable by the user in question.
#
# Example:
# if Pinky owns ProfileA
# and the Brain owns ProfileB
# and both profiles use the same automatic installation template
# and neither Pinky nor the Brain is an admin
# neither is allowed to edit the automatic installation template
# because they would make unwanted changes to each other
#
# In the above scenario the UI will explain the problem
# and ask that the user asks the admin to resolve it if required.
# NOTE: this function is only called by authorize so admin users are
# cleared before this function is called.
lst = api_handle.find_profile(autoinst=autoinst, return_list=True)
lst.extend(api_handle.find_system(autoinst=autoinst, return_list=True))
for obj in lst:
if not __is_user_allowed(obj, groups, user, "write_autoinst", autoinst, None):
return 0
return 1
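# Worked example of the rule above (illustrative names, not from the original
# source): if ProfileA (owners=['pinky']) and ProfileB (owners=['brain']) both
# reference the template 'default.ks', then
#   __authorize_autoinst(api, ['some_group'], 'pinky', 'default.ks')
# returns 0, because pinky fails the __is_user_allowed check for ProfileB.
# Only when the user may write every dependent object (admins are cleared
# before this is called) does the function return 1.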
def __authorize_snippet(api_handle, groups, user, autoinst):
# only allow admins to edit snippets -- since we don't have detection to see
# where each snippet is in use
for group in groups:
if group not in ["admins", "admin"]:
return False
return True
def __is_user_allowed(obj, groups, user, resource, arg1, arg2):
if user == "<DIRECT>":
# system user, logged in via web.ss
return True
for group in groups:
if group in ["admins", "admin"]:
return True
if obj.owners == []:
return True
for allowed in obj.owners:
if user == allowed:
# user match
return True
# else look for a group match
for group in groups:
if group == allowed:
return True
return 0
def authorize(api_handle, user, resource, arg1=None, arg2=None):
"""
Validate a user against a resource.
All users in the file are permitted by this module.
"""
if user == "<DIRECT>":
# CLI should always be permitted
return True
# everybody can get read-only access to everything
# if they pass authorization, they don't have to be in users.conf
if resource is not None:
# FIXME: /cobbler/web should not be subject to user check in any case
for x in ["get", "read", "/cobbler/web"]:
if resource.startswith(x):
return 1 # read operation is always ok.
user_groups = __parse_config()
# classify the type of operation
modify_operation = False
for criteria in ["save", "copy", "rename", "remove", "modify", "edit", "xapi", "background"]:
if resource.find(criteria) != -1:
modify_operation = True
# FIXME: is everyone allowed to copy? I think so.
# FIXME: deal with the problem of deleted parents and promotion
found_user = False
found_groups = []
grouplist = user_groups.keys()
for g in grouplist:
for x in user_groups[g]:
if x == user:
found_groups.append(g)
found_user = True
# if user is in the admin group, always authorize
# regardless of the ownership of the object.
if g == "admins" or g == "admin":
return True
if not found_user:
# if the user isn't anywhere in the file, reject regardless
# they can still use read-only XMLRPC
return 0
if not modify_operation:
# sufficient to allow access for non save/remove ops to all
# users for now, may want to refine later.
return True
# now we have a modify_operation op, so we must check ownership
# of the object. remove ops pass in arg1 as a string name,
# saves pass in actual objects, so we must treat them differently.
    # automatic installation files are even more special so we call those
# out to another function, rather than going through the rest of the
# code here.
if resource.find("write_autoinstall_template") != -1:
return __authorize_autoinst(api_handle, found_groups, user, arg1)
elif resource.find("read_autoinstall_template") != -1:
return True
# the API for editing snippets also needs to do something similar.
# as with automatic installation files, though since they are more
# widely used it's more restrictive
if resource.find("write_autoinstall_snippet") != -1:
return __authorize_snippet(api_handle, found_groups, user, arg1)
    elif resource.find("read_autoinstall_snippet") != -1:
return True
obj = None
if resource.find("remove") != -1:
if resource == "remove_distro":
obj = api_handle.find_distro(arg1)
elif resource == "remove_p
|
rofile":
obj = api_handle.find_profile(arg1)
elif resource == "remove_system":
obj = api_handle.find_system(arg1)
elif resource == "remove_repo":
obj = api_handle.find_repo(arg1)
elif resource == "remove_image":
obj = api_handle.find_image(arg1)
elif resource.find("save") != -1 or resource.find("modify") != -1:
obj = arg1
# if the object has no ownership data, allow access regardless
if obj is None or obj.owners is None or obj.owners == []:
return True
return __is_user_allowed(obj, found_groups, user, resource, arg1, arg2)
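# Hedged usage sketch (user and object names are hypothetical, not from the
# original source):
#
#   authorize(api, "bob", "get_profile")            # -> 1, reads always pass
#   authorize(api, "bob", "modify_profile", obj)    # passes only if bob (or
#       one of bob's groups) appears in obj.owners, or obj.owners is empty
#   authorize(api, "mallory", "modify_profile", obj)  # -> 0 if mallory is
#       not listed anywhere in users.conf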
|
RafaelPalomar/girder
|
girder/api/v1/file.py
|
Python
|
apache-2.0
| 18,950
| 0.002058
|
# -*- coding: utf-8 -*-
import cherrypy
import errno
from ..describe import Description, autoDescribeRoute, describeRoute
from ..rest import Resource, filtermodel
from ...constants import AccessType, TokenScope
from girder.exceptions import AccessException, GirderException, RestException
from girder.models.assetstore import Assetstore
from girder.models.file import File as FileModel
from girder.models.item import Item
from girder.models.upload import Upload
from girder.api import access
from girder.utility import RequestBodyStream
from girder.utility.model_importer import ModelImporter
from girder.utility.progress import ProgressContext
class File(Resource):
"""
API Endpoint for files. Includes utilities for uploading and downloading
them.
"""
def __init__(self):
super().__init__()
self._model = FileModel()
self.resourceName = 'file'
self.route('DELETE', (':id',), self.deleteFile)
        self.route('DELETE', ('upload', ':id'), self.cancelUpload)
self.route('GET', ('offset',), self.requestOffset)
self.route('GET', (':id',), self.getFile)
        self.route('GET', (':id', 'download'), self.download)
self.route('GET', (':id', 'download', ':name'), self.downloadWithName)
self.route('POST', (), self.initUpload)
self.route('POST', ('chunk',), self.readChunk)
self.route('POST', ('completion',), self.finalizeUpload)
self.route('POST', (':id', 'copy'), self.copy)
self.route('PUT', (':id',), self.updateFile)
self.route('PUT', (':id', 'contents'), self.updateFileContents)
self.route('PUT', (':id', 'move'), self.moveFileToAssetstore)
@access.public(scope=TokenScope.DATA_READ)
@filtermodel(model=FileModel)
@autoDescribeRoute(
Description("Get a file's information.")
.modelParam('id', model=FileModel, level=AccessType.READ)
.errorResponse()
.errorResponse('Read access was denied on the file.', 403)
)
def getFile(self, file):
return file
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Start a new upload or create an empty or link file.')
.notes('Use POST /file/chunk to send the contents of the file. '
'The data for the first chunk of the file can be included with '
'this query by sending it as the body of the request using an '
'appropriate content-type and with the other parameters as '
'part of the query string. If the entire file is uploaded via '
'this call, the resulting file is returned.')
.responseClass('Upload')
.param('parentType', 'Type being uploaded into.', enum=['folder', 'item'])
.param('parentId', 'The ID of the parent.')
.param('name', 'Name of the file being created.')
.param('size', 'Size in bytes of the file.', dataType='integer', required=False)
.param('mimeType', 'The MIME type of the file.', required=False)
.param('linkUrl', 'If this is a link file, pass its URL instead '
'of size and mimeType using this parameter.', required=False)
.param('reference', 'If included, this information is passed to the '
'data.process event when the upload is complete.',
required=False)
.param('assetstoreId', 'Direct the upload to a specific assetstore (admin-only).',
required=False)
.errorResponse()
.errorResponse('Write access was denied on the parent folder.', 403)
.errorResponse('Failed to create upload.', 500)
)
def initUpload(self, parentType, parentId, name, size, mimeType, linkUrl, reference,
assetstoreId):
"""
Before any bytes of the actual file are sent, a request should be made
to initialize the upload. This creates the temporary record of the
forthcoming upload that will be passed in chunks to the readChunk
method. If you pass a "linkUrl" parameter, it will make a link file
in the designated parent.
"""
user = self.getCurrentUser()
parent = ModelImporter.model(parentType).load(
id=parentId, user=user, level=AccessType.WRITE, exc=True)
if linkUrl is not None:
return self._model.filter(
self._model.createLinkFile(
url=linkUrl, parent=parent, name=name, parentType=parentType, creator=user,
size=size, mimeType=mimeType), user)
else:
self.requireParams({'size': size})
assetstore = None
if assetstoreId:
self.requireAdmin(
user, message='You must be an admin to select a destination assetstore.')
assetstore = Assetstore().load(assetstoreId)
chunk = None
if size > 0 and cherrypy.request.headers.get('Content-Length'):
ct = cherrypy.request.body.content_type.value
if (ct not in cherrypy.request.body.processors
and ct.split('/', 1)[0] not in cherrypy.request.body.processors):
chunk = RequestBodyStream(cherrypy.request.body)
if chunk is not None and chunk.getSize() <= 0:
chunk = None
try:
# TODO: This can be made more efficient by adding
# save=chunk is None
# to the createUpload call parameters. However, since this is
# a breaking change, that should be deferred until a major
# version upgrade.
upload = Upload().createUpload(
user=user, name=name, parentType=parentType, parent=parent, size=size,
mimeType=mimeType, reference=reference, assetstore=assetstore)
except OSError as exc:
if exc.errno == errno.EACCES:
raise GirderException(
'Failed to create upload.', 'girder.api.v1.file.create-upload-failed')
raise
if upload['size'] > 0:
if chunk:
return Upload().handleChunk(upload, chunk, filter=True, user=user)
return upload
else:
return self._model.filter(Upload().finalizeUpload(upload), user)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Finalize an upload explicitly if necessary.')
.notes('This is only required in certain non-standard upload '
'behaviors. Clients should know which behavior models require '
'the finalize step to be called in their behavior handlers.')
.modelParam('uploadId', paramType='formData', model=Upload)
.errorResponse(('ID was invalid.',
'The upload does not require finalization.',
'Not enough bytes have been uploaded.'))
.errorResponse('You are not the user who initiated the upload.', 403)
)
def finalizeUpload(self, upload):
user = self.getCurrentUser()
if upload['userId'] != user['_id']:
raise AccessException('You did not initiate this upload.')
# If we don't have as much data as we were told would be uploaded and
# the upload hasn't specified it has an alternate behavior, refuse to
# complete the upload.
if upload['received'] != upload['size'] and 'behavior' not in upload:
raise RestException(
'Server has only received %s bytes, but the file should be %s bytes.' %
(upload['received'], upload['size']))
file = Upload().finalizeUpload(upload)
extraKeys = file.get('additionalFinalizeKeys', ())
return self._model.filter(file, user, additionalKeys=extraKeys)
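    # Hedged client-side sketch of the upload protocol implemented above
    # (paths follow the routes registered in __init__ plus Girder's usual
    # /api/v1 prefix; the exact chunk parameters are an assumption, not
    # taken from this file):
    #
    #   1. POST /api/v1/file?parentType=folder&parentId=...&name=f.bin&size=N
    #      -> returns an upload document with an '_id'
    #   2. POST /api/v1/file/chunk, passing the upload id, the current offset
    #      and the raw bytes of the next chunk, until N bytes have been sent
    #   3. POST /api/v1/file/completion with the upload id, needed only for
    #      upload behaviors that require an explicit finalize step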
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Request required offset before resuming an upload.')
.modelParam('uploadId', paramType='formData', model=Upload)
.errorResponse("The ID was invalid, or the offset
|
BeenzSyed/tempest
|
tempest/services/database/json/database_client.py
|
Python
|
apache-2.0
| 1,757
| 0.007399
|
# Copyright 2012 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest.common.rest_client import RestClient
class DatabaseClient(RestClient):
def __init__(self,
config,
username,
password,
auth_url,token_url,
tenant_name=None):
super(DatabaseClient, self).__init__(config,
username,
password,
auth_url,token_url,
tenant_name)
self.service = self.config.database.catalog_type
def list_flavors(self):
resp, body = self.get("flavors")
body = json.loads(body)
return resp, body['flavors']
    def get_instance(self, instanceId, region):
        url = "https://%s.databases.api.rackspacecloud" \
              ".com/v1.0/%s/instances/%s" % (region, self.tenant_name,
                                             instanceId)
        resp, body = self.get(url, region)
        if resp['status'] == '200':
            body = json.loads(body)
        return resp, body
|
ueg1990/flask-restful
|
tests/test_reqparse.py
|
Python
|
bsd-3-clause
| 29,848
| 0.000905
|
# -*- coding: utf-8 -*-
import unittest
from mock import Mock, patch
from flask import Flask
from werkzeug import exceptions, MultiDict
from werkzeug.wrappers import Request
from werkzeug.datastructures import FileStorage
from flask_restful.reqparse import Argument, RequestParser, Namespace
import six
import decimal
import json
class ReqParseTestCase(unittest.TestCase):
def test_default_help(self):
arg = Argument("foo")
self.assertEquals(arg.help, None)
@patch('flask_restful.abort')
def test_help(self, abort):
app = Flask(__name__)
with app.app_context():
parser = RequestParser()
parser.add_argument('foo', choices=['one', 'two'], help='Bad choice')
req = Mock(['values'])
req.values = MultiDict([('foo', 'three')])
parser.parse_args(req)
expected = {'foo': '(Bad choice) three is not a valid choice'}
abort.assert_called_with(400, message=expected)
@patch('flask_restful.abort', side_effect=exceptions.BadRequest('Bad Request'))
def test_no_help(self, abort):
def bad_choice():
parser = RequestParser()
parser.add_argument('foo', choices=['one', 'two'])
req = Mock(['values'])
req.values = MultiDict([('foo', 'three')])
parser.parse_args(req)
abort.assert_called_with(400, message='three is not a valid choice')
app = Flask(__name__)
with app.app_context():
self.assertRaises(exceptions.BadRequest, bad_choice)
def test_name(self):
arg = Argument("foo")
self.assertEquals(arg.name, "foo")
def test_dest(self):
arg = Argument("foo", dest="foobar")
self.assertEquals(arg.dest, "foobar")
def test_location_url(self):
arg = Argument("foo", location="url")
self.assertEquals(arg.location, "url")
def test_location_url_list(self):
arg = Argument("foo", location=["url"])
self.assertEquals(arg.location, ["url"])
def test_location_header(self):
arg = Argument("foo", location="headers")
self.assertEquals(arg.location, "headers")
def test_location_json(self):
arg = Argument("foo", location="json")
self.assertEquals(arg.location, "json")
def test_location_get_json(self):
arg = Argument("foo", location="get_json")
self.assertEquals(arg.location, "get_json")
def test_location_header_list(self):
arg = Argument("foo", location=["headers"])
self.assertEquals(arg.location, ["headers"])
def test_type(self):
arg = Argument("foo", type=int)
self.assertEquals(arg.type, int)
def test_default(self):
arg = Argument("foo", default=True)
self.assertEquals(arg.default, True)
def test_required(self):
arg = Argument("foo", required=True)
self.assertEquals(arg.required, True)
def test_ignore(self):
arg = Argument("foo", ignore=True)
self.assertEquals(arg.ignore, True)
def test_operator(self):
arg = Argument("foo", operators=[">=", "<=", "="])
self.assertEquals(arg.operators, [">=", "<=", "="])
def test_action_filter(self):
arg = Argument("foo", action="filter")
self.assertEquals(arg.action, u"filter")
def test_action(self):
arg = Argument("foo", action="append")
        self.assertEquals(arg.action, u"append")
def test_choices(self):
arg = Argument("foo", choices=[1, 2])
self.assertEquals(arg.choices, [1, 2])
def test_default_dest(self):
arg = Argument("foo")
self.assertEquals(arg.dest, None)
def test_default_operators(self):
arg = Argument("foo")
self.assertEquals(arg.operators[0], "=")
self.assertEquals(len(arg.operators), 1)
@patch('flask_restful.reqparse.six')
def test_default_type(self, mock_six):
arg = Argument("foo")
sentinel = object()
arg.type(sentinel)
mock_six.text_type.assert_called_with(sentinel)
def test_default_default(self):
arg = Argument("foo")
self.assertEquals(arg.default, None)
def test_required_default(self):
arg = Argument("foo")
self.assertEquals(arg.required, False)
def test_ignore_default(self):
arg = Argument("foo")
self.assertEquals(arg.ignore, False)
def test_action_default(self):
arg = Argument("foo")
self.assertEquals(arg.action, u"store")
def test_choices_default(self):
arg = Argument("foo")
self.assertEquals(len(arg.choices), 0)
def test_source(self):
req = Mock(['args', 'headers', 'values'])
req.args = {'foo': 'bar'}
req.headers = {'baz': 'bat'}
arg = Argument('foo', location=['args'])
self.assertEquals(arg.source(req), MultiDict(req.args))
arg = Argument('foo', location=['headers'])
self.assertEquals(arg.source(req), MultiDict(req.headers))
def test_convert_default_type_with_null_input(self):
"""convert() should properly handle case where input is None"""
arg = Argument('foo')
self.assertEquals(arg.convert(None, None), None)
def test_source_bad_location(self):
req = Mock(['values'])
arg = Argument('foo', location=['foo'])
self.assertTrue(len(arg.source(req)) == 0) # yes, basically you don't find it
def test_source_default_location(self):
req = Mock(['values'])
req._get_child_mock = lambda **kwargs: MultiDict()
arg = Argument('foo')
self.assertEquals(arg.source(req), req.values)
def test_option_case_sensitive(self):
arg = Argument("foo", choices=["bar", "baz"], case_sensitive=True)
self.assertEquals(True, arg.case_sensitive)
# Insensitive
arg = Argument("foo", choices=["bar", "baz"], case_sensitive=False)
self.assertEquals(False, arg.case_sensitive)
# Default
arg = Argument("foo", choices=["bar", "baz"])
self.assertEquals(True, arg.case_sensitive)
def test_viewargs(self):
req = Request.from_values()
req.view_args = {"foo": "bar"}
parser = RequestParser()
parser.add_argument("foo", location=["view_args"])
args = parser.parse_args(req)
self.assertEquals(args['foo'], "bar")
req = Mock()
req.values = ()
req.json = None
req.view_args = {"foo": "bar"}
parser = RequestParser()
parser.add_argument("foo", store_missing=True)
args = parser.parse_args(req)
self.assertEquals(args["foo"], None)
def test_parse_unicode(self):
req = Request.from_values("/bubble?foo=barß")
parser = RequestParser()
parser.add_argument("foo")
args = parser.parse_args(req)
self.assertEquals(args['foo'], u"barß")
def test_parse_unicode_app(self):
app = Flask(__name__)
parser = RequestParser()
parser.add_argument("foo")
with app.test_request_context('/bubble?foo=barß'):
args = parser.parse_args()
self.assertEquals(args['foo'], u"barß")
def test_json_location(self):
app = Flask(__name__)
parser = RequestParser()
parser.add_argument("foo", location="json", store_missing=True)
with app.test_request_context('/bubble', method="post"):
args = parser.parse_args()
self.assertEquals(args['foo'], None)
def test_get_json_location(self):
app = Flask(__name__)
parser = RequestParser()
parser.add_argument("foo", location="json")
with app.test_request_context('/bubble', method="post",
data=json.dumps({"foo": "bar"}),
content_type='application/json'):
args = parser.parse_args()
self.assertEquals(args['foo'], 'bar')
def test_parse_append_ignore(self):
req = Request.from_values("/bubble?foo=bar")
parser = RequestParser()
parser.add_argument("foo", ignore=True, typ
|
quantmind/lux
|
lux/ext/odm/fields.py
|
Python
|
bsd-3-clause
| 3,012
| 0
|
from pulsar.utils.log import lazyproperty
from lux.models import fields
from lux.utils.data import as_tuple
from sqlalchemy.orm.exc import NoResultFound
def get_primary_keys(model):
"""Get primary key properties for a SQLAlchemy model.
:param model: SQLAlchemy model class
"""
mapper = model.__mapper__
return tuple((
mapper.get_property_by_column(column)
for column in mapper.primary_key
))
class Related(fields.Field):
"""Related data represented by a SQLAlchemy `relationship`.
Must be attached to a Schema class whose options includes
a SQLAlchemy model.
:param list columns: Optional column names on related model.
If not provided, the primary key(s) of the related model
will be used.
"""
default_error_messages = {
'invalid': 'Could not deserialize related value {value!r}; '
'expected a dictionary with keys {keys!r}'
}
def __init__(self, column=None, **kwargs):
super().__init__(**kwargs)
self.columns = as_tuple(column)
@property
def related_model(self):
model = self.root.model
field = getattr(model.db_model, self.attribute or self.name)
return field.property.mapper.class_
@property
def session(self):
return self.root.session
@lazyproperty
def related_keys(self):
if self.columns:
return tuple((
self.related_model.__mapper__.columns[column]
for column in self.columns
))
return get_primary_keys(self.related_model)
def _serialize(self, value, attr, obj):
ret = {
prop.key: getattr(value, prop.key, None)
for prop in self.related_keys
}
return ret if len(ret) > 1 else list(ret.values())[0]
def _deserialize(self, value, *args, **kwargs):
if not isinstance(value, dict):
if len(self.related_keys) != 1:
self.fail(
'invalid',
value=value,
                keys=[prop.key for prop in self.related_keys]
)
value = {self.related_keys[0].key: value}
query = self.session.query(self.related_model)
try:
if self.columns:
result = query.filter_by(**{
prop.key: value.get(prop.key)
for prop in self.related_keys
}).one()
else:
# Use a faster path if the related key is the primary key.
result = query.get([
value.get(prop.key) for prop in self.related_keys
])
if result is None:
raise NoResultFound
except NoResultFound:
# The related-object DNE in the DB
# but we still want to deserialize it
# ...perhaps we want to add it to the DB later
return self.related_model(**value)
return result
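# Hedged usage sketch (schema and model names are hypothetical, not from the
# original source): on a schema bound to a Book model with an `author`
# relationship whose target has a single-column primary key,
#
#   author = Related()
#
# serializes to the related primary key value (e.g. 42) and deserializes 42
# back to the Author row via query.get(), while Related(column='name') would
# instead match on Author.name with filter_by(name=...).one().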
|
USGSDenverPychron/pychron
|
pychron/processing/unmix.py
|
Python
|
apache-2.0
| 4,643
| 0.001077
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import matplotlib.pyplot as plt
# ============= enthought library imports =======================
# from __future__ import absolute_import
# from __future__ import print_function
# import csv
#
# import numpy as np
# from six.moves import range
# from six.moves import zip
# ============= standard library imports ========================
# ============= local library imports ==========================
from numpy import exp, pi, sqrt, hstack, arange
#
# def unmix(ages, errors, initial_guess):
# ages_errors = list(zip(ages, errors))
#
# ts = initial_guess[0]
# pis = initial_guess[1]
#
# niterations = 20
# for _ in range(niterations):
# tis_n = []
# pis_n = []
# for pi, tj in zip(pis, ts):
# pn, tn = _unmix(ages_errors, pi, tj, pis, ts)
# tis_n.append(tn)
# pis_n.append(pn)
# pis = pis_n
# ts = tis_n
# # print ts, pis
# return ts, pis
#
#
# def _unmix(ages_errors, pi_j, tj_o, pis, ts):
# n = len(ages_errors)
# s = sum([pi_j * fij(ai_ei, tj_o) / Si(pis, ai_ei, ts)
# for ai_ei in ages_errors])
#
# pi_j = 1 / float(n) * s
#
# a = sum([pi_j * ai_ei[0] * fij(ai_ei, tj_o) / (ai_ei[1] ** 2 * Si(pis, ai_ei, ts))
# for ai_ei in ages_errors])
# b = sum([pi_j * fij(ai_ei, tj_o) / (ai_ei[1] ** 2 * Si(pis, ai_ei, ts))
# for ai_ei in ages_errors])
# tj = a / b
# return pi_j, tj
#
#
# def fij(ai_ei, tj):
# ai, ei = ai_ei
# return 1 / (ei * (2 * np.pi) ** 0.5) * np.exp(-(ai - tj) ** 2 / (2 * ei ** 2))
#
#
# def Si(pis, ai_ei, ts):
# return sum([pik * fij(ai_ei, tk) for pik, tk in zip(pis, ts)])
from numpy.random.mtrand import normal
def unmix(ages, ps, ts):
"""
    ages = list of 2-tuples (age, 1sigma)
:param ages:
:param ps:
:param ts:
:return:
"""
niterations = 20
for _ in range(niterations):
tis_n = []
pis_n = []
for pi, ti in zip(ps, ts):
pn, tn = tj(ages, pi, ti, ps, ts)
tis_n.append(tn)
pis_n.append(pn)
ps = pis_n
ts = tis_n
return ps, ts
def si(ai, ei, ps, ts):
return sum([pk * fij(ai, ei, tk) for pk, tk in zip(ps, ts)])
def tj(ages, pj, to, ps, ts):
n = len(ages)
pj = 1 / n * sum([pj * fij(ai, ei, to) / si(ai, ei, ps, ts) for ai, ei in ages])
a = [pj * ai * fij(ai, ei, to) / (ei ** 2 * si(ai, ei, ps, ts)) for ai, ei in ages]
b = [pj * fij(ai, ei, to) / (ei ** 2 * si(ai, ei, ps, ts)) for ai, ei in ages]
return pj, sum(a) / sum(b)
def fij(ai, ei, tj):
return 1 / (ei * sqrt(2 * pi)) * exp(-((ai - tj) ** 2) / (2 * ei ** 2))
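# Sanity check (editorial note): fij is the normal density N(tj, ei**2)
# evaluated at ai, e.g. fij(35.0, 0.1, 35.0) == 1 / (0.1 * sqrt(2 * pi)),
# which is about 3.989.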
if __name__ == "__main__":
# [35.27,36.27] [0.59, 0.41]
# p = '/Users/ross/Sandbox/unmix_data.txt'
# with open(p, 'U') as rfile:
# reader = csv.reader(rfile, delimiter='\t')
# ages, errors = [], []
#
# for line in reader:
# age = float(line[0])
# error = float(line[1])
# ages.append(age)
# errors.append(error)
# a = np.random.normal(35, 1, 10)
# b = np.random.normal(35, 1, 10)
# c = np.random.normal(35, 1, 10)
# for ai, aj, ak in zip(a, b, c):
# ps = np.random.random_sample(3)
# t = ps.sum()
# ps = ps / t
#
# initial_guess = [[ai, aj, ak], ps]
# # print 'initial', initial_guess
# # initial_guess = [[30, 40], [0.9, 0.1]]
# print(unmix(ages, errors, initial_guess))
a = normal(35, 0.1, 10)
b = normal(35.5, 0.1, 10)
    errors = [0.1] * 20
    # unmix expects ages as a list of (age, 1sigma) pairs
    ages = list(zip(hstack((a, b)), errors))
ts = [35, 35.5]
ps = [0.9, 0.1]
plt.plot(sorted(a), arange(10), "bo")
plt.plot(sorted(b), arange(10, 20, 1), "ro")
    print(unmix(ages, ps, ts))
plt.show()
# ============= EOF =============================================
|
kevinwuhoo/randomcolor-py
|
tests/test_randomcolor_visual.py
|
Python
|
mit
| 2,617
| 0.001911
|
import randomcolor
import random
def main():
hues = ['red', 'orange', 'yellow', 'green', 'blue', 'purple', 'pink',
'monochrome', 'random']
luminosities = ['bright', 'light', 'dark', 'random']
formats = ['rgb', 'hex']
colors = []
rand_color = randomcolor.RandomColor(42)
rand = random.Random(42)
rand_int = lambda: rand.randint(4, 10)
colors.append(('one random color', rand_color.generate()))
i = rand_int()
colors.append((
"%d random colors" % i,
rand_color.generate(count=i)
))
# test all hues
for hue in hues:
i = rand_int()
        colors.append((
            "%d random colors with %s hue" % (i, hue),
rand_color.generate(hue=hue, count=i)
))
# test all luminosities
for luminosity in luminosities:
i = rand_int()
colors.append((
"%d random colors with %s luminosity" % (i, luminosity),
rand_color.generate(luminosity=luminosity, count=i)
))
# test random combinations
for _ in range(50):
i = rand_int()
        # use the seeded RNG so the output is reproducible
        hue = rand.choice(hues)
        luminosity = rand.choice(luminosities)
        format_ = rand.choice(formats)
colors.append((
"%d random colors with %s hue, %s luminosity, and %s format"
% (i, hue, luminosity, format_),
rand_color.generate(hue=hue, luminosity=luminosity,
format_=format_, count=i)
))
color_rows = colors_to_rows(colors)
html = generate_html(color_rows)
with open('randomcolors.html', 'w') as f:
f.write(html)
def colors_to_rows(colors):
s = ""
for color_name, colors in colors:
s += "<tr>"
s += "<td>%s</td>" % (color_name)
s += "<td>"
for color in colors:
s += "<div class='color' style='background-color:%s'></div>" % color
s += "</td>"
s += "</tr>"
return s
def generate_html(table_rows):
return """
<!DOCTYPE html>
<html lang="en">
<head>
<title>randomcolor test</title>
<link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css" rel="stylesheet">
<style>
.color {
height: 30px;
width: 30px;
border-radius: 30px;
display: inline-block;
}
</style>
</head>
<body>
<div class="container">
<div class="row col-md-10 col-md-offset-1">
<h1>Random Color Test</h1>
<table class="table">
%s
</table>
</div>
</body>
</html>
""" % table_rows
if __name__ == "__main__":
main()
|
TeamEOS/external_chromium_org
|
tools/perf/metrics/system_memory.py
|
Python
|
bsd-3-clause
| 4,647
| 0.006025
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from metrics import memory
from metrics import Metric
class SystemMemoryMetric(Metric):
"""SystemMemoryMetric gathers system memory statistic.
This metric collects system memory stats per test. It reports the difference
(delta) in system memory starts from the start of the test to the end of it.
"""
def __init__(self, browser):
super(SystemMemoryMetric, self).__init__()
self._browser = browser
self._memory_stats_start = None
self._memory_stats_end = None
def Start(self, page, tab):
"""Start the per-page preparation for this metric.
Records the system memory stats at this point.
"""
    self._memory_stats_start = self._browser.memory_stats
def Stop(self, page, tab):
"""Prepare the re
|
sults for this page.
The results are the differences between the current system memory stats
and the values when Start() was called.
"""
assert self._memory_stats_start, 'Must call Start() first'
self._memory_stats_end = self._browser.memory_stats
# |trace_name| and |exclude_metrics| args are not in base class Metric.
# pylint: disable=W0221
def AddResults(self, tab, results, trace_name=None, exclude_metrics=None):
"""Add results for this page to the results object.
Reports the delta in memory stats between the start stats and the end stats
(as *_delta metrics). It reports end memory stats in case no matching start
memory stats exists.
Args:
trace_name: Trace name to identify the summary results for current page.
exclude_metrics: List of memory metrics to exclude from results,
e.g. VM, VMPeak, etc. See AddResultsForProcesses().
"""
assert self._memory_stats_end, 'Must call Stop() first'
memory_stats = _SubtractMemoryStats(self._memory_stats_end,
self._memory_stats_start)
if not memory_stats['Browser']:
return
exclude_metrics = exclude_metrics or {}
memory.AddResultsForProcesses(results, memory_stats,
metric_trace_name=trace_name, chart_trace_name='delta',
exclude_metrics=exclude_metrics)
if 'SystemCommitCharge' not in exclude_metrics:
results.Add(trace_name or 'commit_charge', 'kb',
memory_stats['SystemCommitCharge'],
chart_name='commit_charge_delta',
data_type='unimportant')
if 'ProcessCount' not in exclude_metrics:
results.Add(trace_name or 'processes', 'count',
memory_stats['ProcessCount'],
chart_name='processes_delta',
data_type='unimportant')
def _SubtractMemoryStats(end_memory_stats, start_memory_stats):
"""Computes the difference in memory usage stats.
Each of the two stats arguments is a dict with the following format:
{'Browser': {metric: value, ...},
'Renderer': {metric: value, ...},
'Gpu': {metric: value, ...},
'ProcessCount': value,
etc
}
The metrics can be VM, WorkingSetSize, ProportionalSetSize, etc depending on
the platform/test.
NOTE: The only metrics that are not subtracted from original are the *Peak*
memory values.
Returns:
A dict of process type names (Browser, Renderer, etc.) to memory usage
metrics between the end collected stats and the start collected stats.
"""
memory_stats = {}
end_memory_stats = end_memory_stats or {}
start_memory_stats = start_memory_stats or {}
for process_type in end_memory_stats:
memory_stats[process_type] = {}
end_process_memory = end_memory_stats[process_type]
if not end_process_memory:
continue
# If a process has end stats without start stats then report the end stats.
# For example, a GPU process that started just after media playback.
if (process_type not in start_memory_stats or
not start_memory_stats[process_type]):
memory_stats[process_type] = end_process_memory
continue
if not isinstance(end_process_memory, dict):
start_value = start_memory_stats[process_type] or 0
memory_stats[process_type] = end_process_memory - start_value
else:
for metric in end_process_memory:
end_value = end_process_memory[metric]
start_value = start_memory_stats[process_type][metric] or 0
if 'Peak' in metric:
memory_stats[process_type][metric] = end_value
else:
memory_stats[process_type][metric] = end_value - start_value
return memory_stats
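# Worked example (illustrative values, not part of the original file):
#   start = {'Browser': {'VM': 100, 'VMPeak': 120}, 'ProcessCount': 2}
#   end   = {'Browser': {'VM': 150, 'VMPeak': 180}, 'ProcessCount': 3}
# _SubtractMemoryStats(end, start) returns
#   {'Browser': {'VM': 50, 'VMPeak': 180}, 'ProcessCount': 1}
# VM and ProcessCount are deltas; VMPeak is reported as-is per the Peak rule.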
|
supermeng/lain-sdk
|
tests/test_lain_conf.py
|
Python
|
mit
| 81,042
| 0.000913
|
# -*- coding: utf-8 -*-
import json
import yaml
import pytest
from unittest import TestCase
from lain_sdk.yaml.parser import (
LainConf, ProcType, Proc,
just_simple_scale,
render_resource_instance_meta, DEFAULT_SYSTEM_VOLUMES,
DOMAIN,
MIN_SETUP_TIME, MAX_SETUP_TIME, MIN_KILL_TIMEOUT, MAX_KILL_TIMEOUT
)
FIXTURES_EXTRA_DOMAINS = ['extra.domain1.com', 'extra.domain2.org']
class LainConfUtilsTests(TestCase):
def test_just_simple_scale(self):
assert just_simple_scale('cpu', Proc)
assert not just_simple_scale('cmd', Proc)
class LainConfTests(TestCase):
def test_lain_conf_without_appname(self):
meta_yaml = '''
build:
base: golang
prepare:
- echo prepare1
- echo prepare2
script:
- echo buildscript1
- echo buildscript2
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
test:
script:
- go test
web:
cmd: hello
port: 80
env:
- ENV_A=enva
- ENV_B=envb
volumes:
- /data
- /var/lib/mysql
notify:
slack: "#hello"
'''
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
hello_conf = LainConf()
with pytest.raises(Exception) as e:
hello_conf.load(meta_yaml, meta_version, None)
assert 'invalid lain conf: no appname' in str(e.value)
def test_lain_conf_smoke(self):
meta_yaml = '''
appname: hello
build:
base: golang
prepare:
- echo prepare1
- echo prepare2
script:
- echo buildscript1
- echo buildscript2
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
test:
script:
- go test
web:
cmd: hello
port: 80
memory: 64m
env:
- ENV_A=enva
- ENV_B=envb
volumes:
- /data
- /var/lib/mysql
web.bar:
cmd: bar
port: 8080
mountpoint:
- a.com
- b.cn/xyz
https_only: false
worker.foo:
cmd: worker
memory: 128m
notify:
slack: "#hello"
'''
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
hello_conf = LainConf()
hello_conf.load(meta_yaml, meta_version, None)
assert hello_conf.appname == 'hello'
assert hello_conf.procs['web'].env == ['ENV_A=enva', 'ENV_B=envb']
assert hello_conf.procs['web'].memory == '64m'
assert hello_conf.procs['web'].user == ''
assert hello_conf.procs['web'].working_dir == ''
assert hello_conf.procs['web'].dns_search == ['hello.lain']
assert hello_conf.procs['web'].volumes == ['/data', '/var/lib/mysql']
assert hello_conf.procs['web'].port[80].port == 80
assert hello_conf.procs['web'].stateful is False
assert hello_conf.procs['foo'].memory == '128m'
assert hello_conf.procs['foo'].cmd == ['worker']
assert hello_conf.procs['foo'].type == ProcType.worker
assert hello_conf.procs['bar'].cmd == ['bar']
assert hello_conf.procs['bar'].type == ProcType.web
assert hello_conf.procs['bar'].mountpoint == ['a.com', 'b.cn/xyz']
assert hello_conf.procs['bar'].https_only is False
def test_lain_conf_notify_slack(self):
meta_yaml = '''
appname: hello
build:
base: golang
script:
- echo buildscript1
- echo buildscript2
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
web:
cmd: hello
port: 80
notify:
slack: "#hello"
'''
repo_name = 'lain/hello'
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
hello_conf = LainConf()
hello_conf.load(meta_yaml, repo_name, meta_version)
assert hello_conf.appname == 'hello'
assert hello_conf.notify == {'slack': '#hello'}
def test_lain_conf_notify_missing(self):
meta_yaml = '''
appname: hello
build:
base: golang
script:
- echo buildscript1
- echo buildscript2
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
web:
cmd: hello
port: 80
'''
repo_name = 'lain/hello'
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
hello_conf = LainConf()
hello_conf.load(meta_yaml, repo_name, meta_version)
assert hello_conf.appname == 'hello'
assert hello_conf.notify == {}
def test_lain_conf_empty_cmd(self):
meta_yaml = '''
appname: hello
build:
base: golang
prepare:
- echo prepare1
- echo prepare2
script:
- echo buildscript1
- echo buildscript2
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
test:
script:
- go test
web:
cmd:
notify:
slack: "#hello"
'''
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        hello_conf.load(meta_yaml, meta_version, None)
assert hello_conf.appname == 'hello'
assert hello_conf.procs['web'].port[80].port == 80
assert hello_conf.procs['web'].cmd == []
def test_lain_conf_port_with_type(self):
meta_yaml = ''
|
google/mirandum
|
alerts/ytsubs/migrations/0004_migrate_updater.py
|
Python
|
apache-2.0
| 1,359
| 0.004415
|
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from django.db import models, migrations
def migrate_updater(apps, schema_editor):
SubEvent = apps.get_model("ytsubs", "SubEvent")
    UpdaterEvent = apps.get_model("main", "UpdaterEvent")
for event in SubEvent.objects.all():
try:
ue = UpdaterEvent.objects.get(pk=event.updaterevent_ptr_id)
ue.base_updater = event.updater.updater_ptr
ue.save()
except Exception:
pass
class Migration(migrations.Migration):
dependencies = [
('main', '0009_updaterevent_base_updater'),
('ytsubs', '0003_auto_20160418_0455'),
]
operations = [
migrations.RunPython(migrate_updater)
]
|
thundernet8/WRGameVideos-Server
|
app/auth/views.py
|
Python
|
gpl-2.0
| 2,080
| 0.002404
|
# coding=utf-8
import os
import time
import urllib
import urllib2
import random
import string
import json
from flask import url_for
from flask import redirect
from flask import request
from flask import session
from flask import flash
from flask import current_app
from flask.ext.login import login_user
from . import auth
from ..models import generate_sign
from ..models import User
from .. import main
@auth.route('/login')
def login():
app_key = os.getenv('APP_KEY')
app_secret = os.getenv('APP_SECRET')
redirect_url = url_for('auth.login', _external=True)
if request.args.get('code') and request.args.get('state'):
code = request.args.get('code')
state = request.args.get('state')
if state != session['state']:
flash('unmatched state')
            return redirect(url_for('main.index'))
query = dict(grant_type='authorization_code', client_id=app_key, code=code, redirect_url=redirect_url)
url = (current_app.config['API_SERVER_OPEN_URL'] or 'http://api.parser.cc/open/v1.0/')+'user_token?'+urllib.urlencode(query)
req = urllib2.Request(url)
response = urllib2.urlopen(req)
f = response.read()
if f:
data = json.loads(f)
user = User.insert_social_user(data.get('open_id'), data.get('nickname'), data.get('access_token'))
login_user(user, True)
flash('you have logged successfully')
return redirect(url_for('main.index'))
else:
flash('login failed, please retry')
return redirect(url_for('main.index'))
timestamp = int(time.time())
sign = generate_sign(app_key, app_secret, timestamp, redirect_url)
    state = ''.join(random.choice(string.digits) for _ in range(10))
session['state'] = state
query = dict(response_type='code', client_id=app_key, tstamp=timestamp, sign=sign, state=state, redirect_url=redirect_url)
    url = (current_app.config['API_SERVER_OPEN_URL'] or 'http://api.parser.cc/open/v1.0/')+'authorize?'+urllib.urlencode(query)
return redirect(url)
|
diogo149/Lasagne
|
lasagne/theano_extensions/conv.py
|
Python
|
mit
| 8,695
| 0.006325
|
"""
Alternative convolution implementations for Theano
"""
import numpy as np
import theano
import theano.tensor as T
## 1D convolutions
def conv1d_sc(input, filters, image_shape=None, filter_shape=None, border_mode='valid', subsample=(1,)):
"""
using conv2d with a single input channel
"""
if border_mode != 'valid':
raise RuntimeError("Unsupported border_mode for conv1d_sc: %s" % border_mode)
if image_shape is None:
image_shape_sc = None
else:
image_shape_sc = (image_shape[0], 1, image_shape[1], image_shape[2]) # (b, c, i0) to (b, 1, c, i0)
if filter_shape is None:
filter_shape_sc = None
else:
filter_shape_sc = (filter_shape[0], 1, filter_shape[1], filter_shape[2])
input_sc = input.dimshuffle(0, 'x', 1, 2)
# We need to flip the channels dimension because it will be convolved over.
filters_sc = filters.dimshuffle(0, 'x', 1, 2)[:, :, ::-1, :]
conved = T.nnet.conv2d(input_sc, filters_sc, image_shape=image_shape_sc, filter_shape=filter_shape_sc, subsample=(1, subsample[0]))
return conved[:, :, 0, :] # drop the unused dimension
def conv1d_mc0(input, filters, image_shape=None, filter_shape=None, border_mode='valid', subsample=(1,)):
"""
using conv2d with width == 1
"""
if image_shape is None:
image_shape_mc0 = None
else:
image_shape_mc0 = (image_shape[0], image_shape[1], 1, image_shape[2]) # (b, c, i0) to (b, c, 1, i0)
if filter_shape is None:
filter_shape_mc0 = None
else:
filter_shape_mc0 = (filter_shape[0], filter_shape[1], 1, filter_shape[2])
input_mc0 = input.dimshuffle(0, 1, 'x', 2)
filters_mc0 = filters.dimshuffle(0, 1, 'x', 2)
conved = T.nnet.conv2d(
input_mc0, filters_mc0, image_shape=image_shape_mc0,
filter_shape=filter_shape_mc0, subsample=(1, subsample[0]),
border_mode=border_mode)
return conved[:, :, 0, :] # drop the unused dimension
def conv1d_mc1(input, filters, image_shape=None, filter_shape=None, border_mode='valid', subsample=(1,)):
"""
using conv2d with height == 1
"""
if image_shape is None:
image_shape_mc1 = None
else:
image_shape_mc1 = (image_shape[0], image_shape[1], image_shape[2], 1) # (b, c, i0) to (b, c, i0, 1)
if filter_shape is None:
filter_shape_mc1 = None
else:
filter_shape_mc1 = (filter_shape[0], filter_shape[1], filter_shape[2], 1)
input_mc1 = input.dimshuffle(0, 1, 2, 'x')
filters_mc1 = filters.dimshuffle(0, 1, 2, 'x')
conved = T.nnet.conv2d(
input_mc1, filters_mc1, image_shape=image_shape_mc1,
filter_shape=filter_shape_mc1, subsample=(subsample[0], 1),
border_mode=border_mode)
return conved[:, :, :, 0] # drop the unused dimension
def conv1d_unstrided(input, filters, image_shape, filter_shape, border_mode='valid', subsample=(1,), implementation=conv1d_sc):
"""
perform a strided 1D convolution by reshaping input and filters so that the stride becomes 1.
This function requires that the filter length is a multiple of the stride.
It also truncates the input to have a length that is a multiple of the stride.
"""
batch_size, num_input_channels, input_length = image_shape
num_filters, num_input_channels_, filter_length = filter_shape
stride = subsample[0]
if filter_length % stride > 0:
raise RuntimeError("Filter length (%d) is not a multiple of the stride (%d)" % (filter_length, stride))
assert border_mode == 'valid' # TODO: test if this works for border_mode='full'
num_steps = filter_length // stride
# input sizes need to be multiples of the strides, truncate to correct sizes.
truncated_length = (input_length // stride) * stride
input_truncated = input[:, :, :truncated_length]
r_input_shape = (batch_size, num_input_channels, truncated_length // stride, stride)
r_input = input_truncated.reshape(r_input_shape)
# fold strides into the feature maps dimension (input)
r_input_folded_shape = (batch_size, num_input_channels * stride, truncated_length // stride)
r_input_folded = r_input.dimshuffle(0, 1, 3, 2).reshape(r_input_folded_shape)
r_filter_shape = (num_filters, num_input_channels, num_steps, stride)
r_filters_flipped = filters[:, :, ::-1].reshape(r_filter_shape)
# fold strides into the feature maps dimension (filters)
r_filter_folded_shape = (num_filters, num_input_channels * stride, num_steps)
r_filters_flipped_folded = r_filters_flipped.dimshuffle(0, 1, 3, 2).reshape(r_filter_folded_shape)
r_filters_folded = r_filters_flipped_folded[:, :, ::-1] # unflip
return implementation(r_input_folded, r_filters_folded, r_input_folded_shape, r_filter_folded_shape, border_mode, subsample=(1,))
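# Shape walk-through for conv1d_unstrided (editorial example, assuming
# batch_size=b, num_input_channels=c, input_length=10, filter_length=6,
# stride=2): num_steps = 3, the input is truncated to length 10 and folded to
# (b, 2*c, 5), the flipped filters are folded to (num_filters, 2*c, 3), and a
# stride-1 valid conv over length 5 with length-3 filters yields 3 outputs,
# matching (10 - 6 + 2) // 2 = 3 for the original strided convolution.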
def conv1d_sd(input, filters, image_shape, filter_shape, border_mode='valid', subsample=(1,)):
"""
using a single dot product
"""
if border_mode != 'valid':
raise RuntimeError("Unsupported border_mode for conv1d_sd: %s" % border_mode)
batch_size, num_input_channels, input_length = image_shape
num_filters, num_input_channels_, filter_length = filter_shape
stride = subsample[0]
if filter_length % stride > 0:
raise RuntimeError("Filter length (%d) is not a multiple of the stride (%d)" % (f
|
ilter_length, stride))
num_steps = filter_length // stride
output_length = (input_length - filter_length + stride) // stride
    # pad the input so all the shifted dot products fit inside. shape is (b, c, l)
padded_length = (input_length // filter_length) * filter_length + (num_steps - 1) * stride
# at this point, it is possible that the padded_length is SMALLER than the input size.
# so then we have to truncate first.
truncated_length = min(input_length, padded_length)
input_truncated = input[:, :, :truncated_length]
input_padded_shape = (batch_size, num_input_channels, padded_length)
input_padded = T.zeros(input_padded_shape)
input_padded = T.set_subtensor(input_padded[:, :, :truncated_length], input_truncated)
inputs = []
for num in range(num_steps):
shift = num * stride
length = (padded_length - shift) // filter_length
r_input_shape = (batch_size, num_input_channels, length, filter_length)
r_input = input_padded[:, :, shift:length * filter_length + shift]
r_input = r_input.reshape(r_input_shape)
inputs.append(r_input)
inputs_stacked = T.stack(*inputs) # shape is (n, b, c, w, f)
filters_flipped = filters[:, :, ::-1]
r_conved = T.tensordot(inputs_stacked, filters_flipped, np.asarray([[2, 4], [1, 2]]))
# resulting shape is (n, b, w, n_filters)
# output needs to be (b, n_filters, w * n)
r_conved = r_conved.dimshuffle(1, 3, 2, 0) # (b, n_filters, w, n)
conved = r_conved.reshape((r_conved.shape[0], r_conved.shape[1], r_conved.shape[2] * r_conved.shape[3]))
# result is (b, n_f, l)
# remove padding
return conved[:, :, :output_length]
def conv1d_md(input, filters, image_shape, filter_shape, border_mode='valid', subsample=(1,)):
"""
using multiple dot products
"""
if border_mode != 'valid':
raise RuntimeError("Unsupported border_mode for conv1d_md: %s" % border_mode)
batch_size, num_input_channels, input_length = image_shape
num_filters, num_input_channels_, filter_length = filter_shape
stride = subsample[0]
if filter_length % stride > 0:
raise RuntimeError("Filter length (%d) is not a multiple of the stride (%d)" % (filter_length, stride))
num_steps = filter_length // stride
output_length = (input_length - filter_length + stride) // stride
output_shape = (batch_size, num_filters, output_length)
filters_flipped = filters[:, :, ::-1]
conved = T.zeros(output_shape)
for num in range(num_steps):
shift = num * stride
length = (input_length - shift) // filter_length
if length == 0:
# we can safely skip this product, it doesn't contribute to the final convolution.
continue
r_input_shape = (b
|
eljefe6a/kafka
|
tests/kafkatest/tests/upgrade_test.py
|
Python
|
apache-2.0
| 3,776
| 0.002913
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService
from kafkatest.services.kafka.version import LATEST_0_8_2, TRUNK
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.console_consumer import ConsoleConsumer, is_int
from kafkatest.services.kafka import config_property
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
class TestUpgrade(ProduceConsumeValidateTest):
def __init__(self, test_context):
super(TestUpgrade, self).__init__(test_context=test_context)
def setUp(self):
self.topic = "test_topic"
self.zk = ZookeeperService(self.test_context, num_nodes=1)
self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=LATEST_0_8_2, topics={self.topic: {
"partitions": 3,
"replication-factor": 3,
"min.insync.replicas": 2}})
self.zk.start()
self.kafka.start()
        # Producer and consumer
self.producer_throughput = 10000
self.num_producers = 1
self.num_consumers = 1
self.producer = VerifiableProducer(
self.test_context, self.num_producers, self.kafka, self.topic,
throughput=self.producer_throughput, version=LATEST_0_8_2)
# TODO - reduce the timeout
self.consumer = ConsoleConsumer(
self.test_context, self.num_consumers, self.kafka, self.topic,
consumer_timeout_ms=30000, message_validator=is_int, version=LATEST_0_8_2)
def perform_upgrade(self):
self.logger.info("First pass bounce - rolling upgrade")
for node in self.kafka.nodes:
self.kafka.stop_node(node)
node.version = TRUNK
node.config[config_property.INTER_BROKER_PROTOCOL_VERSION] = "0.8.2.X"
self.kafka.start_node(node)
self.logger.info("Second pass bounce - remove inter.broker.protocol.version config")
for node in self.kafka.nodes:
self.kafka.stop_node(node)
del node.config[config_property.INTER_BROKER_PROTOCOL_VERSION]
self.kafka.start_node(node)
def test_upgrade(self):
"""Test upgrade of Kafka broker cluster from 0.8.2 to 0.9.0
- Start 3 node broker cluster on version 0.8.2
- Start producer and consumer in the background
- Perform two-phase rolling upgrade
- First phase: upgrade brokers to 0.9.0 with inter.broker.protocol.version set to 0.8.2.X
- Second phase: remove inter.broker.protocol.version config with rolling bounce
- Finally, validate that every message acked by the producer was consumed by the consumer
"""
self.run_produce_consume_validate(core_test_action=self.perform_upgrade)
|
kamiben/starred
|
item.py
|
Python
|
mit
| 1,978
| 0.039939
|
import datetime
class Item(object):
def __init__(self, item):
self.title = item['title'].encode("utf-8")
self.url = self.get_starred_url(item)
self.content = self.get_content(item)
self.datestamp = item['published']
self.date = datetime.datetime.fromtimestamp(self.datestamp).strftime('%m-%Y')
def genstring_web(self):
formed_url = "{1} <small><a href=\"{0}\">Direct link</a></small></br>\n".format(self.url, self.title)
# Need to implement Lazy load before being able to add the content to the main page.
#http://twitter.github.io/bootstrap/javascript.html#collapse
#http://antjanus.com/blog/web-design-tips/user-interface-usability/customize-twitter-bootstrap-into-themes/
#https://github.com/twitter/bootstrap/issues/5796
#formed_url += "<div id=\"demo\" class=\"collapse\"> {0} </div>".format(self.content)
return formed_url
def genstring_csv(self):
date = datetime.datetime.fromtimestamp(self.datestamp).strftime('%Y-%m-%d %H:%M:%S')
return [self.title, self.url, date]
def get_starred_url(self, item):
# error handling in case canonical is not defined for the specific element
try:
return item['canonical'][0]['href'].encode("utf-8")
except KeyError:
return item['alternate'][0]['href'].encode("utf-8")
def get_content(self, item):
# error handling in case the content is not defined and is in summary instead
try:
return item['content']['content'].encode("utf-8")
except KeyError:
return item['summary']['content'].encode("utf-8")
class ItemList(object):
def __init__(self):
self.list = []
def add(self, item):
self.list.append(item)
def date_equal(self, item, date):
# check if date is equal to item.date, returns true or false. For usage in grab_date
return item.date == date
def grab_date(self, date):
# Filter the Item list with the provided date (date_equal = true)
# Returns an array of items
return [ x for x in self.list if self.date_equal(x, date) ]
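# A minimal usage sketch with a hand-built feed entry; the field names
# ('title', 'published', 'alternate', 'summary') are the ones the accessors
# above read, and the values are made up for illustration:
#   entry = {
#       'title': u'Example post',
#       'published': 1357041600,  # a January 2013 timestamp
#       'alternate': [{'href': u'http://example.com/post'}],
#       'summary': {'content': u'<p>Hello</p>'},
#   }
#   items = ItemList()
#   items.add(Item(entry))
#   print(items.grab_date('01-2013'))  # -> the one item published in 01-2013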
|
leaot/Huffpost-Articles-in-Twitter-Trends
|
Python/article_engin.py
|
Python
|
mit
| 314
| 0.003185
|
# collect all the articles' titles in a list from different sections
from search_articles import get_articles_name
def get_articles(sections):
i = 1
title_list = []
for section in sections:
for title in get_articles_name(str(section)):
title_list += [title]
return title_list
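# A minimal usage sketch, assuming get_articles_name(section) yields headline
# strings for a named section (the section names here are hypothetical):
#   titles = get_articles(['politics', 'technology'])
#   print(titles[:5])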
|
cricketclubucd/davisdragons
|
platform-tools/systrace/catapult/common/py_trace_event/py_trace_event/trace_time.py
|
Python
|
mit
| 7,242
| 0.009804
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ctypes
import ctypes.util
import logging
import os
import platform
import sys
import time
import threading
GET_TICK_COUNT_LAST_NOW = 0
# If the current sample is less than GET_TICK_COUNT_LAST_NOW, the clock has
# rolled over, and this needs to be accounted for.
GET_TICK_COUNT_WRAPAROUNDS = 0
# The current detected platform
_CLOCK = None
_NOW_FUNCTION = None
# Mapping of supported platforms and what is returned by sys.platform.
_PLATFORMS = {
'mac': 'darwin',
'linux': 'linux',
'windows': 'win32',
'cygwin': 'cygwin',
'freebsd': 'freebsd',
'sunos': 'sunos5',
'bsd': 'bsd'
}
# Mapping of what to pass get_clocktime based on platform.
_CLOCK_MONOTONIC = {
'linux': 1,
'freebsd': 4,
'bsd': 3,
'sunos5': 4
}
_LINUX_CLOCK = 'LINUX_CLOCK_MONOTONIC'
_MAC_CLOCK = 'MAC_MACH_ABSOLUTE_TIME'
_WIN_HIRES = 'WIN_QPC'
_WIN_LORES = 'WIN_ROLLOVER_PROTECTED_TIME_GET_TIME'
def InitializeMacNowFunction(plat):
"""Sets a monotonic clock for the Mac platform.
Args:
plat: Platform that is being run on. Unused in GetMacNowFunction. Passed
for consistency between initializers.
"""
del plat # Unused
global _CLOCK # pylint: disable=global-statement
global _NOW_FUNCTION # pylint: disable=global-statement
_CLOCK = _MAC_CLOCK
libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)
class MachTimebaseInfoData(ctypes.Structure):
"""System timebase info. Defined in <mach/mach_time.h>."""
_fields_ = (('numer', ctypes.c_uint32),
('denom', ctypes.c_uint32))
mach_absolute_time = libc.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
timebase = MachTimebaseInfoData()
libc.mach_timebase_info(ctypes.byref(timebase))
ticks_per_second = timebase.numer / timebase.denom * 1.0e9
def MacNowFunctionImpl():
return mach_absolute_time() / ticks_per_second
_NOW_FUNCTION = MacNowFunctionImpl
def GetClockGetTimeClockNumber(plat):
for key in _CLOCK_MONOTONIC:
if plat.startswith(key):
return _CLOCK_MONOTONIC[key]
raise LookupError('Platform not in clock dictionary')
def InitializeLinuxNowFunction(plat):
"""Sets a monotonic clock for linux platforms.
Args:
plat: Platform that is being run on.
"""
global _CLOCK # pylint: disable=global-statement
global _NOW_FUNCTION # pylint: disable=global-statement
_CLOCK = _LINUX_CLOCK
clock_monotonic = GetClockGetTimeClockNumber(plat)
try:
# Attempt to find clock_gettime in the C library.
clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'),
use_errno=True).clock_gettime
except AttributeError:
# If not able to find it in the C library, look in the rt library.
clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'),
use_errno=True).clock_gettime
class Timespec(ctypes.Structure):
"""Time specification, as described in clock_gettime(3)."""
_fields_ = (('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long))
def LinuxNowFunctionImpl():
ts = Timespec()
if clock_gettime(clock_monotonic, ctypes.pointer(ts)):
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno))
return ts.tv_sec + ts.tv_nsec / 1.0e9
_NOW_FUNCTION = LinuxNowFunctionImpl
def IsQPCUsable():
"""Determines if system can query the performance counter.
The performance counter is a high resolution timer on windows systems.
Some chipsets have unreliable performance counters, so this checks that one
of those chipsets is not present.
Returns:
True if QPC is usable, False otherwise.
"""
# Sample output: 'Intel64 Family 6 Model 23 Stepping 6, GenuineIntel'
info = platform.processor()
if 'AuthenticAMD' in info and 'Family 15' in info:
return False
try: # If anything goes wrong during this, assume QPC isn't available.
frequency = ctypes.c_int64()
ctypes.windll.Kernel32.QueryPerformanceFrequency(
ctypes.byref(frequency))
if float(frequency.value) <= 0:
return False
except Exception: # pylint: disable=broad-except
logging.exception('Error when determining if QPC is usable.')
return False
return True
def InitializeWinNowFunction(plat):
"""Sets a monotonic clock for windows platforms.
Args:
plat: Platform that is being run on.
"""
global _CLOCK # pylint: disable=global-statement
global _NOW_FUNCTION # pylint: disable=global-statement
if IsQPCUsable():
_CLOCK = _WIN_HIRES
qpc_return = ctypes.c_int64()
qpc_frequency = ctypes.c_int64()
ctypes.windll.Kernel32.QueryPerformanceFrequency(
ctypes.byref(qpc_frequency))
qpc_frequency = float(qpc_frequency.value)
qpc = ctypes.windll.Kernel32.QueryPerformanceCounter
def WinNowFunctionImpl():
qpc(ctypes.byref(qpc_return))
return qpc_return.value / qpc_frequency
else:
_CLOCK = _WIN_LORES
kernel32 = (ctypes.cdll.kernel32
if plat.startswith(_PLATFORMS['cygwin'])
else ctypes.windll.kernel32)
get_tick_count_64 = getattr(kernel32, 'GetTickCount64', None)
# Windows Vista or newer
if get_tick_count_64:
get_tick_count_64.restype = ctypes.c_ulonglong
def WinNowFunctionImpl():
return get_tick_count_64() / 1000.0
else: # Pre Vista.
get_tick_count = kernel32.GetTickCount
get_tick_count.restype = ctypes.c_uint32
get_tick_count_lock = threading.Lock()
def WinNowFunctionImpl():
global GET_TICK_COUNT_LAST_NOW # pylint: disable=global-statement
global GET_TICK_COUNT_WRAPAROUNDS # pylint: disable=global-statement
with get_tick_count_lock:
current_sample = get_tick_count()
if current_sample < GET_TICK_COUNT_LAST_NOW:
GET_TICK_COUNT_WRAPAROUNDS += 1
GET_TICK_COUNT_LAST_NOW = current_sample
final_ms = GET_TICK_COUNT_WRAPAROUNDS << 32
final_ms += GET_TICK_COUNT_LAST_NOW
return final_ms / 1000.0
_NOW_FUNCTION = WinNowFunctionImpl
def InitializeNowFunction(plat):
"""Sets a monotonic clock for the current platform.
Args:
plat: Platform that is being run on.
"""
if plat.startswith(_PLATFORMS['mac']):
InitializeMacNowFunction(plat)
elif (plat.startswith(_PLATFORMS['linux'])
or plat.startswith(_PLATFORMS['freebsd'])
or plat.startswith(_PLATFORMS['bsd'])
or plat.startswith(_PLATFORMS['sunos'])):
InitializeLinuxNowFunction(plat)
elif (plat.startswith(_PLATFORMS['windows'])
or plat.startswith(_PLATFORMS['cygwin'])):
InitializeWinNowFunction(plat)
else:
raise RuntimeError('%s is not a supported platform.' % plat)
global _NOW_FUNCTION
global _CLOCK
assert _NOW_FUNCTION, 'Now function not properly set during initialization.'
assert _CLOCK, 'Clock not properly set during initialization.'
def Now():
return _NOW_FUNCTION() * 1e6 # convert from seconds to microseconds
def GetClock():
return _CLOCK
InitializeNowFunction(sys.platform)
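# A minimal usage sketch (import path assumed from this file's location);
# importing the module already ran InitializeNowFunction(sys.platform):
#   from py_trace_event import trace_time
#   trace_time.GetClock()            # e.g. 'LINUX_CLOCK_MONOTONIC'
#   t0 = trace_time.Now()
#   t1 = trace_time.Now()
#   assert t1 >= t0                  # monotonic, in microseconds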
|
andreasBihlmaier/arni
|
arni_countermeasure/src/arni_countermeasure/countermeasure_node.py
|
Python
|
bsd-2-clause
| 3,842
| 0.00026
|
from constraint_handler import *
from rated_statistic_storage import *
import rospy
from arni_msgs.msg import RatedStatistics
from arni_core.host_lookup import *
from std_srvs.srv import Empty
import helper
import time
class CountermeasureNode(object):
"""A ROS node.
Evaluates incoming rated statistics with a list of constraints.
If those constraints turn out to be true appropriate action is taken.
"""
def __init__(self):
"""Periodically (threading)
evaluate the constraints and clean old statistics."""
super(CountermeasureNode, self).__init__()
rospy.init_node("countermeasure", log_level=rospy.DEBUG)
self.__enabled = False
self.__init_params()
#: The storage of all incoming rated statistic.
self.__rated_statistic_storage = RatedStatisticStorage()
#: The handler for all constraints.
self.__constraint_handler = ConstraintHandler(
self.__rated_statistic_storage)
#: The time to wait between two evaluations.
self.__evaluation_period = helper.get_param_duration(
helper.ARNI_CTM_CFG_NS + "evaluation_period")
self.__register_subscriber()
self.__register_services()
def __register_subscriber(self):
"""Register to the rated statistics."""
rospy.Subscriber(
"/statistics_rated", RatedStatistics,
self.__rated_statistic_storage.callback_rated_statistic)
rospy.Subscriber(
"/statistics_rated"
|
, RatedStatistics,
HostLookup().callback_rated)
def __register_services(self):
"""Register all services"""
rospy.Service(
"~reload_constraints", Empty, self.__handle_reload_constraints)
def __handle_reload_constraints(self, req):
"""Reload all constraints from param server."""
self.__constraint_handler = ConstraintHandler(
self.__rated_statistic_storage)
return []
def __callback_evaluate_and_react(self, event):
""" Evaluate every constraint and execute reactions
if seemed necessary by the evaluation.
"""
try:
if self.__enabled:
self.__constraint_handler.evaluate_constraints()
self.__constraint_handler.execute_reactions()
except rospy.ROSInterruptException:
pass
def loop(self):
# simulation? wait for the clock to begin
while rospy.Time.now() == rospy.Time(0):
time.sleep(0.01)
# check periodically whether statistics are enabled
rospy.Timer(
rospy.Duration(
rospy.get_param("arni/check_enabled_interval", 10)),
self.__callback_enable)
# evaluate periodically
rospy.Timer(
self.__evaluation_period,
self.__callback_evaluate_and_react)
rospy.spin()
def __callback_enable(self, event):
"""Simple callback to check if statistics are enabled."""
self.__enabled = rospy.get_param("/enable_statistics", False)
def __init_params(self):
"""Initializes params on the parameter server,
if they are not already set.
"""
default = {
"reaction_autonomy_level": 100,
"storage_timeout": 10,
"evaluation_period": 1,
"default/min_reaction_interval": 10,
"default/reaction_timeout": 30
}
for param in default:
if not rospy.has_param(helper.ARNI_CTM_CFG_NS + param):
rospy.set_param(helper.ARNI_CTM_CFG_NS + param, default[param])
def main():
try:
cn = CountermeasureNode()
# rospy.loginfo(rospy.get_caller_id() + ": im on ")
cn.loop()
except rospy.ROSInterruptException:
pass
if __name__ == '__main__':
main()
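# A minimal sketch of the rospy.Timer pattern used in loop() above: a Timer
# calls callback(event) at each Duration tick, so one timer can poll the
# enable flag while another drives evaluation (names here are illustrative):
#   def poll_enabled(event):
#       enabled = rospy.get_param("/enable_statistics", False)
#       rospy.logdebug("statistics enabled: %s", enabled)
#   rospy.Timer(rospy.Duration(10), poll_enabled)  # after rospy.init_node()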
|
TAlonglong/trollduction-test
|
aapp_runner/aapp_dr_runner.py
|
Python
|
gpl-3.0
| 72,713
| 0.003782
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, 2015, 2016 Adam.Dybbroe
# Author(s):
# Adam.Dybbroe <adam.dybbroe@smhi.se>
# Janne Kotro fmi.fi
# Trygve Aspenes
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""AAPP Level-1 processing on NOAA and Metop HRPT Direct Readout data. Listens
for pytroll messages from Nimbus (NOAA/Metop file dispatch) and triggers
processing on direct readout HRPT level 0 files (full swaths - no granules at
the moment)
"""
from ConfigParser import RawConfigParser
import os
import sys
import logging
from logging import handlers
from trollsift.parser import compose
sys.path.insert(0, "trollduction/")
sys.path.insert(0, "/home/trygveas/git/trollduction-test/aapp_runner")
from read_aapp_config import read_config_file_options
from tle_satpos_prepare import do_tleing
from tle_satpos_prepare import do_tle_satpos
from do_commutation import do_decommutation
import socket
import netifaces
from helper_functions import run_shell_command
LOG = logging.getLogger(__name__)
# ----------------------------
# Default settings for logging
# ----------------------------
_DEFAULT_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
_DEFAULT_LOG_FORMAT = '[%(levelname)s: %(asctime)s : %(name)s] %(message)s'
# -------------------------------
# Default settings for satellites
# -------------------------------
SUPPORTED_NOAA_SATELLITES = ['NOAA-19', 'NOAA-18', 'NOAA-16', 'NOAA-15']
SUPPORTED_METOP_SATELLITES = ['Metop-B', 'Metop-A', 'Metop-C']
SUPPORTED_SATELLITES = SUPPORTED_NOAA_SATELLITES + SUPPORTED_METOP_SATELLITES
TLE_SATNAME = {'NOAA-19': 'NOAA 19', 'NOAA-18': 'NOAA 18',
'NOAA-15': 'NOAA 15',
'Metop-A': 'METOP-A', 'Metop-B': 'METOP-B',
'Metop-C': 'METOP-C'}
METOP_NAME = {'metop01': 'Metop-B', 'metop02': 'Metop-A'}
METOP_NAME_INV = {'metopb': 'metop01', 'metopa': 'metop02'}
SATELLITE_NAME = {'NOAA-19': 'noaa19', 'NOAA-18': 'noaa18',
'NOAA-15': 'noaa15', 'NOAA-14': 'noaa14',
'Metop-A': 'metop02', 'Metop-B': 'metop01',
'Metop-C': 'metop03'}
SENSOR_NAMES = ['amsu-a', 'amsu-b', 'mhs', 'avhrr/3', 'hirs/4']
SENSOR_NAME_CONVERTER = {
'amsua': 'amsu-a', 'amsub': 'amsu-b', 'hirs': 'hirs/4',
'mhs': 'mhs', 'avhrr': 'avhrr/3'}
METOP_NUMBER = {'b': '01', 'a': '02'}
"""
These are the standard names used by the various AAPP decommutation scripts.
If you change these, you will also have to change the decommutation scripts.
"""
STD_AAPP_OUTPUT_FILESNAMES = {'amsua_file':'aman.l1b',
'amsub_file':'ambn.l1b',
'hirs_file':'hrsn.l1b',
'avhrr_file':'hrpt.l1b'
}
# FIXME! This variable should be put in the config file:
SATS_ONLY_AVHRR = []
from urlparse import urlparse
import posttroll.subscriber
from posttroll.publisher import Publish
from posttroll.message import Message
from trollduction.helper_functions import overlapping_timeinterval
import tempfile
from glob import glob
# import os
import shutil
# import aapp_stat
import threading
from subprocess import Popen, PIPE
import shlex
# import subprocess
from datetime import timedelta, datetime
from time import time as _time
def get_local_ips():
inet_addrs = [netifaces.ifaddresses(iface).get(netifaces.AF_INET)
for iface in netifaces.interfaces()]
ips = []
for addr in inet_addrs:
if addr is not None:
for add in addr:
ips.append(add['addr'])
return ips
def nonblock_read(output):
"""An attempt to catch any hangup in reading the output (stderr/stdout)
from subprocess"""
import fcntl
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
return output.readline()
except:
return ''
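# A minimal usage sketch for nonblock_read, assuming a Popen child with piped
# output (the command string is a placeholder, not a real AAPP script):
#   proc = Popen(shlex.split("decommutate.sh level0.hrp"),
#                stdout=PIPE, stderr=PIPE)
#   line = nonblock_read(proc.stdout)  # '' when no complete line is ready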
def reset_job_registry(objdict, key, start_end_times):
"""Remove job key from registry"""
LOG.debug("Register: " + str(objdict))
starttime, endtime = start_end_times
if key in objdict:
if objdict[key] and len(objdict[key]) > 0:
objdict[key].remove(start_end_times)
LOG.debug("Release/reset job-key " + str(key) + " " +
str(starttime) + " " + str(endtime) +
" from job registry")
LOG.debug("Register: " + str(objdict))
return
LOG.warning("Nothing to reset/release - " +
"Register didn't contain any entry matching: " +
str(key))
return
class AappLvl1Processor(object):
"""
Container for the Metop/NOAA level-1 processing based on AAPP
"""
def __init__(self, runner_config):
"""
Init with config file options
"""
self.noaa_data_out_dir = runner_config['noaa_data_out_dir']
self.metop_data_out_dir = runner_config['metop_data_out_dir']
self.noaa_run_script = runner_config['aapp_run_noaa_script']
self.metop_run_script = runner_config['aapp_run_metop_script']
self.tle_indir = runner_config['tle_indir']
self.tle_outdir = runner_config['tle_outdir']
self.tle_script = runner_config['tle_script']
self.pps_out_dir = runner_config['pps_out_dir']
self.pps_out_dir_format = runner_config['pps_out_dir_format']
self.aapp_prefix = runner_config['aapp_prefix']
self.aapp_workdir = runner_config['aapp_workdir']
self.aapp_outdir = runner_config['aapp_outdir']
self.aapp_outdir_format = runner_config['aapp_outdir_format']
self.copy_data_directories = runner_config['copy_data_directories']
self.move_data_directory = runner_config['move_data_directory']
self.use_dyn_work_dir = runner_config['use_dyn_work_dir']
self.subscribe_topics = runner_config['subscribe_topics']
self.publish_pps_format = runner_config['publish_pps_format']
self.publish_l1_format = runner_config['publish_l1_format']
self.publish_sift_format = runner_config['publish_sift_format']
self.aapp_log_files_dir = runner_config['aapp_log_files_dir']
self.aapp_log_files_backup = runner_config['aapp_log_files_backup']
self.servername = runner_config['servername']
self.dataserver = runner_config['dataserver']
self.station = runner_config['station']
self.environment = runner_config['environment']
self.locktime_before_rerun = int(
runner_config.get('locktime_before_rerun', 10))
self.passlength_threshold = int(runner_config['passlength_threshold'])
self.fullswath = True # Always a full swath (never HRPT granules)
self.working_dir = None
self.level0_filename = None
self.starttime = None
self.endtime = None
self.platform_name = "Unknown"
self.satnum = "0"
self.orbit = "00000"
self.result_files = None
self.level0files = None
self.lvl1_home = self.pps_out_dir
self.job_register = {}
self.my_env = os.environ.copy()
self.check_and_set_correct_orbit_number = False if runner_config['check_and_set_correct_orbit_number'] == 'False' else True
self.do_ana_correction = False if runner_config['do_ana_correction'] == 'False' else True
self.initialise()
def initialise(self):
"""Initialise the processor """
self.working_dir = None
self.level0_filename = None
self.starttime = None
self.endtime = None
self.platform_name = "Unknown"
|
OpenImageIO/oiio
|
testsuite/oiiotool-pattern/run.py
|
Python
|
bsd-3-clause
| 3,464
| 0.01097
|
#!/usr/bin/env python
# test --create
command += oiiotool ("--create 320x240 3 -d uint8 -o black.tif")
command += oiiotool ("--stats black.tif")
# test --pattern constant
command += oiiotool ("--pattern constant:color=.1,.2,.3,1 320x240 4 -o constant.tif")
command += oiiotool ("--stats constant.tif")
# test --pattern noise
command += oiiotool ("--pattern noise:type=uniform:min=0.25:max=0.75 64x64 3 -d uint8 -o noise-uniform3.tif")
command += oiiotool ("--pattern noise:type=gaussian:mean=0.5:stddev=0.1 64x64 3 -d uint8 -o noise-gauss.tif")
command += oiiotool ("--pattern noise:type=salt:portion=0.01:value=1 64x64 3 -d uint8 -o noise-salt.tif")
# test --pattern fill
command += oiiotool ("--pattern fill:color=0,0,0.5 64x64 3 -d uint8 -o pattern-const.tif")
command += oiiotool ("--pattern fill:top=0.1,0.1,0.1:bottom=0,0,0.5 64x64 3 -d uint8 -o pattern-gradientv.tif")
command += oiiotool ("--pattern fill:left=0.1,0.1,0.1:right=0,0.5,0 64x64 3 -d uint8 -o pattern-gradienth.tif")
command += oiiotool ("--pattern fill:topleft=0.1,0.1,0.1:topright=0,0.5,0:bottomleft=0.5,0,0:bottomright=0,0,0.5 64x64 3 -d uint8 -o pattern-gradient4.tif")
# test --fill
command += oiiotool ("--create 256x256 3 --fill:color=1,.5,.5 256x256 --fill:color=0,1,0 80x80+100+100 -d uint8 -o filled.tif")
command += oiiotool ("--create 64x64 3 --fill:top=0.1,0.1,0.1:bottom=0,0,0.5 64x64 -d uint8 -o fillv.tif")
command += oiiotool ("--create 64x64 3 --fill:left=0.1,0.1,0.1:right=0,0.5,0 64x64 -d uint8 -o fillh.tif")
command += oiiotool ("--create 64x64 3 --fill:topleft=0.1,0.1,0.1:topright=0,0.5,0:bottomleft=0.5,0,0:bottomright=0,0,0.5 64x64 -d uint8 -o fill4.tif")
# test --line
command += oiiotool ("--pattern checker:color1=.1,.1,.1:color2=0,0,0 256x256 3 " +
"-line:color=0.25,0,0,0.25 10,60,250,20 " +
"-line:color=0.5,0,0,0.5 10,62,250,100 " +
"-line:color=1,0,0,1
|
10,64,250,400 " +
"-line:color=0,1,0,1 250,100,10,184 " +
"-line:color=0,0.5,0,0.5 250,200,10,182 " +
"-line:color=0,0.25,0,0.25 100,400,10,180 " +
"-line:color=.5,.5,0,0.5 100,100,120,100,120,100,120,120,120,120,10
|
0,120,100,120,100,100 " +
"-box:color=0,0.5,0.5,0.5 150,100,240,180 " +
"-d uint8 -o lines.tif")
# test --box
command += oiiotool ("--pattern checker:color1=.1,.1,.1:color2=0,0,0 256x256 3 " +
"--box:color=0,1,1,1 150,100,240,180 " +
"--box:color=0.5,0.5,0,0.5:fill=1 100,50,180,140 " +
"-d uint8 -o box.tif")
# test --point
command += oiiotool ("--create 64x64 3 " +
"--point:color=0,1,1,1 50,10 " +
"--point:color=1,0,1,1 20,20,30,30,40,40 " +
"-d uint8 -o points.tif")
# To add more tests, just append more lines like the above and also add
# the new 'feature.tif' (or whatever you call it) to the outputs list,
# below.
# Outputs to check against references
outputs = [ "pattern-const.tif", "pattern-gradienth.tif",
"pattern-gradientv.tif", "pattern-gradient4.tif",
"noise-uniform3.tif", "noise-gauss.tif", "noise-salt.tif",
"filled.tif", "fillh.tif", "fillv.tif", "fill4.tif",
"lines.tif", "box.tif", "points.tif",
"out.txt" ]
#print "Running this command:\n" + command + "\n"
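# For context, a minimal sketch of the oiiotool() helper these lines rely on;
# the real helper comes from the shared runtest harness, but its effect is
# roughly to append a logged shell invocation:
#   def oiiotool(args):
#       return "oiiotool " + args + " >> out.txt 2>&1 ;\n"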
|
Lrcezimbra/ganso-music
|
gansomusic/core/models.py
|
Python
|
gpl-3.0
| 355
| 0
|
from django.db import models
class Music(models.Model):
url = models.CharField('URL', max_length=255)
title = models.CharField('título', max_length=200, blank=True)
artist = models.CharField('artista', max_length=200, blank=True)
genre = models.CharField('gênero', max_length=100, blank=True)
file = models.FileField(upload_to='')
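# A minimal usage sketch (e.g. from a Django shell) with made-up values;
# the field names come from the model above:
#   Music.objects.create(url='http://example.com/track', title='Example',
#                        artist='Someone', genre='Rock', file='track.mp3')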
|
sadanandb/pmt
|
src/pyasm/web/__init__.py
|
Python
|
epl-1.0
| 931
| 0.003222
|
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
# web framework interface and implementations
from web_environment import *
from palette import *
from callback import *
# security for redirects
from url_security import *
# global web container
from web_container import *
from web_state import *
# basic widget classes
from widget import *
from html_wdg import *
from web_tools import *
#from command_delegator import *
#from event_container import *
# the web application widget
from web_app import *
from app_server import *
from simple_app_server import *
from widget_app_server import *
from web_init import *
from monitor import *
|
dalou/django-extended
|
setup.py
|
Python
|
bsd-3-clause
| 2,148
| 0.00419
|
#from __future__ import print_function
import ast
import os
import sys
import codecs
import subprocess
from fnmatch import fnmatchcase
from distutils.util import convert_path
from setuptools import setup, find_packages
def find_version(*parts):
try:
version_py = os.path.join(os.path.dirname(__file__), 'django_extended/version.py')
version_git = subprocess.check_output(["git", "tag"]).rstrip().splitlines()[-1]
version_msg = "# Do not edit this file, pipeline versioning is governed by git tags" + os.linesep + "# following PEP 386"
open(version_py, 'wb').write(version_msg + os.linesep + '__version__ = "%s"' % version_git)
except:
# NOT RUN LOCALLY
pass
from django_extended.version import __version__
return "{ver}".format(ver=__version__)
def read(*parts):
filename = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ('*.py', '*.pyc', '*$py.class', '*~', '.*', '*.bak')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
'./dist', 'EGG-INFO', '*.egg-info')
setup(
name='django-extended',
version=find_version(),
description='Django extended stuffs',
long_description=read('README.rst'),
author='Autrusseau Damien',
author_email='autrusseau.damien@gmail.com',
url='http://github.com/dalou/django-extended',
packages=find_packages(exclude=('tests*',)),
zip_safe=False,
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
# test_suite='runtests.runtests',
install_requires=[
'django>=1.8.4,<=1.9',
"jsonfield==1.0.3",
"babel==1.3",
"premailer==2.9.6"
],
)
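# For reference, the django_extended/version.py generated by find_version()
# above would contain something like (the tag value is illustrative):
#   # Do not edit this file, pipeline versioning is governed by git tags
#   # following PEP 386
#   __version__ = "1.0.0"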
|
benfinke/ns_python
|
nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationpolicylabel.py
|
Python
|
apache-2.0
| 9,779
| 0.038757
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationpolicylabel(base_resource) :
""" Configuration for authentication policy label resource. """
def __init__(self) :
self._labelname = ""
self._newname = ""
self._numpol = 0
self._hits = 0
self._policyname = ""
self._priority = 0
self._gotopriorityexpression = ""
self._flowtype = 0
self._description = ""
self.___count = 0
@property
def labelname(self) :
ur"""Name for the new authentication policy label.
Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.), pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication policy label" or 'authentication policy label').
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
ur"""Name for the new authentication policy label.
Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.), pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication policy label" or 'authentication policy label').
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def newname(self) :
ur"""The new name of the auth policy label.<br/>Minimum length = 1.
"""
try :
return self._newname
except Exception as e:
raise e
@newname.setter
def newname(self, newname) :
ur"""The new name of the auth policy label.<br/>Minimum length = 1
"""
try :
self._newname = newname
except Exception as e:
raise e
@property
def numpol(self) :
ur"""Number of polices bound to label.
"""
try :
return self._numpol
except Exception as e:
raise e
@property
def hits(self) :
ur"""Number of times policy label was invoked.
"""
try :
return self._hits
except Exception as e:
raise e
@property
def policyname(self) :
ur"""Name of the authentication policy to bind to the policy label.
"""
try :
return self._policyname
except Exception as e:
raise e
@property
def priority(self) :
ur"""Specifies the priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@property
def flowtype(self) :
ur"""Flowtype of the bound authentication policy.
"""
try :
return self._flowtype
except Exception as e:
raise e
@property
def description(self) :
ur"""Description of the policylabel.
"""
try :
return self._description
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(authenticationpolicylabel_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.authenticationpolicylabel
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.labelname is not None :
return str(self.labelname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add authenticationpolicylabel.
"""
try :
if type(resource) is not list :
addresource = authenticationpolicylabel()
addresource.labelname = resource.labelname
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ authenticationpolicylabel() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].labelname = resource[i].labelname
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete authenticationpolicylabel.
"""
try :
if type(resource) is not list :
deleteresource = authenticationpolicylabel()
if type(resource) != type(deleteresource):
deleteresource.labelname = resource
else :
deleteresource.labelname = resource.labelname
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ authenticationpolicylabel() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].labelname = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ authenticationpolicylabel() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].labelname = resource[i].labelname
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def rename(cls, client, resource, new_labelname) :
ur""" Use this API to rename a authenticationpolicylabel resource.
"""
try :
renameresource = authenticationpolicylabel()
if type(resource) == cls :
renameresource.labelname = resource.labelname
else :
renameresource.labelname = resource
return renameresource.rename_resource(client,new_labelname)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the authenticationpolicylabel resources that are configured on netscaler.
"""
try :
if not name :
obj = authenticationpolicylabel()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = authenticationpolicylabel()
obj.labelname = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [authenticationpolicylabel() for _ in range(len(name))]
obj = [authenticationpolicylabel() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = authenticationpolicylabel()
obj[i].labelname = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
de
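# A minimal usage sketch of the classmethods above, assuming `client` is an
# authenticated nitro_service session; the label name is made up:
#   label = authenticationpolicylabel()
#   label.labelname = "ldap_auth_label"
#   authenticationpolicylabel.add(client, label)
#   fetched = authenticationpolicylabel.get(client, "ldap_auth_label")
#   authenticationpolicylabel.delete(client, "ldap_auth_label")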
|
gsnbng/erpnext
|
erpnext/education/doctype/student_group/student_group.py
|
Python
|
agpl-3.0
| 5,632
| 0.022905
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
from erpnext.education.utils import validate_duplicate_student
from frappe.utils import cint
class StudentGroup(Document):
def validate(self):
self.validate_mandatory_fields()
self.validate_strength()
self.validate_students()
self.validate_and_set_child_table_fields()
validate_duplicate_student(self.students)
def validate_mandatory_fields(self):
if self.group_based_on == "Course" and not self.course:
frappe.throw(_("Please select Course"))
if self.group_based_on == "Course" and (not self.program and self.batch):
frappe.throw(_("Please select Program"))
if self.group_based_on == "Batch" and not self.program:
frappe.throw(_("Please select Program"))
def validate_strength(self):
if cint(self.max_strength) < 0:
frappe.throw(_("""Max strength cannot be less than zero."""))
if self.max_strength and len(self.students) > self.max_strength:
frappe.throw(_("""Cannot enroll more than {0} students for this student group.""").format(self.max_strength))
def validate_students(self):
program_enrollment = get_program_enrollment(self.academic_year, self.academic_term, self.program, self.batch, self.student_category, self.course)
students = [d.student for d in program_enrollment] if program_enrollment else []
for d in self.students:
if not frappe.db.get_value("Student", d.student, "enabled") and d.active and not self.disabled:
frappe.throw(_("{0} - {1} is inactive student").format(d.group_roll_number, d.student_name))
if (self.group_based_on == "Batch") and cint(frappe.defaults.get_defaults().validate_batch)\
and d.student not in students:
frappe.throw(_("{0} - {1} is not enrolled in the Batch {2}").format(d.group_roll_number, d.student_name, self.batch))
if (self.group_based_on == "Course") and cint(frappe.defaults.get_defaults().validate_course)\
and (d.student not in students):
frappe.throw(_("{0} - {1} is not enrolled in the Course {2}").format(d.group_roll_number, d.student_name, self.course))
def validate_and_set_child_table_fields(self):
roll_numbers = [d.group_roll_number for d in self.students if d.group_roll_number]
max_roll_no = max(roll_numbers) if roll_numbers else 0
roll_no_list = []
for d in self.students:
if not d.student_name:
d.student_name = frappe.db.get_value("Student", d.student, "title")
if not d.group_roll_number:
max_roll_no += 1
d.group_roll_number = max_roll_no
if d.group_roll_number in roll_no_list:
frappe.throw(_("Duplicate roll number for student {0}").format(d.student_name))
else:
roll_no_list.append(d.group_roll_number)
@frappe.whitelist()
def get_students(academic_year, group_based_on, academic_term=None, program=None, batch=None, student_category=None, course=None):
enrolled_students = get_program_enrollment(academic_year, academic_term, program, batch, student_category, course)
if enrolled_students:
student_list = []
for s in enrolled_students:
if frappe.db.get_value("Student", s.student, "enabled"):
s.update({"active": 1})
else:
s.update({"active": 0})
student_list.append(s)
return student_list
else:
frappe.msgprint(_("No students found"))
return []
def get_program_enrollment(academic_year, academic_term=None, program=None, batch=None, student_category=None, course=None):
condition1 = " "
condition2 = " "
if academic_term:
condition1 += " and pe.academic_term = %(academic_term)s"
if program:
condition1 += " and pe.program = %(program)s"
if batch:
condition1 += " and pe.student_batch_name = %(batch)s"
if student_category:
condition1 += " and pe.student_category = %(student_category)s"
if course:
condition1 += " and pe.name = pec.parent and pec.course = %(course)s"
condition2 = ", `tabProgram Enrollment Course` pec"
return frappe.db.sql('''
select
pe.student,
pe.student_name
from
`tabProgram Enrollment` pe {condition2}
where
pe.academic_year = %(academic_year)s {condition1}
order by
pe.student_name asc
'''.format(condition1=condition1, condition2=condition2),
({"academic_year": academic_year, "academic_term":academic_term, "program": program, "batch": batch, "student_category": student_category, "course": course}), as_dict=1)
@frappe.whitelist()
def fetch_students(doctype, txt, searchfield, start, page_len, filters):
if filters.get("group_based_on") != "Activity":
enrolled_students = get_program_enrollment(filters.get('academic_year'), filters.get('academic_term'),
filters.get('program'), filters.get('batch'), filters.get('student_category'))
student_group_student = frappe.db.sql_list('''select student from `tabStudent Group Student` where parent=%s''',
(filters.get('student_group')))
students = ([d.student for d in enrolled_students if d.student not in student_group_student]
if enrolled_students else [""]) or [""]
return frappe.db.sql("""select name, title from tabStudent
where name in ({0}) and (`{1}` LIKE %s or title LIKE %s)
order by idx desc, name
limit %s, %s""".format(", ".join(['%s']*len(students)), searchfield),
tuple(students + ["%%%s%%" % txt, "%%%s%%" % txt, start, page_len]))
else:
return frappe.db.sql("""select name, title from tabStudent
where `{0}` LIKE %s or title LIKE %s
order by idx desc, name
limit %s, %s""".format(searchfield),
tuple(["%%%s%%" % txt, "%%%s%%" % txt, start, page_len]))
|
labepi/destate
|
gui/AutomataManage.py
|
Python
|
gpl-2.0
| 16,796
| 0.002739
|
# vim: set fileencoding=utf-8 :
# Copyright (C) 2008 Joao Paulo de Souza Medeiros.
#
# Author(s): João Paulo de Souza Medeiros <ignotus21@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import gtk
import pango
import pydot
import types
import bestwidgets as bw
from gui.Canvas import Canvas
from gui.Image import Pixmaps
from gui.Command import Command
from core.Path import path
from core.Automaton import *
from core.Parser import Parser
TMP_IMAGE = 'share/pixmaps/drawing.png'
GRAPH_ATTR = {'rankdir': 'LR',
'fontsize': '10',
'fontname': 'Monospaced Bold'}
NODE_ATTR = {'shape': 'circle',
'fontsize': '10',
'fontname': 'Monospaced Bold'}
START_NODE_ATTR = {'style': 'filled',
'fillcolor': '#000000',
'fontcolor': '#ffffff'}
FINAL_NODE_ATTR = {'shape': 'doublecircle'}
EDGE_ATTR = {'color': '#888888',
'fontsize': '10',
'fontname': 'Monospaced'}
TRANSITION_TEXT = "(%s . %s) -> %s"
LAMBDA_TRANSITION_TEXT = "(%s . &) -> %s"
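# A quick check of the edge-label formats defined above (plain %-formatting):
#   TRANSITION_TEXT % ('q0', 'a', 'q1')     -> "(q0 . a) -> q1"
#   LAMBDA_TRANSITION_TEXT % ('q0', 'q1')   -> "(q0 . &) -> q1"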
class AutomataManage(bw.BWVBox):
"""
"""
def __init__(self):
"""
"""
bw.BWVBox.__init__(self, spacing=2)
self.set_sensitive(False)
self.__parser = Parser()
self.__create_widgets()
def __create_widgets(self):
"""
"""
self.__automaton_list = AutomatonList(self)
self.__automaton_view = AutomatonView()
self.__command = Command(self.execute_command,
self.parse_command)
self.bw_pack_start_noexpand_nofill(self.__automaton_list)
self.bw_pack_start_expand_fill(self.__automaton_view)
self.bw_pack_start_noexpand_nofill(self.__command)
def show_automaton_details(self, value):
"""
"""
self.__automaton_view.show_details(value)
def refresh_view(self, widget=None):
"""
"""
self.__automaton_view.refresh_view()
def clear(self):
"""
"""
self.__automaton_view.clear()
def set_automaton(self, automaton):
"""
"""
self.__automaton_view.set_automaton(automaton)
def create_new_automaton(self):
"""
"""
if self.__automaton_list.get_number_of_automata() == 0:
self.set_sensitive(True)
return self.__automaton_list.create_new_automaton()
def create_automaton_from_xml(self, file):
"""
"""
result, argument = get_automaton_from_xml(file)
if result:
if self.__automaton_list.get_number_of_automata() == 0:
self.set_sensitive(True)
self.__automaton_list.append_automaton(argument)
return result, argument
def write_automaton_to_xml(self, file):
"""
"""
automaton = self.__automaton_list.get_selected_automaton()
result, argument = save_automaton_to_xml(automaton, file)
return result, argument
def get_selected_automaton(self):
"""
"""
return self.__automaton_list.get_selected_automaton()
def parse_command(self, command):
"""
"""
return self.__parser.parse(command)
def execute_command(self, command):
"""
"""
automaton = self.__automaton_list.get_selected_automaton()
if self.__parser.parse(command) and automaton != None:
result = self.__parser.execute_command(automaton, command)
if result:
self.__command.add_text(command)
self.__command.set_status_icon(True)
self.__automaton_view.refresh_view()
if type(result) == types.InstanceType:
self.__automaton_list.append_automaton(result)
return True
self.__command.set_status_icon(False)
return False
class AutomatonView(gtk.HPaned):
"""
"""
def __init__(self, automaton=None):
"""
"""
gtk.HPaned.__init__(self)
self.__automaton = automaton
self.__pixmap = Pixmaps()
self.__create_widgets()
def __create_widgets(self):
"""
"""
self.__hbox = bw.BWHBox()
self.__canvas = Canvas()
self.__details = AutomatonDetails()
self.add1(self.__canvas)
self.add2(self.__details)
self.__details.hide_all()
self.__details.set_no_show_all(True)
def show_details(self, value):
"""
"""
if value:
self.__details.set_no_show_all(False)
self.__details.show_all()
self.__details.set_size_request(200, -1)
self.set_position(450)
else:
self.__details.hide_all()
self.__details.set_no_show_all(True)
def clear(self):
"""
"""
self.__canvas.set_image(None)
self.__details.clear()
def set_automaton(self, automaton):
"""
"""
self.__automaton = automaton
self.__details.set_automaton(self.__automaton)
self.refresh_view()
def refresh_view(self):
"""
"""
file_url = os.path.join(path.get_dirbase(), TMP_IMAGE)
self.create_dot_object().write_png(file_url, prog="dot")
self.__canvas.set_image(self.__pixmap.get_pixbuf('drawing', force=True))
self.__details.refresh()
def create_dot_object(self):
"""
"""
graph = pydot.Dot()
graph.set_label(self.__automaton.get_name())
states = self.__automaton.get_states()
events = self.__automaton.get_events()
transitions = self.__automaton.get_transitions()
lambda_transitions = self.__automaton.get_lambda_transitions()
start_state = self.__automaton.get_start_state()
final_states = self.__automaton.get_final_states()
# add graph attributes
for key in GRAPH_ATTR.keys():
graph.set(key, GRAPH_ATTR[key])
# add nodes to graph and set its attributes
for s in states:
graph.add_node(pydot.Node(s))
node = graph.get_node(s)[0]
for key in NODE_ATTR.keys():
node.set(key, NODE_ATTR[key])
# add edges to graph
for (a, e) in transitions.keys():
for n in transitions[(a, e)]:
edge = pydot.Edge(a, n, label=e)
for key in EDGE_ATTR.keys():
edge.set(key, EDGE_ATTR[key])
graph.add_edge(edge)
# add edges to graph (lambda)
for a in lambda_transitions.keys():
for n in lambda_transitions[a]:
edge = pydot.Edge(a, n, label='&')
for key in EDGE_ATTR.keys():
edge.set(key, EDGE_ATTR[key])
graph.add_edge(edge)
# set final states attributes
for s in final_states:
node = graph.get_node(s)[0]
for key in FINAL_NODE_ATTR.keys():
node.set(key, FINAL_NODE_ATTR[key])
# set initial state attributes
if start_state != None:
start_node = graph.get_node(start_state)[0]
for key in START_NODE_ATTR.keys():
start_node.set(key, START_NODE_ATTR[key])
return graph
class AutomatonList(bw.BWHBox):
"""
"""
def __init__(self, manage):
"""
"
|
sassoftware/rbuild
|
plugins/createprojectbranch.py
|
Python
|
apache-2.0
| 8,529
| 0.00129
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from rbuild import errors
from rbuild import pluginapi
from rbuild.pluginapi import command
NONE = 'none'
USERPASS = 'userpass'
ENTITLEMENT = 'entitlement'
AUTH_TYPES = (NONE, USERPASS, ENTITLEMENT)
class CreateProjectCommand(command.BaseCommand):
help = 'Create a project on SAS App Engine'
docs = {
'name': 'Long name (title) of project',
'short-name': 'Short (unique) name of project',
'domain-name': 'Domain name of project, or default if omitted',
'description': 'Optional description for the project',
'external': 'Externally managed project',
'label': 'Upstream label',
'upstream-url': 'URL of upstream repository (optional)',
'auth-type': 'External authentication type'
' [none, userpass, entitlement]',
'username': 'External username',
'password': 'External password',
'entitlement': 'External entitlement key',
}
def addLocalParameters(self, argDef):
argDef['name'] = command.ONE_PARAM
argDef['short-name'] = command.ONE_PARAM
argDef['domain-name'] = command.ONE_PARAM
argDef['description'] = command.ONE_PARAM
argDef['external'] = command.NO_PARAM
argDef['label'] = command.ONE_PARAM
argDef['upstream-url'] = command.ONE_PARAM
argDef['auth-type'] = command.ONE_PARAM
argDef['username'] = command.ONE_PARAM
argDef['password'] = command.ONE_PARAM
argDef['entitlement'] = command.ONE_PARAM
def runCommand(self, handle, argSet, args):
ui = handle.ui
rb = handle.facade.rbuilder
cf = handle.facade.conary
# get options used by all projects
if not argSet.get('name'):
argSet['name'] = ui.getResponse("Project name (required)",
required=True)
argSet['description'] = ui.getResponse(
"Project description (optional)")
if not argSet.get('short-name'):
argSet['short-name'] = ui.getResponse("Unique name (required)",
validationFn=rb.isValidShortName, required=True)
if 'domain-name' not in argSet:
argSet['domain-name'] = ui.getResponse(
"Domain name (blank for default)",
validationFn=rb.isValidDomainName)
kwargs = dict(
title=argSet['name'],
shortName=argSet['short-name'],
domainName=argSet.get('domain-name'),
description=argSet.get('description', ''),
)
# if external project, ask for relevent authentication information
if 'external' in argSet and argSet['external']:
if 'label' not in argSet:
argSet['label'] = ui.getResponse("Upstream label (required)",
required=True, validationFn=cf.isValidLabel)
if 'upstream-url' not in argSet:
while True:
url = ui.getResponse(
"URL of upstream repository (optional)")
if url is None or rb.isValidUrl(url):
break
argSet['upstream-url'] = url
if 'auth-type' not in argSet:
response = ui.getChoice(
"External authentication type",
["None", "Username and Password", "Entitlement key"],
default=0)
argSet['auth-type'] = AUTH_TYPES[response]
else:
if argSet['auth-type'] not in AUTH_TYPES:
raise errors.BadParameterError(
"Unknown authentication type.")
# collect authentication information based on the user's auth type
if argSet['auth-type'] == USERPASS:
if 'username' not in argSet:
argSet['username'] = ui.getResponse(
'External username', required=True)
if 'password' not in argSet:
argSet['password'] = ui.getPassword(
'External password', verify=True)
elif argSet['auth-type'] == ENTITLEMENT:
if 'entitlement' not in argSet:
argSet['entitlement'] = ui.getResponse(
'External entitlement', required=True)
kwargs['external'] = argSet['external']
kwargs['external_params'] = (
[argSet['label']],
argSet['upstream-url'],
argSet['auth-type'],
argSet.get('username'),
argSet.get('password'),
argSet.get('entitlement'),
)
projectId = rb.createProject(**kwargs)
ui.info("Created project %s", projectId)
class CreateBranchCommand(command.BaseCommand):
help = 'Create a branch within an existing project'
docs = {
'project': 'Short (unique) name of the existing project',
'branch': 'Version or name of the new branch',
'namespace': 'Optional namespace for the new branch',
'description': 'Optional description for the new branch',
'platform': 'Platform href, label, or name on which to base the new branch',
}
def addLocalParameters(self, argDef):
argDef['project'] = command.ONE_PARAM
argDef['branch'] = command.ONE_PARAM
argDef['namespace'] = command.ONE_PARAM
argDef['description'] = command.ONE_PARAM
argDef['platform'] = command.ONE_PARAM
def runCommand(self, handle, argSet, args):
ui = handle.ui
rb = handle.facade.rbuilder
if not argSet.get('project'):
argSet['project'] = ui.getResponse("Project name (required)",
validationFn=rb.isValidShortName, required=True)
if not argSet.get('branch'):
argSet['branch'] = ui.getResponse("Branch name (required)",
validationFn=rb.isValidBranchName, required=True)
argSet['description'] = ui.getResponse(
"Branch description (optional)")
argSet['namespace'] = ui.getResponse(
"Namespace (blank for default)")
platforms = rb.listPlatforms()
if argSet.get('platform'):
match = argSet['platform'].lower().strip()
platformLabel = None
for platform in platforms:
for value in (platform.platformName, platform.label,
platform.id):
if value.lower().strip() == match:
platformLabel = platform.label
break
if platformLabel is not None:
break
if platformLabel is None:
raise errors.PluginError("No platform matching term '%s' "
"was found" % (argSet['platform'],))
else:
display = ['%s - %s' % (x.platformName, x.label) for x in platforms]
response = ui.getChoice("Platform", display,
"The following platforms are available:")
platformLabel = platforms[response].label
label = rb.createBranch(
project=argSet['project'],
name=argSet['branch'],
platformLabel=platformLabel,
namespace=argSet.get('namespace'),
description=argSet.get('description', ''),
)
ui.info("Created branch on label %s", label)
ui.info("Type 'rbuild init
|