repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
drfmunoz/PythonEncodingSample | test/__init__.py | Python | apache-2.0 | 22 | 0 | __author__ = 'fr | edd | y'
|
rsoutelino/pyeditmask | pyeditmask.py | Python | mit | 21,757 | 0.008549 | #!/usr/bin/env python
######################################################
## Edits ROMS masks using a GUI
## Nov 2014
## rsoutelino@gmail.com
######################################################
import os
import wx
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as Navbar
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
import scipy.io as sp
import netCDF4 as nc
from mpl_toolkits.basemap import Basemap
# TO-DO LIST: ====================================================
# - improve point selection based in find_lower_left_node
# - create better icons for mask/unmask area
# - resolve untoggle/toggle between mask/unmask functions
# - add support to other models (POM, SWAN, WW3)
# - move matplotlib toolbar to the lower part
# - add a wx.TaskBarIcon to show up on the unity launcher
# - display local depth of the pixel we are looking at
# - transform mask/unmask and mask_area and unmask_area in the same
# function, and figure out how to decide wether to mask or unmask
# ================================================================
# NICE TIP TO DEBUG THIS PROGRAM: ================================
# - comment out app.MainLoop at the last line of this script
# - ipython --gui=wx
# - run pyeditmask.py
# - trigger the events and check out the objects in the shell
# ================================================================
global currentDirectory
currentDirectory = os.getcwd()
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
DEFAULT_VMIN = 0
DEFAULT_VMAX=1.5
DEFAULT_CMAP = plt.cm.BrBG
DEFAULT_DEPTH_FOR_LAND = -50
# ROMS related objects ---------------------------------------------
class RomsGrid(object):
"""
Stores and manipulates netcdf ROMS grid file information
"""
def __init__(sel | f,filename):
self.filename = filename
self.ncfile = nc.Dataset(filename, mode='r+')
self.lonr = self.ncfile.variables['lon_rho'][:]
self.latr = self.ncfile.variables['lat_rho'][:]
self.lonu = self.ncfile.variables['lon_u'][:]
self.latu = self.ncfile.variables['lat_u'][:]
self.lonv = self.ncfile.variables['lon_v'][:]
self.latv = self.ncfile.variables['lat_v'][:]
self.lonvert = self.ncfile.variables['lon | _vert'][:]
self.latvert = self.ncfile.variables['lat_vert'][:]
self.h = self.ncfile.variables['h'][:]
self.maskr = self.ncfile.variables['mask_rho'][:]
self.masku = self.ncfile.variables['mask_u'][:]
self.maskv = self.ncfile.variables['mask_v'][:]
def uvp_mask(rfield):
Mp, Lp = rfield.shape
M = Mp - 1
L = Lp - 1
vfield = rfield[0:M,:] * rfield[1:Mp,:]
ufield = rfield[:,0:L] * rfield[:,1:Lp]
pfield = ufield[0:M,:] * ufield[1:Mp,:]
return ufield, vfield, pfield
# -------------------------------------------------------------------
class App(wx.App):
def OnInit(self):
self.frame = Interface("PyEditMask 0.1.0", size=(1024,800))
self.frame.Show()
return True
class Interface(wx.Frame):
def __init__(self, title=wx.EmptyString, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,
*args, **kwargs):
wx.Frame.__init__(self, None, -1, "PyEditMask 0.1.0", pos=pos,
size=size, style=style, *args, **kwargs)
# Initializing toolbar
self.toolbar = MainToolBar(self)
# BASIC LAYOUT OF THE NESTED SIZERS ======================
panel1 = wx.Panel(self, wx.ID_ANY, style=wx.SUNKEN_BORDER)
mplpanel = wx.Panel(self, wx.ID_ANY, style=wx.SUNKEN_BORDER)
mplpanel.SetBackgroundColour("WHITE")
# BOX 1 is the main sizer
box1 = wx.BoxSizer(wx.HORIZONTAL)
box1.Add(panel1, 1, wx.EXPAND)
box1.Add(mplpanel, 15, wx.EXPAND)
# BOX 2 is the inner sizer of the left big control panel
box2 = wx.BoxSizer(wx.VERTICAL)
# BOX 3 is the sizer of the right big parent panel(panel1), the one that will
# serve as base for two child panels which will hold
# the two matplotlib canvas's
box3 = wx.BoxSizer(wx.VERTICAL)
# panel 1 content ========================================
main_label = wx.StaticText(panel1, label=" ")
box2.Add(main_label, proportion=0, flag=wx.CENTER)
# set_land = wx.Button(panel1, label="Set Land", style=wx.ID_CANCEL)
# box2.Add(set_land, proportion=0, flag=wx.CENTER)
# set_land.Bind(wx.EVT_BUTTON, self.onSetLand)
# set_water = wx.Button(panel1, label="Set Water", style=wx.ID_CANCEL)
# box2.Add(set_water, proportion=0, flag=wx.CENTER)
# set_water.Bind(wx.EVT_BUTTON, self.onSetWater)
# mplpanel content ========================================
self.mplpanel = SimpleMPLCanvas(mplpanel)
box3.Add(self.mplpanel.canvas, 1, flag=wx.CENTER)
# FINAL LAYOUT CONFIGURATIONS ============================
self.SetAutoLayout(True)
panel1.SetSizer(box2)
# panel2.SetSizer(box4)
mplpanel.SetSizer(box3)
self.SetSizer(box1)
self.InitMenu()
self.Layout()
self.Centre()
# self.ShowModal()
def InitMenu(self):
menubar = wx.MenuBar()
fileMenu = wx.Menu()
fileMenu.Append(wx.ID_OPEN, u'&Open ROMS grid file')
fileMenu.Append(wx.ID_OPEN, u'&Open coastline file')
fileMenu.Append(wx.ID_OPEN, u'&Open bathymetry file')
fileMenu.Append(wx.ID_SAVE, '&Save grid')
fileMenu.AppendSeparator()
qmi = wx.MenuItem(fileMenu, wx.ID_EXIT, '&Quit\tCtrl+W')
opf = wx.MenuItem(fileMenu, wx.ID_OPEN, '&Open\tCtrl+O')
opc = wx.MenuItem(fileMenu, wx.ID_OPEN, '&Open\tCtrl+O+C')
opb = wx.MenuItem(fileMenu, wx.ID_OPEN, '&Open\tCtrl+O+B')
svf = wx.MenuItem(fileMenu, wx.ID_SAVE, '&Save\tCtrl+S')
fileMenu.AppendItem(qmi)
# fileMenu.AppendItem(svf)
self.Bind(wx.EVT_MENU, self.OnQuit, qmi)
self.Bind(wx.EVT_MENU, self.toolbar.OnLoadGrid, opf)
self.Bind(wx.EVT_MENU, self.toolbar.OnLoadCoastline, opc)
self.Bind(wx.EVT_MENU, self.toolbar.OnLoadBathymetry, opb)
self.Bind(wx.EVT_MENU, self.toolbar.OnSaveGrid, svf)
menubar.Append(fileMenu, u'&PyEditMask')
self.SetMenuBar(menubar)
def OnQuit(self, e):
"""Fecha o programa"""
self.Close()
self.Destroy()
def OnCloseWindow(self, e):
self.Destroy()
class SimpleMPLCanvas(object):
"""docstring for SimpleMPLCanvas"""
def __init__(self, parent):
super(SimpleMPLCanvas, self).__init__()
self.parent = parent
self.plot_properties()
self.make_navbar()
def make_navbar(self):
self.navbar = Navbar(self.canvas)
self.navbar.SetPosition(wx.Point(0,0)) # this is not working !!
def plot_properties(self):
# Create matplotlib figure
self.fig = Figure(facecolor='w', figsize=(12,8))
self.canvas = FigureCanvas(self.parent, -1, self.fig)
self.ax = self.fig.add_subplot(111)
# tit = self.ax1.set_title("ROMS mask_rho", fontsize=12, fontweight='bold')
# tit.set_position([0.9, 1.05])
class MainToolBar(object):
def __init__(self, parent):
self.currentDirectory = os.getcwd()
self.parent = parent
self.toolbar = parent.CreateToolBar(style=1, winid=1,
name="Toolbar")
self.tools_params ={
'load_grid': (load_bitmap('grid.png'), u"Load grid",
"Load ocean_grd.nc ROMS grid netcdf file"),
'load_coastline': (load_bitmap('coast.png'), u"Load coastline",
"Load *.mat coastline file [lon / lat poligons]"),
'l |
stevepiercy/cookiecutter | cookiecutter/generate.py | Python | bsd-3-clause | 13,178 | 0 | # -*- coding: utf-8 -*-
"""Functions for generating a project from a project template."""
from __future__ import unicode_literals
from collections import OrderedDict
import fnmatch
import io
import json
import logging
import os
import shutil
from jinja2 import FileSystemLoader
from cookiecutter.environment import StrictEnvironment
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from binaryornot.check import is_binary
from .exceptions import (
NonTemplatedInputDirException,
ContextDecodingException,
FailedHookException,
OutputDirExistsException,
UndefinedVariableInTemplate
)
from .find import find_template
from .utils import make_sure_path_exists, work_in, rmtree
from .hooks import run_hook
logger = logging.getLogger(__name__)
def is_copy_only_path(path, context):
"""Check whether the given `path` should only be copied as opposed to being
rendered.
Returns True if `path` matches a pattern in the given `context` dict,
otherwise False.
:param path: A file-system path referring to a file or dir that
should be rendered or just copied.
:param context: cookiecutter context.
"""
try:
for dont_render in context['cookiecutter']['_copy_without_render']:
if fnmatch.fnmatch(path, dont_render):
return True
except KeyError:
return False
return False
def apply_overwrites_to_context(context, overwrite_context):
"""Modify the given context in place based on the overwrite_context."""
for variable, overwrite in overwrite_context.items():
if variable not in context:
# Do not include variables which are not used in the template
continue
context_value = context[variable]
if isinstance(context_value, list):
# We are dealing with a choice variable
if overwrite in context_value:
# This overwrite is actually valid for the given context
# Let's set it as default (by definition first item in list)
# see ``cookiecutter.prompt.prompt_choice_for_config``
context_value.remove(overwrite)
context_value.insert(0, overwrite)
else:
# Simply overwrite the value for this variable
context[variable] = overwrite
def generate_context(context_file='cookiecutter.json', default_context=None,
extra_context=None):
"""Generate the context for a Cookiecutter project template.
Loads the JSON file as a Python object, with key being the JSON filename.
:param context_file: JSON file containing key/value pairs for populating
the cookiecutter's variables.
:param default_context: Dictionary containing config to take into account.
:param extra_context: Dictionary containing configuration overrides
"""
context = {}
try:
with open(context_file) as file_handle:
obj = json.load(file_handle, object_pairs_hook=OrderedDict)
except ValueError as e:
# JSON decoding error. Let's throw a new exception that is more
# friendly for the developer or user.
full_fpath = os.path.abspath(context_file)
json_exc_message = str(e)
our_exc_message = (
'JSON decoding error while loading "{0}". Decoding'
' error details: "{1}"'.format(full_fpath, json_exc_message))
raise ContextDecodingException(our_exc_message)
# Add the Python object to the context dictionary
file_name = os.path.split(context_file)[1]
file_stem = file_name.split('.')[0]
context[file_stem] = obj
# Overwrite context variable defaults with the default context from the
# user's global config, if available
if default_context:
apply_overwrites_to_context(obj, default_context)
if extra_context:
apply_overwrites_to_context(obj, extra_context)
logger.debug('Context generated is {}'.format(context))
return context
def generate_file(project_dir, infile, context, env):
"""Render filename of infile as name of outfile, handle infile correctly.
Dealing with infile appropriately:
a. If infile is a binary file, copy it over without rendering.
b. If infile is a text file, render its contents and write the
rendered infile to outfile.
Precondition:
When calling `generate_file()`, the root template dir must be the
current working directory. Using `utils.work_in()` is the recommended
way to perform this directory change.
:param project_dir: Absolute path to the resulting generated project.
:param infile: Input file to generate the file from. Relative to the root
template dir.
:param context: Dict for populating the cookiecutter's variables.
:param env: Jinja2 template execution environment.
"""
logger.debug('Processing file {}'. | format(infile))
# Render the path to the output file (not including the root project dir)
outfile_tmpl = env.from_string(infile)
outfile = os.path.join(project_dir, outfile_tmpl.render(**context))
file_name_is_empty = os.path.isdir(outfile)
if file_name_is_empty:
logger.debug('The resulting file name is empty: {0}'.format(outfile))
return
logger.debug('Created file at {0}'.format(outf | ile))
# Just copy over binary files. Don't render.
logger.debug("Check {} to see if it's a binary".format(infile))
if is_binary(infile):
logger.debug(
'Copying binary {} to {} without rendering'
''.format(infile, outfile)
)
shutil.copyfile(infile, outfile)
else:
# Force fwd slashes on Windows for get_template
# This is a by-design Jinja issue
infile_fwd_slashes = infile.replace(os.path.sep, '/')
# Render the file
try:
tmpl = env.get_template(infile_fwd_slashes)
except TemplateSyntaxError as exception:
# Disable translated so that printed exception contains verbose
# information about syntax error location
exception.translated = False
raise
rendered_file = tmpl.render(**context)
logger.debug('Writing contents to file {}'.format(outfile))
with io.open(outfile, 'w', encoding='utf-8') as fh:
fh.write(rendered_file)
# Apply file permissions to output file
shutil.copymode(infile, outfile)
def render_and_create_dir(dirname, context, output_dir, environment,
overwrite_if_exists=False):
"""Render name of a directory, create the directory, return its path."""
name_tmpl = environment.from_string(dirname)
rendered_dirname = name_tmpl.render(**context)
dir_to_create = os.path.normpath(
os.path.join(output_dir, rendered_dirname)
)
logger.debug('Rendered dir {} must exist in output_dir {}'.format(
dir_to_create,
output_dir
))
output_dir_exists = os.path.exists(dir_to_create)
if overwrite_if_exists:
if output_dir_exists:
logger.debug(
'Output directory {} already exists,'
'overwriting it'.format(dir_to_create)
)
else:
if output_dir_exists:
msg = 'Error: "{}" directory already exists'.format(dir_to_create)
raise OutputDirExistsException(msg)
make_sure_path_exists(dir_to_create)
return dir_to_create
def ensure_dir_is_templated(dirname):
"""Ensure that dirname is a templated directory name."""
if '{{' in dirname and '}}' in dirname:
return True
else:
raise NonTemplatedInputDirException
def _run_hook_from_repo_dir(repo_dir, hook_name, project_dir, context):
"""Run hook from repo directory, clean project directory if hook fails.
:param repo_dir: Project template input directory.
:param hook_name: The hook to execute.
:param project_dir: The directory to execute the script from.
:param context: Cookiecutter project context.
"""
with work_in(repo_dir):
try:
run_hook(hook_name, project_dir, context) |
tongxindao/Flask-micblog | Flask-MicroBlog/web/micblog/db_migrate.py | Python | apache-2.0 | 844 | 0.003695 | # coding: utf-8
import imp
from migrate.versioning import api
from app import db
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
|
migration = SQLALCHEMY_MIGRATE_REPO + '/versions/%03d_migration.py' % (api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO) + 1)
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec old_model in tmp_module.__dict__
script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, tmp_module.meta, db.metadata)
open(migration, "wt").write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print '新迁移版本保存为 ' + migration
print '当前数据库版本: ' + str(api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO))
| |
google-research/dreamer | dreamer/control/dummy_env.py | Python | apache-2.0 | 1,560 | 0.004487 | # Copyright 2019 The Dreamer Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import numpy as np
class DummyEnv(object):
def __init__(self):
self._random = np.random.RandomState(seed=0)
self._step = None
@property
def observation_space(self):
low = np.zeros([64, 64, 3], dtype=np.float32)
high = np.ones([64, 64, 3], dtype=np.float32)
spaces = | {'image': gym.spaces.Box(low, high)}
return gym.spaces.Dict(spaces)
@property
def action_space(self):
low = -np.ones([5], dtype=np.float32)
high = np.ones([5], dtype=np.float32)
return gym.spaces.Box(low, high)
def reset(self):
self._step = 0
obs = self.observation_space.sample()
return obs
def step(self, action):
obs = self.observation_space.sample()
reward = self._ran | dom.uniform(0, 1)
self._step += 1
done = self._step >= 1000
info = {}
return obs, reward, done, info
|
nkmk/python-snippets | notebook/opencv_face_eye_detection.py | Python | mit | 1,633 | 0.001225 | import cv2
face_cascade_path = '/usr/local/opt/opencv/share/'\
'OpenCV/haarcascades/haarcascade_frontalface_default.xml'
eye_cascade_path = '/usr/local/opt/opencv/share/'\
'OpenCV/haarcascades/haarcascade_eye.xml'
face_cascade = cv2.CascadeClassifier(face_cascade_path)
eye_cascade = cv2.CascadeClassifier(eye_cascade_path)
src = cv2.imread('data/src/lena_square.png')
src_gray = | cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(src_gray)
for x, y, w, h in faces:
cv2.rectangle(src | , (x, y), (x + w, y + h), (255, 0, 0), 2)
face = src[y: y + h, x: x + w]
face_gray = src_gray[y: y + h, x: x + w]
eyes = eye_cascade.detectMultiScale(face_gray)
for (ex, ey, ew, eh) in eyes:
cv2.rectangle(face, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
cv2.imwrite('data/dst/opencv_face_detect_rectangle.jpg', src)
# True
src = cv2.imread('data/src/lena_square.png')
src_gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(src_gray)
for x, y, w, h in faces:
src[y: y + h, x: x + w] = [0, 128, 255]
cv2.imwrite('data/dst/opencv_face_detect_fill.jpg', src)
# True
src = cv2.imread('data/src/lena_square.png')
src_gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(src_gray)
ratio = 0.05
for x, y, w, h in faces:
small = cv2.resize(src[y: y + h, x: x + w], None, fx=ratio, fy=ratio, interpolation=cv2.INTER_NEAREST)
src[y: y + h, x: x + w] = cv2.resize(small, (w, h), interpolation=cv2.INTER_NEAREST)
cv2.imwrite('data/dst/opencv_face_detect_mosaic.jpg', src)
# True
|
mozilla-iam/cis | python-modules/cis_publisher/cis_publisher/auth0.py | Python | mpl-2.0 | 24,297 | 0.003992 | import cis_profile
import cis_publisher
import boto3
import botocore
import os
import logging
import json
import time
from auth0.v3.authentication import GetToken
from auth0.v3.management import Auth0
from auth0.v3.exceptions import Auth0Error
from datetime import datetime, timezone, timedelta
from traceback import format_exc
# from http.client import HTTPConnection
logger = logging.getLogger(__name__)
# HTTPConnection.debuglevel = 1
class Auth0Publisher:
def __init__(self, context={}):
self.secret_manager = cis_publisher.secret.Manager()
self.context = context
self.report = None
self.config = cis_publisher.common.get_config()
self.s3_cache = None
self.s3_cache_require_update = False
# Only fields we care about for the user entries
# auth0 field->cis field map
self.az_cis_fields = {
"created_at": "created",
"given_name": "first_name",
"family_name": "last_name",
"name": None,
"nickname": None,
"user_id": "user_id",
"email": "primary_email",
"identities": "identities",
"blocked": "active",
}
self.az_blacklisted_connections = ["Mozilla-LDAP", "Mozilla-LDAP-Dev"]
self.az_whitelisted_connections = ["email", "github", "google-oauth2", "firefoxaccounts"]
self.az_users = None
self.all_cis_user_ids = None
self.user_ids_only = None
def get_s3_cache(self):
"""
If cache exists and is not older than timedelta() then return it, else don't
return: dict JSON
"""
if self.s3_cache is not None:
return self.s3_cache
s3 = boto3.client("s3")
bucket = os.environ.get("CIS_BUCKET_URL")
cache_time = int(os.environ.get("CIS_AUTHZERO_CACHE_TIME_SECONDS", 120))
recent = datetime.now(timezone.utc) - timedelta(seconds=cache_time)
try:
objects = s3.list_objects_v2(Bucket=bucket)
# bucket has zero contents?
if "Contents" not in objects:
logger.info("No S3 cache present")
return None
# Recent file?
for o in objects["Contents"]:
if o["Key"] == "cache.json" and recent > o["LastModified"]:
logger.info(
f"S3 cache too old, not using ({recent} gt {o['LastModified']}"
f", was cached for: {cache_time}s)"
)
return None
response = s3.get_object(Bucket=bucket, Key="cache.json")
data = response["Body"].read()
except botocore.exceptions.ClientError as e:
logger.error("Could not find S3 cache file: {}".format(e))
return None
logger.info("Using S3 cache")
self.s3_cache = json.loads(data)
return self.s3_cache
def save_s3_cache(self, data):
"""
@data dict JSON
"""
if self.s3_cache_require_update is False:
return
s3 = boto3.client("s3")
bucket = os.environ.get("CIS_BUCKET_URL")
s3.put_object(Bucket=bucket, Key="cache.json", Body=json.dumps(data))
logger.info("Wrote S3 cache file")
def publish(self, user_ids=None, chunk_size=100):
"""
Glue to create or fetch cis_profile.User profiles for this publisher
Then pass everything over to the Publisher class
None, ALL profiles are sent.
@user_ids: list of str - user ids to publish. If None, all users are published.
@chunk_size: int when no user_id is selected, this is the size of the chunk/slice we'll create to divide the
work between function calls (to self)
"""
if user_ids is None:
le = "All"
else:
le = len(user_ids)
logger.info("Starting Auth0 Publisher [{} users]".format(le))
# XXX login_method is overridden when posting the user or listing users, i.e. the one here does not matter
publisher = cis_publisher.Publish([], login_method="github", publisher_name="auth0")
# These are the users auth0 knows about
self.az_users = self.fetch_az_users(user_ids)
self.all_cis_user_ids = self.fetch_all_cis_user_ids(publisher)
# Should we fan-out processing to multiple function calls?
if user_ids is None:
# Because we do not care about most attributes update, we only process new users, or users that will be
# deactivated in order to save time. Note that there is (currently) no auth0 hook to notify of new user
# event, so this (the auth0 publisher that is) function needs to be reasonably fast to avoid delays when
# provisioning users
# So first, remove all known users from the requested list
user_ids_to_process_set = set(self.get_az_user_ids()) - set(self.all_cis_user_ids)
az_user_ids_set = set(self.get_az_user_ids())
# Add blocked users so that they get deactivated
logger.info(
"Converting filtering list, size of user_ids_to_process {}".format(len(user_ids_to_process_set))
)
for u in self.az_users:
if u["user_id"] in az_user_ids_set:
if ("blocked" in u.keys()) and (u["blocked"] is True):
user_ids_to_process_set.add(u["user_id"])
logger.info(
"After filtering out known CIS users/in auth0 blocked users, we will process {} users".format(
len(user_ids_to_process_set)
)
)
self.save_s3_cache({"az_users": self.az_users, "all_cis_user_ids": self.all_cis_user_ids})
self.fan_out(publisher, chunk_size, list(user_ids_to_process_set))
else:
# Don't cache auth0 list if we're just getting a single user, so that we get the most up to date data
# and because it's pretty fast for a single user
if len(user_ids) == 1:
os.environ["CIS_AUTHZERO_CACHE_TIME_SECONDS"] = "0"
logger.info("CIS_AUTHZERO_CACHE_TIME_SECONDS was set to 0 (caching disabled) for this run")
self.process(publisher, user_ids)
def fetch_all_cis_user_ids(self, publisher):
"""
Get all known CIS user ids for the whitelisted login methods
This is here because CIS only returns user ids per specific login methods
We also cache this
"""
self.s3_cache = self.get_s3_cache()
if self.s3_cache is not None:
self.all_cis_user_ids = self.s3_cache["all_cis_user_ids"]
return self.all_cis_user_ids
if self.all_cis_user_ids is not None:
return self.all_cis_user_ids
# Not cached, fetch it
self.s3_cache_require_update = True
# These are the users CIS knows about
self.all_cis_user_ids = []
for c in self.az_whitelisted_connections:
# FIXME we're not using the real login method here because
# Code in the CIS Vault matches against the start of `user_id` instead of the actual login method
# This is fine for most methods, except this one... ideally the code should change in the CIS Vault when it
# uses something else than DynamoDB and is able to match efficiently on other attributes
if c == "firefoxaccounts":
c = "oauth2|firefoxaccounts"
publisher.login_method = c
publisher.get_known_cis_users(include_inactive=False)
self.all_cis_user_ids += publisher.known_cis_users_by_user_id.keys()
# Invalidate p | ublisher memory cache
publisher.known_cis_users = None
# XXX in case we got duplicates for some reason, we uniq | uify
self.all_cis_user_ids = list(set(self.all_cis_user_ids))
logger.info("Got {} known CIS users for all whitelisted login methods".format(len(self.all_cis_user_ids)))
return self.all_cis_user_ids
def get_az_user_ids(self):
"""
Extrac |
ESOedX/edx-platform | common/djangoapps/terrain/stubs/lti.py | Python | agpl-3.0 | 13,193 | 0.002653 | """
Stub implementation of LTI Provider.
What is supported:
------------------
1.) This LTI Provider can service only one Tool Consumer at the same time. It is
not possible to have this LTI multiple times on a single page in LMS.
"""
from __future__ import absolute_impor | t
import base64
import hashlib
import logging
import os
import textwrap
from uuid import uuid4
import mock
import oauthlib.oauth1
import requests
import six
import six.moves.urllib.error # pylint: disable=import-error
import six.moves.urllib.parse # pylint: disable=import-error
import six.moves.urllib.request # pylint: disable=import-error
from oauthlib.oauth1.rfc5849 import parameters, signature
from openedx.core.djangolib.markup import HTML
from .http i | mport StubHttpRequestHandler, StubHttpService
log = logging.getLogger(__name__)
class StubLtiHandler(StubHttpRequestHandler):
"""
A handler for LTI POST and GET requests.
"""
DEFAULT_CLIENT_KEY = 'test_client_key'
DEFAULT_CLIENT_SECRET = 'test_client_secret'
DEFAULT_LTI_ENDPOINT = 'correct_lti_endpoint'
DEFAULT_LTI_ADDRESS = 'http://{host}:{port}/'
def do_GET(self):
"""
Handle a GET request from the client and sends response back.
Used for checking LTI Provider started correctly.
"""
self.send_response(200, 'This is LTI Provider.', {'Content-type': 'text/plain'})
def do_POST(self):
"""
Handle a POST request from the client and sends response back.
"""
if 'grade' in self.path and self._send_graded_result().status_code == 200:
status_message = HTML('LTI consumer (edX) responded with XML content:<br>{grade_data}').format(
grade_data=self.server.grade_data['TC answer']
)
content = self._create_content(status_message)
self.send_response(200, content)
elif 'lti2_outcome' in self.path and self._send_lti2_outcome().status_code == 200:
status_message = HTML('LTI consumer (edX) responded with HTTP {}<br>').format(
self.server.grade_data['status_code'])
content = self._create_content(status_message)
self.send_response(200, content)
elif 'lti2_delete' in self.path and self._send_lti2_delete().status_code == 200:
status_message = HTML('LTI consumer (edX) responded with HTTP {}<br>').format(
self.server.grade_data['status_code'])
content = self._create_content(status_message)
self.send_response(200, content)
# Respond to request with correct lti endpoint
elif self._is_correct_lti_request():
params = {k: v for k, v in self.post_dict.items() if k != 'oauth_signature'}
if self._check_oauth_signature(params, self.post_dict.get('oauth_signature', "")):
status_message = "This is LTI tool. Success."
# Set data for grades what need to be stored as server data
if 'lis_outcome_service_url' in self.post_dict:
self.server.grade_data = {
'callback_url': self.post_dict.get('lis_outcome_service_url').replace('https', 'http'),
'sourcedId': self.post_dict.get('lis_result_sourcedid')
}
host = os.environ.get('BOK_CHOY_HOSTNAME', self.server.server_address[0])
submit_url = '//{}:{}'.format(host, self.server.server_address[1])
content = self._create_content(status_message, submit_url)
self.send_response(200, content)
else:
content = self._create_content("Wrong LTI signature")
self.send_response(200, content)
else:
content = self._create_content("Invalid request URL")
self.send_response(500, content)
def _send_graded_result(self):
"""
Send grade request.
"""
values = {
'textString': 0.5,
'sourcedId': self.server.grade_data['sourcedId'],
'imsx_messageIdentifier': uuid4().hex,
}
payload = textwrap.dedent("""
<?xml version = "1.0" encoding = "UTF-8"?>
<imsx_POXEnvelopeRequest xmlns="http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">
<imsx_POXHeader>
<imsx_POXRequestHeaderInfo>
<imsx_version>V1.0</imsx_version>
<imsx_messageIdentifier>{imsx_messageIdentifier}</imsx_messageIdentifier> /
</imsx_POXRequestHeaderInfo>
</imsx_POXHeader>
<imsx_POXBody>
<replaceResultRequest>
<resultRecord>
<sourcedGUID>
<sourcedId>{sourcedId}</sourcedId>
</sourcedGUID>
<result>
<resultScore>
<language>en-us</language>
<textString>{textString}</textString>
</resultScore>
</result>
</resultRecord>
</replaceResultRequest>
</imsx_POXBody>
</imsx_POXEnvelopeRequest>
""")
data = payload.format(**values)
url = self.server.grade_data['callback_url']
headers = {
'Content-Type': 'application/xml',
'X-Requested-With': 'XMLHttpRequest',
'Authorization': self._oauth_sign(url, data)
}
# Send request ignoring verifirecation of SSL certificate
response = requests.post(url, data=data, headers=headers, verify=False)
self.server.grade_data['TC answer'] = response.content
return response
def _send_lti2_outcome(self):
"""
Send a grade back to consumer
"""
payload = textwrap.dedent("""
{{
"@context" : "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type" : "Result",
"resultScore" : {score},
"comment" : "This is awesome."
}}
""")
data = payload.format(score=0.8)
return self._send_lti2(data)
def _send_lti2_delete(self):
"""
Send a delete back to consumer
"""
payload = textwrap.dedent("""
{
"@context" : "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type" : "Result"
}
""")
return self._send_lti2(payload)
def _send_lti2(self, payload):
"""
Send lti2 json result service request.
"""
### We compute the LTI V2.0 service endpoint from the callback_url (which is set by the launch call)
url = self.server.grade_data['callback_url']
url_parts = url.split('/')
url_parts[-1] = "lti_2_0_result_rest_handler"
anon_id = self.server.grade_data['sourcedId'].split(":")[-1]
url_parts.extend(["user", anon_id])
new_url = '/'.join(url_parts)
content_type = 'application/vnd.ims.lis.v2.result+json'
headers = {
'Content-Type': content_type,
'Authorization': self._oauth_sign(new_url, payload,
method='PUT',
content_type=content_type)
}
# Send request ignoring verifirecation of SSL certificate
response = requests.put(new_url, data=payload, headers=headers, verify=False)
self.server.grade_data['status_code'] = response.status_code
self.server.grade_data['TC answer'] = response.content
return response
def _create_content(self, response_text, submit_url=None):
"""
Return content (str) either for launch, send grade or get result from TC.
"""
if submit_url:
submit_form = textwrap.dedent(HTML("""
<form action="{submit_url}/grade" method="post">
<input type="submit" name="submit-button" value="Submit" id="submit-button">
</form>
<f |
chetan/cherokee | qa/137-Mime1.py | Python | gpl-2.0 | 797 | 0.012547 | from base import *
TYPE = "example/ejemplo"
EXTENSION = "mime_test_1"
MIME_TYPES = """
mime!application/java-archive!extensions = jar
mime!application/java-serialized-object!extensions = ser
mime!application/j | ava-vm!extensions = class
mime!%s!extensions = %s
mime!x-world/x-vrml!extensions = vrm vrml wrl
""" % (TYPE, EXTENSION)
class Test (TestBase):
def __init__ (self):
TestBase.__init__ (self, __file__)
self.name = "Mime type I"
se | lf.request = "GET /mime1/file.%s HTTP/1.0\r\n" % (EXTENSION)
self.expected_error = 200
self.expected_content = "Content-Type: %s" % (TYPE)
def Prepare (self, www):
d = self.Mkdir (www, "mime1")
f = self.WriteFile (d, "file.%s" % (EXTENSION))
self.conf = MIME_TYPES
|
JuliaSprenger/python-neo | neo/rawio/baserawio.py | Python | bsd-3-clause | 26,231 | 0.001792 | """
baserawio
======
Classes
-------
BaseRawIO
abstract class which should be overridden to write a RawIO.
RawIO is a new API in neo that is supposed to acces as fast as possible
raw data. All IO with theses characteristics should/could be rewritten:
* internally use of memmap (or hdf5)
* reading header is quite cheap (not read all the file)
* neo tree object is symetric and logical: same channel/units/event
along all block and segments.
So this handles **only** one simplified but very frequent case of dataset:
* Only one channel set for AnalogSignal (aka ChannelIndex) stable along Segment
* Only one channel set for SpikeTrain (aka Unit) stable along Segment
* AnalogSignal have all the same sampling_rate acroos all Segment
* t_start/t_stop are the same for many object (SpikeTrain, Event) inside a Segment
* AnalogSignal should all have the same sampling_rate otherwise the won't be read
a the same time. So signal_group_mode=='split-all' in BaseFromRaw
A helper class `neo.io.basefromrawio.BaseFromRaw` should transform a RawIO to
neo legacy IO from free.
With this API the IO have an attributes `header` with necessary keys.
See ExampleRawIO as example.
BaseRawIO implement a possible presistent cache system that can be used
by some IOs to avoid very long parse_header(). The idea is that some variable
or vector can be store somewhere (near the fiel, /tmp, any path)
"""
# from __future__ import unicode_literals, print_function, division, absolute_import
import logging
import numpy as np
import os
import sys
from neo import logging_handler
try:
import joblib
HAVE_JOBLIB = True
except ImportError:
HAVE_JOBLIB = False
possible_raw_modes = ['one-file', 'multi-file', 'one-dir', ] # 'multi-dir', 'url', 'other'
error_header = 'Header is not read yet, do parse_header() first'
_signal_channel_dtype = [
('name', 'U64'),
('id', 'int64'),
('sampling_rate', 'float64'),
('dtype', 'U16'),
('units', 'U64'),
('gain', 'float64'),
('offset', 'float64'),
('group_id', 'int64'),
]
_common_sig_characteristics = ['sampling_rate', 'dtype', 'group_id']
_unit_channel_dtype = [
('name', 'U64'),
('id', 'U64'),
# for waveform
('wf_units', 'U64'),
('wf_gain', 'float64'),
('wf_offset', 'float64'),
('wf_left_sweep', 'int64'),
('wf_sampling_rate', 'float64'),
]
_event_channel_dtype = [
('name', 'U64'),
('id', 'U64'),
('type', 'S5'), # epoch ot event
]
class BaseRawIO:
"""
Generic class to handle.
"""
name = 'BaseIO'
description = ''
extensions = []
rawmode = None # one key in possible_raw_modes
def __init__(self, use_cache=False, cache_path='same_as_resource', **kargs):
"""
When rawmode=='one-file' kargs MUST contains 'filename' the filename
When rawmode=='multi-file' kargs MUST contains 'filename' one of the filenames.
When rawmode=='one-dir' kargs MUST contains 'dirname' the dirname.
"""
# create a logger for the IO class
fullname = self.__class__.__module__ + '.' + self.__class__.__name__
self.logger = logging.getLogger(fulln | ame)
# create a logger fo | r 'neo' and add a handler to it if it doesn't
# have one already.
# (it will also not add one if the root logger has a handler)
corename = self.__class__.__module__.split('.')[0]
corelogger = logging.getLogger(corename)
rootlogger = logging.getLogger()
if not corelogger.handlers and not rootlogger.handlers:
corelogger.addHandler(logging_handler)
self.use_cache = use_cache
if use_cache:
assert HAVE_JOBLIB, 'You need to install joblib for cache'
self.setup_cache(cache_path)
else:
self._cache = None
self.header = None
def parse_header(self):
"""
This must parse the file header to get all stuff for fast use later on.
This must create
self.header['nb_block']
self.header['nb_segment']
self.header['signal_channels']
self.header['units_channels']
self.header['event_channels']
"""
self._parse_header()
self._group_signal_channel_characteristics()
def source_name(self):
"""Return fancy name of file source"""
return self._source_name()
def __repr__(self):
txt = '{}: {}\n'.format(self.__class__.__name__, self.source_name())
if self.header is not None:
nb_block = self.block_count()
txt += 'nb_block: {}\n'.format(nb_block)
nb_seg = [self.segment_count(i) for i in range(nb_block)]
txt += 'nb_segment: {}\n'.format(nb_seg)
for k in ('signal_channels', 'unit_channels', 'event_channels'):
ch = self.header[k]
if len(ch) > 8:
chantxt = "[{} ... {}]".format(', '.join(e for e in ch['name'][:4]),
' '.join(e for e in ch['name'][-4:]))
else:
chantxt = "[{}]".format(', '.join(e for e in ch['name']))
txt += '{}: {}\n'.format(k, chantxt)
return txt
def _generate_minimal_annotations(self):
"""
Helper function that generate a nested dict
of all annotations.
must be called when these are Ok:
* block_count()
* segment_count()
* signal_channels_count()
* unit_channels_count()
* event_channels_count()
Usage:
raw_annotations['blocks'][block_index] = { 'nickname' : 'super block', 'segments' : ...}
raw_annotations['blocks'][block_index] = { 'nickname' : 'super block', 'segments' : ...}
raw_annotations['blocks'][block_index]['segments'][seg_index]['signals'][channel_index] = {'nickname': 'super channel'}
raw_annotations['blocks'][block_index]['segments'][seg_index]['units'][unit_index] = {'nickname': 'super neuron'}
raw_annotations['blocks'][block_index]['segments'][seg_index]['events'][ev_chan] = {'nickname': 'super trigger'}
Theses annotations will be used at the neo.io API directly in objects.
Standard annotation like name/id/file_origin are already generated here.
"""
signal_channels = self.header['signal_channels']
unit_channels = self.header['unit_channels']
event_channels = self.header['event_channels']
a = {'blocks': [], 'signal_channels': [], 'unit_channels': [], 'event_channels': []}
for block_index in range(self.block_count()):
d = {'segments': []}
d['file_origin'] = self.source_name()
a['blocks'].append(d)
for seg_index in range(self.segment_count(block_index)):
d = {'signals': [], 'units': [], 'events': []}
d['file_origin'] = self.source_name()
a['blocks'][block_index]['segments'].append(d)
for c in range(signal_channels.size):
# use for AnalogSignal.annotations
d = {}
d['name'] = signal_channels['name'][c]
d['channel_id'] = signal_channels['id'][c]
a['blocks'][block_index]['segments'][seg_index]['signals'].append(d)
for c in range(unit_channels.size):
# use for SpikeTrain.annotations
d = {}
d['name'] = unit_channels['name'][c]
d['id'] = unit_channels['id'][c]
a['blocks'][block_index]['segments'][seg_index]['units'].append(d)
for c in range(event_channels.size):
# use for Event.annotations
d = {}
d['name'] = event_channels['name'][c]
d['id'] = event_channels['id'][c]
d['file_origin'] = self._source_name()
a['blocks'][block_index]['segments'][seg_index]['events'].append(d)
for c in range(signal_channels.size):
# use for Ch |
Krissbro/LondonGaymers | antilink/antilink.py | Python | gpl-3.0 | 4,545 | 0.00154 | import discord
from discord.ext import commands
from .utils import checks
from __main__ import send_cmd_help, settings
from cogs.utils.dataIO import dataIO
import os
import re
import asyncio
class Antilink:
"""Blocks Discord invite links from users who don't have the permission 'Manage Messages'"""
def __init__(self, bot):
self.bot = bot
self.location = 'data/antilink/settings.json'
self.json = dataIO.load_json(self.location)
self.regex = re.compile(r"<?(https?:\/\/)?(www\.)?(discord\.gg|discordapp\.com\/invite)\b([-a-zA-Z0-9/]*)>?")
self.regex_discordme = re.compile(r"<?(https?:\/\/)?(www\.)?(discord\.me\/)\b([-a-zA-Z0-9/]*)>?")
@commands.group(pass_context=True, no_pm=True)
@checks.admin_or_permissions(administrator=True)
async def antilinkset(self, ctx):
"""Manages the settings for antilink."""
serverid = ctx.message.server.id
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
if serverid not in self.json:
self.json[serverid] = {'toggle': False, 'message': '', 'dm': False}
@antilinkset.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(administrator=True)
async def toggle(self, ctx):
"""Enable/disables antilink in the server"""
serverid = ctx.message.server.id
if self.json[serverid]['toggle'] is True:
self.json[serverid]['toggle'] = False
await self.bot.say('Antilink is now disabled')
elif self.json[serverid]['toggle'] is False:
self.json[serverid]['toggle'] = True
await self.bot.say('Antilink is now enabled')
dataIO.save_json(self.location, self.json)
@antilinkset.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(administrator=True)
async def message(self, ctx, *, text):
"""Set the message for when the user sends a illegal discord link"""
serverid = ctx.message.server.id
self.json[serverid]['message'] = text
dataIO.save_json(self.location, self.json)
await self.bot.say('Message is set')
if self.json[serverid]['dm'] is False:
| await self.bot.say('Remember: Direct Messages on removal is disabled!\nEnable it with ``antilinkset toggledm``')
@antilinkset.command(pass_context=True, no_pm=True)
| @checks.admin_or_permissions(administrator=True)
async def toggledm(self, ctx):
serverid = ctx.message.server.id
if self.json[serverid]['dm'] is False:
self.json[serverid]['dm'] = True
await self.bot.say('Enabled DMs on removal of invite links')
elif self.json[serverid]['dm'] is True:
self.json[serverid]['dm'] = False
await self.bot.say('Disabled DMs on removal of invite links')
dataIO.save_json(self.location, self.json)
async def _new_message(self, message):
"""Finds the message and checks it for regex"""
user = message.author
if message.server is None:
return
if message.server.id in self.json:
if self.json[message.server.id]['toggle'] is True:
if self.regex.search(message.content) is not None or self.regex_discordme.search(message.content) is not None:
roles = [r.name for r in user.roles]
bot_admin = settings.get_server_admin(message.server)
bot_mod = settings.get_server_mod(message.server)
if user.id == settings.owner:
return
elif bot_admin in roles:
return
elif bot_mod in roles:
return
elif user.permissions_in(message.channel).manage_messages is True:
return
else:
asyncio.sleep(0.5)
await self.bot.delete_message(message)
if self.json[message.server.id]['dm'] is True:
await self.bot.send_message(message.author, self.json[message.server.id]['message'])
def check_folder():
if not os.path.exists('data/antilink'):
os.makedirs('data/antilink')
def check_file():
f = 'data/antilink/settings.json'
if dataIO.is_valid_json(f) is False:
dataIO.save_json(f, {})
def setup(bot):
check_folder()
check_file()
n = Antilink(bot)
bot.add_cog(n)
bot.add_listener(n._new_message, 'on_message')
|
pombredanne/spaCy | spacy/tests/tokens/test_array.py | Python | mit | 1,141 | 0 | # coding: utf-8
from __future__ import unicode_literals
import pytest
from spacy import attrs
def test_attr_of_token(EN):
text = u'An example sentence.'
tokens = EN(text, tag=True, parse=False)
example = EN.vocab[u'example']
assert example.orth != example.shape
feats_array = tokens.to_array((attrs.ORTH, attrs.SHAPE))
assert feats_array[0][0] != feats_array[0][1]
@pytest.mark.models
def test_tag(EN):
tex | t = u'A nice sentence.'
tokens = EN(text)
assert tokens[0].tag != tokens[1].tag != tokens[2].tag != tokens[3].tag
feats_array = tokens.to_array((attrs.ORTH, attrs.TAG))
assert feats_array[0][1] == tokens[0].tag
assert feats_arr | ay[1][1] == tokens[1].tag
assert feats_array[2][1] == tokens[2].tag
assert feats_array[3][1] == tokens[3].tag
@pytest.mark.models
def test_dep(EN):
text = u'A nice sentence.'
tokens = EN(text)
feats_array = tokens.to_array((attrs.ORTH, attrs.DEP))
assert feats_array[0][1] == tokens[0].dep
assert feats_array[1][1] == tokens[1].dep
assert feats_array[2][1] == tokens[2].dep
assert feats_array[3][1] == tokens[3].dep
|
DavidBarishev/ClashOfClansBot | ClashOfClansBot/Plugins/Tasks/TrainTroops/_TrainTroopsTask.py | Python | mit | 131 | 0 | from ClashOfClansBot.Interfaces.BotAPI import ITask
class TrainTr | oopsTask(ITask | ):
def __init__(self, **troops):
pass
|
lmmsoft/LeetCode | LeetCode-Algorithm/1002. Find Common Characters/solution.py | Python | gpl-2.0 | 1,499 | 0 | class Solution(object):
def commonCha | rs(self, A):
"""
:type A: List[str]
:rtype: List[str]
"""
# 把每个字符串编程 char,count的dict放入数组
# 可以用直接用 str.count统计26个字母的个数
res: list = []
for a in A:
d = {}
for c in a:
d[c] = d.get(c, 0) + 1
res.append(d)
# chars = ['a','b', ... 'z' ]
| chars = []
for asi in range(ord('a'), ord('z') + 1):
c = chr(asi)
chars.append(c)
# print(c)
# 每个字母求个数最大值
ans = {}
for c in chars:
mmax = 99999999
for r in res:
mmax = min(mmax, r.get(c, 0))
ans[c] = mmax
# 输出,可以优化为 res += ch * maxx
ret = []
for k, v in ans.items():
for i in range(v):
ret.append(k)
return ret
class Solution2(object):
def commonChars(self, A):
"""
:type A: List[str]
:rtype: List[str]
"""
from collections import Counter
count = Counter(A[0]) # Counter({'l': 2, 'b': 1, 'e': 1, 'a': 1})
for i in range(1, len(A)):
count &= Counter(A[i])
return list(count.elements())
if __name__ == '__main__':
print(Solution().commonChars(["bella", "label", "roller"]))
print(Solution().commonChars(["cool", "lock", "cook"]))
|
jhbez/ProjectV | app/v/buy/rest/pendingRst.py | Python | apache-2.0 | 5,665 | 0.003001 | # -*- coding: utf-8 -*-
# Copyright 2017 ProjectV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask_restful import Resource
from flask import request, g
from v.tools.exception import ExceptionRest
from v.tools.v import processing_rest_exception, processing_rest_success, \
type_of_insert_rest, type_of_update_rest
from v.tools.validate import validate_rest
from v.buy.model.pendingMdl import PendingMdl
class PendingListRst(Resource, PendingMdl):
def get(self):
try:
_qrg = """
SELECT array_to_json(array_agg(row_to_json(t) )) as collection
FROM ( SELECT id, name, description, completed_at FROM %s WHERE deleted_at IS NULL AND completed_at
is NULL AND create_id=%s )t;
""" % (self._table, g.user.id,)
g.db_conn.execute(_qrg)
if g.db_conn.count() > 0:
_collection = g.db_conn.one()[0]
if _collection:
_data = {self._table: _collection}
_get = processing_rest_success(data=_data)
else:
raise ExceptionRest(status_code=404, message="No se han encontrado resultados")
else:
raise ExceptionRest(status_code=404, message="No se han encontrado resultados")
except (Exception, ExceptionRest), e:
_get = processing_rest_exception(e)
return _get
def post(self):
_request = request.json
try:
_errors = validate_rest(fields=self._fields, request=_request)
if not _errors:
_col, _val = type_of_insert_rest(self._fields, _request)
_qrp = """
INSERT INTO %s (create_id , %s ) VALUES (%s, %s)
RETURNING (select row_to_json(collection) FROM (VALUES(id)) collection(id));
""" % (self._table, _col, g.user.id, _val)
g.db_conn.execute(_qrp)
if g.db_conn.count() > 0:
_data = {self._table: g.db_conn.one()}
_post = processing_rest_success(data=_data, message='Fue creado correctamente',
| status_code=201)
else:
raise ExceptionRest(status_code=500, message='No se ha podido registrar')
else:
raise ExceptionRest(status_code=400, errors=_errors)
e | xcept (Exception, ExceptionRest), e:
_post = processing_rest_exception(e)
return _post
class PendingRst(Resource, PendingMdl):
def get(self, id):
try:
_qrg = """
SELECT array_to_json(array_agg(row_to_json(t) )) as collection
FROM ( SELECT id, name, description, completed_at FROM %s WHERE deleted_at IS NULL AND
completed_at is NULL and create_id=%s and id = %s)t;
""" % (self._table, g.user.id, id,)
g.db_conn.execute(_qrg)
if g.db_conn.count() > 0:
_collection = g.db_conn.one()[0]
if _collection:
_data = {self._table: _collection}
_get = processing_rest_success(data=_data)
else:
raise ExceptionRest(status_code=404, message="No se han encontrado resultados")
else:
raise ExceptionRest(status_code=404, message="No se han encontrado resultados")
except (Exception, ExceptionRest), e:
_get = processing_rest_exception(e)
return _get
def put(self, id):
_request = request.json
try:
_errors = validate_rest(fields=self._fields, request=_request, method='put')
if not _errors:
_val = type_of_update_rest(self._fields, _request)
_qrp = "UPDATE %s SET %s WHERE id=%s;" % (self._table, _val, id,)
g.db_conn.execute(_qrp)
if g.db_conn.count() > 0:
_put = processing_rest_success(status_code=201, message="El registro fue actualizado correctamente")
else:
raise ExceptionRest(status_code=404,
message="No se ha podido encontrar el registro, para actualizar.")
else:
raise ExceptionRest(status_code=400, errors=_errors)
except (Exception, ExceptionRest), e:
_put = processing_rest_exception(e)
return _put
def delete(self, id):
try:
_qrd = "UPDATE %s SET deleted_at=current_timestamp WHERE id=%s;" % (self._table, id,)
g.db_conn.execute(_qrd)
if g.db_conn.count() > 0:
_delete = processing_rest_success(status_code=201, message="El registro fue eliminado correctamente")
else:
raise ExceptionRest(status_code=404,
message="No se ha podido encontrar el registro, para eliminar.")
except (Exception, ExceptionRest), e:
_delete = processing_rest_exception(e)
return _delete
|
heryii/snabb | src/program/lwaftr/tests/subcommands/monitor_test.py | Python | apache-2.0 | 2,186 | 0.00183 | """
Test the "snabb lwaftr monitor" subcommand. Needs a NIC name and a TAP interface.
1. Execute "snabb lwaftr run" in on-a-stick mode and with the mirror option set.
2. Run "snabb lwaftr monitor" to set the counter and check its output.
"""
from random import randint
from subprocess import call, check_call
import unittest
from test_env import DATA_DIR, SNABB_CMD, BaseTestCase, nic_names
SNABB_PCI0 = nic_names()[0]
@unittest.skipUnless(SNABB_PCI0, 'NIC not configured')
class TestMonitor(BaseTestCase):
daemon_args = [
str(SNABB_CMD), 'lwaftr', 'run',
'--bench-file', '/dev/null',
'--conf', str(DATA_DIR / 'icmp_on_fail.conf'),
'--on-a-stick', SNABB_PCI0,
'--mirror', # TAP interface name added in setUpClass.
]
monitor_args = (str(SNABB_CMD), 'lwaftr', 'monitor', 'all')
# Use setUpClass to only setup the daemon once for all tests.
@classmethod
def setUpClass(cls):
# Create the TAP interface and append its name to daemon_args
# before calling the superclass' setUpClass, which needs both.
# 'tapXXXXXX' where X is a 0-9 digit.
cls.tap_name = 'tap%s' % randint(100000, 999999)
check_call(('ip', 'tuntap', 'add', cls.tap_name, 'mode', 'tap'))
cls.daemon_args.append(cls.tap_name)
try:
super(TestMonitor, cls).setUpClass()
except Exception:
# Clean up the TAP interface.
call(('ip', 'tuntap', 'delete', cls.tap_name, 'mode', 'tap'))
raise
def test_monitor(self):
monitor_args = list(self.monitor_args)
monitor_args.append(str(self.daemon.pid))
output = self.run_cmd(monitor_args)
self.assertIn(b'Mirror address set', output,
b'\n'.join((b'OUTPUT', output)) | )
self.assertIn(b'255.255.255.255', output,
b'\n'.join((b'OUTPUT', output)))
@classmethod
def tearDownClass(cls):
try:
super(TestMonitor, cls).tearDownClass()
finally:
# Clean up th | e TAP interface.
call(('ip', 'tuntap', 'delete', cls.tap_name, 'mode', 'tap'))
if __name__ == '__main__':
unittest.main()
|
dapengchen123/code_v1 | reid/datasets/market1501.py | Python | mit | 3,563 | 0.000561 | from __future__ import print_function, absolute_import
import os.path as osp
from ..utils.data import Da | taset
from ..utils.osutils import mkdir_if_missing
from ..utils.serialization import write_json
class Market1501(Dataset):
url = 'https://drive.google.com/file/d/0B8-rUzbwVRk0c054eEozWG9COHM/view'
md5 = '65005ab7d12ec1c44de4eeafe813e68a'
def __init__(self, root, split_id=0, num_val=0.3, download=False):
super(Market1501, self).__init__(root, split_id=split_id)
if download:
self.download()
if not s | elf._check_integrity():
raise RuntimeError("Dataset not found or corrupted. " +
"You can use download=True to download it.")
self.load(num_val)
def download(self):
if self._check_integrity():
print("Files already downloaded and verified")
return
import re
import hashlib
import shutil
from glob import glob
from zipfile import ZipFile
raw_dir = osp.join(self.root, 'raw')
mkdir_if_missing(raw_dir)
# Download the raw zip file
fpath = osp.join(raw_dir, 'Market-1501-v15.09.15.zip')
if osp.isfile(fpath) and \
hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5:
print("Using downloaded file: " + fpath)
else:
raise RuntimeError("Please download the dataset manually from {} "
"to {}".format(self.url, fpath))
# Extract the file
exdir = osp.join(raw_dir, 'Market-1501-v15.09.15')
if not osp.isdir(exdir):
print("Extracting zip file")
with ZipFile(fpath) as z:
z.extractall(path=raw_dir)
# Format
images_dir = osp.join(self.root, 'images')
mkdir_if_missing(images_dir)
# 1501 identities (+1 for background) with 6 camera views each
identities = [[[] for _ in range(6)] for _ in range(1502)]
def register(subdir, pattern=re.compile(r'([-\d]+)_c(\d)')):
fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpg')))
pids = set()
for fpath in fpaths:
fname = osp.basename(fpath)
pid, cam = map(int, pattern.search(fname).groups())
if pid == -1: continue # junk images are just ignored
assert 0 <= pid <= 1501 # pid == 0 means background
assert 1 <= cam <= 6
cam -= 1
pids.add(pid)
fname = ('{:08d}_{:02d}_{:04d}.jpg'
.format(pid, cam, len(identities[pid][cam])))
identities[pid][cam].append(fname)
shutil.copy(fpath, osp.join(images_dir, fname))
return pids
trainval_pids = register('bounding_box_train')
gallery_pids = register('bounding_box_test')
query_pids = register('query')
assert query_pids <= gallery_pids
assert trainval_pids.isdisjoint(gallery_pids)
# Save meta information into a json file
meta = {'name': 'Market1501', 'shot': 'multiple', 'num_cameras': 6,
'identities': identities}
write_json(meta, osp.join(self.root, 'meta.json'))
# Save the only training / test split
splits = [{
'trainval': sorted(list(trainval_pids)),
'query': sorted(list(query_pids)),
'gallery': sorted(list(gallery_pids))}]
write_json(splits, osp.join(self.root, 'splits.json'))
|
DiscourseDB/discoursedb-core | composeddb/brat/config.py | Python | gpl-2.0 | 3,622 | 0.005246 | # This configuration was automatically generated by install.sh
from os.path import dirname, join as path_join
# This configuration file specifies the global setup of the brat
# server. It is recommended that you use the installation script
# instead of editing this file directly. To do this, run the following
# command in the brat directory:
#
# ./install.sh
#
# if you wish to configure the server manually, you will first need to
# make sure that this file appears as config.py in the brat server
# root directory. If this file is currently named config_template.py,
# you can do this as follows:
#
# cp config_template.py config.py
#
# you will then need to edit config.py, minimally replacing all
# instances of the string CHANGE_ME with their appropriate values.
# Please note that these values MUST appear in quotes, e.g. as in
#
# ADMIN_CONTACT_EMAIL = 'cbogart@cs.cmu.edu'
# Contact email for users to use if the software encounters errors
ADMIN_CONTACT_EMAIL = 'cbogart@cs.cmu.edu'
# Directories required by the brat server:
#
# BASE_DIR: directory in which the server is installed
# DATA_DIR: directory containin | g texts and annotations
# WORK_DIR: directory that the server uses for temporary files
#
BASE_DIR = dirname(__file__)
DATA_DIR = path_join(BASE_DIR, 'data' | )
WORK_DIR = path_join(BASE_DIR, 'work')
# If you have installed brat as suggested in the installation
# instructions, you can set up BASE_DIR, DATA_DIR and WORK_DIR by
# removing the three lines above and deleting the initial '#'
# character from the following four lines:
#from os.path import dirname, join
#BASE_DIR = dirname(__file__)
#DATA_DIR = path_join(BASE_DIR, 'data')
#WORK_DIR = path_join(BASE_DIR, 'work')
# To allow editing, include at least one USERNAME:PASSWORD pair below.
# The format is the following:
#
# 'USERNAME': 'PASSWORD',
#
# For example, user `editor` and password `annotate`:
#
# 'editor': 'annotate',
USER_PASSWORD = {
'discoursedb': 'discoursedb'
}
########## ADVANCED CONFIGURATION OPTIONS ##########
# The following options control advanced aspects of the brat server
# setup. It is not necessary to edit these in a basic brat server
# installation.
### MAX_SEARCH_RESULT_NUMBER
# It may be a good idea to limit the max number of results to a search
# as very high numbers can be demanding of both server and clients.
# (unlimited if not defined or <= 0)
MAX_SEARCH_RESULT_NUMBER = 1000
### DEBUG
# Set to True to enable additional debug output
DEBUG = False
### LOG_LEVEL
# If you are a developer you may want to turn on extensive server
# logging by enabling LOG_LEVEL = LL_DEBUG
LL_DEBUG, LL_INFO, LL_WARNING, LL_ERROR, LL_CRITICAL = range(5)
LOG_LEVEL = LL_WARNING
#LOG_LEVEL = LL_DEBUG
### BACKUP_DIR
# Define to enable backups
# from os.path import join
#BACKUP_DIR = join(WORK_DIR, 'backup')
try:
assert DATA_DIR != BACKUP_DIR, 'DATA_DIR cannot equal BACKUP_DIR'
except NameError:
pass # BACKUP_DIR most likely not defined
### SVG_CONVERSION_COMMANDS
# If export to formats other than SVG is needed, the server must have
# a software capable of conversion like inkscape set up, and the
# following must be defined.
# (SETUP NOTE: at least Inkscape 0.46 requires the directory
# ".gnome2/" in the apache home directory and will crash if it doesn't
# exist.)
#SVG_CONVERSION_COMMANDS = [
# ('png', 'inkscape --export-area-drawing --without-gui --file=%s --export-png=%s'),
# ('pdf', 'inkscape --export-area-drawing --without-gui --file=%s --export-pdf=%s'),
# ('eps', 'inkscape --export-area-drawing --without-gui --file=%s --export-eps=%s'),
#]
|
mph-/lcapy | doc/examples/networks/seriesparallelRL1.py | Python | lgpl-2.1 | 110 | 0.009091 | from lcapy import R, L
n = (R('R1') | L('L1')) + (R('R2') | L( | 'L2'))
n.draw(__file__.replace('.py', '.png'))
| |
seewindcn/tortoisehg | src/mercurial/demandimport.py | Python | gpl-2.0 | 9,674 | 0.001964 | # demandimport.py - global demand-loading of modules for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''
demandimport - automatic demandloading of modules
To enable this module, do:
import demandimport; demandimport.enable()
Imports of the following forms will be demand-loaded:
import a, b.c
import a.b as c
from a import b,c # a will be loaded immediately
These imports will not be delayed:
from a import *
b = __import__(a)
'''
from __future__ import absolute_import
import contextlib
import os
import sys
# __builtin__ in Python 2, builtins in Python 3.
try:
import __builtin__ as builtins
except ImportError:
import builtins
contextmanager = contextlib.contextmanager
_origimport = __import__
nothing = object()
# Python 3 doesn't have relative imports nor level -1.
level = -1
if sys.version_info[0] >= 3:
level = 0
_import = _origimport
def _hgextimport(importfunc, name, globals, *args, **kwargs):
try:
return importfunc(name, globals, *args, **kwargs)
except ImportError:
if not globals:
raise
# extensions are loaded with "hgext_" prefix
hgextname = 'hgext_%s' % name
nameroot = hgextname.split('.', 1)[0]
contextroot = globals.get('__name__', '').split('.', 1)[0]
if nameroot != contextroot:
raise
# retry to import with "hgext_" prefix
return impo | rtfunc(hgextname, globals, *args, **kwargs)
class _demandmod(object):
"""module demand-loader and proxy"""
def __init__(self, name, globals, locals, level=level):
if '.' in name:
head, rest = name.split('.', 1)
after = [rest]
else:
head = name
after = []
object.__setattr__(self, "_data",
| (head, globals, locals, after, level, set()))
object.__setattr__(self, "_module", None)
def _extend(self, name):
"""add to the list of submodules to load"""
self._data[3].append(name)
def _addref(self, name):
"""Record that the named module ``name`` imports this module.
References to this proxy class having the name of this module will be
replaced at module load time. We assume the symbol inside the importing
module is identical to the "head" name of this module. We don't
actually know if "as X" syntax is being used to change the symbol name
because this information isn't exposed to __import__.
"""
self._data[5].add(name)
def _load(self):
if not self._module:
head, globals, locals, after, level, modrefs = self._data
mod = _hgextimport(_import, head, globals, locals, None, level)
# load submodules
def subload(mod, p):
h, t = p, None
if '.' in p:
h, t = p.split('.', 1)
if getattr(mod, h, nothing) is nothing:
setattr(mod, h, _demandmod(p, mod.__dict__, mod.__dict__))
elif t:
subload(getattr(mod, h), t)
for x in after:
subload(mod, x)
# Replace references to this proxy instance with the actual module.
if locals and locals.get(head) == self:
locals[head] = mod
for modname in modrefs:
modref = sys.modules.get(modname, None)
if modref and getattr(modref, head, None) == self:
setattr(modref, head, mod)
object.__setattr__(self, "_module", mod)
def __repr__(self):
if self._module:
return "<proxied module '%s'>" % self._data[0]
return "<unloaded module '%s'>" % self._data[0]
def __call__(self, *args, **kwargs):
raise TypeError("%s object is not callable" % repr(self))
def __getattribute__(self, attr):
if attr in ('_data', '_extend', '_load', '_module', '_addref'):
return object.__getattribute__(self, attr)
self._load()
return getattr(self._module, attr)
def __setattr__(self, attr, val):
self._load()
setattr(self._module, attr, val)
_pypy = '__pypy__' in sys.builtin_module_names
def _demandimport(name, globals=None, locals=None, fromlist=None, level=level):
if not locals or name in ignore or fromlist == ('*',):
# these cases we can't really delay
return _hgextimport(_import, name, globals, locals, fromlist, level)
elif not fromlist:
# import a [as b]
if '.' in name: # a.b
base, rest = name.split('.', 1)
# email.__init__ loading email.mime
if globals and globals.get('__name__', None) == base:
return _import(name, globals, locals, fromlist, level)
# if a is already demand-loaded, add b to its submodule list
if base in locals:
if isinstance(locals[base], _demandmod):
locals[base]._extend(rest)
return locals[base]
return _demandmod(name, globals, locals, level)
else:
# There is a fromlist.
# from a import b,c,d
# from . import b,c,d
# from .a import b,c,d
# level == -1: relative and absolute attempted (Python 2 only).
# level >= 0: absolute only (Python 2 w/ absolute_import and Python 3).
# The modern Mercurial convention is to use absolute_import everywhere,
# so modern Mercurial code will have level >= 0.
# The name of the module the import statement is located in.
globalname = globals.get('__name__')
def processfromitem(mod, attr):
"""Process an imported symbol in the import statement.
If the symbol doesn't exist in the parent module, it must be a
module. We set missing modules up as _demandmod instances.
"""
symbol = getattr(mod, attr, nothing)
if symbol is nothing:
symbol = _demandmod(attr, mod.__dict__, locals, level=1)
setattr(mod, attr, symbol)
# Record the importing module references this symbol so we can
# replace the symbol with the actual module instance at load
# time.
if globalname and isinstance(symbol, _demandmod):
symbol._addref(globalname)
if level >= 0:
# The "from a import b,c,d" or "from .a import b,c,d"
# syntax gives errors with some modules for unknown
# reasons. Work around the problem.
if name:
return _hgextimport(_origimport, name, globals, locals,
fromlist, level)
if _pypy:
# PyPy's __import__ throws an exception if invoked
# with an empty name and no fromlist. Recreate the
# desired behaviour by hand.
mn = globalname
mod = sys.modules[mn]
if getattr(mod, '__path__', nothing) is nothing:
mn = mn.rsplit('.', 1)[0]
mod = sys.modules[mn]
if level > 1:
mn = mn.rsplit('.', level - 1)[0]
mod = sys.modules[mn]
else:
mod = _hgextimport(_origimport, name, globals, locals,
level=level)
for x in fromlist:
processfromitem(mod, x)
return mod
# But, we still need to support lazy loading of standard library and 3rd
# party modules. So handle level == -1.
mod = _hgextimport(_origimport, name, globals, locals)
# recurse down the module chain
for comp in name.split('.')[1:]:
if getattr(mod, comp, nothing) is nothing:
setattr(mod, comp,
_demandmod(comp, mod.__dict__, mod.__dict__))
mod = getattr(mod, comp)
for x in fromlist:
|
danielsamuels/django-registration | registration/tests/test_views.py | Python | bsd-3-clause | 1,035 | 0 | from django.core.urlresolvers import reverse
from django.test import override_settings, TestCase
from ..models import RegistrationProfile
class ActivationViewTests(TestCase):
urls = 'registration.tests.urls'
@ove | rride_settings(ACCOUNT_ACTIVATION_DAYS=7)
def test_activation(self):
"""
Activation of an account functions properly when using a
simple string URL as the success redirect.
"""
resp = self.client.post(reverse('registration_register'),
| data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'})
profile = RegistrationProfile.objects.get(user__username='bob')
resp = self.client.get(reverse(
'registration_activate',
args=(),
kwargs={'activation_key': profile.activation_key})
)
self.assertRedirects(resp, '/')
|
ncdesouza/bookworm | env/lib/python2.7/site-packages/werkzeug/__init__.py | Python | gpl-3.0 | 7,216 | 0.00194 | # -*- coding: utf-8 -*-
"""
werkzeug
~~~~~~~~
Werkzeug is the Swiss Army knife of Python web development.
It provides useful classes and functions for any WSGI application to make
the life of a python web developer much easier. All of the provided
classes are independent from each other so you can mix it with any other
library.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from types import ModuleType
import sys
from werkzeug._compat import iteritems
# the version. Usually set automatically by a script.
__version__ = '0.9.6'
# This import magic raises concerns quite often which is why the implementation
# and motivation is explained here in detail now.
#
# The majority of the functions and classes provided by Werkzeug work on the
# HTTP and WSGI layer. There is no useful grouping for those which is why
# they are all importable from "werkzeug" instead of the mod_auth where they are
# implemented. The downside of that is, that now everything would be loaded at
# once, even if unused.
#
# The implementation of a lazy-loading module in this file replaces the
# werkzeug package when imported from within. Attribute access to the werkzeug
# module will then lazily import from the mod_auth that implement the objects.
# import mapping to objects in other mod_auth
all_by_module = {
'werkzeug.debug': ['DebuggedApplication'],
'werkzeug.local': ['Local', 'LocalManager', 'LocalProxy',
'LocalStack', 'release_local'],
'werkzeug.serving': ['run_simple'],
'werkzeug.test': ['Client', 'EnvironBuilder', 'create_environ',
'run_wsgi_app'],
'werkzeug.testapp': ['test_app'],
'werkzeug.exceptions': ['abort', 'Aborter'],
'werkzeug.urls': ['url_decode', 'url_encode', 'url_quote',
'url_quote_plus', 'url_unquote',
'url_unquote_plus', 'url_fix', 'Href',
'iri_to_uri', 'uri_to_iri'],
'werkzeug.formparser': ['parse_form_data'],
'werkzeug.utils': ['escape', 'environ_property',
'append_slash_redirect', 'redirect',
'cached_property', 'import_string',
'dump_cookie', 'parse_cookie', 'unescape',
'format_string', 'find_modules', 'header_property',
'html', 'xhtml', 'HTMLBuilder',
'validate_arguments', 'ArgumentValidationError',
'bind_arguments', 'secure_filename'],
'werkzeug.wsgi': ['get_current_url', 'get_host', 'pop_path_info',
'peek_path_info', 'SharedDataMiddleware',
'DispatcherMiddleware', 'ClosingIterator',
'FileWrapper', 'make_line_iter', 'LimitedStream',
'responder', 'wrap_file', 'extract_path_info'],
'werkzeug.datastructures': ['MultiDict', 'CombinedMultiDict', 'Headers',
'EnvironHeaders', 'ImmutableList',
'ImmutableDict', 'ImmutableMultiDict',
'TypeConversionDict', 'ImmutableTypeConversionDict',
'Accept', 'MIMEAccept', 'CharsetAccept',
'LanguageAccept', 'RequestCacheControl',
'ResponseCacheControl', 'ETags', 'HeaderSet',
'WWWAuthenticate', 'Autho | rization',
'FileMultiDict', 'CallbackDict', 'FileStorage',
'OrderedMultiDict', 'ImmutableOrderedMultiDict'],
'werkzeug.useragents': ['UserAgent'],
'werkzeug.http': ['parse_etags', 'parse_date', 'http_date',
'cookie_date', 'parse_cache_control_header',
'is_resource_modified', 'parse_accept_header',
'parse_set_header', 'quote_etag', 'unqu | ote_etag',
'generate_etag', 'dump_header',
'parse_list_header', 'parse_dict_header',
'parse_authorization_header',
'parse_www_authenticate_header',
'remove_entity_headers', 'is_entity_header',
'remove_hop_by_hop_headers', 'parse_options_header',
'dump_options_header', 'is_hop_by_hop_header',
'unquote_header_value',
'quote_header_value', 'HTTP_STATUS_CODES'],
'werkzeug.wrappers': ['BaseResponse', 'BaseRequest', 'Request',
'Response', 'AcceptMixin', 'ETagRequestMixin',
'ETagResponseMixin', 'ResponseStreamMixin',
'CommonResponseDescriptorsMixin',
'UserAgentMixin', 'AuthorizationMixin',
'WWWAuthenticateMixin',
'CommonRequestDescriptorsMixin'],
'werkzeug.security': ['generate_password_hash', 'check_password_hash'],
# the undocumented easteregg ;-)
'werkzeug._internal': ['_easteregg']
}
# mod_auth that should be imported when accessed as attributes of werkzeug
attribute_modules = frozenset(['exceptions', 'routing', 'script'])
object_origins = {}
for module, items in iteritems(all_by_module):
for item in items:
object_origins[item] = module
class module(ModuleType):
"""Automatically import objects from the mod_auth."""
def __getattr__(self, name):
if name in object_origins:
module = __import__(object_origins[name], None, None, [name])
for extra_name in all_by_module[module.__name__]:
setattr(self, extra_name, getattr(module, extra_name))
return getattr(module, name)
elif name in attribute_modules:
__import__('werkzeug.' + name)
return ModuleType.__getattribute__(self, name)
def __dir__(self):
"""Just show what we want to show."""
result = list(new_module.__all__)
result.extend(('__file__', '__path__', '__doc__', '__all__',
'__docformat__', '__name__', '__path__',
'__package__', '__version__'))
return result
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules['werkzeug']
# setup the new module and patch it into the dict of loaded mod_auth
new_module = sys.modules['werkzeug'] = module('werkzeug')
new_module.__dict__.update({
'__file__': __file__,
'__package__': 'werkzeug',
'__path__': __path__,
'__doc__': __doc__,
'__version__': __version__,
'__all__': tuple(object_origins) + tuple(attribute_modules),
'__docformat__': 'restructuredtext en'
})
# Due to bootstrapping issues we need to import exceptions here.
# Don't ask :-(
__import__('werkzeug.exceptions')
|
euhackathon/commission-today-api | backend/api/resources.py | Python | mit | 2,974 | 0.001345 | from tastypie.constants import ALL
from tastypie.fields import ToOneField
from tastypie.resources import ModelResource
from backend.models import Portofolio, Member, Meeting, Organization
import calendar
from haystack.query import SearchQuerySet
from django.core.paginator import Paginator, InvalidPage
from tastypie.utils import trailing_slash
from django.http import Http404
from django.conf.urls import url
class PortofolioResource(ModelResource):
class Meta:
queryset = Portofolio.objects.all()
allowed_methods = ['get']
def dehydrate(self, bundle):
bundle = super(PortofolioResource, self).dehydrate(bundle)
shorthand = bundle.data.get('shorthand', '')
if shorthand != '':
bundle.data['name'] = shorthand
bundle.data.pop('shorthand')
return bundle
class MemberResource(ModelResource):
portofolio = ToOneField(PortofolioResource, 'portofolio', full=True)
class Meta:
limit = 1000
queryset = Member.objects.all()
allowed_methods = ['get']
class OrganizationResource(ModelResource):
class Meta:
limit = 1000
queryset = Organization.objects.all()
allowed_methods = ['get']
def dehydrate(self, bundle):
money = bundle.data.get('money')
if money:
bundle.data['money'] = "{:,}".format(money)
return bundle
class MeetingResource(ModelResource):
member = ToOneField(MemberResource, 'member', full=True)
organization = ToOneField(OrganizationResource, 'organization', full=True,
null=True)
class Meta:
limit = 1000
queryset = Meeting.objects.all()
allowed_methods = ['get']
filtering = {
'date': ALL,
'member': ALL
}
ordering = ['date']
def dehydrate_date(self, bundle):
"""Fucking UNIX timestamps."""
x = bundle.data['date']
return calendar.timegm(x.timetuple())
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/search%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_search'), name="api_get_search"),
]
def get_search(self, request, **kwargs):
self.method_check(request, allowed=['get'])
# Do the query.
sqs = SearchQuerySet().models(Meeting).load_all().auto_query | (request.GET.get('q', ''))
paginator = Paginator(sqs, 20)
try:
page = paginator.page(int(request.GET.get('page', 1)))
except InvalidPage:
raise Http404("Sorry, no results on that page.")
objects = []
for result in page.object_list:
bundle = self.build_bundle(obj=result.object, request=request)
bundle = self.full_dehydrate(bundle)
objects.append(bundle)
| object_list = {
'objects': objects,
}
return self.create_response(request, object_list)
|
norus/procstat-json | tornado/test/auth_test.py | Python | gpl-3.0 | 10,876 | 0.002023 | # These tests do not currently do much to verify the correct implementation
# of the openid/oauth protocols, they just exercise the major code paths
# and ensure that it doesn't blow up (e.g. with unicode/bytes issues in
# python 3)
from __future__ import absolute_import, division, with_statement
from tornado.auth import OpenIdMixin, OAuthMixin, OAuth2Mixin, TwitterMixin
from tornado.escape import json_decode
from tornado.testing import AsyncHTTPTestCase
from tornado.util import b
from tornado.web import RequestHandler, Application, asynchronous
class OpenIdClientLoginHandler(RequestHandler, OpenIdMixin):
def initialize(self, test):
self._OPENID_ENDPOINT = test.get_url('/openid/server/authenticate')
@asynchronous
def get(self):
if self.get_argument('openid.mode', None):
self.get_authenticated_user(
self.on_user, http_client=self.settings['http_client'])
return
self.authenticate_redirect()
def on_user(self, user):
if user is None:
raise Exception("user is None")
self.finish(user)
class OpenIdServerAuthenticateHandler(RequestHandler):
def post(self):
if self.get_argument('openid.mode') != 'check_authentication':
raise Exception("incorrect openid.mode %r")
self.write('is_valid:true')
class OAuth1ClientLoginHandler(RequestHandler, OAuthMixin):
def initialize(self, test, version):
self._OAUTH_VERSION = version
self._OAUTH_REQUEST_TOKEN_URL = test.get_url('/oauth1/server/request_token')
self._OAUTH_AUTHORIZE_URL = test.get_url('/oauth1/server/authorize')
self._OAUTH_ACCESS_TOKEN_URL = test.get_url('/oauth1/server/access_token')
def _oauth_consumer_token(self):
return dict(key='asdf', secret='qwer')
@asynchronous
def get(self):
if self.get_argument('oauth_token', None):
self.get_authenticated_user(
self.on_user, http_client=self.settings['http_client'])
return
self.authorize_redirect(http_client=self.settings['http_client'])
def on_user(self, user):
if user is None:
raise Exception("user is None")
self.finish(user)
def _oauth_get_user(self, access_token, callback):
if access_token != dict(key=b('uiop'), secret=b('5678')):
raise Exception("incorrect access token %r" % access_token)
callback(dict(email='foo@example.com'))
class OAuth1ClientRequestParametersHandler(RequestHandler, OAuthMixin):
def initialize(self, version):
self._OAUTH_VERSION = version
def _oauth_consumer_token(self):
return dict(key='asdf', secret='qwer')
def get(self):
params = self._oauth_request_parameters(
'http://www.example.com/api/asdf',
dict(key='uiop', secret='5678'),
parameters=dict(foo='bar'))
import urllib
urllib.urlencode(params)
self.write(params)
class OAuth1ServerRequestTokenHandler(RequestHandler):
def get(self):
self.write('oauth_token=zxcv&oauth_token_secret=1234')
class OAuth1ServerAccessTokenHandler(RequestHandler):
def get(self):
self.write('oauth_token=uiop&oauth_token_secret=5678')
class OAuth2ClientLoginHandler(RequestHandler, OAuth2Mixin):
def initialize(self, test):
self._OAUTH_AUTHORIZE_URL = test.get_url('/oauth2/server/authorize')
def get(self):
self.authorize_redirect()
class TwitterClientLoginHandler(RequestHandler, TwitterMixin):
def initialize(self, test):
self._OAUTH_REQUEST_TOKEN_URL = test.get_url('/oauth1/server/request_token')
self._OAUTH_ACCESS_TOKEN_URL = test.get_url('/twitter/server/access_token')
self._OAUTH_AUTHORIZE_URL = test.get_url('/oauth1/server/authorize')
self._TWITTER_BASE_URL = test.get_url('/twitter/api')
@asynchronous
def get(self):
if self.get_argument("oauth_token", None):
self.get_authenticated_user(self.on_user)
return
self.authorize_redirect()
def on_user(self, user):
if user is None:
raise Exception("user is None")
self.finish(user)
def get_auth_http_client(self):
return self.settings['http_client']
class TwitterServerAccessTokenHandler(RequestHandler):
def get(self):
self.write('oauth_token=hjkl&oauth_token_secret=vbnm&screen_name=foo')
class TwitterServerShowUserHandler(RequestHandler):
def get(self, screen_name):
self.write(dict(screen_name=screen_name, name=screen_name.capitalize()))
class AuthTest(AsyncHTTPTestCase):
def get_app(self):
return Application(
| [
# test endpoints
('/openid/client/login', OpenIdClientLoginHandler, dict(test=self)),
('/oauth10/client/login', OAuth1ClientLoginHandler,
dict(test=self, version='1.0')),
('/oauth10/client/request_params',
OAuth1ClientRequestParametersHandler,
dict(version='1.0')),
('/oauth10a/client/logi | n', OAuth1ClientLoginHandler,
dict(test=self, version='1.0a')),
('/oauth10a/client/request_params',
OAuth1ClientRequestParametersHandler,
dict(version='1.0a')),
('/oauth2/client/login', OAuth2ClientLoginHandler, dict(test=self)),
('/twitter/client/login', TwitterClientLoginHandler, dict(test=self)),
# simulated servers
('/openid/server/authenticate', OpenIdServerAuthenticateHandler),
('/oauth1/server/request_token', OAuth1ServerRequestTokenHandler),
('/oauth1/server/access_token', OAuth1ServerAccessTokenHandler),
('/twitter/server/access_token', TwitterServerAccessTokenHandler),
(r'/twitter/api/users/show/(.*)\.json', TwitterServerShowUserHandler),
],
http_client=self.http_client,
twitter_consumer_key='test_twitter_consumer_key',
twitter_consumer_secret='test_twitter_consumer_secret')
def test_openid_redirect(self):
response = self.fetch('/openid/client/login', follow_redirects=False)
self.assertEqual(response.code, 302)
self.assertTrue(
'/openid/server/authenticate?' in response.headers['Location'])
def test_openid_get_user(self):
response = self.fetch('/openid/client/login?openid.mode=blah&openid.ns.ax=http://openid.net/srv/ax/1.0&openid.ax.type.email=http://axschema.org/contact/email&openid.ax.value.email=foo@example.com')
response.rethrow()
parsed = json_decode(response.body)
self.assertEqual(parsed["email"], "foo@example.com")
def test_oauth10_redirect(self):
response = self.fetch('/oauth10/client/login', follow_redirects=False)
self.assertEqual(response.code, 302)
self.assertTrue(response.headers['Location'].endswith(
'/oauth1/server/authorize?oauth_token=zxcv'))
# the cookie is base64('zxcv')|base64('1234')
self.assertTrue(
'_oauth_request_token="enhjdg==|MTIzNA=="' in response.headers['Set-Cookie'],
response.headers['Set-Cookie'])
def test_oauth10_get_user(self):
response = self.fetch(
'/oauth10/client/login?oauth_token=zxcv',
headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
response.rethrow()
parsed = json_decode(response.body)
self.assertEqual(parsed['email'], 'foo@example.com')
self.assertEqual(parsed['access_token'], dict(key='uiop', secret='5678'))
def test_oauth10_request_parameters(self):
response = self.fetch('/oauth10/client/request_params')
response.rethrow()
parsed = json_decode(response.body)
self.assertEqual(parsed['oauth_consumer_key'], 'asdf')
self.assertEqual(parsed['oauth_token'], 'uiop')
self.assertTrue('oauth_nonce' in parsed)
self.assertTrue('oauth_signature' in parsed)
def test_oauth10a_redirect(self):
|
kakRostropovich/EmmetOneLine | emmet_css_from_one_line.py | Python | mit | 908 | 0.007709 | import sublime, sublime_plugin, re
class EmmetCssFromOneLineCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
line_region = view.line(view.sel()[0])
line_str = view.substr(line_region)
left_padding = re.findall(r'^(\s+)', line_str)[0]
# find commands in line
props_a | rray = re.findall(r'([a-zA-Z0-9:!;().,?/\-+#]+)', line_str)
# Delete long string
view.replace(edit, line_region, '')
def runEmmet():
view.run_command("expand_abbreviation_by_tab")
# Processing first element
view.insert(edit, view.sel()[0].end(), left_padding + props_array[0])
runEmmet()
i = 1
while i < len(props_array):
| view.insert(edit, view.sel()[0].end(), '\n' + left_padding + props_array[i])
runEmmet()
i += 1
|
hongliang5623/sentry | src/sentry/models/event.py | Python | bsd-3-clause | 7,063 | 0.001133 | """
sentry.models.event
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import warnings
from collections import OrderedDict
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import (
Model, NodeField, BoundedIntegerField, BoundedPositiveIntegerField,
BaseManager, FlexibleForeignKey, sane_repr
)
from sentry.interfaces.base import get_interface
from sentry.utils.cache import memoize
from sentry.utils.safe import safe_execute
from sentry.utils.strings import truncatechars, strip
class Event(Model):
"""
An individual event.
"""
__core__ = False
group = FlexibleForeignKey('sentry.Group', blank=True, null=True, related_name="event_set")
event_id = models.CharField(max_length=32, null=True, db_column="message_id")
project = FlexibleForeignKey('sentry.Project', null=True)
message = models.TextField()
num_comments = BoundedPositiveIntegerField(default=0, null=True)
platform = models.CharField(max_length=64, null=True)
datetime = models.DateTimeField(default=timezone.now, db_index=True)
time_spent = BoundedIntegerField(null=True)
data = NodeField(blank=True, null=True)
objects = BaseManager()
class Meta:
app_label = 'sentry'
db_table = 'sentry_message'
verbose_name = _('message')
verbose_name_plural = _('messages')
unique_together = (('project', 'event_id'),)
index_together = (('group', 'datetime'),)
__repr__ = sane_repr('project_id', 'group_id')
def error(self):
message = strip(self.message)
if not message:
message = '<unlabeled message>'
else:
message = truncatechars(message.splitlines()[0], 100)
return message
error.short_description = _('error')
def has_two_part_message(self):
message = strip(self.message)
return '\n' in message or len(message) > 100
@property
def message_short(self):
message = strip(self.message)
if not message:
message = '<unlabeled message>'
else:
message = truncatechars(message.splitlines()[0], 100)
return message
@property
def team(self):
return self.project.team
@property
def organization(self):
return self.project.organization
@property
def version(self):
return self.data.get('version', '5')
@memoize
def ip_address(self):
user_data = self.data.get('sentry.interfaces.User')
if user_data:
value = user_data.get('ip_address')
if value:
return value
http_data = self.data.get('sentry.interfaces.Http')
if http_data and 'env' in http_data:
value = http_data['env'].get('REMOTE_ADDR')
if value:
return value
return None
@memoize
def user_ident(self):
"""
The identifier from a user is consid | ered from several interfaces.
In order:
- User.id
- User.email
- User.username
- Http.env.REMOTE_ADDR
"""
user_data = self.data.get('sentry.interfaces.User', self.data.get('user'))
if user_data:
ident = user_data.get('id')
if ident:
return 'id:%s' % (ident,)
ident = user_data.get('email')
if ident:
return 'email:%s' % (ident,)
iden | t = user_data.get('username')
if ident:
return 'username:%s' % (ident,)
ident = self.ip_address
if ident:
return 'ip:%s' % (ident,)
return None
def get_interfaces(self):
result = []
for key, data in self.data.iteritems():
try:
cls = get_interface(key)
except ValueError:
continue
value = safe_execute(cls.to_python, data)
if not value:
continue
result.append((key, value))
return OrderedDict((k, v) for k, v in sorted(result, key=lambda x: x[1].get_score(), reverse=True))
@memoize
def interfaces(self):
return self.get_interfaces()
def get_tags(self, with_internal=True):
try:
return sorted(
(t, v) for t, v in self.data.get('tags') or ()
if with_internal or not t.startswith('sentry:')
)
except ValueError:
# at one point Sentry allowed invalid tag sets such as (foo, bar)
# vs ((tag, foo), (tag, bar))
return []
tags = property(get_tags)
def get_tag(self, key):
for t, v in (self.data.get('tags') or ()):
if t == key:
return v
return None
def as_dict(self):
# We use a OrderedDict to keep elements ordered for a potential JSON serializer
data = OrderedDict()
data['id'] = self.event_id
data['project'] = self.project_id
data['release'] = self.get_tag('sentry:release')
data['platform'] = self.platform
data['culprit'] = self.group.culprit
data['message'] = self.message
data['datetime'] = self.datetime
data['time_spent'] = self.time_spent
data['tags'] = self.get_tags()
for k, v in sorted(self.data.iteritems()):
data[k] = v
return data
@property
def size(self):
data_len = len(self.message)
for value in self.data.itervalues():
data_len += len(repr(value))
return data_len
# XXX(dcramer): compatibility with plugins
def get_level_display(self):
warnings.warn('Event.get_level_display is deprecated. Use Event.tags instead.',
DeprecationWarning)
return self.group.get_level_display()
@property
def level(self):
warnings.warn('Event.level is deprecated. Use Event.tags instead.',
DeprecationWarning)
return self.group.level
@property
def logger(self):
warnings.warn('Event.logger is deprecated. Use Event.tags instead.',
DeprecationWarning)
return self.get_tag('logger')
@property
def site(self):
warnings.warn('Event.site is deprecated. Use Event.tags instead.',
DeprecationWarning)
return self.get_tag('site')
@property
def server_name(self):
warnings.warn('Event.server_name is deprecated. Use Event.tags instead.')
return self.get_tag('server_name')
@property
def culprit(self):
warnings.warn('Event.culprit is deprecated. Use Group.culprit instead.')
return self.group.culprit
@property
def checksum(self):
warnings.warn('Event.checksum is no longer used', DeprecationWarning)
return ''
|
aerospike/aerospike-client-python | test/new_tests/test_info_all.py | Python | apache-2.0 | 3,320 | 0.000602 | # -*- coding: utf-8 -*-
import pytest
import sys
from aerospike import exception as e
from .test_base_class import TestBaseClass
aerospike = pytest.importorskip("aerospike")
try:
import aerospike
except:
print("Please install aerospike python client.")
sys.exit(1)
@pytest.mark.usefixtures("as_connection", "connection_config")
class TestInfo(object):
def test_info_all(self):
request = "statistics"
nodes_info = self.as_connection.info_all(request)
assert nodes_info is not None
assert type(nodes_info) == dict
def test_info_all_with_None_policy(self):
request = "statistics"
nodes_info = self.as_connection.info_all(request, None)
assert nodes_info is not None
assert type(nodes_info) == dict
@pytest.mark.parametrize(
"container_type, container_name",
[
('namespaces', 'test'),
('sets', 'demo'),
('bins', 'names')
],
ids=("namespace", "sets", "bins")
)
def test_positive_info_all(self, container_type, container_name):
"""
Test to see whether a namespace, set,
and bin exist after a key is added
"""
key = ('test', 'demo', 'list_key')
rec = {'names': ['John', 'Marlen', 'Steve']}
self.as_connection.put(key, rec)
response = self.as_connection.info_all(container_type)
self.as_connection.remove(key)
found = False
for keys in response.keys():
for value in response[keys]:
if value is not None:
if container_name in value:
found = True
assert found
def test_info_all_with_config_for_statistics_and_policy(self):
request = "statistics"
policy = {'timeout': 1000}
hosts = [host for host in self.connection_config['hosts']]
nodes_info = self.as_connection.info_all(
request, policy)
assert nodes_info is not None
assert isinstance(nodes_info, dict)
def test_info_all_for_invalid_request(self):
request = "fake_request_string_not_real"
hosts = [host for host in self.connection_config['hosts']]
nodes_info = self.as_connection.info_all(request)
assert isinstance(nodes_info, dict)
assert nodes_info.values() is not None
def test_info_all_with_none_request(self):
'''
Test that sending None as the request raises an error
'''
request = None
with pytest.raises(e.ParamError):
self.as_connection.info_all(None)
def test_info_all_without_parameters(self):
with pytest.raises(TypeError) as err_info:
self.as_connection.info_all()
def test_info_all_without_connection(self):
"""
Test info positive for se | ts without connection
"""
client1 = aerospike.client(self.connection_config)
with pytest.raises(e.ClusterError) as err_info:
client1.info_all('sets')
def test_info_all_with_invalid_policy_type(self):
| '''
Test that sending a non dict/None as policy raises an error
'''
request = None
with pytest.raises(e.ParamError):
self.as_connection.info_all(None, [])
|
darkryder/django | tests/inspectdb/models.py | Python | bsd-3-clause | 3,075 | 0.000325 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class People(models.Model):
name = models.CharField(max_length=255)
parent = models.ForeignKey('self', models.CASCADE)
class Message(models.Model):
from_field = models.ForeignKey(People, models.CASCADE, db_column='from_id')
class PeopleData(models.Model):
people_pk = models.ForeignKey(People, models.CASCADE, primary_key=True)
ssn = models.CharField(max_length=11)
class PeopleMoreData(models.Model):
people_unique = models.ForeignKey(People, models.CASCADE, unique=True)
license = models.CharField(max_length=255)
class DigitsInColumnName(models.Model):
all_digits = models.CharField(max_length=11, db_column='123')
leading_digit = models.CharField(max_length=11, db_column='4extra')
leading_digits = models.CharField(max_length=11, db_column='45extra')
class SpecialName(models.Model):
field = models.IntegerField(db_column='field')
# Underscores
field_field_0 = models.IntegerField(db_column='Field_')
field_field_1 = models.IntegerField(db_column='Field__')
field_field_2 = models.IntegerField(db_column='__field')
# Other chars
prc_x = models.IntegerField(db_column='prc(%) x')
non_ascii = mo | dels.IntegerField(db_column='tamaño')
class Meta:
db_table = "inspectdb_special.table name"
class ColumnTypes(models.Model):
id = models.AutoField(primary_key=True)
big_int_field = models.BigIntegerField()
bool_field = models.BooleanField(default=False)
null_bool_field = models.NullBooleanField()
char_field = models.CharField(max_length=10)
null_char_field = models.CharField(max_length=10, blank=True, null=True)
comma_separated_int_field = models.Comma | SeparatedIntegerField(max_length=99)
date_field = models.DateField()
date_time_field = models.DateTimeField()
decimal_field = models.DecimalField(max_digits=6, decimal_places=1)
email_field = models.EmailField()
file_field = models.FileField(upload_to="unused")
file_path_field = models.FilePathField()
float_field = models.FloatField()
int_field = models.IntegerField()
gen_ip_adress_field = models.GenericIPAddressField(protocol="ipv4")
pos_int_field = models.PositiveIntegerField()
pos_small_int_field = models.PositiveSmallIntegerField()
slug_field = models.SlugField()
small_int_field = models.SmallIntegerField()
text_field = models.TextField()
time_field = models.TimeField()
url_field = models.URLField()
uuid_field = models.UUIDField()
class UniqueTogether(models.Model):
field1 = models.IntegerField()
field2 = models.CharField(max_length=10)
from_field = models.IntegerField(db_column='from')
non_unique = models.IntegerField(db_column='non__unique_column')
non_unique_0 = models.IntegerField(db_column='non_unique__column')
class Meta:
unique_together = [
('field1', 'field2'),
('from_field', 'field1'),
('non_unique', 'non_unique_0'),
]
|
gulopine/steel | examples/images/png.py | Python | bsd-3-clause | 6,898 | 0.00087 | import decimal
import sys
import steel
from steel import chunks
COMPRESSION_CHOICES = (
(0, 'zlib/deflate'),
)
RENDERING_INTENT_CHOICES = (
(0, 'Perceptual'),
(1, 'Relative Colorimetric'),
(2, 'Saturation'),
(3, 'Absolute Colorimetric'),
)
PHYSICAL_UNIT_CHOICES = (
| (0, '<Unknown Unit>'),
(1, 'Meters'),
)
FILTER_CHOICES = (
(0, 'Adaptive Filtering'),
)
INTERLACE_CHOICES = (
(0, '<No Interlacing>'),
(1, 'Adam7'),
)
class Chunk(chunks.Chunk, encoding='ascii'):
"""
A special chunk for PNG, which puts the size before the type
and includes a CRC field for verifying data integrity.
"""
| size = steel.Integer(size=4)
id = steel.String(size=4)
payload = chunks.Payload(size=size)
crc = steel.CRC32(first=id)
@property
def is_critical(self):
# Critical chunks will always have an uppercase letter for the
# first character in the type. Ancillary will always be lower.
return self.type[0].upper() == self.type[0]
@property
def is_public(self):
# Public chunks will always have an uppercase letter for the
# second character in the type. Private will always be lower.
return self.type[1].upper() == self.type[1]
@Chunk('IHDR')
class Header(steel.Structure):
width = steel.Integer(size=4)
height = steel.Integer(size=4)
bit_depth = steel.Integer(size=1, choices=(1, 2, 4, 8, 16))
color_type = steel.Integer(size=1, choices=(0, 2, 3, 4, 6))
compression_method = steel.Integer(size=1, choices=COMPRESSION_CHOICES)
filter_method = steel.Integer(size=1, choices=FILTER_CHOICES)
interlace_method = steel.Integer(size=1, choices=INTERLACE_CHOICES)
class HundredThousand(steel.Integer):
"""
Value is usable as a Decimal in Python, but stored
as an integer after multiplying the value by 100,000
"""
def __init__(self):
super(HundredThousand, self).__init__(size=4)
def decode(self, value):
value = super(HundredThousand, self).decode(value)
return decimal.Decimal('0.%05s' % value)
def encode(self, obj, value):
return super(HundredThousand, self).encode(obj, int(value * 100000))
@Chunk('cHRM')
class Chromaticity(steel.Structure):
white_x = HundredThousand()
white_y = HundredThousand()
red_x = HundredThousand()
red_y = HundredThousand()
green_x = HundredThousand()
green_y = HundredThousand()
blue_x = HundredThousand()
blue_y = HundredThousand()
@Chunk('gAMA')
class Gamma(steel.Structure):
value = HundredThousand()
@Chunk('iCCP')
class ICCProfile(steel.Structure):
name = steel.String(encoding='latin-1')
compression = steel.Integer(size=1, choices=COMPRESSION_CHOICES)
profile = steel.Bytes(size=steel.Remainder) # TODO: decompress
@Chunk('sBIT')
class SignificantBits(steel.Structure):
data = steel.Bytes(size=steel.Remainder)
# TODO: decode based on parent Header.color_type
@Chunk('sRGB')
class sRGB(steel.Structure):
rendering_intent = steel.Integer(size=1, choices=RENDERING_INTENT_CHOICES)
class PaletteColor(steel.Structure):
red = steel.Integer(size=1)
green = steel.Integer(size=1)
blue = steel.Integer(size=1)
@Chunk('PLTE')
class Palette(steel.Structure):
colors = steel.List(steel.SubStructure(PaletteColor), size=steel.Remainder)
def __iter__(self):
return iter(self.colors)
@Chunk('bKGD')
class Background(steel.Structure):
data = steel.Bytes(size=steel.Remainder)
# TODO: decode based on parent Header.color_type
@Chunk('hIST')
class Histogram(steel.Structure):
frequencies = steel.List(steel.Integer(size=2), size=steel.Remainder)
@Chunk('tRNS')
class Transparency(steel.Structure):
data = steel.Bytes(size=steel.Remainder)
# TODO: decode based on parent Header.color_type
@Chunk('IDAT', multiple=True)
class Data(steel.Structure):
data = steel.Bytes(size=steel.Remainder)
@Chunk('pHYs')
class PhysicalDimentions(steel.Structure):
x = steel.Integer(size=4)
y = steel.Integer(size=4)
unit = steel.Integer(size=1, choices=PHYSICAL_UNIT_CHOICES)
class SuggestedPaletteEntry(steel.Structure):
red = steel.Integer(size=2)
green = steel.Integer(size=2)
blue = steel.Integer(size=2)
alpha = steel.Integer(size=2)
frequency = steel.Integer(size=2)
# TODO: figure out a good way to handle size based on sample_depth below
@Chunk('sPLT')
class SuggestedPalette(steel.Structure):
name = steel.String(encoding='latin-1')
sample_depth = steel.Integer(size=1)
colors = steel.List(steel.SubStructure(SuggestedPaletteEntry), size=steel.Remainder)
@Chunk('tIME')
class Timestamp(steel.Structure):
year = steel.Integer(size=2)
month = steel.Integer(size=1)
day = steel.Integer(size=1)
hour = steel.Integer(size=1)
minute = steel.Integer(size=1)
second = steel.Integer(size=1)
# TODO: convert this into a datetime object
@Chunk('tEXt', multiple=True)
class Text(steel.Structure, encoding='latin-1'):
keyword = steel.String()
content = steel.String(size=steel.Remainder)
@Chunk('zTXt', multiple=True)
class CompressedText(steel.Structure, encoding='latin-1'):
keyword = steel.String()
compression = steel.Integer(size=1, choices=COMPRESSION_CHOICES)
content = steel.Bytes(size=steel.Remainder) # TODO: decompress
@Chunk('iTXt', multiple=True)
class InternationalText(steel.Structure, encoding='utf8'):
keyword = steel.String()
is_compressed = steel.Integer(size=1)
compression = steel.Integer(size=1, choices=COMPRESSION_CHOICES)
language = steel.String()
translated_keyword = steel.String()
content = steel.Bytes(size=steel.Remainder) # TODO: decompress
@Chunk('IEND')
class End(steel.Structure):
pass
class PNG(steel.Structure):
    # Top-level PNG file layout: the fixed 8-byte signature, the IHDR
    # header, then every remaining chunk up to the IEND terminator.
    signature = steel.FixedString(b'\x89PNG\x0d\x0a\x1a\x0a')
    header = steel.SubStructure(Header)
    chunks = chunks.ChunkList(Chunk, (Header, Chromaticity, Gamma, ICCProfile,
                                      SignificantBits, sRGB, Palette, Background,
                                      Histogram, Transparency, PhysicalDimentions,
                                      SuggestedPalette, Data, Timestamp, Text,
                                      CompressedText, InternationalText), terminator=End)
    @property
    def data_chunks(self):
        """Yield only the IDAT (image data) chunks, in file order."""
        for chunk in self.chunks:
            if isinstance(chunk, Data):
                yield chunk
if __name__ == '__main__':
png = PNG(open(sys.argv[1], 'rb'))
print('%s x %s' % (png.header.width, png.header.height))
print(list(png.data_chunks))
|
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdecore/KPtyProcess.py | Python | gpl-2.0 | 1,129 | 0.010629 | # encoding: utf-8
# module PyKDE4.kdecore
# from /usr/lib/python3/dist-packages/PyKDE4/kdecore.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtNetwork as __PyQt4_QtNetwork
from .KProcess import KProcess
class KPtyProcess(KProcess):
    # Auto-generated binding stub: the generator could not recover real
    # signatures, so every method takes *args/**kwargs and does nothing.
    def isUseUtmp(self, *args, **kwargs): # real signature unknown
        pass

    def pty(self, *args, **kwargs): # real signature unknown
        pass

    def ptyChannels(self, *args, **kwargs): # real signature unknown
        pass

    def setPtyChannels(self, *args, **kwargs): # real signature unknown
        pass

    def setupChildProcess(self, *args, **kwargs): # real signature unknown
        pass

    def setUseUtmp(self, *args, **kwargs): # real signature unknown
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    # Channel flag constants (bitmask: stdin=1, stdout=2, stderr=4).
    AllChannels = 7
    AllOutputChannels = 6
    NoChannels = 0
    PtyChannelFlag = None # (!) real value is ''
    PtyChannels = None # (!) real value is ''
    StderrChannel = 4
    StdinChannel = 1
    StdoutChannel = 2
|
lukecwik/incubator-beam | sdks/python/apache_beam/runners/portability/fn_api_runner/__init__.py | Python | apache-2.0 | 865 | 0 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from apache_beam.runners.portability.fn_api_runner.fn_runner import FnApiRunner
|
ragupta-git/ImcSdk | imcsdk/mometa/bios/BiosVfOSBootWatchdogTimer.py | Python | apache-2.0 | 3,929 | 0.009163 | """This module contains the general information for BiosVfOSBootWatchdogTimer ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class BiosVfOSBootWatchdogTimerConsts:
    # Allowed values for the vpOSBootWatchdogTimer property.  Both the
    # capitalised and lower-case spellings appear in the property's
    # choices list below; "platform-default" defers to the platform.
    VP_OSBOOT_WATCHDOG_TIMER_DISABLED = "Disabled"
    VP_OSBOOT_WATCHDOG_TIMER_ENABLED = "Enabled"
    _VP_OSBOOT_WATCHDOG_TIMER_DISABLED = "disabled"
    _VP_OSBOOT_WATCHDOG_TIMER_ENABLED = "enabled"
    VP_OSBOOT_WATCHDOG_TIMER_PLATFORM_DEFAULT = "platform-default"
class BiosVfOSBootWatchdogTimer(ManagedObject):
    """This is BiosVfOSBootWatchdogTimer class."""

    consts = BiosVfOSBootWatchdogTimerConsts()
    naming_props = set([])

    # Managed-object metadata for the "classic" and "modular" platforms.
    mo_meta = {
        "classic": MoMeta("BiosVfOSBootWatchdogTimer", "biosVfOSBootWatchdogTimer", "OS-Boot-Watchdog-Timer-Param", VersionMeta.Version151f, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], [u'biosPlatformDefaults', u'biosSettings'], [], ["Get", "Set"]),
        "modular": MoMeta("BiosVfOSBootWatchdogTimer", "biosVfOSBootWatchdogTimer", "OS-Boot-Watchdog-Timer-Param", VersionMeta.Version2013e, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], [u'biosPlatformDefaults', u'biosSettings'], [], ["Get", "Set"])
    }

    prop_meta = {
        "classic": {
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "vp_os_boot_watchdog_timer": MoPropertyMeta("vp_os_boot_watchdog_timer", "vpOSBootWatchdogTimer", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
        },
        "modular": {
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "vp_os_boot_watchdog_timer": MoPropertyMeta("vp_os_boot_watchdog_timer", "vpOSBootWatchdogTimer", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
        },
    }

    # XML attribute name -> Python attribute name mapping.
    prop_map = {
        "classic": {
            "dn": "dn",
            "rn": "rn",
            "status": "status",
            "vpOSBootWatchdogTimer": "vp_os_boot_watchdog_timer",
            "childAction": "child_action",
        },
        "modular": {
            "dn": "dn",
            "rn": "rn",
            "status": "status",
            "vpOSBootWatchdogTimer": "vp_os_boot_watchdog_timer",
            "childAction": "child_action",
        },
    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        self._dirty_mask = 0
        self.status = None
        self.vp_os_boot_watchdog_timer = None
        self.child_action = None

        ManagedObject.__init__(self, "BiosVfOSBootWatchdogTimer", parent_mo_or_dn, **kwargs)
|
Apkawa/simple-captcha-ocr-opencv | cap_extra/extras.py | Python | mit | 4,861 | 0.001646 | """
Copyright 2011 Dmitry Nikulin
This file is part of Captchure.
Captchure is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Captchure is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Captchure. If not, see <http://www.gnu.org/licenses/>.
"""
import os, types, cv, cvext
from operator import itemgetter
from consts import *
def getFilename(addr):
    """Return the base name of *addr* without its final extension."""
    base = os.path.basename(addr)
    name, _ext = os.path.splitext(base)
    return name
def addFakeChannels(image):
    """Return a 3-channel BGR version of *image*.

    3-channel inputs are returned unchanged; 1-channel (grayscale)
    inputs are converted with cv.CV_GRAY2BGR.  Any other channel count
    raises ValueError.
    """
    if image.nChannels == 3:
        return image
    if image.nChannels != 1:
        raise ValueError("Input image must be 1- or 3-channel.")
    result = cv.CreateImage(cv.GetSize(image), image.depth, 3)
    cv.CvtColor(image, result, cv.CV_GRAY2BGR)
    return result
def joinImagesV(images, bgcolor=128):
    """Stack *images* vertically into one image, centred horizontally,
    with a 1-pixel row of *bgcolor* between consecutive images.

    If any input has 3 channels, grayscale inputs are promoted with
    addFakeChannels so all images share one channel count.
    """
    totalHeight = sum([image.height for image in images])
    maxWidth = max([image.width for image in images])
    maxNChannels = max([image.nChannels for image in images])
    if maxNChannels == 3:
        images = map(addFakeChannels, images)
        bgcolor = cv.ScalarAll(bgcolor)
    total = len(images)
    # total - 1 extra rows hold the separators between images
    result = cv.CreateImage((maxWidth, totalHeight + total - 1), images[0].depth, images[0].nChannels)
    cv.Set(result, bgcolor)
    curH = 0
    for index in xrange(len(images)):
        image = images[index]
        # centre this image horizontally within the widest one
        off = (maxWidth - image.width) / 2
        cvext.copyTo(image, result, (off, curH), None)
        curH += image.height + 1
    return result
def joinImagesH(images, bgcolor=128):
    """Lay *images* out horizontally into one image, centred vertically,
    with a 1-pixel column of *bgcolor* between consecutive images.

    Mirror of joinImagesV; see that function for the channel promotion.
    """
    totalWidth = sum([image.width for image in images])
    maxHeight = max([image.height for image in images])
    maxNChannels = max([image.nChannels for image in images])
    if maxNChannels == 3:
        images = map(addFakeChannels, images)
        bgcolor = cv.ScalarAll(bgcolor)
    total = len(images)
    # total - 1 extra columns hold the separators between images
    result = cv.CreateImage((totalWidth + total - 1, maxHeight), images[0].depth, images[0].nChannels)
    cv.Set(result, bgcolor)
    curW = 0
    for index in xrange(len(images)):
        image = images[index]
        # centre this image vertically within the tallest one
        off = (maxHeight - image.height) / 2
        cvext.copyTo(image, result, (curW, off), None)
        curW += image.width + 1
    return result
def drawComponents(image, components, startcol=192, stepcol=8):
    """Return a BGR copy of grayscale *image* with one rectangle drawn per
    component.

    *components* is either a list of (x, y, w, h) rects, or a list of
    tuples whose third element is such a rect.  Rectangles are drawn in
    red, with the intensity stepping from *startcol* by *stepcol* per
    component so individual components are distinguishable.
    """
    result = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 3)
    cv.CvtColor(image, result, cv.CV_GRAY2BGR)
    if len(components) == 0: return result
    if type(components[0][2]) == types.TupleType:
        rects = map(itemgetter(2), components)
    else:
        rects = components
    for index, rect in enumerate(rects):
        pt1 = (rect[0], rect[1])
        pt2 = (rect[0] + rect[2], rect[1] + rect[3])
        cv.Rectangle(result, pt1, pt2, (0, 0, startcol + stepcol * index), 1)
    return result
# Per-stage filename suffixes and window titles.
suffixes = {CAP_STAGE_PRE: "pre", CAP_STAGE_SEG: "seg", CAP_STAGE_REC: "rec"}
titles = {CAP_STAGE_PRE: "Preprocessing", CAP_STAGE_SEG: "Segmentation", CAP_STAGE_REC: "Recognition"}

def getSuffix(stage):
    """Return the short filename suffix for *stage*, or raise ValueError."""
    try:
        suffix = suffixes[stage]
    except KeyError:
        raise ValueError("Incorrect stage parameter")
    return suffix

def getTitle(stage):
    """Return the human-readable title for *stage*, or raise ValueError."""
    try:
        title = titles[stage]
    except KeyError:
        raise ValueError("Incorrect stage parameter")
    return title
def processExtras(steps, addr, extras, stage):
    """Join the logged step images vertically and either save the result
    under extras_dir or show it in a window, depending on *extras*.

    CAP_EXTRAS_OFF is a no-op; CAP_EXTRAS_SAVE writes one combined image
    named after the captcha file and stage suffix; CAP_EXTRAS_SHOW opens
    a resizable window titled after the stage.
    """
    if extras == CAP_EXTRAS_OFF:
        return
    result = joinImagesV(steps)
    if extras == CAP_EXTRAS_SAVE:
        name = getFilename(addr)
        suf = getSuffix(stage)
        # newaddr = os.path.join(extras_dir, name + "_" + suf)
        #for index, image in enumerate(steps):
        #    cv.SaveImage(newaddr + str(index) + defext, image)
        newaddr = os.path.join(extras_dir, name + "_" + suf + defext)
        cv.SaveImage(newaddr, result)
    elif extras == CAP_EXTRAS_SHOW:
        title = getTitle(stage)
        cv.NamedWindow(title + " steps", 0)
        cv.ShowImage(title + " steps", result)
class logger:
    """Collects intermediate images ("steps") for later saving/display.

    When *extras* is CAP_EXTRAS_OFF, self.log is bound to a no-op so
    callers can log unconditionally without paying for image clones.
    """
    def __init__(self, extras, image=None, clone=True):
        self.steps = []
        if extras == CAP_EXTRAS_OFF:
            self.log = self.dontLog
        else:
            self.log = self.doLog
        if image is not None:
            self.log(image, clone)
    def dontLog(self, image, clone=True):
        # No-op variant bound to self.log when extras are disabled.
        pass
    def doLog(self, image, clone=True):
        # clone=True protects the stored step from later mutation of the
        # caller's image buffer.
        if clone:
            self.steps.append(cv.CloneImage(image))
        else:
            self.steps.append(image)
tecnologiaenegocios/tn.plonemailing | src/tn/plonemailing/tests/base.py | Python | bsd-3-clause | 475 | 0.004211 | from Products.Five import zcml
from Products.Five import fiveconfigure
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import onsetup
@onsetup
def setup_product():
    """Load the tn.plonemailing ZCML once, before the Plone site exists."""
    fiveconfigure.debug_mode = True
    import tn.plonemailing
    zcml.load_config('configure.zcml', tn.plonemailing)
    fiveconfigure.debug_mode = False

setup_product()
ptc.setupPloneSite(products=['tn.plonemailing'])
class TestCase(ptc.PloneTestCase):
    # Base test case running against the Plone site configured above.
    pass
|
scollis/iris | docs/iris/src/sphinxext/generate_package_rst.py | Python | gpl-3.0 | 7,563 | 0.006876 | # (C) British Crown Copyright 2010 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import re
import inspect
document_dict = {
# Use autoclass for classes
'class': '''
%(object_docstring)s
..
.. autoclass:: %(object_name)s
:members:
:undoc-members:
:inherited-members:
''',
'function': '''
.. autofunction:: %(object_name)s
''',
# For everything else, let automodule do some magic...
None: '''
.. autodata:: %(object_name)s
'''}
horizontal_sep = """
.. raw:: html
<p class="hr_p"><a href="#">↑   top   ↑</a></p>
<!--
-----------
.. raw:: html
-->
"""
def lookup_object_type(obj):
if inspect.isclass(obj):
return 'class'
elif inspect.isfunction(obj):
return 'function'
else:
return None
def auto_doc_module(file_path, import_name, root_package, package_toc=None, title=None):
mod = __import__(import_name)
mod = sys.modules[import_name]
elems = dir(mod)
if '__all__' in elems:
document_these = [[attr_name, getattr(mod, attr_name)] for attr_name in mod.__all__]
else:
document_these = [[attr_name, getattr(mod, attr_name)] for attr_name in elems
if not attr_name.startswith('_') and not inspect.ismodule(getattr(mod, attr_name))]
is_from_this_module = lambda (name, obj), this_module=mod.__name__: hasattr(obj, '__module__') and obj.__module__ == mod.__name__
document_these = filter(is_from_this_module, document_these)
sort_order = {'class': 2, 'function': 1}
# sort them according to sort_order dict
document_these = sorted(document_these, key=lambda (name, obj): sort_order.get(lookup_object_type(obj), 0))
lines = []
for element, obj in document_these:
obj_content = document_dict[lookup_object_type(obj)] % {'object_name': import_name + '.' + element,
'object_name_header_line':'+' * len(import_name + '.' + element),
'object_docstring': inspect.getdoc(obj)}
lines.append(obj_content)
lines = horizontal_sep.join(lines)
module_elements = '\n'.join([' * :py:obj:`%s`' % (element) for element, obj in document_these])
lines = r'''.. _%(import_name)s:
%(title_underline)s
%(title)s
%(title_underline)s
%(sidebar)s
.. currentmodule:: %(root_package)s
.. automodule:: %(import_name)s
In this module:
%(module_elements)s
''' + lines
if package_toc:
sidebar = """
.. sidebar:: Modules in this package
%(package_toc_tree)s
""" % {'package_toc_tree': package_toc}
else:
sidebar = ''
return lines % {'title': title or import_name,
'title_underline': '=' * len(title or import_name),
'import_name': import_name, 'root_package': root_package,
'sidebar': sidebar, 'module_elements': module_elements}
def auto_doc_package(file_path, import_name, root_package, sub_packages):
max_depth = 1 if import_name == 'iris' else 2
package_toc = '\n '.join(sub_packages)
package_toc = '''
.. toctree::
:maxdepth: %d
:titlesonly:
%s
''' % (max_depth, package_toc)
if '.' in import_name:
title = None
else:
title = import_name.capitalize() + ' reference documentation'
return auto_doc_module(file_path, import_name, root_package, package_toc=package_toc, title=title)
def auto_package_build(app):
root_package = app.config.autopackage_name
if root_package is None:
raise ValueError('set the autopackage_name variable in the conf.py file')
if not isinstance(root_package, list):
raise ValueError("autopackage was expecting a list of packages to document e.g. ['itertools']")
for package in root_package:
do_package(package)
def do_package(package_name):
out_dir = package_name + os.path.sep
# import the root package. If this fails then an import error will be raised.
module = __import__(package_name)
root_package = package_nam | e
rootdir = os.path.dirname(module.__file__)
package_folder = []
module_folders = {}
for root, subFolders, files in os.walk(rootdir):
for fname in files:
name, ext = os.path.splitext(fname)
# skip some non-relevant files
if ( fname.startswith('.') or fname.startswith('#') or re.search("^_[^_]", fname) or
fname.find('.svn')>=0 or not (ext in ['.py', '.so']) ):
continue
| rel_path = root_package + os.path.join(root, fname).split(rootdir)[-1]
mod_folder = root_package + os.path.join(root).split(rootdir)[-1].replace('/','.')
# only add to package folder list if it contains an __init__ script
if name == '__init__':
package_folder.append([mod_folder, rel_path])
else:
import_name = mod_folder + '.' + name
module_folders.setdefault(mod_folder, []).append([import_name, rel_path])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
for package, package_path in package_folder:
if '._' in package or 'test' in package:
continue
sub_packages = (spackage for spackage, spackage_path in package_folder if spackage != package and spackage.startswith(package))
paths = [os.path.join(*spackage.rsplit('.', 2)[-2:None])+'.rst' for spackage in sub_packages]
paths.extend( [os.path.join(os.path.basename(os.path.dirname(path)), os.path.splitext(os.path.basename(path))[0]) for imp_name, path in module_folders.get(package, [])])
paths.sort()
doc = auto_doc_package(package_path, package, root_package, paths)
package_dir = out_dir + package.replace('.', os.path.sep)
if not os.path.exists(package_dir):
os.makedirs(out_dir + package.replace('.', os.path.sep))
out_path = package_dir + '.rst'
if not os.path.exists(out_path) or doc != ''.join(file(out_path, 'r').readlines()):
print 'creating out of date/non-existant document %s' % out_path
file(out_path, 'w').write(doc)
for import_name, module_path in module_folders.get(package, []):
doc = auto_doc_module(module_path, import_name, root_package)
out_path = out_dir + import_name.replace('.', os.path.sep) + '.rst'
if not os.path.exists(out_path) or doc != ''.join(file(out_path, 'r').readlines()):
print 'creating out of date/non-existant document %s' % out_path
file(out_path, 'w').write(doc)
def setup(app):
app.connect('builder-inited', auto_package_build)
app.add_config_value('autopackage_name', None, 'env')
|
CodeforLeipzig/luftqualitaet_sachsen | luftqualitaet_sachsen/tests/test_measuring_stations/test_models.py | Python | bsd-3-clause | 655 | 0 | from measuring_stations.models import MeasuringPoint
import pytest
def test_active_values():
    """get_active_values returns the names of the enabled measurements."""
    mp = MeasuringPoint(so2=True, o3=True)
    assert mp.get_active_values() == ('so2', 'o3')
@pytest.fixture
def csv():
    # Sample measurement export: header row, unit row, then two readings.
    # NOTE(review): "<B5>g/m<B3>" looks like the raw byte escapes for the
    # unit string (µg/m³) as delivered by the source feed -- confirm this
    # matches the real export encoding.
    return "Datum Zeit; Leipzig-Mitte SO2\n" \
           "; <B5>g/m<B3>\n" \
           "01-07-14 11:00; 4,0\n" \
           "01-07-14 10:00; 4,1"
@pytest.fixture
def params():
    # No extra import parameters are needed for these tests.
    return []
@pytest.fixture
def stationName():
    """Name of the measuring station used by the import tests."""
    return "Leipzig-Mitte"
@pytest.mark.django_db(transaction=False)
def test_import_values(csv, stationName, params):
    # The django_db mark must be applied as a decorator; the previous
    # code called pytest.mark.django_db(...) inside the body, which
    # builds a mark object and discards it (no database access granted).
    mp = MeasuringPoint()
    mp.put_csv(csv, stationName, params)
    # assert  (TODO: no assertion yet -- this only checks put_csv doesn't raise)
|
DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_importers/management/commands/import_north_norfolk.py | Python | bsd-3-clause | 1,890 | 0.003704 | from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    """Polling-station data importer for North Norfolk (NNO)."""
    council_id = "NNO"
    addresses_name = "2021-03-25T13:58:54.180597/Democracy_Club__06May2021.CSV"
    stations_name = "2021-03-25T13:58:54.180597/Democracy_Club__06May2021.CSV"
    elections = ["2021-05-06"]
    csv_delimiter = ","

    def address_record_to_dict(self, record):
        # Drop address records with known-bad UPRNs or postcodes.
        uprn = record.property_urn.strip().lstrip("0")

        if uprn in [
            "10034791449",  # THE CEDARS, GRESHAM, NORWICH
            "10023665747",  # CHALET 4 MILL FARM AYLSHAM ROAD, FELMINGHAM
            "10034812867",  # BURROW COTTAGE AT WARREN BARN BREWERY ROAD, TRUNCH
            "10034807115",  # 6 SEAWARD CREST, LINKS ROAD, MUNDESLEY, NORWICH
            "10034818211",  # GARDEN COTTAGE, HOVETON HALL ESTATE, HOVETON, NORWICH
            "10034819670",  # THE OLD GATEHOUSE, WALSINGHAM ROAD, EAST BARSHAM, FAKENHAM
            "100091325096",  # FLAT 6 7 NORWICH ROAD, CROMER
        ]:
            return None

        if record.addressline6 in ["NR12 0RX", "NR11 7PE", "NR12 8AH", "NR12 0UD"]:
            return None

        return super().address_record_to_dict(record)

    def station_record_to_dict(self, record):
        # These stations carry unusable postcodes in the source data.
        if record.polling_place_id in [
            "17025",  # Walsingham Village Hall Wells Road Walsingham NR23 1RX
            "17003",  # Great Snoring Social Club Walsingham Road Great Snoring Fakenham NR21 0AP
            "16607",  # The Preston Room Neatishead Road Ashmanhaugh Wroxham NR12 8LB
        ]:
            record = record._replace(polling_place_postcode="")

        # Walcott Village Hall Coast Road Walcott NR12 ONG - O => 0
        if record.polling_place_id == "16728":
            record = record._replace(polling_place_postcode="NR12 0NG")

        return super().station_record_to_dict(record)
|
ubccr/supremm | src/supremm/plugins/MemUsageTimeseries.py | Python | lgpl-3.0 | 3,532 | 0.003681 | #!/usr/bin/env python
""" Timeseries generator module """
from supremm.plugin import Plugin
from supremm.subsample import TimeseriesAccumulator
import numpy
from collections import Counter
class MemUsageTimeseries(Plugin):
    """ Generate the memory usage (used minus file cache and slab) as timeseries data """

    name = property(lambda x: "memused_minus_diskcache")
    mode = property(lambda x: "timeseries")
    requiredMetrics = property(lambda x: ["mem.numa.util.used", "mem.numa.util.filePages", "mem.numa.util.slab"])
    optionalMetrics = property(lambda x: [])
    derivedMetrics = property(lambda x: [])

    def __init__(self, job):
        super(MemUsageTimeseries, self).__init__(job)
        self._data = TimeseriesAccumulator(job.nodecount, self._job.walltime)
        self._hostdata = {}
        self._hostdevnames = {}

    def process(self, nodemeta, timestamp, data, description):
        """Accumulate one datapoint for one host; returns True to continue."""
        hostidx = nodemeta.nodeindex

        if len(data[0]) == 0:
            # Skip data point with no data
            return True

        if nodemeta.nodeindex not in self._hostdata:
            self._hostdata[hostidx] = numpy.empty((TimeseriesAccumulator.MAX_DATAPOINTS, len(data[0])))
            self._hostdevnames[hostidx] = dict((str(k), "numa " + v) for k, v in zip(description[0][0], description[0][1]))

        # used - filePages - slab; divided by 1048576 (presumably KiB -> GiB;
        # confirm the units of the mem.numa.util.* metrics).
        nodemem_kb = numpy.sum(data[0]) - numpy.sum(data[1]) - numpy.sum(data[2])
        insertat = self._data.adddata(hostidx, timestamp, nodemem_kb / 1048576.0)
        if insertat is not None:
            self._hostdata[hostidx][insertat] = (data[0] - data[1] - data[2]) / 1048576.0

        return True

    def results(self):
        values = self._data.get()

        if len(self._hostdata) > 64:
            # Compute min, max & median data and only save the host data
            # for these hosts
            memdata = values[:, :, 1]
            sortarr = numpy.argsort(memdata.T, axis=1)

            retdata = {
                "min": self.collatedata(sortarr[:, 0], memdata),
                "max": self.collatedata(sortarr[:, -1], memdata),
                "med": self.collatedata(sortarr[:, sortarr.shape[1] // 2], memdata),
                "times": values[0, :, 0].tolist(),
                "hosts": {}
            }

            uniqhosts = Counter(sortarr[:, 0])
            uniqhosts.update(sortarr[:, -1])
            uniqhosts.update(sortarr[:, sortarr.shape[1] // 2])
            includelist = uniqhosts.keys()
        else:
            # Save data for all hosts
            retdata = {
                "times": values[0, :, 0].tolist(),
                "hosts": {}
            }
            includelist = self._hostdata.keys()

        for hostidx in includelist:
            retdata['hosts'][str(hostidx)] = {}
            retdata['hosts'][str(hostidx)]['all'] = values[hostidx, :, 1].tolist()
            retdata['hosts'][str(hostidx)]['dev'] = {}

            for devid in self._hostdevnames[hostidx].iterkeys():
                dpnts = len(values[hostidx, :, 0])
                retdata['hosts'][str(hostidx)]['dev'][devid] = self._hostdata[hostidx][:dpnts, numpy.int(devid)].tolist()

            retdata['hosts'][str(hostidx)]['names'] = self._hostdevnames[hostidx]

        return retdata

    @staticmethod
    def collatedata(args, rates):
        """ build output data """
        result = []
        for timepoint, hostidx in enumerate(args):
            try:
                result.append([rates[hostidx, timepoint], int(hostidx)])
            except IndexError:
                pass

        return result
|
gregmarra/pxlart | layouts/couch_layout_gen.py | Python | mit | 1,417 | 0.018349 | ### NOTE NEED TO MANUALLY DELETE COMMA AFTER LAST POINT
LEDS_PER_METER = 30.0
# Dimensions from http://www.ikea.com/us/en/catalog/products/S99932597/
COUCH_WIDTH = 2.18
COUCH_DEPTH = 0.88
def layout_leds(width, depth, leds_per_meter):
    """Return (x, y) coordinates of LEDs spaced evenly around the perimeter
    of a width x depth rectangle centred on the origin.

    The strip starts at the south-west corner (-width/2, -depth/2) and runs
    east, north, west, then south; any overshoot at a corner is carried onto
    the next side so the LED pitch stays constant around the corner.
    """
    led_coordinates = []
    step = 1.0 / leds_per_meter  # was hard-wired to the module constant,
                                 # ignoring the leds_per_meter parameter
    half_w = width / 2.0
    half_d = depth / 2.0
    direction = ""
    next_direction = "east"
    x = -half_w
    y = -half_d
    while True:
        if direction == "east":
            x = x + step
            if x > half_w:
                remainder = x - half_w
                x = half_w
                y = y + remainder
                next_direction = "north"
        if direction == "north":
            y = y + step
            if y > half_d:
                remainder = y - half_d
                y = half_d
                x = x - remainder
                next_direction = "west"
        if direction == "west":
            x = x - step
            # BUG FIX: the west edge is bounded by the couch WIDTH; the
            # original compared against -depth/2, truncating the west run.
            if x < -half_w:
                remainder = -half_w - x
                x = -half_w
                y = y - remainder
                next_direction = "south"
        if direction == "south":
            y = y - step
            if y < -half_d:
                break
        direction = next_direction
        led_coordinates.append((x, y))
    return led_coordinates
def print_coordinates(led_coordinates):
    """Print the LED coordinates to stdout as a valid JSON array.

    Each entry is {"point": [x, y, z]} with z fixed at 0.  Entries are
    joined with commas (no trailing comma after the last one), so the
    output is valid JSON and the manual comma-deletion step noted at the
    top of this file is no longer needed.
    """
    import sys  # local import keeps the module's top level unchanged
    entries = ['{"point": [%5.3f, %5.3f, %5.3f]}' % (x, y, 0) for x, y in led_coordinates]
    sys.stdout.write("[\n" + ",\n".join(entries) + "\n]\n")
def main():
    """Generate and print the LED layout for the couch dimensions above."""
    print_coordinates(layout_leds(COUCH_WIDTH, COUCH_DEPTH, LEDS_PER_METER))
if __name__ == "__main__":
main()
|
maui-packages/qt-creator | tests/system/suite_QMLS/tst_QMLS05/test.py | Python | lgpl-2.1 | 2,938 | 0.00919 | #############################################################################
##
## Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://qt.digia.com/licensing. For further information
## use the contact form at http://qt.digia.com/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../shared/qmls.py")
def main():
    """Squish UI test: verify the QML 'Split Initializer' refactoring."""
    editorArea = startQtCreatorWithNewAppAtQMLEditor(tempDir(), "SampleApp", "Text {")
    if not editorArea:
        return
    homeKey = "<Home>"
    if platform.system() == "Darwin":
        homeKey = "<Ctrl+Left>"
    for i in range(2):
        type(editorArea, homeKey)
    type(editorArea, "<Return>")
    type(editorArea, "<Up>")
    type(editorArea, "<Tab>")
    type(editorArea, "Item { x: 10; y: 20; width: 10 }")
    for i in range(30):
        type(editorArea, "<Left>")
    invokeMenuItem("File", "Save All")
    # activate menu and apply 'Refactoring - Split initializer'
    numLinesExpected = len(str(editorArea.plainText).splitlines()) + 4
    try:
        invokeContextMenuItem(editorArea, "Refactoring", "Split Initializer")
    except:
        # If menu item is disabled it needs to reopen the menu for updating
        invokeContextMenuItem(editorArea, "Refactoring", "Split Initializer")
    # wait until refactoring ended
    waitFor("len(str(editorArea.plainText).splitlines()) == numLinesExpected", 5000)
    # verify if refactoring was properly applied - each part on separate line
    verifyMessage = "Verifying split initializer functionality at element line."
    for line in ["Item {", "x: 10;", "y: 20;", "width: 10", "}"]:
        verifyCurrentLine(editorArea, line, verifyMessage)
        type(editorArea, "<Down>")
    #save and exit
    invokeMenuItem("File", "Save All")
    invokeMenuItem("File", "Exit")
invokeMenuItem("File", "Exit")
|
JohanComparat/nbody-npt-functions | bin/bin_galform/plotLFs-color-mag-trends.py | Python | cc0-1.0 | 11,045 | 0.026075 | #! /usr/bin/env python
"""
This script produces quality plots to check that the LFs are fine compared to simumlations.
"""
import sys
import os
from os.path import join
data_dir = os.environ['DATA_DIR']
import glob
from lib_plot import *
#from lineListAir import *
SNlim = 5
# "D:\data\LF-O\LFmodels\data\trends_color_mag\O2_3728-VVDSDEEPI24-z0.947.txt"
plotDir="/home/comparat/database/Simulations/galform-lightcone/products/emissionLineLuminosityFunctions/plots/"
dir="/home/comparat/database/Simulations/galform-lightcone/products/emissionLineLuminosityFunctions/O2_3728/"
lf_measurement_files_ref=n.array(glob.glob(dir+"*MagLimR-24.2-z0.7*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*MagLimR-*z0.7*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ len(lf_measurement_files), len(dataRef[0]) ])
label = n.array([ "R<23.0","R<23.5", "R<24.2"])
for ii,el in enumerate(lf_measurement_files) :
data= n.loadtxt( el, unpack=True)
phiRatio[ii] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((1e40,1e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=2)
p.savefig(join(plotDir,"trends_O2_3728_Rmag-z0.7.pdf"))
p.clf()
lf_measurement_files_ref=n.array(glob.glob(dir+"*MagLimR-24.2-z0.9*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*MagLimR-*z0.9*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ len(lf_measurement_files), len(dataRef[0]) ])
label = n.array([ "R<23.0","R<23.5", "R<24.2"])
for ii,el in enumerate(lf_measurement_files) :
data= n.loadtxt( el, unpack=True)
phiRatio[ii] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((1e40,1e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=2)
p.savefig(join(plotDir,"trends_O2_3728_Rmag-z0.9.pdf"))
p.clf()
lf_measurement_files_ref=n.array(glob.glob(dir+"*MagLimR-24.2-z1.*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*MagLimR-*z1.*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ len(lf_measurement_files), len(dataRef[0]) ])
label = n.array([ "R<23.0","R<23.5", "R<24.2"])
for ii,el in enumerate(lf_measurement_files) :
data= n.loadtxt( el, unpack=True)
phiRatio[ii] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((1e40,1e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=2)
p.savefig(join(plotDir,"trends_O2_3728_Rmag-z1.2.pdf"))
p.clf()
########################################33
lf_measurement_files_ref=n.array(glob.glob(dir+"*MagLimI-24-z1.*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*MagLimI-*z1.*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ len(lf_measurement_files), len(dataRef[0]) ])
label = n.array([ "I<22.5", "I<23.0","I<23.5","I<24.0"])
for jj,el in enumerate(lf_measurement_files) :
data= n.loadtxt( el, unpack=True)
phiRatio[jj] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((1e40,1e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=2)
p.savefig(join(plotDir,"trends_O2_3728_Imag-z1.2.pdf"))
p.clf()
lf_measurement_files_ref=n.array(glob.glob(dir+"*MagLimI-24-z0.9*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*MagLimI-*z0.9*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ len(lf_measurement_files), len(dataRef[0]) ])
label = n.array([ "I<22.5", "I<23.0","I<23.5","I<24.0"])
for jj,el in enumerate(lf_measurement_files) :
data= n.loadtxt( el, unpack=True)
phiRatio[jj] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((1e40,1e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=2)
p.savefig(join(plotDir,"trends_O2_3728_Imag-z0.9.pdf"))
p.clf()
lf_measurement_files_ref=n.array(glob.glob(dir+"*MagLimI-24-z0.7*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*MagLimI-*z0.7*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ len(lf_measurement_files), len(dataRef[0]) ])
label = n.array([ "I<22.5", "I<23.0","I<23.5","I<24.0"])
for jj,el in enumerate(lf_measurement_files):
data= n.loadtxt( el, unpack=True)
phiRatio[jj] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((1e40,1e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=2)
p.savefig(join(plotDir,"trends_O2_3728_Imag-z0.75.pdf"))
p.clf()
#####################################3
#####################################3
# R-Z
#####################################3
#####################################3
lf_measurement_files_ref=n.array(glob.glob(dir+"*VVDSrz_gt_0.0-z0.7*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*VVDSrz_?t_*z0.7*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
label = n.array(["r-z>0", "r-z>0.5", "r-z>1", "r-z>1.5", "r-z<1", "r-z<1.5", "r-z<2"])
phiRatio = n.empty([ 7, len(dataRef[0]) ])
for ii, el in enumerate(lf_measurement_files):
data= n.loadtxt( el, unpack=True)
phiRatio[ii] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((7e40,5e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=4)
p.savefig(join(plotDir,"trends_O2_3728_I22.5_RZ-z0.75.pdf"))
p.clf()
lf_measurement_files_ref=n.array(glob.glob(dir+"*VVDSrz_gt_0.0-z0.9*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*VVDSrz_?t_*z0.9*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ 7, len(dataRef[0]) ])
for ii, el in enumerate(lf_measurement_files):
data= n.loadtxt( el, unpack=True)
phiRatio[ii] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((7e40,5e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=4)
p.savefig(join(plotDir,"trends_O2_3728_I22.5_RZ-z0.9.pdf"))
p.clf()
lf_measurement_files_ref=n.array(glob.glob(dir+"*VVDSrz_gt_0.0-z1.*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*VVDSrz_?t_*z1.*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ 7, len(dataRef[0]) ])
for ii, el in enumerate(lf_measurement_files):
data= n.loadtxt( el, unpack=True)
phiRatio[ii] = | data[3] / dataRef[ | 3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((7e40,5e43))
p.y |
siosio/intellij-community | python/testData/inspections/PyNonAsciiCharReferenceInspection/test.py | Python | apache-2.0 | 119 | 0.042735 | g = 2
i = 2
<warning descr="Non-ASCII c | haracter 'ɡ' in the file, but no encoding declared">ɡ</warning> = | 1
a = g + i
|
SeleniumHQ/selenium | py/selenium/webdriver/remote/file_detector.py | Python | apache-2.0 | 1,803 | 0 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# wit | h the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the Li | cense is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from abc import ABCMeta, abstractmethod
import os
from typing import Optional
from selenium.types import AnyKey
from selenium.webdriver.common.utils import keys_to_typing
class FileDetector(metaclass=ABCMeta):
"""
Used for identifying whether a sequence of chars represents the path to a
file.
"""
@abstractmethod
def is_local_file(self, *keys: AnyKey) -> Optional[str]:
return None
class UselessFileDetector(FileDetector):
"""
A file detector that never finds anything.
"""
def is_local_file(self, *keys: AnyKey) -> Optional[str]:
return None
class LocalFileDetector(FileDetector):
"""
Detects files on the local disk.
"""
def is_local_file(self, *keys: AnyKey) -> Optional[str]:
file_path = ''.join(keys_to_typing(keys))
if not file_path:
return None
try:
if os.path.isfile(file_path):
return file_path
except Exception:
pass
return None
|
freakboy3742/toga_web_demo | toga_django/widgets/list.py | Python | bsd-3-clause | 3,836 | 0.001564 | from django.core.urlresolvers import reverse, resolve
from | django.utils.html import escape
from .base import Widget
from ..libs import List as TogaList, SimpleListElement as TogaSimpleLis | tElement
class SimpleListElement(Widget):
def __init__(self, content, detail=None, **style):
super(SimpleListElement, self).__init__(**style)
self.content = content
self.detail = detail
self.startup()
def startup(self):
pass
def materialize(self):
return TogaSimpleListElement(
widget_id=self.widget_id,
content=escape(self.content),
delete_url=reverse(self.detail, kwargs={'pk': self.content.id})
)
def _set_window(self, window):
super()._set_window(window)
if self.on_press:
self.window.callbacks[(self.widget_id, 'on_press')] = self.on_press
class List(Widget):
IMPL_CLASS = TogaList
def __init__(self, source=None, detail=None, item_class=None, on_item_press=None, **style):
super(List, self).__init__(**style)
self.source = source
self.detail = detail
self.item_class = item_class
self.on_item_press = on_item_press
self.children = []
self.startup()
def startup(self):
pass
def materialize(self):
children = []
if self.source:
api_view = resolve(reverse(self.source)).func
for child in api_view.view_class().get_queryset():
children.append(self.item_class(child, self.detail).materialize())
else:
for child in self.children:
children.add(child.materialize())
return TogaList(
widget_id=self.widget_id,
children=children,
create_url=reverse(self.source),
on_item_press=self.handler(self.on_item_press, 'on_item_press') if self.on_item_press else None
)
def add(self, content):
if self.source:
raise Exception("Can't manually add to an API-sourced list")
self.children.append(self.item_class(content, self.detail))
def _set_app(self, app):
for child in self.children:
child.app = app
def _set_window(self, window):
for child in self.children:
child.window = window
if self.on_item_press:
self.window.callbacks[(self.widget_id, 'on_item_press')] = self.on_item_press
# def _hint_size(self, width, height, min_width=None, min_height=None):
# if width is not None:
# self.width = width
# else:
# del(self.width)
# if min_width is not None:
# self.min_width = min_width
# else:
# del(self.min_width)
# if height is not None:
# self.height = height
# else:
# del(self.height)
# if min_height is not None:
# self.min_height = min_height
# else:
# del(self.min_height)
# def _update_child_layout(self, **style):
# """Force a layout update on children of this container.
# The update request can be accompanied by additional style information
# (probably min_width, min_height, width or height) to control the
# layout.
# """
# for child in self.children:
# if child.is_container:
# child._update_layout()
# def _set_frame(self, frame):
# print("SET FRAME", self, frame.origin.x, frame.origin.y, frame.size.width, frame.size.height)
# self._impl.setFrame_(frame)
# self._impl.setNeedsDisplay_(True)
# for child in self.children:
# layout = child.layout
# child._set_frame(NSRect(NSPoint(layout.left, layout.top), NSSize(layout.width, layout.height)))
|
rspavel/spack | lib/spack/spack/compilers/apple_clang.py | Python | lgpl-2.1 | 7,173 | 0.000279 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os.path
import re
import shutil
import llnl.util.tty as tty
import llnl.util.lang
import spack.compiler
import spack.compilers.clang
import spack.util.executable
import spack.version
class AppleClang(spack.compilers.clang.Clang):
openmp_flag = "-Xpreprocessor -fopenmp"
@classmethod
@llnl.util.lang.memoized
def extract_version_from_output(cls, output):
ver = 'unknown'
match = re.search(
# Apple's LLVM compiler has its own versions, so suffix them.
r'^Apple (?:LLVM|clang) version ([^ )]+)',
output,
# Multi-line, since 'Apple clang' may not be on the first line
# in particular, when run as gcc, it seems to output
# "Configured with: --prefix=..." as the first line
re.M,
)
if match:
ver = match.group(match.lastindex)
return ver
@property
def cxx11_flag(self):
# Adapted from CMake's AppleClang-CXX rules
# Spack's AppleClang detection only valid from Xcode >= 4.6
if self.version < spack.version.ver('4.0.0'):
raise spack.compiler.UnsupportedCompilerFlag(
self, "the C++11 standard", "cxx11_flag", "Xcode < 4.0.0"
)
return "-std=c++11"
@property
def cxx14_flag(self):
# Adapted from CMake's rules for AppleClang
if self.version < spack.version.ver('5.1.0'):
raise spack.compiler.UnsupportedCompilerFlag(
self, "the C++14 standard", "cxx14_flag", "Xcode < 5.1.0"
)
elif self.version < spack.version.ver('6.1.0'):
return "-std=c++1y"
return "-std=c++14"
@property
def cxx17_flag(self):
# Adapted from CMake's rules for AppleClang
if self.version < spack.version.ver('6.1.0'):
raise spack.compiler.UnsupportedCompilerFlag(
self, "the C++17 standard", "cxx17_flag", "Xcode < 6.1.0"
)
return "-std=c++1z"
def setup_custom_environment(self, pkg, env):
"""Set the DEVELOPER_DIR environment for the Xcode toolchain.
On macOS, not all buildsystems support querying CC and CXX for the
compilers to use and instead query the Xcode toolchain for what
compiler to run. This side-steps the spack wrappers. In order to inject
spack into this setup, we need to copy (a subset of) Xcode.app and
replace the compiler executables with symlinks to the spack wrapper.
Currently, the stage is used to store the Xcode.app copies. We then set
the 'DEVELOPER_DIR' environment variables to cause the xcrun and
related tools to use this Xcode.app.
"""
super(AppleClang, self).setup_custom_environment(pkg, env)
if not pkg.use_xcode:
# if we do it for all packages, we get into big troubles with MPI:
# filter_compilers(self) will use mockup XCode compilers on macOS
# with Clang. Those point to Spack's compiler wrappers and
# consequently render MPI non-functional outside of Spack.
return
# Use special XCode versions of compiler wrappers when using XCode
# Overwrites build_environment's setting of SPACK_CC and SPACK_CXX
xcrun = spack.util.executable.Executable('xcrun')
xcode_clang = xcrun('-f', 'clang', output=str).strip()
xcode_clangpp = xcrun('-f', 'clang++', output=str).strip()
env.set('SPACK_CC', xcode_clang, force=True)
env.set('SPACK_CXX', xcode_clangpp, force=True)
xcode_select = spack.util.executable.Executable('xcode-select')
# Get the path of the active developer directory
real_root = xcode_select('--print-path', output=str).strip()
# The path name can be used to determine whether the full Xcode suite
# or just the command-line tools are installed
if real_root.endswith('Developer'):
# The full Xcode suite is installed
pass
else:
if real_root.endswith('CommandLineTools'):
# Only the command-line tools are installed
msg = 'It appears that you have the Xcode command-line tools '
msg += 'but not the full Xcode suite installed.\n'
else:
# Xcode is not installed
msg = 'It appears that you do not have Xcode installed.\n'
msg += 'In order to use Spack to build the requested application, '
msg += 'you need the full Xcode suite. It can be installed '
msg += 'through the App Store. Make sure you launch the '
msg += 'application and accept the license agreement.\n'
raise OSError(msg)
real_root = os.path.dirname(os.path.dirname(real_root))
developer_root = os.path.join(spack.stage.get_stage_root(),
'xcode-select',
self.name,
str(self.version))
xcode_link = os.path.join(developer_root, 'Xcode.app')
if not os.path.exists(developer_root):
tty.warn('Copying Xcode from %s to %s in order to add spack '
'wrappers to it. Please do not interrupt.'
% (real_root, developer_root))
# We need to make a new Xcode.app instance, but with symlinks to
# the spack wrappers for the compilers it ships. This is necessary
# because some projects insist on just asking xcrun and related
# tools where the compiler runs. These tools are very hard to trick
# as they do realpath and end up ignoring the symlinks in a
# "softer" tree of nothing but symlinks in the right places.
shutil.copytree(
real_root, developer_root, symlinks=True,
ignore=shutil.ignore_patterns(
'AppleTV*.platform', 'Watch*.platform', 'iPhone*.platform',
'Documentation', 'swift*'
))
real_dirs = [
'Toolchains/XcodeDefault.xctoolchain/usr/bin',
'usr/bin',
]
bins = ['c++', 'c89', 'c99', 'cc', 'clang', 'clang++', 'cpp']
for real_dir in real_dirs:
dev_dir = os.path.join(developer_root,
'Contents',
'Developer',
real_dir)
for fname in os.listdir(dev_dir):
if fname in bins:
os.unlink(os.path.join(dev_dir, fname))
| os.symlink(
os.path.join(spack.paths.build_env_path, 'cc'),
os.path.join(dev_dir, fname) | )
os.symlink(developer_root, xcode_link)
env.set('DEVELOPER_DIR', xcode_link)
|
gustavofoa/pympm | scripts/locustfile.py | Python | apache-2.0 | 988 | 0.002024 | from locust import HttpLocust, TaskSet, task
class WebsiteTasks(TaskSet):
@task
def page1(self):
self.client.get("/sugestoes-para/6a-feira-da-quarta-semana-da-pascoa/")
@task
def page2(self):
self.client | .get( | "/sugestoes-para/5a-feira-da-quarta-semana-da-pascoa/")
@task
def page3(self):
self.client.get("/sugestoes-para/4a-feira-da-quarta-semana-da-pascoa/")
@task
def page4(self):
self.client.get("/sugestoes-para/3a-feira-da-quarta-semana-da-pascoa/")
@task
def musica1(self):
self.client.get("/musica/ressuscitou/")
@task
def musica2(self):
self.client.get("/musica/prova-de-amor-maior-nao-ha/")
@task
def musica3(self):
self.client.get("/musica/porque-ele-vive/")
@task
def musica4(self):
self.client.get("/musica/o-senhor-ressuscitou-aleluia/")
class WebsiteUser(HttpLocust):
task_set = WebsiteTasks
min_wait = 5000
max_wait = 15000
|
cjmay/energyweb | graph/management/commands/energyfaker.py | Python | mit | 5,892 | 0.002206 | #!/usr/bin/env python
'''
Serve data on a TCP port (using the assumption that the machine can
listen on the specified address and port), imitating a Rhizome Systems
energy monitoring device. The imitation is accomplished by reading from
a "profile," i.e., a list of sample readings. Called with two
arguments: The sensor ID (as represented in the PostgreSQL DB)
and a command (start, stop, restart). (The TCP port used will be that
specified in the database for the given sensor.) Daemonize on
initialization.
'''
import socket, psycopg2, datetime, atexit, signal, time, sys
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from energyweb.graph.daemon import Daemon
from logging import error, info, debug, basicConfig, DEBUG as LOGGING_DEBUG
from energyweb.graph.fake_rhizome_profiles import FAKE_RHIZOME_PROFILES
from SocketServer import TCPServer, BaseRequestHandler
from random import random, randint
from binascii import unhexlify
from energyweb.graph.models import Sensor, Setting
class FakeRhizomeHandler(BaseRequestHandler):
'''
Subclass of the SocketServer BaseRequestHandler that imitates a
Rhizome device, as given by FakeRhizomeHandler.profile (assumed
to be set before connections are received).
'''
def handle(self):
'''
Imitate a Rhizome device by sending the data specified in
self.profile.
'''
info('%s:%d connected.' % self.client_address)
reading_num = 0
reading = self.profile[reading_num]
# Loop forever, sending every 10 seconds (roughly)
while True:
chars_sent = 0
# Loop until a full reading (45 bytes) has been sent.
while chars_sent < 45:
chars_to_send = randint(1, 45 - chars_sent)
r = reading[chars_sent:(chars_sent + chars_to_send)]
self.request.send(r)
chars_sent += chars_to_send
debug('Reading %d: data sent. (%d bytes)'
% (reading_num, chars_to_send))
reading_num += 1
if reading_num == len(self.profile):
reading_num = 0
reading = self.profile[reading_num]
time.sleep(10 + (random() - 0.5) * settings.FAKER_SLEEP_VARIATION)
self.request.close()
info('%s:%d closed.' % self.client_address)
class FakeRhizomeDaemon(Daemon):
'''
Subclass of a daemonizing class. When initialized, wait for
a connection on the TCP port specified in the database for the
given energy monitoring device. (The particular device is given
by the sensor_id argument to run().) Send imitation data when a
connection is received.
'''
def cleanup(self):
'''
Close database and socket connections in preparation for
termination.
'''
info('Cleaning up: rolling back, disconnecting, disconnecting.')
if hasattr(self, 'sock'):
self.sock.shutdown()
def handle_signal(self, signum, frame):
'''
If a SIGQUIT, SIGTERM, or SIGINT is received, shutdown cleanly.
'''
if signum == signal.SIGQUIT:
info('Caught SIGQUIT.')
elif signum == signal.SIGTERM:
info('Caught SIGTERM.')
elif signum == signal.SIGINT:
info('Caught SIGINT.')
# cleanup() will be called since it is registered with atexit
sys.exit(0)
def run(self, sensor_id):
'''
Perform the main listen and send loop of the program.
(See file and class docstrings.)
'''
basicConfig(filename=(settings.FAKER_LOG_FILE_TEMPL % sensor_id),
format=settings.LOG_FORMAT, datefmt=settings.LOG_DATEFMT,
level=LOGGING_DEBUG)
# Register exit and signal behaviors.
atexit.register(self.cleanup)
signal.signal(signal.SIGHUP, signal.SIG_IGN)
signal.signal(signal.SIGINT, self.handle_signal)
signal.signal(signal.SIGTERM, self.handle_signal)
signal.signal(signal.SIGQUIT, self.handle_signal)
self.sensor = Sensor.objects.get(pk=sensor_id)
addr = '%s:%d' % (self.sensor.ip, self.sensor.port)
if self.sensor.name == '':
desc = self.sensor.sensor_group.name
else:
desc = '%s %s' % (self.sensor.sensor_group.name, self.sensor.name)
# TODO: is there a better way to give the profile to the
# handler?
FakeRhizomeHandler.profile = [unhexlify(s) for s in
FAKE_RHIZOME_PROFILES[sensor_id]]
self.sock = TCPServer((self.sensor.ip, self.sensor.port),
FakeRhizomeHandler)
info('Serving for sensor %d (%s, %s).' % (sensor_id, desc, addr))
self.sock.serve_forever()
class Command(BaseCommand):
args = '<sen | sor_id> start|stop|restart'
help = 'Imitat | e the specified Rhizome device.'
def handle(self, *args, **options):
if len(args) == 2:
try:
sensor_id = int(args[0])
except ValueError:
raise CommandError('Invalid sensor id: \'%s\'.' % args[0])
daemon = FakeRhizomeDaemon(
settings.FAKER_PID_FILE_TEMPL % sensor_id, args=(sensor_id,),
stdout=(settings.FAKER_LOG_FILE_TEMPL % sensor_id),
stderr=(settings.FAKER_LOG_FILE_TEMPL % sensor_id))
if args[1] == 'start':
daemon.start()
elif args[1] == 'stop':
daemon.stop()
elif args[1] == 'restart':
daemon.restart()
else:
raise CommandError('Invalid action: \'%s\'.' % args[1])
else:
raise CommandError('Invalid number of arguments: %d.' % len(args))
|
hali4ka/robotframework-selenium2library | src/Selenium2Library/lib/selenium-2.8.1/py/selenium/webdriver/common/action_chains.py | Python | apache-2.0 | 7,157 | 0.002794 | # Copyright 2011 WebDriver committers
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The ActionChains implementation."""
from selenium.webdriver.remote.command import Command
class ActionChains(object):
"""Generate user actions.
All actions are stored in the ActionChains object. Call perform() to fire
stored actions."""
def __init__(self, driver):
"""Creates a new ActionChains.
Args:
driver: The WebDriver instance which performs user actions.
"""
self._driver = driver
self._actions = []
def perform(self):
"""Performs all stored actions."""
for action in self._actions:
action()
def click(self, on_element=None):
"""Clicks an element.
Args:
on_element: The element to click.
If None, clicks on current mouse position.
"""
if on_element: self.move_to_element(on_element)
self._actions.append(lambda:
self._driver.execute(Command.CLICK, {'button': 0}))
return self
def click_and_hold(self, on_element):
"""Holds down the left mouse button on an element.
Args:
on_element: The element to mouse down.
If None, clicks on current mouse position.
"""
if on_element: self.move_to_element(on_element)
self._actions.append(lambda:
self._driver.execute(Command.MOUSE_DOWN, {}))
return self
def context_click(self, on_element):
"""Performs a context-click (right click) on an element.
Args:
on_element: The element to context-click.
If None, clicks on current mouse position.
"""
if on_element: self.move_to_element(on_element)
self._actions.append(lambda:
self._driver.execute(Command.CLICK, {'button': 2}))
return self
def double_click(self, on_element):
"""Double-clicks an element.
Args:
on_element: The element to double-click.
If None, clicks on current mouse position.
"""
if on_element: self.move_to_element(on_element)
self._actions.append(lambda:
self._driver.execute(Command.DOUBLE_CLICK, {}))
return self
def drag_and_drop(self, source, target):
"""Holds down the left mouse button on the source element,
then moves to the target element and releases the mouse button.
Args:
source: The element to mouse down.
target: The element to mouse up.
"""
self.click_and_hold(source)
self.release(target)
return self
def drag_and_drop_by_offset(self, source, xoffset, yoffset):
"""Holds down the left mouse button on the source element,
then moves to the target element and releases the mouse button.
Args:
source: The element to mouse down.
xoffset: X offset to move to.
yoffset: Y offset to move to.
"""
self.click_and_hold(source)
self.move_by_offset(xoffset, yoffset)
self.release(source)
return self
def key_down(self, key, element=None):
"""Sends a key press only, without releasing it.
Should only be used with modifier keys (Control, Alt and Shift).
Args:
key: The modifier key to send. Values are defined in Keys class.
target: The element to send keys.
If None, sends a key to current focused element.
"""
if element: self.click(element)
self._actions.append(lambda:
self._driver.execute(Command.SEND_MODIFIER_KEY_TO_ACTIVE_ELEMENT, {
"value": key,
"isdown": True}))
return self
def key_up(self, key, element=None):
"""Releases a modifier key.
Args:
key: The modifier key to send. Values are defined in Keys class.
target: The element to send keys.
If None, sends a key to current focused element.
"""
if element: self.click(element)
self._actions.append(lambda:
self._driver.execute(Command.SEND_MODIFIER_KEY_TO_ACTIVE_ELEMENT, {
"value": key,
"isdown": False}))
return self
def move_by_offset(self, xoffset, yoffset):
"""Moving the mouse to an offset from current mouse position.
| Args:
xoffset: X offset to move to.
yoffset: Y offset to move to.
"""
self._actions.append(lambda:
| self._driver.execute(Command.MOVE_TO, {
'xoffset': xoffset,
'yoffset': yoffset}))
return self
def move_to_element(self, to_element):
"""Moving the mouse to the middle of an element.
Args:
to_element: The element to move to.
"""
self._actions.append(lambda:
self._driver.execute(Command.MOVE_TO, {'element': to_element.id}))
return self
def move_to_element_with_offset(self, to_element, xoffset, yoffset):
"""Move the mouse by an offset of the specificed element.
Offsets are relative to the top-left corner of the element.
Args:
to_element: The element to move to.
xoffset: X offset to move to.
yoffset: Y offset to move to.
"""
self._actions.append(lambda:
self._driver.execute(Command.MOVE_TO, {
'element': to_element.id,
'xoffset': xoffset,
'yoffset': yoffset}))
return self
def release(self, on_element):
"""Releasing a held mouse button.
Args:
on_element: The element to mouse up.
"""
if on_element: self.move_to_element(on_element)
self._actions.append(lambda:
self._driver.execute(Command.MOUSE_UP, {}))
return self
def send_keys(self, *keys_to_send):
"""Sends keys to current focused element.
Args:
keys_to_send: The keys to send.
"""
self._actions.append(lambda:
self._driver.switch_to_active_element().send_keys(*keys_to_send))
return self
def send_keys_to_element(self, element, *keys_to_send):
"""Sends keys to an element.
Args:
element: The element to send keys.
keys_to_send: The keys to send.
"""
self._actions.append(lambda:
element.send_keys(*keys_to_send))
return self
|
krishna11888/ai | third_party/pattern/pattern/metrics.py | Python | gpl-2.0 | 42,773 | 0.008604 | #### PATTERN | METRICS #############################################################################
# coding: utf-8
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
import sys
from time import time
from math import sqrt, floor, ceil, modf, exp, pi, log
from collections import defaultdict, deque
from itertools import chain
from operator import itemgetter, lt, le
from heapq import nlargest
from bisect import bisect_right
from random import gauss
if sys.version > "3":
xrange = range
####################################################################################################
# Simple implementation of Counter for Python 2.5 and 2.6.
# See also: http://code.activestate.com/recipes/576611/
class Counter(dict):
    """Minimal pure-Python backport of collections.Counter for Python 2.5/2.6.

    Missing elements report a count of 0 instead of raising KeyError.
    See also: http://code.activestate.com/recipes/576611/
    """

    def __init__(self, iterable=None, **kwargs):
        self.update(iterable, **kwargs)

    def __missing__(self, k):
        # Unseen elements count as zero rather than raising KeyError.
        return 0

    def update(self, iterable=None, **kwargs):
        """ Updates counter with the tallies from the given iterable, dictionary or Counter.
        """
        if kwargs:
            self.update(kwargs)
        if hasattr(iterable, "items"):
            # Mapping (dict / Counter): add the stored counts.
            for k, v in iterable.items():
                self[k] = self.get(k, 0) + v
        elif hasattr(iterable, "__getitem__") \
          or hasattr(iterable, "__iter__"):
            # Sequence / iterable: each occurrence adds one.
            for k in iterable:
                self[k] = self.get(k, 0) + 1

    def most_common(self, n=None):
        """ Returns a list of the n most common (element, count)-tuples.
        """
        if n is None:
            return sorted(self.items(), key=itemgetter(1), reverse=True)
        return nlargest(n, self.items(), key=itemgetter(1))

    def copy(self):
        # Restored: the source dump corrupted this line into "ret | urn ...".
        return Counter(self)

    def __delitem__(self, k):
        # Deleting a missing key is a silent no-op, like collections.Counter.
        if k in self:
            dict.__delitem__(self, k)

    def __repr__(self):
        # Restored: the source dump split this expression with a stray "|".
        return "Counter({%s})" % ", ".join("%r: %r" % e for e in self.most_common())
try:
    # Import Counter from Python 2.7+ if possible.
    # The C-accelerated stdlib class shadows the pure-Python fallback above.
    from collections import Counter
except:
    pass
def cumsum(iterable):
    """ Returns an iterator over the cumulative sum of values in the given list.
    """
    running = 0
    for value in iterable:
        running += value
        yield running
#### PROFILER ######################################################################################
def duration(function, *args, **kwargs):
    """ Returns the running time of the given function, in seconds.
    """
    started = time()
    function(*args, **kwargs)
    elapsed = time() - started
    return elapsed
def profile(function, *args, **kwargs):
    """ Returns the performance analysis (as a string) of the given Python function.
        Uses cProfile when available (pure-Python profile otherwise), with the
        top 30 entries sorted by cumulative time.
    """
    def run():
        function(*args, **kwargs)
    if not hasattr(function, "__call__"):
        raise TypeError("%s is not a function" % type(function))
    try:
        import cProfile as profile
    except:
        import profile
    import pstats
    import os
    # profile.run() evaluates its code string in __main__'s namespace,
    # so the zero-argument wrapper must be injected there by name.
    import sys; sys.modules["__main__"].__profile_run__ = run
    id = function.__name__ + "()"
    profile.run("__profile_run__()", id)
    p = pstats.Stats(id)
    # Redirect the stats printer into a temp file named after the function...
    p.stream = open(id, "w")
    p.sort_stats("cumulative").print_stats(30)
    p.stream.close()
    # ...then read the report back and remove the temp file.
    s = open(id).read()
    os.remove(id)
    return s
def sizeof(object):
    """ Returns the memory size of the given object (in bytes).
        Note: sys.getsizeof() is shallow - referenced objects are not included.
    """
    return sys.getsizeof(object)
def kb(object):
    """ Returns the memory size of the given object (in kilobytes).
        Shallow, like sizeof(): referenced objects are not included.
    """
    # Bug fix: the original returned sys.getsizeof(object) * 0.01, which is
    # neither bytes nor kilobytes; 1 KB = 1024 bytes.
    return sys.getsizeof(object) / 1024.0
#### PRECISION & RECALL ############################################################################
# Names of the four metrics returned (in order) by test().
ACCURACY, PRECISION, RECALL, F1_SCORE = "accuracy", "precision", "recall", "F1-score"
# Averaging mode for test(): macro-average P/R over positive and negative class.
MACRO = "macro"
def confusion_matrix(classify=lambda document: False, documents=[(None,False)]):
    """ Returns the performance of a binary classification task (i.e., predicts True or False)
        as a tuple of (TP, TN, FP, FN):
        - TP: true positives  = correct hits,
        - TN: true negatives  = correct rejections,
        - FP: false positives = false alarm (= type I error),
        - FN: false negatives = misses (= type II error).
        The given classify() function returns True or False for a document.
        The list of documents contains (document, bool)-tuples for testing,
        where True means a document that should be identified as True by classify().
    """
    TP = TN = FP = FN = 0
    for document, expected in documents:
        predicted = classify(document)
        if expected:
            if predicted:
                TP += 1  # correct hit
            else:
                FN += 1  # miss (type II error)
        else:
            if predicted:
                FP += 1  # false alarm (type I error)
            else:
                TN += 1  # correct rejection
    return TP, TN, FP, FN
def test(classify=lambda document:False, documents=[], average=None):
    """ Returns an (accuracy, precision, recall, F1-score)-tuple.
        With average=None, precision & recall are computed for the positive class (True).
        With average=MACRO, precision & recall for positive and negative class are macro-averaged.
    """
    TP, TN, FP, FN = confusion_matrix(classify, documents)
    A  = float(TP + TN) / ((TP + TN + FP + FN) or 1)
    P1 = float(TP) / ((TP + FP) or 1) # positive class precision
    R1 = float(TP) / ((TP + FN) or 1) # positive class recall
    P0 = float(TN) / ((TN + FN) or 1) # negative class precision
    R0 = float(TN) / ((TN + FP) or 1) # negative class recall
    if average == MACRO:
        P, R = ((P1 + P0) / 2,
                (R1 + R0) / 2)
    else:
        # Default (average=None) reports the positive class.  Bug fix: any
        # other value of `average` previously left P/R unbound -> NameError.
        P, R = (P1, R1)
    F1 = 2 * P * R / ((P + R) or 1)
    return (A, P, R, F1)
def accuracy(classify=lambda document:False, documents=[], average=None):
    """ Returns the percentage of correct classifications (true positives + true negatives).
        `documents` is a list of (document, gold-label) tuples; see test().
    """
    return test(classify, documents, average)[0]
def precision(classify=lambda document:False, documents=[], average=None):
    """ Returns the percentage of correct positive classifications.
        `documents` is a list of (document, gold-label) tuples; see test().
    """
    return test(classify, documents, average)[1]
def recall(classify=lambda document:False, documents=[], average=None):
    """ Returns the percentage of positive cases correctly classified as positive.
        `documents` is a list of (document, gold-label) tuples; see test().
    """
    return test(classify, documents, average)[2]
def F1(classify=lambda document:False, documents=[], average=None):
    """ Returns the harmonic mean of precision and recall.
        `documents` is a list of (document, gold-label) tuples; see test().
    """
    return test(classify, documents, average)[3]
def F(classify=lambda document:False, documents=[], beta=1, average=None):
    """ Returns the weighted harmonic mean of precision and recall,
        where recall is beta times more important than precision.
    """
    # Only P and R are used here; A and F1 from test() are discarded.
    A, P, R, F1 = test(classify, documents, average)
    return (beta ** 2 + 1) * P * R / ((beta ** 2 * P + R) or 1)
#### SENSITIVITY & SPECIFICITY #####################################################################
def sensitivity(classify=lambda document:False, documents=[]):
    """ Returns the percentage of positive cases correctly classified as positive (= recall).
    """
    # Bug fix: the original passed `document` (undefined in this scope, so the
    # call raised NameError); the parameter is named `documents`.
    return recall(classify, documents, average=None)
def specificity(classify=lambda document:False, documents=[]):
    """ Returns the percentage of negative cases correctly classified as negative
        (the true negative rate).
    """
    TP, TN, FP, FN = confusion_matrix(classify, documents)
    negatives = TN + FP  # everything that is truly negative
    return float(TN) / (negatives or 1)

TPR = sensitivity # true positive rate
TNR = specificity # true negative rate
#### ROC & AUC #####################################################################################
# See: Tom Fawcett (2005), An Introduction to ROC analysis.
def roc(tests=[]):
""" Returns the ROC curve as an iterator of (x, y)-points,
for the given list of (TP, TN, FP, FN)-tuples.
The x-axis represents FPR = the false positive rate (1 - specificity).
The y-axis represents TPR = the true positive rate.
"""
x = FPR = lambda TP, TN, FP, FN: float(FP) / ((FP + TN) or 1)
y = |
Plailect/PlaiCDN | PlaiCDN.py | Python | gpl-3.0 | 30,166 | 0.004575 | #!/usr/bin/env python3
#script is a replacement for https://github.com/Relys/3DS_Multi_Decryptor/blob/master/to3DS/CDNto3DS/CDNto3DS.py
#requires PyCrypto to be installed ("python3 -m ensurepip" then "pip3 install PyCrypto")
#requires makerom (https://github.com/profi200/Project_CTR/releases)
#this is a Python 3 script
from subprocess import DEVNULL, STDOUT, call, check_call
from struct import pack, unpack
from binascii import hexlify, | unhexlify
from Crypto.Cipher import AES
from hashlib import sha256
from imp import reload
import json
import platform
import os
import | struct
import errno
import shlex
import ssl
import sys
import urllib.request, urllib.error, urllib.parse
# from http://stackoverflow.com/questions/600268/
def pmkdir(path):
    """Create directory `path`, including missing parents, like `mkdir -p`.

    Succeeds silently if the directory already exists; still raises OSError
    if `path` exists but is not a directory (same as the original EEXIST check).
    """
    # This is a Python 3 script, so exist_ok replaces the manual
    # try/except-EEXIST-and-isdir dance from the original recipe.
    os.makedirs(path, exist_ok=True)
# from http://stackoverflow.com/questions/377017/377028#377028
def which(program):
    """Return the full path to executable `program`, like the Unix `which`.

    If `program` contains a path component it is checked directly; otherwise
    every entry of $PATH is searched.  Returns None when nothing matches.
    """
    # Fix: removed the redundant nested `import os` that shadowed the
    # module-level import for no benefit.
    def is_exe(fpath):
        # Executable means: a regular file with the execute bit for this user.
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    fpath, fname = os.path.split(program)
    if fpath:
        if is_exe(program):
            return program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            # Some environments quote PATH entries; strip the quotes.
            path = path.strip('"')
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file
    return None
# based on https://stackoverflow.com/questions/5783517/
def report_chunk(bytes_so_far, chunk_size, total_size):
    """ Progress hook: rewrites a single status line on stdout with the number
        of bytes processed so far, and emits a final newline once complete.
    """
    fraction = float(bytes_so_far) / total_size
    percent = round(fraction * 100, 2)
    status = '\rDownloaded and decrypted %d of %d bytes (%0.2f%%)' % (bytes_so_far, total_size, percent)
    sys.stdout.write(status)
    sys.stdout.flush()
    if bytes_so_far >= total_size:
        print('')
# download in 0x200000 byte chunks, decrypt the chunk with IVs described below, then write the decrypted chunk to disk (half the file size of decrypting separately!)
def read_chunk(response, f_out, intitle_key, first_iv, chunk_size=0x200000, report_hook=None):
    """ Streams `response` into file `f_out`, AES-CBC-decrypting each chunk as
        it arrives (half the disk footprint of downloading then decrypting).

        The first chunk is decrypted with `first_iv` (hex string: content ID
        plus 28 zeros); each subsequent chunk uses the last 16 ciphertext bytes
        of the previous chunk as IV, per CBC chaining.

        Fixes vs. original: removed the unused `data = []` local and the
        redundant double int() conversion of Content-Length; the output file
        is now closed even if a read or decrypt fails.
    """
    total_size = int(response.getheader('Content-Length'))
    bytes_so_far = 0
    first_read_chunk = 0
    with open(f_out, 'wb') as file_handler:
        while 1:
            if report_hook:
                report_hook(bytes_so_far, chunk_size, total_size)
            chunk = response.read(chunk_size)
            bytes_so_far += len(chunk)
            if not chunk:
                break
            if first_read_chunk == 0:
                decryptor = AES.new(intitle_key, AES.MODE_CBC, unhexlify(first_iv))
                first_read_chunk = 1
            else:
                # NOTE(review): hard-codes 0x200000 instead of chunk_size, so
                # chaining is only correct when every non-final read returns a
                # full 0x200000-byte chunk -- confirm before changing chunk_size.
                decryptor = AES.new(intitle_key, AES.MODE_CBC, prev_chunk[(0x200000 - 16):0x200000])
            dec_chunk = decryptor.decrypt(chunk)
            prev_chunk = chunk
            file_handler.write(dec_chunk)
def system_usage():
    """ Prints the command line help for every PlaiCDN mode, then exits with
        status 0 via SystemExit.
    """
    usage_lines = (
        "Usage: PlaiCDN <TitleID TitleKey> <Options> for content options",
        "-redown : redownload content",
        "-no3ds : don't build 3DS file",
        "-nocia : don't build CIA file",
        "-nobuild : don't build 3DS or CIA",
        "-nohash : ignore hash checks",
        "-check : checks if title id matches key",
        "-fast : skips name retrieval when using -check",
        "",
        "Usage: PlaiCDN <TitleID> for general options",
        "-info : to display detailed metadata",
        "-seed : generates game-specific seeddb file when using -info",
        "",
        "Usage: PlaiCDN <Options> for decTitleKeys.bin options",
        "-deckey : print keys from decTitleKeys.bin",
        "-checkbin : checks titlekeys from decTitleKeys.bin",
        "-checkall : check all titlekeys when using -checkbin",
        "-fast : skips name retrieval when using -checkbin, cannot be used with seed/seeddb",
        "-seeddb : generates a single seeddb.bin",
    )
    for line in usage_lines:
        print(line)
    raise SystemExit(0)
def getTitleInfo(title_id):
tid_high = ((hexlify(title_id)).decode()).upper()[:8]
tid_index = ['00040010', '0004001B', '000400DB', '0004009B',
'00040030', '00040130', '00040138', '00040001',
'00048005', '0004800F', '00040002', '0004008C']
res_index = ['-System Application-', '-System Data Archive-', '-System Data Archive-', '-System Data Archive-',
'-System Applet-', '-System Module-', '-System Firmware-', '-Download Play Title-',
'-TWL System Application-', '-TWL System Data Archive-', '-Game Demo-', '-Addon DLC-']
if fast == 1 and gen_seed != 1:
tid_index.extend(['00040000', '0004000E'])
res_index.extend(['-eShop Content-', '-eShop Content Update-'])
if tid_high in tid_index:
return(res_index[tid_index.index(tid_high)], '---', '-------', '------', '', '---', '---')
# create new SSL context to load decrypted CLCert-A off directory, key and cert are in PEM format
# see https://github.com/SciresM/ccrypt
try:
ctr_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctr_context.load_cert_chain('ctr-common-1.crt', keyfile='ctr-common-1.key')
except FileNotFoundError:
if '-checkbin' not in sys.argv:
print('\nCould not find certificate files, all secure connections will fail!\n')
nocert = 1
return('-eShop Content-', '---', '-------', '------', None, '---', '---')
# ninja handles handles actions that require authentication, in addition to converting title ID to internal the CDN content ID
ninja_url = 'https://ninja.ctr.shop.nintendo.net/ninja/ws/'
# use GET request with parameter "title_id[]=mytitle_id" with SSL context
# use header "Accept: application/json" to retrieve JSON instead of XML
try:
shop_request = urllib.request.Request(ninja_url + 'titles/id_pair' + '?title_id[]=' + (hexlify(title_id)).decode())
shop_request.get_method = lambda: 'GET'
shop_request.headers['Accept'] = 'application/json'
response = urllib.request.urlopen(shop_request, context=ctr_context)
json_response = json.loads((response.read()).decode('UTF-8', 'replace'))
except urllib.error.URLError as e:
raise
# set ns_uid (the internal content ID) to field from JSON
ns_uid = json_response['title_id_pairs']['title_id_pair'][0]['ns_uid']
# samurai handles metadata actions, including getting a title's info
# URL regions are by country instead of geographical regions... for some reason
samurai_url = 'https://samurai.ctr.shop.nintendo.net/samurai/ws/'
region_dict = {'JP': 'JPN', 'HK': 'HKG', 'TW': 'TWN', 'KR': 'KOR', 'DE': 'EUR', 'FR': 'EUR', 'ES': 'EUR', 'NL': 'EUR', 'IT': 'EUR', 'GB': 'EUR', 'US': 'USA'}
region_dict_passed = {}
# try loop to figure out which region the title is from; there is no easy way to do this other than try them all
for country_code, region in region_dict.items():
try:
title_request = urllib.request.Request(samurai_url + country_code + '/title/' + str(ns_uid))
title_request.headers['Accept'] = 'application/json'
response = urllib.request.urlopen(title_request, context=ctr_context)
title_response = json.loads((response.read()).decode('UTF-8', 'replace'))
except urllib.error.URLError as e:
pass
else:
region_dict_passed.update({country_code: region})
if len(region_dict_passed) == 0:
raise
elif len(region_dict_passed) > 1:
region = 'ALL'
else:
region = list(region_dict_passed.values())[0]
ec_request = urllib.request.Request(ninja_url + list(region_dict_passed.keys())[0] + '/title/' + str(ns_uid) + '/ec_info')
ec_request.headers['Accept'] = 'application/json'
response = urllib.request.urlopen(ec_request, context=ctr_context)
ec_response = json.loads((response.read()).decode('UTF-8', 'replace'))
# get info from the retu |
ugoertz/tippspiel | tippspiel/tools/vorrunde.py | Python | mit | 2,049 | 0.007809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import datetime
import django
sys.path.append('/var/django/tippspiel')
django.setup()
from tipps.models import Spiel, Mannschaft, Runde
# Fixture table: one match per line, fields separated by " | ":
# round | "YYYY-MM-DD, HH:MM" | team1 code | team2 code.
# Two V3 rows were corrupted by stray separators in the source dump
# (an empty field before "ukr | pol", and "2016-06-22" split in two);
# both are restored so the 4-field split(' | ') unpack below succeeds.
spiele = """
V1 | 2016-06-10, 21:00 | fra | rou
V1 | 2016-06-11, 15:00 | alb | sui
V1 | 2016-06-11, 18:00 | wal | svk
V1 | 2016-06-11, 21:00 | eng | rus
V1 | 2016-06-12, 18:00 | pol | nir
V1 | 2016-06-12, 21:00 | ger | ukr
V1 | 2016-06-12, 15:00 | tur | cro
V1 | 2016-06-13, 15:00 | esp | cze
V1 | 2016-06-13, 18:00 | irl | swe
V1 | 2016-06-13, 21:00 | bel | ita
V1 | 2016-06-14, 18:00 | aut | hun
V1 | 2016-06-14, 21:00 | por | isl
V2 | 2016-06-15, 18:00 | rou | sui
V2 | 2016-06-15, 21:00 | fra | alb
V2 | 2016-06-15, 15:00 | rus | svk
V2 | 2016-06-16, 15:00 | eng | wal
V2 | 2016-06-16, 18:00 | ukr | nir
V2 | 2016-06-16, 21:00 | ger | pol
V2 | 2016-06-17, 18:00 | cze | cro
V2 | 2016-06-17, 21:00 | esp | tur
V2 | 2016-06-17, 15:00 | ita | swe
V2 | 2016-06-18, 15:00 | bel | irl
V2 | 2016-06-18, 18:00 | isl | hun
V2 | 2016-06-18, 21:00 | por | aut
V3 | 2016-06-19, 21:00 | sui | fra
V3 | 2016-06-19, 21:00 | rou | alb
V3 | 2016-06-20, 21:00 | svk | eng
V3 | 2016-06-20, 21:00 | rus | wal
V3 | 2016-06-21, 18:00 | ukr | pol
V3 | 2016-06-21, 18:00 | nir | ger
V3 | 2016-06-21, 21:00 | cro | esp
V3 | 2016-06-21, 21:00 | cze | tur
V3 | 2016-06-22, 21:00 | ita | irl
V3 | 2016-06-22, 21:00 | swe | bel
V3 | 2016-06-22, 18:00 | hun | por
V3 | 2016-06-22, 18:00 | isl | aut
"""
# Drop blank lines from the fixture table above.
spielliste = [ l for l in spiele.split('\n') if l.strip() ]
for l in spielliste:
    # Each row: round | "YYYY-MM-DD, HH:MM" | team1 code | team2 code.
    runde, datum, ms1, ms2 = l.split(' | ')
    print datum, ms1, ms2
    # Resolve teams and round against existing Django model rows.
    m1 = Mannschaft.objects.get(code = ms1)
    m2 = Mannschaft.objects.get(code = ms2)
    r = Runde.objects.get(name = runde)
    # Split "YYYY-MM-DD, HH:MM" into a date part and a time part.
    d1, d2 = datum.split(',')
    year, month, day = [ int(x) for x in d1.strip().split('-') ]
    hour, minutes = [ int(x) for x in d2.strip().split(':') ]
    d = datetime.datetime(year, month, day, hour, minutes)
    # Persist the match (Python 2 / Django management script).
    sp = Spiel(mannschaft1=m1, mannschaft2=m2, datum=d, runde=r)
    sp.save()
|
pettai/capirca | tools/cgrep.py | Python | apache-2.0 | 2,954 | 0.009479 | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Simply util to grep through network definitions.
# Examples:
# To find out which tokens contain "10.4.3.1" use
# $ cgrep.py -i 10.4.3.1
#
# To find out if token 'FOO' includes ip "1.2.3.4" use
# $ cgrep.py -t FOO -i 1.2.3.4
#
# To find the difference and union of tokens 'FOO' and 'BAR' use
# $ cgrep.py -c FOO BAR
#
__author__ = "watson@google.com (Tony Watson)"
import sys
sys.path.append('../')
from lib import naming
from lib import nacaddr
from optparse import OptionParser
def main(argv):
parser = OptionParser()
parser.add_option("-d", "--def", dest="defs", action="store",
help="Network Definitions directory location",
default="../def")
parser.add_option("- | i", "--ip", dest="ip", action="store",
help="Return list of defintions containing this IP. "
"Multiple IPs permitted.")
parser.add_option("-t", "--token", dest="token", action="store",
help="See if an IP is contained within this token."
"Must be used in conjunction with --ip [addr].")
parser.add_option("-c", "--cmp", dest="cmp", action="store_true",
help="Compare two network definition tokens")
(options, args) = parser.parse_args()
db = naming.Naming(options.defs)
if options.ip is not None and options.token is None:
for arg in sys.argv[2:]:
print "%s: " % arg
rval = db.GetIpParents(arg)
print rval
if options.token is not None and options.ip is None:
print "You must specify and IP Address with --ip [addr] to check."
sys.exit(0)
if options.token is not None and options.ip is not None:
token = options.token
ip = options.ip
rval = db.GetIpParents(ip)
if token in rval:
print '%s is in %s' % (ip, token)
else:
print '%s is not in %s' % (ip, token)
if options.cmp is not None:
t1 = argv[2]
t2 = argv[3]
d1 = db.GetNet(t1)
d2 = db.GetNet(t2)
union = list(set(d1 + d2))
print 'Union of %s and %s:\n %s\n' % (t1, t2, union)
print 'Diff of %s and %s:' % (t1, t2)
for el in set(d1 + d2):
el = nacaddr.IP(el)
if el in d1 and el in d2:
print ' %s' % el
elif el in d1:
print '+ %s' % el
elif el in d2:
print '- %s' % el
if __name__ == '__main__':
main(sys.argv)
|
vladan-m/ggrc-core | src/tests/ggrc/behave/factories.py | Python | apache-2.0 | 12,159 | 0.019163 | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: david@reciprocitylabs.com
# Maintained By: david@reciprocitylabs.com
import datetime
import factory
import random
from factory.base import BaseFactory, FactoryMetaClass, CREATE_STRATEGY
from factory.fuzzy import (
BaseFuzzyAttribute, FuzzyChoice, FuzzyDate, FuzzyDateTime, FuzzyInteger)
from factory.compat import UTC
from ggrc import models
from ggrc.models.reflection import AttributeInfo
def random_string(prefix='', no_unicode=False):
return u'{prefix}{suffix}{extra}'.format(
prefix=prefix,
suffix=random.randint(0,9999999999),
extra='' if no_unicode else u'\xff'
)
def random_string_attribute(prefix=''):
return factory.LazyAttribute(lambda m: random_string(prefix))
class FuzzyEmail(BaseFuzzyAttribute):
def fuzz(self):
return u"{0}@{1}.{2}".format(
random_string('user-', True), random_string('domain-', True), 'com')
class FactoryStubMarker(object):
def __init__(self, class_):
self.class_ = class_
class FactoryAttributeGenerator(object):
"""Use the SQLAlchemy ORM model to generate factory attributes."""
@classmethod
def generate(cls, attrs, model_class, attr):
"""Generate a factory attribute for `attr` by inspecting the mapping
type of the attribute in `model_class`. Add the attribute to the
`attrs` dictionary.
"""
if (hasattr(attr, '__call__')):
attr_name = attr.attr_name
value = []
else | :
attr_name = attr
class_attr = getattr(model_class, attr_name)
#look | up the class method to use to generate the attribute
method = getattr(cls, class_attr.__class__.__name__)
value = method(attr_name, class_attr)
attrs[attr_name] = value
@classmethod
def InstrumentedAttribute(cls, attr_name, class_attr):
method = getattr(cls, class_attr.property.__class__.__name__)
return method(attr_name, class_attr)
@classmethod
def ColumnProperty(cls, attr_name, class_attr):
method = getattr(
cls,
class_attr.property.expression.type.__class__.__name__,
cls.default_column_handler)
return method(attr_name, class_attr)
@classmethod
def default_column_handler(cls, attr_name, class_attr):
return random_string_attribute(attr_name)
@classmethod
def DateTime(cls, attr_name, class_attr):
return FuzzyDateTime(
datetime.datetime(2013,1,1,tzinfo=UTC),
datetime.datetime.now(UTC) + datetime.timedelta(days=730),
)
@classmethod
def Date(cls, attr_name, class_attr):
return FuzzyDate(
datetime.date(2013,1,1),
datetime.date.today() + datetime.timedelta(days=730),
)
@classmethod
def Boolean(cls, attr_name, class_attr):
return FuzzyChoice([True, False])
@classmethod
def Integer(cls, attr_name, class_attr):
return FuzzyInteger(0,100000)
@classmethod
def RelationshipProperty(cls, attr_name, class_attr):
if class_attr.property.uselist:
return []
else:
columns = tuple(class_attr.property.local_columns)
# FIXME: ? Doesn't handle multiple local columns, so won't work for
# polymorphic links
if columns[0].nullable:
# Not a required association, so skip it
return None
elif columns[0].primary_key:
# This is a 'reverse' association, so skip it (primary keys are
# not nullable, but the relationship may still be optional)
return None
else:
return FactoryStubMarker(class_attr.property.mapper.class_)
@classmethod
def AssociationProxy(cls, attr_name, class_attr):
return []
@classmethod
def property(cls, attr_name, class_attr):
return None
@classmethod
def simple_property(cls, attr_name, class_attr):
return None
class ModelFactoryMetaClass(FactoryMetaClass):
def __new__(cls, class_name, bases, attrs, extra_attrs=None):
"""Use model reflection to build up the list of factory attributes.
The default attributes can be overridden by defining a subclass
of `ModelFactory` and defining the attribute to be overriden.
"""
model_class = attrs.pop('MODEL', None)
if model_class:
attrs['FACTORY_FOR'] = dict
attribute_info = AttributeInfo(model_class)
for attr in attribute_info._create_attrs:
if hasattr(attr, '__call__'):
attr_name = attr.attr_name
else:
attr_name = attr
if not attr_name in attrs:
FactoryAttributeGenerator.generate(attrs, model_class, attr)
return super(ModelFactoryMetaClass, cls).__new__(
cls, class_name, bases, attrs)
ModelFactory = ModelFactoryMetaClass(
'ModelFactory', (BaseFactory,), {
'ABSTRACT_FACTORY': True,
'FACTORY_STRATEGY': CREATE_STRATEGY,
'__doc__': """ModelFactory base with build and create support.
This class has supports SQLAlchemy ORM.
""",
})
def factory_for(model_class):
"""Get the factory for a model by name or by class.
If there is a factory defined for this model in globals() that factory
will be used. Otherwise, one will be created and added to globals().
"""
if isinstance(model_class, (str, unicode)):
if '.' in model_class:
import sys
path = model_class.split('.')
module_name = '.'.join(path[:-1])
factory_name = path[-1]
__import__(module_name)
model_class = getattr(sys.modules[module_name], factory_name, None)
else:
factory_name = model_class
import ggrc.models
model_class = ggrc.models.get_model(model_class)
else:
factory_name = model_class.__name__
factory_name = '{0}Factory'.format(factory_name)
factory = globals().get(factory_name, None)
if not factory:
class model_factory(ModelFactory):
MODEL = model_class
model_factory.__name__ = factory_name
globals()[factory_name] = model_factory
factory = model_factory
return factory
class PersonFactory(ModelFactory):
MODEL = models.Person
email = FuzzyEmail()
# Governance Objects
class ProgramFactory(ModelFactory):
MODEL = models.Program
kind = FuzzyChoice(['Directive', 'Company Controls'])
status = FuzzyChoice(MODEL.VALID_STATES)
class ContractFactory(ModelFactory):
MODEL = models.Contract
kind = FuzzyChoice(MODEL.valid_kinds)
status = FuzzyChoice(MODEL.VALID_STATES)
class PolicyFactory(ModelFactory):
MODEL = models.Policy
kind = FuzzyChoice(MODEL.valid_kinds)
status = FuzzyChoice(MODEL.VALID_STATES)
class RegulationFactory(ModelFactory):
MODEL = models.Regulation
kind = FuzzyChoice(MODEL.valid_kinds)
status = FuzzyChoice(MODEL.VALID_STATES)
class StandardFactory(ModelFactory):
MODEL = models.Standard
kind = FuzzyChoice(MODEL.valid_kinds)
status = FuzzyChoice(MODEL.VALID_STATES)
class SectionFactory(ModelFactory):
MODEL = models.Section
# Explicit `directive` factory is necessary, since it's a `nullable`
# column, but uses @validate to maintain requirement
directive = FactoryStubMarker(models.Regulation)
class ClauseFactory(ModelFactory):
MODEL = models.Clause
class ObjectiveFactory(ModelFactory):
MODEL = models.Objective
status = FuzzyChoice(MODEL.VALID_STATES)
class ControlFactory(ModelFactory):
MODEL = models.Control
status = FuzzyChoice(MODEL.VALID_STATES)
# Business Objects
class DataAssetFactory(ModelFactory):
MODEL = models.DataAsset
status = FuzzyChoice(MODEL.VALID_STATES)
class FacilityFactory(ModelFactory):
MODEL = models.Facility
status = FuzzyChoice(MODEL.VALID_STATES)
class MarketFactory(ModelFactory):
MODEL = models.Market
status = FuzzyChoice(MODEL.VALID_STATES)
class OrgGroupFactory(ModelFactory):
MODEL = models.OrgGroup
status = FuzzyChoice(MODEL.VALID_STATES)
class ProductFactory(ModelFactory):
MODEL = models.Product
status = FuzzyChoice(MODEL.VALID_STATES)
class ProjectFactory(ModelFactory):
MODEL = models.Project
status = FuzzyChoice(MODEL.VALID_STATES)
class SystemFactory(ModelFactory):
MODEL = models.System
status = FuzzyChoice(MODEL.VALID_STATES)
class ProcessFactory(ModelFactory):
MODEL = |
brain-research/conv-sv | conv2d_singular_values_test.py | Python | apache-2.0 | 7,125 | 0.006737 | """
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import numpy as np
import tensorflow as tf
import time
from tensorflow.python.platform import test
import conv2d_singular_values as convsv
class CirculantSingularTest(test.TestCase):
def testOneDimension(self):
"""Test singular values of the one dimensional convolution."""
filter = np.array([2, 7, -1])
n = 20
# Compute the singular values directly
transform_mat = np.zeros([n, n])
for i in range(n):
for j in range(filter.size):
transform_mat[i, (j+i) % n] = filter[j]
U, D, V = np.linalg.svd(transform_mat)
# Compute the singular values using FFT
D1 = np.sort(np.absolute(np.fft.fft(filter, n)))[::-1]
self.assertAllCloseAccordingToType(D, D1)
def twoDimOneChannel(self, filter, n):
"""Given a two- | d, one-channel filter and an input size, return the matrix
of the linear transformation corresponding to the | filter"""
transform_mat = np.zeros([n ** 2, n ** 2])
for i1 in range(n):
for i2 in range(n):
for j1 in range(filter.shape[0]):
for j2 in range(filter.shape[1]):
col = ((i1 + j1) % n) * n + (i2 + j2) % n
transform_mat[i1 * n + i2, col] = filter[j1, j2]
return transform_mat
def testTwoDimension(self):
"""Test singular values of the two dimensional convolution."""
filter = np.array([[2, 7, -1], [1, 3, -8], [5, -3, 1]])
n = 20
# Compute the singular values directly
transform_mat = self.twoDimOneChannel(filter, n)
D = np.linalg.svd(transform_mat, compute_uv=False)
# Compute the singular values using FFT
D1 = np.sort(np.absolute(
np.fft.fft2(filter, [n, n]).flatten()))[::-1]
self.assertAllCloseAccordingToType(D, D1)
def testTwoDimensionRectangular(self):
"""Test singular values of the two dimensional convolution."""
filter = np.array([[2, 7, -1], [1, 3, -8]])
n = 20
# Compute the singular values directly
transform_mat = self.twoDimOneChannel(filter, n)
D = np.linalg.svd(transform_mat, compute_uv=False)
# Compute the singular values using FFT
D1 = np.sort(np.absolute(
np.fft.fft2(filter, [n, n]).flatten()))[::-1]
self.assertAllCloseAccordingToType(D, D1)
def testMultiChannel(self):
"""Test the case where inputs and outputs have several channels each"""
num_inp_channels = 2
num_out_channels = 3
filter_x = 3
filter_y = 4
filter_shape = (filter_x, filter_y, num_inp_channels, num_out_channels)
filter = np.random.randint(low=-8, high=8,size=filter_shape)
n = 32
# Compute the singular values directly
print("Start Full Matrix")
start = time.time()
transform_mat = np.zeros([num_inp_channels * (n ** 2),
num_out_channels * (n ** 2)])
for c1 in range(num_inp_channels):
for c2 in range(num_out_channels):
first_row = n * n * c1
first_col = n * n * c2
this_block = self.twoDimOneChannel(filter[:,:,c1,c2], n)
transform_mat[first_row:(first_row+n*n),
first_col:(first_col+n*n)] = this_block
D = np.linalg.svd(transform_mat, compute_uv=False)
print("Time for SVD Full Matrix:", time.time() - start)
start = time.time()
singular_vals_by_freq_pair = convsv.SVD_Conv_Tensor_NP(filter, [n, n])
print("Short algorithm time:", time.time() - start)
# sort singular values in decreasing order
D1 = np.flip(np.sort(singular_vals_by_freq_pair.flatten()),0)
self.assertAllCloseAccordingToType(D, D1)
def testMultiChannelTF(self):
"""Test the case where inputs and outputs have several channels each.
Using much bigger input, to check timing."""
num_inp_channels = 64
num_out_channels = 256
filter_x = 3
filter_y = 4
filter_shape = (filter_x, filter_y, num_inp_channels, num_out_channels)
filter = np.random.randint(low=-8, high=8,size=filter_shape)
n = 32
start = time.time()
singular_vals_by_freq_pair = convsv.SVD_Conv_Tensor_NP(filter, [n, n])
print("NP SVD time:", time.time() - start)
# sort singular values in decreasing order
D1 = np.flip(np.sort(singular_vals_by_freq_pair.flatten()),0)
with self.test_session() as sess:
filter_tf = tf.constant(filter, dtype=tf.float32)
D2_tf = convsv.SVD_Conv_Tensor(filter_tf, [n, n])
tf.global_variables_initializer().run()
start = time.time()
singular_vals = sess.run(D2_tf)
print("TF SVD Time:", time.time() - start)
D2 = np.flip(np.sort(singular_vals.flatten()),0)
self.assertAllClose(D1 / D1[0], D2 / D2[0], atol=3e-5)
def testMultiChannelClipRepeated(self):
print("Testing Repeated Clipping")
num_inp_channels = 3
num_out_channels = 4
filter_x = 3
filter_y = 4
filter_shape = (filter_x, filter_y, num_inp_channels, num_out_channels)
filter = np.random.randint(low=-8, high=8,size=filter_shape)
n = 32
singular_vals = convsv.SVD_Conv_Tensor_NP(filter, [n, n])
clipped_filter = convsv.Clip_OperatorNorm_NP(filter, [n, n],
singular_vals.max())
self.assertAllClose(filter, clipped_filter)
clip_value = 10
last_max = singular_vals.max()
for round in range(10):
clipped_filter = convsv.Clip_OperatorNorm_NP(clipped_filter, [n, n],
clip_value)
clipped_singular_vals = convsv.SVD_Conv_Tensor_NP(clipped_filter, [n, n])
self.assertTrue(last_max > clipped_singular_vals.max())
last_max = clipped_singular_vals.max()
def testMultiChannelClipTF(self):
print("Testing Clipping TF vs Numpy")
num_inp_channels = 32
num_out_channels = 64
filter_x = 3
filter_y = 4
filter_shape = (filter_x, filter_y, num_inp_channels, num_out_channels)
filter = np.random.randint(low=-8, high=8,size=filter_shape)
n = 32
start = time.time()
clipped_filter = convsv.Clip_OperatorNorm_NP(filter, [n, n], 10)
print("Numpy Clipping Time:", time.time() - start)
with self.test_session() as sess:
filter_tf = tf.constant(filter, dtype=tf.float32)
clipped_filter_tf, norm = convsv.Clip_OperatorNorm(filter_tf, [n, n], 10)
tf.global_variables_initializer().run()
start = time.time()
clipped_filter2 = sess.run(clipped_filter_tf)
print("TF Clipping Time:", time.time() - start)
self.assertAllClose(clipped_filter2, clipped_filter, atol=3e-5)
"""
if __name__ == '__main__':
test.main()
|
kaiweifan/horizon | openstack_dashboard/dashboards/project/loadbalancers/workflows.py | Python | apache-2.0 | 29,119 | 0.000275 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon.utils import fields
from horizon.utils import validators
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.utils import filters
ALLOCATE_FIP_URL = "horizon:project:access_and_security:floating_ips:allocate"
class DynamicPopulatedTypedChoiceField(forms.DynamicTypedChoiceField):
    """Choice field whose options are filled in dynamically (client-side).

    Because the choice list is populated after form construction, the
    server-side ``choices`` attribute does not reflect the real options,
    so the usual "value must be one of the declared choices" check is
    skipped entirely.
    """
    def valid_value(self, value):
        # Accept any submitted value; the declared choices list is empty
        # or stale for dynamically populated fields.
        return True
class AddPoolAction(workflows.Action):
    """Workflow action collecting the parameters for a new LBaaS pool.

    At construction time it populates the subnet, protocol and
    load-balancing-method choices, and — when the Neutron
    ``service-type`` extension is available — the provider choices.
    """

    name = forms.CharField(max_length=80, label=_("Name"))
    description = forms.CharField(
        initial="", required=False,
        max_length=80, label=_("Description"))
    # provider is optional because some LBaaS implementations do
    # not support the service-type extension.
    provider = forms.ChoiceField(label=_("Provider"), required=False)
    subnet_id = forms.ChoiceField(label=_("Subnet"))
    protocol = forms.ChoiceField(label=_("Protocol"))
    lb_method = forms.ChoiceField(label=_("Load Balancing Method"))
    admin_state_up = forms.BooleanField(label=_("Admin State"),
                                        initial=True, required=False)

    def __init__(self, request, *args, **kwargs):
        super(AddPoolAction, self).__init__(request, *args, **kwargs)
        tenant_id = request.user.tenant_id

        # Subnet choices: one entry per subnet of every network visible
        # to the tenant, labelled by its CIDR.
        subnet_id_choices = [('', _("Select a Subnet"))]
        try:
            networks = api.neutron.network_list_for_tenant(request, tenant_id)
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve networks list.'))
            networks = []
        for n in networks:
            for s in n['subnets']:
                subnet_id_choices.append((s.id, s.cidr))
        self.fields['subnet_id'].choices = subnet_id_choices

        protocol_choices = [('', _("Select a Protocol"))]
        protocol_choices.append(('HTTP', 'HTTP'))
        protocol_choices.append(('HTTPS', 'HTTPS'))
        self.fields['protocol'].choices = protocol_choices

        lb_method_choices = [('', _("Select a Method"))]
        lb_method_choices.append(('ROUND_ROBIN', 'ROUND_ROBIN'))
        lb_method_choices.append(('LEAST_CONNECTIONS', 'LEAST_CONNECTIONS'))
        lb_method_choices.append(('SOURCE_IP', 'SOURCE_IP'))
        self.fields['lb_method'].choices = lb_method_choices

        # Provider choices: only offered when the Neutron service-type
        # extension is supported; the deployment default is listed first.
        try:
            if api.neutron.is_extension_supported(request, 'service-type'):
                provider_list = api.neutron.provider_list(request)
                providers = [p for p in provider_list
                             if p['service_type'] == 'LOADBALANCER']
            else:
                # None signals "extension unsupported", which is treated
                # differently from an empty provider list below.
                providers = None
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve providers list.'))
            providers = []

        if providers:
            default_providers = [p for p in providers if p.get('default')]
            if default_providers:
                default_provider = default_providers[0]['name']
            else:
                default_provider = None
            provider_choices = [(p['name'], p['name']) for p in providers
                                if p['name'] != default_provider]
            if default_provider:
                provider_choices.insert(
                    0, (default_provider,
                        _("%s (default)") % default_provider))
        else:
            # No usable provider: show an explanatory placeholder and
            # make the field read-only.
            if providers is None:
                msg = _("Provider for Load Balancer is not supported.")
            else:
                msg = _("No provider is available.")
            provider_choices = [('', msg)]
            self.fields['provider'].widget.attrs['readonly'] = True
        self.fields['provider'].choices = provider_choices

    class Meta:
        name = _("Add New Pool")
        permissions = ('openstack.services.network',)
        help_text = _("Create Pool for current project.\n\n"
                      "Assign a name and description for the pool. "
                      "Choose one subnet where all members of this "
                      "pool must be on. "
                      "Select the protocol and load balancing method "
                      "for this pool. "
                      "Admin State is UP (checked) by default.")
class AddPoolStep(workflows.Step):
    """Workflow step wrapping :class:`AddPoolAction`."""

    action_class = AddPoolAction
    contributes = ("name", "description", "subnet_id", "provider",
                   "protocol", "lb_method", "admin_state_up")

    def contribute(self, data, context):
        # Merge the action's cleaned data into the workflow context;
        # when no data was submitted the step contributes nothing (None).
        merged = super(AddPoolStep, self).contribute(data, context)
        return merged if data else None
class AddPool(workflows.Workflow):
    """Workflow that creates a LBaaS pool for the current project."""

    slug = "addpool"
    name = _("Add Pool")
    finalize_button_name = _("Add")
    success_message = _('Added pool "%s".')
    failure_message = _('Unable to add pool "%s".')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (AddPoolStep,)

    def format_status_message(self, message):
        # Interpolate the pool name into the success/failure template.
        return message % self.context.get('name')

    def handle(self, request, context):
        # Report success/failure to the workflow framework instead of
        # letting API errors propagate.
        try:
            api.lbaas.pool_create(request, **context)
        except Exception:
            return False
        return True
class AddVipAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
# router_network: will be added in __init__() if neutron plugin supports
# service_router extension
# floatingip_id: will be added in __init__() if neutron plugin supports
# service_router extension
# floatip_address: will be added in __init__() if neutron plugin DOES NOT
# support service_router extension
# other_address: will be added in __init__() if neutron plugin DOES NOT
# support service_router extension
protocol_port = forms.IntegerField(label=_("Protocol Port"), min_value=1,
help_text=_("Enter an integer value "
"between 1 and 65535."),
validators=[validators.validate_port_range])
protocol = forms.ChoiceField(label=_("Protocol"))
session_persistence = forms.ChoiceField(
required=False, initial={}, label=_("Session Persistence"))
cookie_name = forms.CharField(
initial="", required=False,
max_length=80, label=_("Cookie Name"),
help_text=_("Required for APP_COOKIE persistence;"
" Ignored otherwise."))
connection_limit = forms.IntegerField(
min_value=-1, label=_("Connection Limit"),
help_text=_("Maximum number of connections allowed "
"for the VIP or '-1' if the limit is not set"))
admin_state_up = forms.BooleanField(
label=_("Admin State"), initial=True, required=False)
def __init__(self, request, context, *args, **kwargs):
super(AddVipAction, self).__init__(request, context, *args, ** | kwargs)
if api.neutron.is_extension_supported(request, 'service-router'):
router | _network_choices = self.populate_router_network_choices(
request, context)
self.fields.insert(
|
paveenju/mlat-sim | main/figure2_2a.py | Python | gpl-3.0 | 1,441 | 0.008328 | '''
Created on Dec 5, 2016
@author: paveenju
'''
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib2tikz.save as tikz_save
import utils.functions as fn
if __name__ == '__main__':
pass
def axes():
plt.axhline(0, alpha=.1)
plt.axvline(0, alpha=.1)
# input variables
dL = np.array([0.4, 0.6, 0.9])
P1, P2 = [1.0, 0.0, 0], [-1.0, 0.0, 0]
d = np.linalg.norm(np.mat(P1)-np.mat(P2))
c = d/2
A = dL/2
# data generation
x_p = np.linspace(-3, 3, 100)
y_p = np.linspace(-3, 3, 100)
x_p, y | _p = np.meshgrid(x_p, y_p)
x, y, h, k = fn.linear_transformation(P1, P2, x_p, y_p)
# matplotlib
mpl.rcParams['lines.color'] = 'k'
mpl.rcParams['axes.prop_cycle'] = mpl.cycler('color', ['k'])
for a in A:
plt.contour(x_p, y_p,
((x**2/a**2) - (y**2/(c**2-a**2)) - 1),
[0], colors='b')
axes()
plt.annotate(r'$\tau_1$', xy=(0, 0), xytext=(0.67, 2.8), fontsize=20)
plt.annotate(r'$\tau_2$', xy=(0, 0), xytext=(0.9, 2.5), fontsize=20)
plt.annot | ate(r'$\tau_3$', xy=(0, 0), xytext=(1.2, 2.1), fontsize=20)
plt.text(1.75, 0.5, r'$\tau_1=0.4$' + '\n' + r'$\tau_2=0.6$' + '\n' + r'$\tau_2=0.9$',
bbox={'facecolor':'white', 'alpha':0.5, 'pad':10},
fontsize=20)
plt.plot(P1[0], P1[1], 'xr', mew=5, ms=15)
plt.plot(P2[0], P2[1], 'xr', mew=5, ms=15)
#plt.show()
tikz_save('../output/figure2_2a.tex') |
jamielennox/python-kiteclient | kiteclient/common/meta_data.py | Python | apache-2.0 | 1,416 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import struct
from Crypto import Random
import six
from kiteclient.opensta | ck.common import jsonutils
from kiteclient.openstack.common import timeutils
class Metadata(object):
    """Message metadata block exchanged between Kite peers.

    Carries source/destination identities, a creation timestamp and a
    random 64-bit nonce, and can serialise itself to base64-encoded
    JSON.  (The ``nonce=None`` default below reconstructs a line that
    was garbled by text extraction.)
    """

    def __init__(self, source, destination, timestamp=None, nonce=None):
        self.source = source
        self.destination = destination
        # Default to "now" / a fresh nonce.  Note a falsy value (e.g. a
        # nonce of 0) is also replaced, which is acceptable for a random
        # 64-bit nonce.
        self.timestamp = timestamp or timeutils.utcnow()
        self.nonce = nonce or self.gen_nonce()

    @classmethod
    def gen_nonce(cls):
        # 64 bits of cryptographically strong randomness, as an integer.
        return struct.unpack('Q', Random.new().read(8))[0]

    def get_data(self):
        """Return the metadata as a plain dict ready for serialisation."""
        return {'source': self.source,
                'destination': self.destination,
                'timestamp': timeutils.strtime(self.timestamp),
                'nonce': self.nonce}

    def encode(self):
        """Return the metadata as base64-encoded JSON bytes."""
        data = self.get_data()
        return base64.b64encode(six.b(jsonutils.dumps(data)))
|
benoitc/uzmq | docs/conf.py | Python | mit | 9,957 | 0.007231 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# uzmq documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 7 00:32:37 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import sys
class Mock(object):
    """Import stand-in used to fake unavailable modules (e.g. ``zmq``)
    while building documentation: any call returns another ``Mock`` and
    any attribute access yields a plausible placeholder.
    """

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        # Module-introspection attributes that Sphinx/autodoc touch.
        if name in ('__file__', '__path__'):
            return '/dev/null'
        # Capitalised names look like classes: hand back a fresh type so
        # subclassing in the documented code keeps working.
        if name[0] == name[0].upper():
            fake_cls = type(name, (), {})
            fake_cls.__module__ = __name__
            return fake_cls
        # Anything else behaves like a further mock attribute.
        return Mock()
MOCK_MODULES = ['zmq']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
skip_coverage = os.environ.get('SKIP_COVERAGE', None) == 'True'
if on_rtd:
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
CURDIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(CURDIR, '..', '..'))
sys.path.append(os.path.join(CURDIR, '..'))
sys.path.append(os.path.join(CURDIR, '.'))
import uzmq
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'uzmq'
copyright = '2012, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "%s.%s" % (uzmq.version_info[0], uzmq.version_info[1])
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the | HTML footer. Default is True.
#html_show_sphinx = True
# If | true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'uzmqdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'uzmq.tex', 'uzmq Documentation',
'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'uzmq', 'uzmq Documentation',
['Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------- |
ulno/micropython-extra-ulno | examples/integriot_test/testnode1/files/autostart.py | Python | mit | 1,163 | 0.00258 | # user.py is the autostart code for a ulnoiot node.
# Configure your devices, sensors and local interaction here.
# Always start with this to make everything from ulnoiot available.
# Therefore, do not delete the following line.
from ulnoiot import *
# The following is just example code, adjust to your needs accordingly.
# wifi and mqtt connect are done automatical | ly, we assume for this example
# the following configuration.
# mqtt("ulnoiotgw", "myroom/test1")
## Use some shields
# The onboard-led is a | lways available.
# With this configuration it will report under myroom/test1/blue
# and can be set via sending off or on to myroom/test1/blue/test.
from ulnoiot.shield.onboardled import blue
blue.high() # make sure it's off (it's reversed)
## Add some other devices
# Add a button with a slightly higher debounce rate, which will report
# in the topic myroom/test1/button1.
button("b1", d6, pullup=False, threshold=2)
# Count rising signals on d2=Pin(4) and
# report number counted at myroom/test1/shock1.
# trigger("shock1",Pin(4))
## Start to transmit every 10 seconds (or when status changed).
# Don't forget the run-comamnd at the end.
run(5)
|
ecreall/nova-ideo | novaideo/views/reports_management/restor.py | Python | agpl-3.0 | 2,013 | 0.000497 | # Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import deform
from pyramid.view import view_config
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.default_behavior import Cancel
from pontus.form import FormView
from pontus.view import BasicView
from pontus.view_operation import MultipleView
from novaideo.content.processes.reports_management.behaviors import Restor
from novaideo.core import SignalableEntity
from novaideo import _
class RestorViewStudyRestor(BasicView):
    """Confirmation panel shown before restoring a reported entity."""

    title = _('Alert for restoring')
    name = 'alertforpublication'
    template = 'novaideo:views/reports_management/templates/alert_restor.pt'

    def update(self):
        # Render the alert template for the current context and wrap it
        # as a single item at this view's coordinates.
        rendered = self.content(args={'context': self.context},
                                template=self.template)['body']
        item = self.adapt_item(rendered, self.viewid)
        return {'coordinates': {self.coordinates: [item]}}
class RestorFormView(FormView):
    """AJAX form that carries out the Restore behaviour."""

    title = _('Restore')
    behaviors = [Restor, Cancel]
    formid = 'formrestor'
    name = 'formrestor'

    def before_update(self):
        # Point the form at the generic novaideo API endpoint for the
        # Restore workflow node, and style it as an AJAX form.
        query = {'op': 'update_action_view',
                 'node_id': Restor.node_definition.id}
        self.action = self.request.resource_url(
            self.context, 'novaideoapi', query=query)
        self.schema.widget = deform.widget.FormWidget(
            css_class='deform novaideo-ajax-form')
@view_config(
name='restor',
context=SignalableEntity,
renderer='pontus:templates/views_templates/grid.pt',
)
class RestorView(MultipleView):
title = _('Restore')
name = 'restor'
behaviors = [Restor]
| viewid = 'restorentity'
template = 'pontus:templates/views_templates/simple_multipleview.pt'
views = (RestorViewStudyRestor, RestorFormV | iew)
validators = [Restor.get_validator()]
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{Restor: RestorView})
|
takeflight/wagtail | wagtail/tests/testapp/models.py | Python | bsd-3-clause | 43,462 | 0.001196 | import hashlib
import json
import os
import uuid
from django import forms
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from modelcluster.contrib.taggit import ClusterTaggableManager
from modelcluster.fields import ParentalKey, ParentalManyToManyField
from modelcluster.models import ClusterableModel
from taggit.managers import TaggableManager
from taggit.models import ItemBase, TagBase, TaggedItemBase
from wagtail.admin.edit_handlers import (
FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel,
TabbedInterface)
from wagtail.admin.forms import WagtailAdminPageForm
from wagtail.admin.mail import send_mail
from wagtail.contrib.forms.forms import FormBuilder
from wagtail.contrib.forms.models import (
FORM_FIELD_CHOICES, AbstractEmailForm, AbstractFormField, AbstractFormSubmission)
from wagtail.contrib.forms.views import SubmissionsListView
from wagtail.contrib.settings.models import BaseSetting, register_setting
from wagtail.contrib.sitemaps import Sitemap
from wagtail.contrib.table_block.blocks import TableBlock
from wagtail.core.blocks import CharBlock, RawHTMLBlock, RichTextBlock, StructBlock
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Orderable, Page, PageManager, PageQuerySet, Task
from wagtail.documents.edit_handlers import DocumentChooserPanel
from wagtail.documents.models import AbstractDocument, Document
from wagtail.images.blocks import ImageChooserBlock
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.images.models import AbstractImage, AbstractRendition, Image
from wagtail.search import index
from wagtail.snippets.edit_handlers import SnippetChooserPanel
from wagtail.snippets.models import register_snippet
from wagtail.utils.decorators import cached_classmethod
from .forms import FormClassAdditionalFieldPageForm, ValidatedPageForm
EVENT_AUDIENCE_CHOICES = (
('public', "Public"),
('private', "Private"),
)
COMMON_PANELS = (
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('show_in_menus'),
FieldPanel('search_description'),
)
# Link fields
class LinkFields(models.Model):
link_external = models.URLField("External link", blank=True)
link_page = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
related_name='+',
on_delete=models.CASCADE
)
link_document = models.ForeignKey(
'wagtaildocs.Document',
null=True,
blank=True,
related_name='+',
on_delete=models.CASCADE
)
@property
def link(self):
if self.link_page:
return self.link_page.url
elif self.link_document:
return self.link_document.url
else:
return self.link_external
panels = [
FieldPanel('link_external'),
PageChooserPanel('link_page'),
DocumentChooserPanel('link_document'),
]
class Meta:
abstract = True
# Carousel items
class CarouselItem(LinkFields):
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
embed_url = models.URLField("Embed URL", blank=True)
caption = models.CharField(max_length=255, blank=True)
panels = [
ImageChooserPanel('image'),
FieldPanel('embed_url'),
FieldPanel('caption'),
MultiFieldPanel(LinkFields.panels, "Link"),
]
class Meta:
abstract = True
# Related links
class RelatedLink(LinkFields):
title = models.CharField(max_length=255, help_text="Link title")
panels = [
FieldPanel('title'),
MultiFieldPanel(LinkFields.panels, "Link"),
]
class Meta:
abstract = True
# Simple page
class SimplePage(Page):
    """Minimal test page type with a single plain-text body field."""

    content = models.TextField()

    content_panels = [
        FieldPanel('title', classname="full title"),
        FieldPanel('content'),
    ]

    def get_admin_display_title(self):
        # Tag the admin listing title so simple pages are recognisable.
        base_title = super().get_admin_display_title()
        return "{} (simple page)".format(base_title)
# Page with Excluded Fields when copied
class PageWithExcludedCopyField(Page):
content = models.TextField()
# Exclude this field from being copied
special_field = models.CharField(
blank=True, max_length=255, default='Very Special')
exclude_fields_in_copy = ['special_field']
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('special_field | '),
FieldPanel('content'),
]
class PageWithOldStyleRouteMethod(Page):
"""
Prior to Wagtail 0.4, the route() method on Page returned an HttpResponse
rather than a Page instance. As subclasses of Page may override route,
we need to continue accepting this convention (albeit as a deprecated API).
"""
content = models.TextField()
template = 'tests/simple_page.html'
def route(self, request, path_components):
return self. | serve(request)
# File page
class FilePage(Page):
file_field = models.FileField()
FilePage.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('file_field'),
]
# Event page
class EventPageCarouselItem(Orderable, CarouselItem):
page = ParentalKey('tests.EventPage', related_name='carousel_items', on_delete=models.CASCADE)
class EventPageRelatedLink(Orderable, RelatedLink):
page = ParentalKey('tests.EventPage', related_name='related_links', on_delete=models.CASCADE)
class EventPageSpeakerAward(Orderable, models.Model):
speaker = ParentalKey('tests.EventPageSpeaker', related_name='awards', on_delete=models.CASCADE)
name = models.CharField("Award name", max_length=255)
date_awarded = models.DateField(null=True, blank=True)
panels = [
FieldPanel('name'),
FieldPanel('date_awarded'),
]
class EventPageSpeaker(Orderable, LinkFields, ClusterableModel):
page = ParentalKey('tests.EventPage', related_name='speakers', related_query_name='speaker', on_delete=models.CASCADE)
first_name = models.CharField("Name", max_length=255, blank=True)
last_name = models.CharField("Surname", max_length=255, blank=True)
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
@property
def name_display(self):
return self.first_name + " " + self.last_name
panels = [
FieldPanel('first_name'),
FieldPanel('last_name'),
ImageChooserPanel('image'),
MultiFieldPanel(LinkFields.panels, "Link"),
InlinePanel('awards', label="Awards"),
]
class EventCategory(models.Model):
name = models.CharField("Name", max_length=255)
def __str__(self):
return self.name
# Override the standard WagtailAdminPageForm to add validation on start/end dates
# that appears as a non-field error
class EventPageForm(WagtailAdminPageForm):
    """Page form that reports start/end date mismatch as a non-field error."""

    def clean(self):
        cleaned_data = super().clean()

        # An event must not end before it starts (only checked when both
        # dates were supplied).
        start = cleaned_data['date_from']
        end = cleaned_data['date_to']
        if start and end and end < start:
            raise ValidationError('The end date must be after the start date')

        return cleaned_data
class EventPage(Page):
date_from = models.DateField("Start date", null=True)
date_to = models.DateField(
"End date",
null=True,
blank=True,
help_text="Not required if event is on a single day"
)
time_from = models.TimeField("Start time", null=True, blank=True)
time_to = models.TimeField("End time", null=True, blank=True)
audience = models |
gmarciani/ipath | model/linkedlist.py | Python | mit | 11,316 | 0.009544 | #Interface Import
from model.base.baselinkedlist import baselinkedlist
class SimpleLinkedList(baselinkedlist):
    """
    Singly linked list with head and tail references.

    Insertions at either end are O(1); removing the last record or an
    arbitrary record requires an O(n) traversal because records carry no
    back-links.

    Fixes relative to the previous revision:
      * ``delete_record`` no longer decrements the element count when the
        given record is not actually in the list;
      * deleting the sole record no longer leaves ``_last`` pointing at
        the removed node.
    """

    class Record:
        """Internal node: one element plus the link to the next node."""

        def __init__(self, element):
            self.element = element
            self._next = None

        def __repr__(self):
            return str(self.element)

        def __str__(self):
            return self.__repr__()

    def __init__(self):
        self._first = None        # head record, None when empty
        self._last = None         # tail record, None when empty
        self._num_elements = 0    # number of stored elements

    def is_empty(self):
        """
        Returns True if linked-list is empty, otherwise False.

        @rtype: bool
        @return: True if empty, False otherwise.
        """
        return self._first is None

    def add_as_first(self, element):
        """
        Adds element as first into the linked-list.

        @type element: object
        @param element: element to be added as first into the linked-list.
        """
        record = SimpleLinkedList.Record(element)
        if self._first is None:
            self._first = self._last = record
        else:
            record._next = self._first
            self._first = record
        self._num_elements += 1

    def add_as_last(self, element):
        """
        Adds element as last into the linked-list.

        @type element: object
        @param element: element to be added as last into the linked-list.
        """
        record = SimpleLinkedList.Record(element)
        if self._first is None:
            self._first = self._last = record
        else:
            self._last._next = record
            self._last = record
        self._num_elements += 1

    def get_first(self):
        """
        Returns the first element into the linked-list (None when empty).

        @rtype: object
        @return: first element into the linked-list.
        """
        return None if self._first is None else self._first.element

    def get_last(self):
        """
        Returns the last element into the linked-list (None when empty).

        @rtype: object
        @return: last element into the linked-list.
        """
        return None if self._last is None else self._last.element

    def get_first_record(self):
        """
        Returns the first record into the linked-list (None when empty).

        @rtype: Record
        @return: first record into the linked-list.
        """
        return None if self._first is None else self._first

    def get_last_record(self):
        """
        Returns the last record into the linked-list (None when empty).

        @rtype: Record
        @return: last record into the linked-list.
        """
        return None if self._first is None else self._last

    def pop_first(self):
        """
        Deletes the first record from the linked-list and returns the
        correspondent element (None when the list is empty).

        @rtype: object
        @return: first element into the linked-list.
        """
        if self._first is None:
            return None
        first_element = self._first.element
        self._first = self._first._next
        if self._first is None:
            self._last = None
        self._num_elements -= 1
        return first_element

    def pop_last(self):
        """
        Deletes the last record from the linked-list and returns the
        correspondent element (None when the list is empty).  O(n): the
        predecessor of the tail must be found by traversal.

        @rtype: object
        @return: last element into the linked-list.
        """
        if self._first is None:
            return None
        last_element = self._last.element
        curr = self.get_first_record()
        prev = None
        while curr is not self._last:
            prev = curr
            curr = curr._next
        if prev is None:
            # The list held a single record.
            self._first = None
            self._last = None
        else:
            self._last = prev
            prev._next = None
        self._num_elements -= 1
        return last_element

    def delete_record(self, record):
        """
        Deletes the specified record from the linked-list.  A record that
        is not part of the list leaves the list (and its element count)
        unchanged.

        @type record: Record
        @param record: record to be deleted from the linked-list.
        """
        if self._first is None or record is None:
            return
        curr = self.get_first_record()
        prev = None
        while curr is not None:
            if curr is record:
                # Unlink the node.
                if prev is None:
                    self._first = curr._next
                else:
                    prev._next = curr._next
                if curr._next is None:
                    # Removed the tail: retarget _last (None when the
                    # list became empty).
                    self._last = prev
                # Decrement only after an actual removal; the count used
                # to be decremented even when the record was absent.
                self._num_elements -= 1
                return
            prev = curr
            curr = curr._next

    def __repr__(self):
        s = "["
        if self._first is not None:
            curr = self._first
            while curr is not None:
                if len(s) > 1:
                    s += ", "
                s += str(curr)
                curr = curr._next
        s += "]"
        return s

    def __str__(self):
        return self.__repr__()
class DoubleLinkedList(SimpleLinkedList, baselinkedlist):
class Record(SimpleLinkedList.Record):
def __init__(self, element):
SimpleLinkedList.Record.__init__(self, element)
self._prev = None
def __repr__(self):
return str(self.element)
def __str__(self):
return self.__repr__()
def add_as_first(self, element):
"""
Adds element as first into the linked-list.
add_as_first(element) -> None
@type element: object
@param element: element to be added as first into the linked-list.
"""
record = DoubleLinkedList.Record(element)
if self._first is None:
self._first = self._last = record
else:
self._first._prev = record
record._next = self._first
self._first = record
self._num_elements += 1
def add_as_last(self, element):
"""
Adds element as last into the linked-list.
add_as_last(element) -> None
@type element: object
@param element: element to be added as last into the linked-list.
"""
record = DoubleLinkedList.Record(element)
if self._first is None:
self._first = self._last = record
else:
record._prev = self._last
self._last._next = record
self._last = record
self._num_elements += 1
def pop_first(self):
"""
Deletes the first record from the linked-list, and return the correspondent element.
pop_first() -> first_element
@rtype: object
@return: first element into the linked-list.
"""
if self._first is None:
return None
else:
res = self._first.element
self._first = self._first._next
if self._first != None:
self._first._prev = None
else:
self._last = None
self._num_elements -= 1
return res
def pop_last(self):
"""
Deletes the last record from the linked-list, and return the correspondent element.
pop_last() -> last_element
@rtype: object
@return: last element into the linked-list.
"""
if self._first is None:
|
flandr/kerberos-keyutil | keyutil/keytab.py | Python | bsd-2-clause | 5,159 | 0.00252 | #!/usr/bin/env python
# Copyright (c) 2015, Nate Rosenblum
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import argparse
import derive
import getpass
import struct
import time
# http://www.ioplex.com/utilities/keytab.txt is helpful
class Keytab:
    """Kerberos keytab (version 0x502) generator.

    Entries are accumulated via add_entry() and serialized to the binary
    keytab wire format by serialize_keytab().
    """
    def __init__(self):
        # Mapping of supported encryption-type names to their keytab codes.
        self.keytypes = {
            'des-cbc-md5': 0x0003,
            'aes128-cts-hmac-sha1-96': 0x0011,
            'rc4-hmac-md5': 0x0017,
        }
        self.entries = []

    class Entry:
        """One keytab entry: a principal plus its derived key material."""
        def __init__(self, principal, realm, kvno, encoding, components, key):
            self.principal = principal
            self.realm = realm
            self.kvno = kvno
            self.encoding = encoding   # numeric enctype code
            self.components = components  # principal name split on '/'
            self.key = key             # raw key bytes

    def add_entry(self, princ, kvno, keytype, password):
        """Derive a key from `password` and append an entry for `princ`.

        `princ` is "name@REALM"; raises ValueError for unsupported key types.
        """
        if keytype == 'rc4-hmac-md5':
            keybytes = derive.derive_password_rc4hmac(password)
        else:
            # Call form (not the Python-2-only statement form) so this line
            # is valid in both Python 2 and 3.
            raise ValueError("Unsupported key type {}".format(keytype))
        encoding = self.keytypes[keytype]
        [principal, realm] = princ.split('@')
        components = principal.split('/')
        self.entries.append(self.Entry(principal, realm, kvno, encoding,
            components, keybytes))

    @staticmethod
    def serialize_string(value):
        # Length-prefixed (16-bit big-endian) string.
        fmt = "!H{}s".format(len(value))
        return struct.pack(fmt, len(value), value)

    @staticmethod
    def serialize_bytearray(value):
        # Length-prefixed (16-bit big-endian) byte sequence.
        ret = bytearray()
        ret.extend(struct.pack('!H', len(value)))
        for b in value:
            ret.extend(struct.pack('!B', b))
        return ret

    def serialize(self, value):
        """Dispatch on value type; only str and bytearray are supported."""
        if isinstance(value, str):
            return self.serialize_string(value)
        elif isinstance(value, bytearray):
            return self.serialize_bytearray(value)
        else:
            raise(ValueError)

    def serialize_keytab(self):
        """Serialize all entries into keytab file-format bytes."""
        output = bytearray()
        # Version is fixed
        output += struct.pack('!H', 0x502)
        for entry in self.entries:
            ser = bytearray();
            # num_components
            ser += struct.pack('!H', len(entry.components))
            # realm
            ser += self.serialize(entry.realm)
            # Name components
            for component in entry.components:
                ser += self.serialize(component)
            # Always KRB5_NT_PRINCIPAL
            ser += struct.pack('!L', 1)
            # Timestamp: current time ("whatever, right now").
            ser += struct.pack('!L', int(time.time()))
            # Kvno. XXX may overflow a single byte
            ser += struct.pack('!B', entry.kvno)
            # Key encoding
            ser += struct.pack('!H', entry.encoding)
            # Key
            ser += self.serialize(entry.key);
            # Each entry is preceded by its signed 32-bit length.
            output += struct.pack('!l', len(ser))
            output += ser
        return output
def main():
    """CLI entry point: prompt for a password (unless given) and write a
    single-entry rc4-hmac-md5 keytab for the supplied principal."""
    parser = argparse.ArgumentParser()
    parser.add_argument("principal", metavar='principal', type=str, nargs=1,
        help='Principal name (name@realm)')
    # type=int so a user-supplied kvno is numeric; struct.pack('!B', ...)
    # in serialize_keytab would fail on the raw string argparse yields.
    parser.add_argument("--kvno", dest="kvno", action="store", type=int,
        default=1, help='Key version number (default 1)')
    parser.add_argument("--password", dest="password", action="store",
        default=None, help='Password argument (default is to prompt)')
    parser.add_argument("-o", "--output", dest="output", action="store",
        default="keytab", help='Output keytab file name')
    options = parser.parse_args()

    if not options.password:
        options.password = getpass.getpass("Password")

    kt = Keytab()
    kt.add_entry(options.principal[0], options.kvno, "rc4-hmac-md5",
        options.password)
    ktbytes = kt.serialize_keytab()
    with open(options.output, 'wb') as out:
        out.write(ktbytes)

if __name__ == "__main__":
    main()
ya-mouse/python-opcua | opcua/internal_server.py | Python | lgpl-3.0 | 8,403 | 0.002856 | """
Internal server implementing opcu-ua interface. can be used on server side or to implement binary/https opc-ua servers
"""
from datetime import datetime
from copy import copy
import logging
from threading import Lock
from enum import Enum
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from opcua import ua
from opcua import utils
from opcua import Node
from opcua.address_space import AddressSpace
from opcua.address_space import AttributeService
from opcua.address_space import ViewService
from opcua.address_space import NodeManagementService
from opcua.address_space import MethodService
from opcua.subscription_service import SubscriptionService
from opcua import standard_address_space
from opcua.users import User
class SessionState(Enum):
    # Lifecycle of an OPC-UA session: Created on CreateSession,
    # Activated on ActivateSession, Closed on CloseSession.
    Created = 0
    Activated = 1
    Closed = 2
class InternalServer(object):
    """In-process OPC-UA server core: owns the address space, the service
    objects that operate on it, and the background loop for subscriptions."""

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.endpoints = []
        self._channel_id_counter = 5
        # When True, a remote user logging in as "admin"/"Admin" is elevated
        # (see InternalSession.activate_session).
        self.allow_remote_admin = True
        # All services share the single address space instance.
        self.aspace = AddressSpace()
        self.attribute_service = AttributeService(self.aspace)
        self.view_service = ViewService(self.aspace)
        self.method_service = MethodService(self.aspace)
        self.node_mgt_service = NodeManagementService(self.aspace)
        # Populate the standard OPC-UA namespace before anything else uses it.
        standard_address_space.fill_address_space(self.node_mgt_service)
        #standard_address_space.fill_address_space_from_disk(self.aspace)
        self.loop = utils.ThreadLoop()
        self.subscription_service = SubscriptionService(self.loop, self.aspace)
        # create a session to use on server side
        self.isession = InternalSession(self, self.aspace, self.subscription_service, "Internal", user=User.Admin)
        self.current_time_node = Node(self.isession, ua.NodeId(ua.ObjectIds.Server_ServerStatus_CurrentTime))
        uries = ["http://opcfoundation.org/UA/"]
        ns_node = Node(self.isession, ua.NodeId(ua.ObjectIds.Server_NamespaceArray))
        ns_node.set_value(uries)

    def load_address_space(self, path):
        # Load a previously dumped address space from disk.
        self.aspace.load(path)

    def dump_address_space(self, path):
        # Persist the current address space to disk.
        self.aspace.dump(path)

    def start(self):
        """Start the background loop, mark the server running and begin the
        once-a-second CurrentTime refresh."""
        self.logger.info("starting internal server")
        self.loop.start()
        Node(self.isession, ua.NodeId(ua.ObjectIds.Server_ServerStatus_State)).set_value(0)
        Node(self.isession, ua.NodeId(ua.ObjectIds.Server_ServerStatus_StartTime)).set_value(datetime.now())
        self._set_current_time()

    def stop(self):
        self.logger.info("stopping internal server")
        self.loop.stop()

    def _set_current_time(self):
        # Refresh the CurrentTime node and reschedule itself every second.
        self.current_time_node.set_value(datetime.now())
        self.loop.call_later(1, self._set_current_time)

    def get_new_channel_id(self):
        # Monotonically increasing secure-channel id.
        self._channel_id_counter += 1
        return self._channel_id_counter

    def add_endpoint(self, endpoint):
        self.endpoints.append(endpoint)

    def get_endpoints(self, params=None, sockname=None):
        """Return endpoint descriptions; when `sockname` (host, port) is
        given, rewrite each endpoint URL so the client sees an address it
        can actually reach."""
        self.logger.info("get endpoint")
        if sockname:
            #return to client the ip address it has access to
            edps = []
            for edp in self.endpoints:
                edp1 = copy(edp)
                url = urlparse(edp1.EndpointUrl)
                url = url._replace(netloc=sockname[0] + ":" + str(sockname[1]))
                edp1.EndpointUrl = url.geturl()
                edps.append(edp1)
            return edps
        # Return a copy so callers cannot mutate the internal list.
        return self.endpoints[:]

    def find_servers(self, params):
        #FIXME: implement correctly
        servers = []
        for edp in self.endpoints:
            servers.append(edp.Server)
        return servers

    def create_session(self, name, user=User.Anonymous):
        # Each call yields an independent session sharing this server's state.
        return InternalSession(self, self.aspace, self.subscription_service, name, user=user)
class InternalSession(object):
_counter = 10
_auth_counter = 1000
def __init__(self, internal_server, aspace, submgr, name, user=User.Anonymous):
self.logger = logging.getLogger(__name__)
self.iserver = internal_server
self.aspace = aspace
self.subscription_service = submgr
self.name = name
self.user = user
self.state = SessionState.Created
self.session_id = ua.NodeId(self._counter)
InternalSession._counter += 1
self.authentication_token = ua.NodeId(self._auth_counter)
InternalSession._auth_counter += 1
self.nonce = utils.create_nonce()
self.subscriptions = []
#self.logger.debug("Created internal session %s for user %s", self.name, self.user)
print("Created internal session {} for user {}".format(self.name, self.user))
self._lock = Lock()
    def __str__(self):
        # Human-readable summary including identifiers useful for debugging.
        return "InternalSession(name:{}, user:{}, id:{}, auth_token:{})".format(self.name, self.user, self.session_id, self.authentication_token)
    def get_endpoints(self, params=None, sockname=None):
        # Delegate to the server, which may rewrite URLs for `sockname`.
        return self.iserver.get_endpoints(params, sockname)
    def create_session(self, params, sockname=None):
        """Answer a CreateSession request with this session's ids, nonce and
        the server's endpoint list."""
        self.logger.info("Create session request")
        result = ua.CreateSessionResult()
        result.SessionId = self.session_id
        result.AuthenticationToken = self.authentication_token
        # Grant whatever timeout the client requested.
        result.RevisedSessionTimeout = params.RequestedSessionTimeout
        result.MaxRequestMessageSize = 65536
        result.ServerNonce = self.nonce
        result.ServerEndpoints = self.get_endpoints(sockname=sockname)
        return result
    def close_session(self, delete_subs):
        # NOTE(review): `delete_subs` is ignored; subscriptions are always
        # deleted -- confirm whether honoring the flag was intended.
        self.logger.info("close session %s with subscriptions %s", self, self.subscriptions)
        self.state = SessionState.Closed
        # Iterate over a copy since delete_subscriptions mutates the list.
        self.delete_subscriptions(self.subscriptions[:])
    def activate_session(self, params):
        """Answer an ActivateSession request; only a freshly Created session
        may be activated."""
        self.logger.info("activate session")
        result = ua.ActivateSessionResult()
        if not self.state == SessionState.Created:
            result.Results = [ua.StatusCode(ua.StatusCodes.BadSessionIdInvalid)]
            return result
        result.ServerNonce = self.nonce
        # One (empty/OK) status per client software certificate.
        for _ in params.ClientSoftwareCertificates:
            result.Results.append(ua.StatusCode())
        self.state = SessionState.Activated
        id_token = ua.downcast_extobject(params.UserIdentityToken)
        if id_token.TypeId == ua.FourByteNodeId(ua.ObjectIds.UserNameIdentityToken_Encoding_DefaultBinary):
            # Elevate the "admin" user when the server permits remote admin;
            # no password check is visible here.
            if self.iserver.allow_remote_admin and id_token.UserName in ("admin", "Admin"):
                self.user = User.Admin
        return result
    def read(self, params):
        # Delegate attribute reads to the server-wide attribute service.
        return self.iserver.attribute_service.read(params)
def write(self, params):
retu | rn self.iserver.attribute_service.write(params, self.user)
    def browse(self, params):
        # Delegate browsing to the server-wide view service.
        return self.iserver.view_service.browse(params)
    def translate_browsepaths_to_nodeids(self, params):
        # Delegate browse-path resolution to the view service.
        return self.iserver.view_service.translate_browsepaths_to_nodeids(params)
def add_nodes(self, params):
return self.iserver.node_ | mgt_service.add_nodes(params, self.user)
    def add_method_callback(self, methodid, callback):
        # Register a Python callback backing a UA method node.
        return self.aspace.add_method_callback(methodid, callback)
    def call(self, params):
        # Delegate UA method calls to the method service.
        return self.iserver.method_service.call(params)
    def create_subscription(self, params, callback):
        result = self.subscription_service.create_subscription(params, callback)
        with self._lock:
            # Remember the id so close_session() can clean the subscription up.
            self.subscriptions.append(result.SubscriptionId)
        return result
    def create_monitored_items(self, params):
        # Delegate monitored-item creation to the subscription service.
        return self.subscription_service.create_monitored_items(params)
    def modify_monitored_items(self, params):
        # Delegate monitored-item modification to the subscription service.
        return self.subscription_service.modify_monitored_items(params)
    def republish(self, params):
        # Delegate republish requests to the subscription service.
        return self.subscription_service.republish(params)
    def delete_subscriptions(self, ids):
        # Forget any ids tracked by this session (unknown ids pass through),
        # then ask the service to actually delete them.
        for i in ids:
            with self._lock:
                if i in self.subscriptions:
                    self.subscriptions.remove(i)
        return self.subscription_service.delete_subscriptions(ids)
def delete_monitored_items(s |
drpngx/tensorflow | tensorflow/contrib/data/python/kernel_tests/serialization/tf_record_dataset_serialization_test.py | Python | apache-2.0 | 3,947 | 0.006081 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the TFRecordDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.contrib.data.python.kernel_tests import reader_dataset_ops_test_base
from tensorflow.contrib.data.python.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.platform import test
class TFRecordDatasetSerializationTest(
    reader_dataset_ops_test_base.TFRecordDatasetTestBase,
    dataset_serialization_test_base.DatasetSerializationTestBase):
  """Serialization tests for TFRecordDataset, with and without compression."""

  def _build_iterator_graph(self,
                            num_epochs,
                            batch_size=1,
                            compression_type=None,
                            buffer_size=None):
    """Builds a batched, repeated TFRecordDataset over the test files,
    optionally recompressing them as ZLIB or GZIP first."""
    filenames = self._createFiles()
    if compression_type == "ZLIB":
      zlib_files = []
      for i, fn in enumerate(filenames):
        with open(fn, "rb") as f:
          cdata = zlib.compress(f.read())
          zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
          with open(zfn, "wb") as f:
            f.write(cdata)
          zlib_files.append(zfn)
      filenames = zlib_files

    elif compression_type == "GZIP":
      gzip_files = []
      # NOTE(review): this branch iterates self.test_filenames while the
      # ZLIB branch uses the freshly created `filenames` list -- confirm
      # both refer to the same files produced by self._createFiles().
      for i, fn in enumerate(self.test_filenames):
        with open(fn, "rb") as f:
          gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
          with gzip.GzipFile(gzfn, "wb") as gzf:
            gzf.write(f.read())
          gzip_files.append(gzfn)
      filenames = gzip_files

    return core_readers.TFRecordDataset(
        filenames, compression_type,
        buffer_size=buffer_size).repeat(num_epochs).batch(batch_size)

  def testTFRecordWithoutBufferCore(self):
    num_epochs = 5
    batch_size = num_epochs
    num_outputs = num_epochs * self._num_files * self._num_records // batch_size
    # pylint: disable=g-long-lambda
    self.run_core_tests(
        lambda: self._build_iterator_graph(num_epochs, batch_size,
                                           buffer_size=0),
        lambda: self._build_iterator_graph(num_epochs * 2, batch_size),
        num_outputs)
    self.run_core_tests(
        lambda: self._build_iterator_graph(num_epochs, buffer_size=0), None,
        num_outputs * batch_size)
    # pylint: enable=g-long-lambda

  def testTFRecordWithBufferCore(self):
    num_epochs = 5
    num_outputs = num_epochs * self._num_files * self._num_records
    self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
                        lambda: self._build_iterator_graph(num_epochs * 2),
                        num_outputs)

  def testTFRecordWithCompressionCore(self):
    num_epochs = 5
    num_outputs = num_epochs * self._num_files * self._num_records
    self.run_core_tests(
        lambda: self._build_iterator_graph(num_epochs, compression_type="ZLIB"),
        lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
    self.run_core_tests(
        lambda: self._build_iterator_graph(num_epochs, compression_type="GZIP"),
        lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
test.main()
|
ldolberg/the_port_ors_hdx | src/district_distance.py | Python | apache-2.0 | 1,973 | 0.014698 |
import math
import operator
import json
from geopy.distance import great_circle
class Order_districts():
    """Stateless helpers to rank districts by distance from a reference point.

    All methods are @staticmethod: the originals were defined without `self`,
    so calling them on the class (or an instance) raised a TypeError.
    """

    @staticmethod
    def get_district_info():
        """Load the district-name -> (lat, lon) mapping from coordinates.json."""
        with open('coordinates.json') as coord_file:
            district_dict = json.load(coord_file)
        return district_dict

    @staticmethod
    def distance(lat0, lon0, lat, lon):
        '''
        Calculates distance on Earth's surface in meters
        '''
        return great_circle((lat0, lon0), (lat, lon)).meters

    @staticmethod
    def e_distance(x, y, w, z):
        '''
        Euclidean distance calculation for simple sorting purposes
        '''
        a = math.pow(x - w, 2)
        b = math.pow(y - z, 2)
        return math.sqrt(a + b)

    @staticmethod
    def order_districts(lat0, lon0, district_dict):
        '''
        Returns district names ordered by distance from the point (lat0, lon0).
        Inputs: 'lat0' = latitude of point at center of map
                'lon0' = longitude of point at center of map
                'district_dict' = dict of district names and (lat, lon) from
                                  get_district_info()
        Outputs: zipped (names, distances) ordered by increasing distance
        '''
        distance_dict = {}
        for key, value in district_dict.items():  # .items() works on py2 and py3
            lat = float(value[0])
            lon = float(value[1])
            # Qualify with the class name: class-scope names are not visible
            # inside method bodies, so a bare distance(...) raised NameError.
            distance_dict[key] = Order_districts.distance(lat0, lon0, lat, lon)
        sorted_districts = sorted(distance_dict.items(), key=operator.itemgetter(1))
        return zip(*sorted_districts)
|
aricaldeira/pybrasil | pybrasil/inscricao/pis.py | Python | lgpl-2.1 | 2,760 | 0.000366 | # -*- coding: utf-8 -*-
#
# PyBrasil - Functions useful for most Brazil's ERPs
#
# Copyright (C) 2016-
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2.1 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PyBrasil - Funções de validação necessárias a ERPs no Brasil
#
# Copyright (C) 2016-
# Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br>
#
# Este programa é um software livre: você pode redistribuir e/ou modificar
# este programa sob os termos da licença GNU Library General Public License,
# publicada pela Free Software Foundation, em sua versão 2.1 ou, de acordo
# com sua opção, qualquer versão posterior.
#
# Este programa é distribuido na esperança de que venha a ser útil,
# porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a
# GNU Library General Public License para mais detalhes.
#
# Você deve ter recebido uma cópia da GNU Library General Public License
# juntamente com este programa. Caso esse não seja o caso, acesse:
# <http://www.gnu.org/licenses/>
#
from __future__ import (division, print_function, unicode_literals,
absolute_import)
from builtins import str
from ..base import modulo11
from .cnpj_cpf import eh_tudo_igual
from .inscricao_estadual import LIMPA
def valida_pis(pis):
    u'''Check that a PIS number is valid according to its check digit.

    Non-digit characters are stripped and short inputs are left-padded
    with zeros to 11 digits before validation.
    '''
    pis = LIMPA.sub('', pis)

    if len(pis) < 11:
        pis = pis.zfill(11)

    if len(pis) != 11:
        return False

    if not pis.isdigit():
        return False

    # Reject sequences of a single repeated digit (e.g. '11111111111').
    if eh_tudo_igual(pis):
        return False

    digito = pis[-1]
    d1 = modulo11(pis[:-1], pesos=range(2, 10))
    # Removed: a duplicated `digito = pis[-1]` assignment and a leftover
    # debug print of the computed check digit.
    return digito == str(d1)
def formata_pis(pis):
    u'''Format a valid PIS as NNN.NNNNN.NN-D; return the input unchanged
    when it does not validate.'''
    if not valida_pis(pis):
        return pis

    pis = LIMPA.sub('', pis)
    # NOTE(review): str(int(pis)) drops leading zeros, which shortens
    # numbers starting with 0 before slicing -- confirm this is intended.
    pis = str(int(pis))
    digito = pis[-1]
    # Build the grouping from the reversed digits, then reverse back.
    numero = pis[:-1][::-1]
    numero = numero[0:2] + '.' + numero[2:7] + '.' + numero[7:]
    numero = numero[::-1]

    return numero + '-' + digito
|
python-poetry/poetry-core | src/poetry/core/_vendor/lark/grammar.py | Python | mit | 2,918 | 0.00377 | from .utils import Serialize
###{standalone
class Symbol(Serialize):
    """Base class for grammar symbols (terminals and non-terminals)."""
    __slots__ = ('name',)

    is_term = NotImplemented  # subclasses set this to True/False

    def __init__(self, name):
        self.name = name

    def __eq__(self, other):
        assert isinstance(other, Symbol), other
        return self.is_term == other.is_term and self.name == other.name

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # Hash by name only; __eq__ additionally distinguishes term/non-term.
        return hash(self.name)

    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self.name)

    fullrepr = property(__repr__)
class Terminal(Symbol):
    """A terminal grammar symbol (token type)."""
    __serialize_fields__ = 'name', 'filter_out'

    is_term = True

    def __init__(self, name, filter_out=False):
        self.name = name
        # When True this token is dropped from the resulting parse tree.
        self.filter_out = filter_out

    @property
    def fullrepr(self):
        return '%s(%r, %r)' % (type(self).__name__, self.name, self.filter_out)
class NonTerminal(Symbol):
    """A non-terminal grammar symbol (rule name)."""
    __serialize_fields__ = 'name',

    is_term = False
class RuleOptions(Serialize):
    """Per-rule options parsed from the grammar definition."""
    __serialize_fields__ = 'keep_all_tokens', 'expand1', 'priority', 'template_source', 'empty_indices'

    def __init__(self, keep_all_tokens=False, expand1=False, priority=None, template_source=None, empty_indices=()):
        self.keep_all_tokens = keep_all_tokens
        self.expand1 = expand1
        self.priority = priority
        self.template_source = template_source
        self.empty_indices = empty_indices

    def __repr__(self):
        # empty_indices intentionally omitted from the repr.
        return 'RuleOptions(%r, %r, %r, %r)' % (
            self.keep_all_tokens,
            self.expand1,
            self.priority,
            self.template_source
        )
class Rule(Serialize):
    """
    origin : a symbol
    expansion : a list of symbols
    order : index of this expansion amongst all rules of the same name
    """
    __slots__ = ('origin', 'expansion', 'alias', 'options', 'order', '_hash')

    __serialize_fields__ = 'origin', 'expansion', 'order', 'alias', 'options'
    __serialize_namespace__ = Terminal, NonTerminal, RuleOptions

    def __init__(self, origin, expansion, order=0, alias=None, options=None):
        self.origin = origin
        self.expansion = expansion
        self.alias = alias
        self.order = order
        self.options = options or RuleOptions()
        # Hash is precomputed since Rules are immutable and hashed heavily.
        self._hash = hash((self.origin, tuple(self.expansion)))

    def _deserialize(self):
        # Recompute the cached hash after deserialization (slots are not
        # serialized, so _hash is missing on a restored instance).
        self._hash = hash((self.origin, tuple(self.expansion)))

    def __str__(self):
        return '<%s : %s>' % (self.origin.name, ' '.join(x.name for x in self.expansion))

    def __repr__(self):
        return 'Rule(%r, %r, %r, %r)' % (self.origin, self.expansion, self.alias, self.options)

    def __hash__(self):
        return self._hash

    def __eq__(self, other):
        # Equality ignores alias/options/order: same origin + expansion.
        if not isinstance(other, Rule):
            return False
        return self.origin == other.origin and self.expansion == other.expansion
|
pratapvardhan/pandas | pandas/tests/plotting/common.py | Python | bsd-3-clause | 19,374 | 0.000103 | #!/usr/bin/env python
# coding: utf-8
import pytest
import os
import warnings
from pandas import DataFrame, Series
from pandas.compat import zip, iteritems
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.api import is_list_like
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_is_valid_plot_return_object)
import pandas.util._test_decorators as td
import numpy as np
from numpy import random
import pandas.plotting as plotting
from pandas.plotting._tools import _flatten
"""
This is a common base class used for various plotting tests
"""
def _skip_if_no_scipy_gaussian_kde():
    # Skip the calling test when scipy's gaussian_kde is unavailable.
    try:
        from scipy.stats import gaussian_kde  # noqa
    except ImportError:
        pytest.skip("scipy version doesn't support gaussian_kde")
def _ok_for_gaussian_kde(kind):
    # 'kde'/'density' plots need scipy's gaussian_kde; all kinds additionally
    # require matplotlib >= 1.5.
    if kind in ['kde', 'density']:
        try:
            from scipy.stats import gaussian_kde  # noqa
        except ImportError:
            return False

    return plotting._compat._mpl_ge_1_5_0()
@td.skip_if_no_mpl
class TestPlotBase(object):
    def setup_method(self, method):
        import matplotlib as mpl
        # Start from default rcParams so tests don't leak style settings.
        mpl.rcdefaults()

        # Cache matplotlib version gates used throughout the assertions.
        self.mpl_le_1_2_1 = plotting._compat._mpl_le_1_2_1()
        self.mpl_ge_1_3_1 = plotting._compat._mpl_ge_1_3_1()
        self.mpl_ge_1_4_0 = plotting._compat._mpl_ge_1_4_0()
        self.mpl_ge_1_5_0 = plotting._compat._mpl_ge_1_5_0()
        self.mpl_ge_2_0_0 = plotting._compat._mpl_ge_2_0_0()
        self.mpl_ge_2_0_1 = plotting._compat._mpl_ge_2_0_1()
        self.mpl_ge_2_2_0 = plotting._compat._mpl_ge_2_2_0()

        if self.mpl_ge_1_4_0:
            self.bp_n_objects = 7
        else:
            self.bp_n_objects = 8
        if self.mpl_ge_1_5_0:
            # 1.5 added PolyCollections to legend handler
            # so we have twice as many items.
            self.polycollection_factor = 2
        else:
            self.polycollection_factor = 1

        if self.mpl_ge_2_0_0:
            self.default_figsize = (6.4, 4.8)
        else:
            self.default_figsize = (8.0, 6.0)
        self.default_tick_position = 'left' if self.mpl_ge_2_0_0 else 'default'

        # Deterministic fixture data (fixed RNG seed).
        n = 100
        with tm.RNGContext(42):
            gender = np.random.choice(['Male', 'Female'], size=n)
            classroom = np.random.choice(['A', 'B', 'C'], size=n)

            self.hist_df = DataFrame({'gender': gender,
                                      'classroom': classroom,
                                      'height': random.normal(66, 4, size=n),
                                      'weight': random.normal(161, 32, size=n),
                                      'category': random.randint(4, size=n)})

        self.tdf = tm.makeTimeDataFrame()
        self.hexbin_df = DataFrame({"A": np.random.uniform(size=20),
                                    "B": np.random.uniform(size=20),
                                    "C": np.arange(20) + np.random.uniform(
                                        size=20)})
    def teardown_method(self, method):
        # Close all figures opened during the test.
        tm.close()
    @cache_readonly
    def plt(self):
        # Lazily imported and cached pyplot module.
        import matplotlib.pyplot as plt
        return plt
    @cache_readonly
    def colorconverter(self):
        # Lazily imported and cached matplotlib color converter.
        import matplotlib.colors as colors
        return colors.colorConverter
    def _check_legend_labels(self, axes, labels=None, visible=True):
        """
        Check each axes has expected legend labels

        Parameters
        ----------
        axes : matplotlib Axes object, or its list-like
        labels : list-like
            expected legend labels
        visible : bool
            expected legend visibility. labels are checked only when visible is
            True
        """

        if visible and (labels is None):
            raise ValueError('labels must be specified when visible is True')
        axes = self._flatten_visible(axes)
        for ax in axes:
            if visible:
                assert ax.get_legend() is not None
                self._check_text_labels(ax.get_legend().get_texts(), labels)
            else:
                # When invisible, the legend must simply be absent.
                assert ax.get_legend() is None
    def _check_data(self, xp, rs):
        """
        Check each axes has identical lines

        Parameters
        ----------
        xp : matplotlib Axes object
        rs : matplotlib Axes object
        """
        xp_lines = xp.get_lines()
        rs_lines = rs.get_lines()

        def check_line(xpl, rsl):
            # Compare the xy data of a pair of corresponding lines.
            xpdata = xpl.get_xydata()
            rsdata = rsl.get_xydata()
            tm.assert_almost_equal(xpdata, rsdata)

        assert len(xp_lines) == len(rs_lines)
        [check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)]
        tm.close()
    def _check_visible(self, collections, visible=True):
        """
        Check each artist is visible or not

        Parameters
        ----------
        collections : matplotlib Artist or its list-like
            target Artist or its list or collection
        visible : bool
            expected visibility
        """
        from matplotlib.collections import Collection
        # A single Collection is iterable but must be treated as one artist.
        if not isinstance(collections,
                          Collection) and not is_list_like(collections):
            collections = [collections]

        for patch in collections:
            assert patch.get_visible() == visible
def _get_colors_mapped(self, series, colors):
unique = series.unique()
# unique and colors length can be differed
# depending on slice value
mapped = dict(zip(unique, colors))
return [mapped[v] for v in series.values]
    def _check_colors(self, collections, linecolors=None, facecolors=None,
                      mapping=None):
        """
        Check each artist has expected line colors and face colors

        Parameters
        ----------
        collections : list-like
            list or collection of target artist
        linecolors : list-like which has the same length as collections
            list of expected line colors
        facecolors : list-like which has the same length as collections
            list of expected face colors
        mapping : Series
            Series used for color grouping key
            used for andrew_curves, parallel_coordinates, radviz test
        """
        from matplotlib.lines import Line2D
        from matplotlib.collections import (
            Collection, PolyCollection, LineCollection
        )
        conv = self.colorconverter
        if linecolors is not None:

            if mapping is not None:
                linecolors = self._get_colors_mapped(mapping, linecolors)
                linecolors = linecolors[:len(collections)]

            assert len(collections) == len(linecolors)
            for patch, color in zip(collections, linecolors):
                if isinstance(patch, Line2D):
                    result = patch.get_color()
                    # Line2D may contains string color expression
                    result = conv.to_rgba(result)
                elif isinstance(patch, (PolyCollection, LineCollection)):
                    result = tuple(patch.get_edgecolor()[0])
                else:
                    result = patch.get_edgecolor()

                # Normalize both sides to RGBA tuples before comparing.
                expected = conv.to_rgba(color)
                assert result == expected

        if facecolors is not None:

            if mapping is not None:
                facecolors = self._get_colors_mapped(mapping, facecolors)
                facecolors = facecolors[:len(collections)]

            assert len(collections) == len(facecolors)
            for patch, color in zip(collections, facecolors):
                if isinstance(patch, Collection):
                    # returned as list of np.array
                    result = patch.get_facecolor()[0]
                else:
                    result = patch.get_facecolor()

                if isinstance(result, np.ndarray):
                    result = tuple(result)

                expected = conv.to_rgba(color)
                assert result == expected
def _check_text_labels(self, texts, expected):
"""
Check each text has expected |
MrJohz/snooble | snooble/__init__.py | Python | mit | 4,756 | 0.002523 | import collections
import time
from urllib import parse as urlp
import requests
from . import oauth, errors, responses
from .ratelimit import RateLimiter
AUTH_DOMAIN = 'https://oauth.reddit.com/'
WWW_DOMAIN = 'https://www.reddit.com/'
Domain = collections.namedtuple('Domain', ['auth', 'www'])
class Snooble(object):
    """Rate-limited Reddit OAuth client wrapper."""

    @property
    def domain(self):
        # Returned namedtuple field order is (auth, www).
        return Domain(auth=self.auth_domain, www=self.www_domain)

    @domain.setter
    def domain(self, tup):
        # NOTE(review): the getter yields (auth, www) but this unpacks
        # (www, auth) -- round-tripping ``obj.domain = obj.domain`` would
        # swap the two domains; confirm the intended tuple order.
        self.www_domain, self.auth_domain = tup

    @property
    def authorized(self):
        # True only once oauth() has been given credentials AND authorize()
        # has obtained a token.
        return self._auth is not None and self._auth.authorized

    def __init__(self, useragent, bursty=False, ratelimit=(60, 60),
                 www_domain=WWW_DOMAIN, auth_domain=AUTH_DOMAIN, auth=None,
                 encode_all=False):
        self.useragent = useragent
        self.www_domain, self.auth_domain = www_domain, auth_domain

        # Accept either a ready-made RateLimiter or a (rate, per) tuple.
        if isinstance(ratelimit, RateLimiter):
            self._limiter = ratelimit
        else:
            self._limiter = RateLimiter(*ratelimit, bursty=bursty)

        self._session = requests.Session()
        self._session.headers.update({"User-Agent": useragent})
        # Proxy whose get/post calls are throttled by the rate limiter.
        self._limited_session = self._limiter.limitate(self._session, ['get', 'post'])

        self._auth = None
        if auth is not None:
            self.oauth(auth)

    def oauth(self, auth=None, *args, **kwargs):
        # With no arguments, act as a getter for the current credentials.
        if auth is None and not len(args) and not len(kwargs):
            return self._auth
        elif not isinstance(auth, oauth.OAuth):
            auth = oauth.OAuth(auth, *args, **kwargs)

        # Install the new credentials and return the previous ones.
        old_auth, self._auth = self._auth, auth
        return old_auth

    def auth_url(self, state):
        """Build the user-facing authorization URL (explicit/implicit kinds)."""
        if self._auth is None:
            raise ValueError("Cannot create auth url witout credentials")
        if self._auth.kind not in (oauth.EXPLICIT_KIND, oauth.IMPLICIT_KIND):
            raise ValueError("Selected auth kind does not use authorization URL")

        response_type = 'code' if self._auth.kind == oauth.EXPLICIT_KIND else 'token'
        options = {
            "client_id": self._auth.client_id,
            "response_type": response_type,
            "state": state,
            "redirect_uri": self._auth.redirect_uri,
            "scope": ",".join(self._auth.scopes)
        }

        if self._auth.kind == oauth.EXPLICIT_KIND:
            options['duration'] = self._auth.duration

        base = urlp.urljoin(self.domain.www, 'api/v1/authorize')
        if self._auth.mobile:
            base += ".compact"

        # Query string is built by hand with URL-quoted values.
        base += "?" + "&".join("{k}={v}".format(k=k, v=urlp.quote_plus(v))
                               for (k, v) in options.items())
        return base

    def authorize(self, code=None, expires=3600):
        """Exchange `code` (or use implicit token) for an access token."""
        if self._auth is None:
            raise ValueError("Attempting authorization without credentials")
        elif self._auth.kind not in oauth.ALL_KINDS:
            raise ValueError("Unrecognised auth kind {k}".format(k=self._auth.kind))

        create_auth_request = oauth.AUTHORIZATION_METHODS[self._auth.kind]
        response = create_auth_request(self, self._auth, self._limited_session, code)

        if response is None and self._auth.kind == oauth.IMPLICIT_KIND:
            # implicit kind does not send confirmation request, it has already been
            # given the correct token, just use that.
            self._auth.authorization = \
                oauth.Authorization(token_type='bearer', recieved=time.time(),
                                    token=code, length=expires)
        elif response.status_code != 200:
            m = "Authorization failed (are all your details correct?)"
            raise errors.RedditError(m, response=response)
        elif 'error' in response.json():
            m = "Authorization failed due to error: {error!r}"
            error = response.json()['error']
            raise errors.RedditError(m.format(error=error), response=response)
        else:
            r = response.json()
            self._auth.authorization = \
                oauth.Authorization(token_type=r['token_type'], recieved=time.time(),
                                    token=r['access_token'], length=r['expires_in'])

    def get(self, url, **kwargs):
        """Rate-limited authenticated GET against the OAuth API domain."""
        if not self.authorized:
            raise ValueError("Snooble.authorize must be called before making requests")
        else:
            headers = {"Authorization": " ".join((self._auth.authorization.token_type,
                                                  self._auth.authorization.token))}
            url = urlp.urljoin(self.domain.auth, url)
            response = self._limited_session.get(url, headers=headers, params=kwargs)
            return responses.create_response(response.json())
|
bruth/django-registration2 | registration/managers.py | Python | bsd-3-clause | 1,879 | 0.000532 | import re
import random
import hashlib
from django.db import models
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class RegistrationManager(models.Manager):
"""Custom manager for the ``RegistrationProfile`` model.
The methods defined here provide shortcuts for account creation
and activation (including generation and emailing of activation
keys), and for cleaning out expired inactive accounts.
"""
def activate_user(self, activation_key):
"""Validate an activation key and activate the corresponding
``User`` if valid.
If the key is valid and has not expired, return the ``User``
after activating.
If the key is not valid or | has expired, return ``False``.
If the key is valid but the ``User`` is already active,
return ``False``.
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if SHA1_RE.search(activation_key):
try:
| profile = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return
return profile.activate()
def create_profile(self, user):
"""Create a ``RegistrationProfile`` for a given ``User``, and return
the ``RegistrationProfile``.
The activation key for the ``RegistrationProfile`` will be a SHA1 hash,
generated from a combination of the ``User``'s username and a random
salt.
"""
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
username = user.username
if isinstance(username, unicode):
username = username.encode('utf-8')
activation_key = hashlib.sha1(salt+username).hexdigest()
return self.create(user=user, activation_key=activation_key)
|
Adoni/WeiboSpider | monitor/helper.py | Python | gpl-3.0 | 873 | 0.033219 | #coding:utf8
import re
import random
import time
import lxml.html
import json
def sleep(sleep_time):
sleep_time=sleep_time+random.randint(-2,2)
#print sleep_time
if sleep_time<=0:
sleep_time=0
#print('Sleeping for '+str(sleep_time)+' seconds')
time.sleep | (sleep_time)
#print('Wake up')
def get_target(html):
if(not 'location.replace' in html):
#print('No location.replace in html')
return None
pat="location.replace\('[^']*'\)"
a=re.findall(pat,html)
pat='location.replace\("[^"]*"\)'
b=re.findall(pat,html)
ans=[]
for url in a:
ans.append(url[18:-2]) |
for url in b:
ans.append(url[18:-2])
if(ans==[]):
return None
else:
return ans
if __name__=='__main__':
print 'Helper'
#print get_average_statuses_count()
#check()
#output_all_uids()
|
futurecolors/django-geoip | django_geoip/management/iso3166_1.py | Python | mit | 6,017 | 0 | # -*- coding: utf-8 -*-
ISO_CODES = {
"AF": "Afghanistan",
"AX": "Åland",
"AL": "Albania",
"DZ": "Algeria",
"AS": "American Samoa",
"AD": "Andorra",
"AO": "Angola",
"AI": "Anguilla",
"AQ": "Antarctica",
"AG": "Antigua and Barbuda",
"AR": "Argentina",
"AM": "Armenia",
"AW": "Aruba",
"AU": "Australia",
"AT": "Austria",
"AZ": "Azerbaijan",
"BS": "Bahamas",
"BH": "Bahrain",
"BD": "Bangladesh",
"BB": "Barbados",
"BY": "Belarus",
"BE": "Belgium",
"BZ": "Belize",
"BJ": "Benin",
"BM": "Bermuda",
"BT": "Bhutan",
"BO": "Bolivia",
"BQ": "Bonaire, Sint Eustatiusand Saba",
"BA": "Bosnia and Herzegovina",
"BW": "Botswana",
"BV": "Bouvet Island",
"BR": "Brazil",
"IO": "British Indian Ocean Territory",
"BN": "Brunei Darussalam",
"BG": "Bulgaria",
"BF": "Burkina Faso",
"BI": "Burundi",
"KH": "Cambodia",
"CM": "Cameroon",
"CA": "Canada",
"CV": "Cape Verde",
"KY": "Cayman Islands",
"CF": "Central African Republic",
"TD": "Chad",
"CL": "Chile",
"CN": "China",
"CX": "Christmas Island",
"CC": "Cocos (Keeling) Islands",
"CO": "Colombia",
"KM": "Comoros",
"CG": "Congo (Brazzaville)",
"CD": "Congo (Kinshasa)",
"CK": "Cook Islands",
"CR": "Costa Rica",
"CI": "Côte d'Ivoire",
"HR": "Croatia",
"CU": "Cuba",
"CW": "Curaçao",
"CY": "Cyprus",
"CZ": "Czech Republic",
"DK": "Denmark",
"DJ": "Djibouti",
"DM": "Dominica",
"DO": "Dominican Republic",
"EC": "Ecuador",
"EG": "Egypt",
"SV": "El Salvador",
"GQ": "Equatorial Guinea",
"ER": "Eritrea",
"EE": "Estonia",
"ET": "Ethiopia",
"FK": "Falkland Islands",
"FO": "Faroe Islands",
"FJ": "Fiji",
"FI": "Finland",
"FR": "France",
"GF": "French Guiana",
"PF": "French Polynesia",
"TF": "French Southern Lands",
"GA": "Gabon",
"GM": "Gambia",
"GE": "Georgia",
"DE": "Germany",
"GH": "Ghana",
"GI": "Gibraltar",
"GR": "Greece",
"GL": "Greenland",
"GD": "Grenada",
"GP": "Guadeloupe",
"GU": "Guam",
"GT": "Guatemala",
"GG": "Guernsey",
"GN": "Guinea",
"GW": "Guinea-Bissau",
"GY": "Guyana",
"HT": "Haiti",
"HM": "Heard and McDonald Islands",
"HN": "Honduras",
"HK": "Hong Kong",
"HU": "Hungary",
"IS": "Iceland",
"IN": "India",
"ID": "Indonesia",
"IR": "Iran",
"IQ": "Iraq",
"IE": "Ireland",
"IM": "Isle of Man",
"IL": "Israel",
"IT": "Italy",
"JM": "Jamaica",
"JP": "Japan",
"JE": "Jersey",
"JO": "Jordan",
"KZ": "Kazakhstan",
"KE": "Kenya",
"KI": "Kiribati",
"KP": "Korea, North",
"KR": "Korea, South",
"KW": "Kuwait",
"KG": "Kyrgyzstan",
"LA": "Laos",
"LV": "Latvia",
"LB": "Lebanon",
"LS": "Lesotho",
"LR": "Liberia",
"LY": "Libya",
"LI": "Liechtenstein",
"LT": "Lithuania",
"LU": "Luxembourg",
"MO": "Macau",
"MK": "Macedonia",
"MG": "Madagascar",
"MW": "Malawi",
"MY": "Malaysia",
"MV": "Maldives",
"ML": "Mali",
"MT": "Malta",
"MH": "Marshall Islands",
"MQ": "Martinique",
"MR": "Mauritania",
"MU": "Mauritius",
"YT": "Mayotte",
"MX": "Mexico",
"FM": "Micronesia",
"MD": "Moldova",
"MC": "Monaco",
"MN": "Mongolia",
"ME": "Montenegro",
"MS": "Montserrat",
"MA": "Morocco",
"MZ": "Mozambique",
"MM": "Myanmar",
"NA": "Namibia",
"NR": "Nauru",
"NP": "Nepal",
"NL": "Netherlands",
"NC": "New Caledonia",
"NZ": "New Zealand",
"NI": "Nicaragua",
"NE": "Niger",
"NG": "Nigeria",
"NU": "Niue",
"NF": "Norfolk Island",
"MP": "Northern Mariana Islands",
"NO": "Norway",
"OM": "Oman",
"PK": "Pakistan",
"PW": "Palau",
"PS": "Palestine",
"PA": "Panama",
"PG": "Papua New Guinea",
"PY": "Paraguay",
"PE": "Peru",
"PH": "Philippines",
"PN": "Pitcairn",
"PL": "Poland",
"PT": "Portugal",
"PR": "Puerto Rico",
"QA": "Qatar",
"RE": "Reunion",
"RO": "Romania",
"RU": "Russian Federation",
"RW": "Rwanda",
"BL": "Saint Barthélemy",
"SH": "Saint Helena",
"KN": "Saint Kitts and Nevis",
"LC": "Saint Lucia",
"MF": "Saint Martin (French part)",
"PM": "Saint Pierre and Miquelon",
"VC": "Saint Vincent and theGrenadines",
"WS": "Samoa",
"SM": "San Marino",
"ST": "Sao Tome and Principe",
"SA": "Saudi Arabia", |
"SN": "Senegal",
"RS": "Serbia",
"SC": "Seychelles",
"SL": "Sierra Leone",
"SG": "Singapore",
"SX": "Sint Maarten",
"SK": "Slovakia",
"SI": "Slovenia",
"SB": "Solomon Islands",
"SO": "Somalia",
"ZA": "South Africa",
"GS": "South Georgia and South Sandwich Islands",
"SS": "South Sudan",
"ES": "Spain",
"LK": "Sri Lanka",
"SD": "Sudan",
"SR": "Suriname",
"SJ": | "Svalbard and Jan Mayen Islands",
"SZ": "Swaziland",
"SE": "Sweden",
"CH": "Switzerland",
"SY": "Syria",
"TW": "Taiwan",
"TJ": "Tajikistan",
"TZ": "Tanzania",
"TH": "Thailand",
"TL": "Timor-Leste",
"TG": "Togo",
"TK": "Tokelau",
"TO": "Tonga",
"TT": "Trinidad and Tobago",
"TN": "Tunisia",
"TR": "Turkey",
"TM": "Turkmenistan",
"TC": "Turks and Caicos Islands",
"TV": "Tuvalu",
"UG": "Uganda",
"UA": "Ukraine",
"AE": "United Arab Emirates",
"GB": "United Kingdom",
"UM": "United States Minor Outlying Islands",
"US": "United States of America",
"UY": "Uruguay",
"UZ": "Uzbekistan",
"VU": "Vanuatu",
"VA": "Vatican City",
"VE": "Venezuela",
"VN": "Vietnam",
"VG": "Virgin Islands, British",
"VI": "Virgin Islands, U.S.",
"WF": "Wallis and Futuna Islands",
"EH": "Western Sahara",
"YE": "Yemen",
"ZM": "Zambia",
"ZW": "Zimbabwe",
}
|
sahat/bokeh | examples/glyphs/daylight.py | Python | bsd-3-clause | 3,445 | 0.007837 | from __future__ import print_function
import numpy as np
import datetime as dt
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.glyphs import Patch, Line, Text
from bokeh.objects import (
ColumnDataSource, DataRange1d, DatetimeAxis, DatetimeTickFormatter,
Glyph, Grid, Legend, Plot
)
from bokeh.resources import INLINE
from bokeh.sampledata import daylight
df = daylight.daylight_warsaw_2013
source = ColumnDataSource(dict(
dates = df.Date,
sunrises = df.Sunrise,
sunsets = df.Sunset,
))
patch1_source = ColumnDataSource(dict(
dates = np.concatenate((df.Date, df.Date[::-1])),
times = np.concatenate((df.Sunrise, df.Sunset[::-1]))
))
summer | = df[df.Summer == 1]
patch2_source = ColumnDataSource(dict(
dates = np.concatenate((summer.Date, summer.Date[::-1])),
times = np.concatenate((summer.Sunrise, summer.Sunset[::-1]))
))
summer_start = df.Summer.tolist().index(1)
summer_end = df.Summer.tolist().index(0, summ | er_start)
calendar_start = df.Date.irow(0)
summer_start = df.Date.irow(summer_start)
summer_end = df.Date.irow(summer_end)
calendar_end = df.Date.irow(-1)
d1 = calendar_start + (summer_start - calendar_start)/2
d2 = summer_start + (summer_end - summer_start)/2
d3 = summer_end + (calendar_end - summer_end)/2
text_source = ColumnDataSource(dict(
dates = [d1, d2, d3],
times = [dt.time(11, 30)]*3,
texts = ["CST (UTC+1)", "CEST (UTC+2)", "CST (UTC+1)"],
))
xdr = DataRange1d(sources=[source.columns("dates")])
ydr = DataRange1d(sources=[source.columns("sunrises", "sunsets")])
title = "Daylight Hours - Warsaw, Poland"
plot = Plot(title=title, data_sources=[source, patch1_source, patch2_source, text_source], x_range=xdr, y_range=ydr, plot_width=800, plot_height=400)
patch1 = Patch(x="dates", y="times", fill_color="skyblue", fill_alpha=0.8)
patch1_glyph = Glyph(data_source=patch1_source, xdata_range=xdr, ydata_range=ydr, glyph=patch1)
plot.renderers.append(patch1_glyph)
patch2 = Patch(x="dates", y="times", fill_color="orange", fill_alpha=0.8)
patch2_glyph = Glyph(data_source=patch2_source, xdata_range=xdr, ydata_range=ydr, glyph=patch2)
plot.renderers.append(patch2_glyph)
line1 = Line(x="dates", y="sunrises", line_color="yellow", line_width=2)
line1_glyph = Glyph(data_source=source, xdata_range=xdr, ydata_range=ydr, glyph=line1)
plot.renderers.append(line1_glyph)
line2 = Line(x="dates", y="sunsets", line_color="red", line_width=2)
line2_glyph = Glyph(data_source=source, xdata_range=xdr, ydata_range=ydr, glyph=line2)
plot.renderers.append(line2_glyph)
text = Text(x="dates", y="times", text="texts", angle=0, text_align="center")
text_glyph = Glyph(data_source=text_source, xdata_range=xdr, ydata_range=ydr, glyph=text)
plot.renderers.append(text_glyph)
xformatter = DatetimeTickFormatter(formats=dict(months=["%b %Y"]))
xaxis = DatetimeAxis(plot=plot, dimension=0, formatter=xformatter)
yaxis = DatetimeAxis(plot=plot, dimension=1)
xgrid = Grid(plot=plot, dimension=0, axis=xaxis)
ygrid = Grid(plot=plot, dimension=1, axis=yaxis)
legend = Legend(plot=plot, legends={"sunrise": [line1_glyph], "sunset": [line2_glyph]})
plot.renderers.append(legend)
doc = Document()
doc.add(plot)
if __name__ == "__main__":
filename = "daylight.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Daylight Plot"))
print("Wrote %s" % filename)
view(filename)
|
inspoy/BounceArena | tools/protocol/protocol.py | Python | apache-2.0 | 13,731 | 0.001675 | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import os.path
import shutil
import time
def get_protocol_data():
filename = "protocol.csv"
raw_data = []
try:
inp = open(filename, mode='r', encoding='utf-8')
lines = inp.readlines()
for line in lines:
fields = line.rstrip('\n').split(',')
raw_data.append(fields)
inp.close()
except FileNotFoundError:
print("File not found: " + filename)
except Exception as e:
print("Exception:%s\n%s" % (type(e), e))
protocols = []
cur_protocol = {"pid": "", "name": "", "desc": "", "req": [], "resp": [], "no_req": True}
for line in raw_data:
if line[0] == "pid":
cur_protocol["pid"] = line[1]
if line[0] == "name":
cur_protocol["name"] = line[1]
if line[0] == "desc":
cur_protocol["desc"] = line[1]
if line[0] == "req":
cur_protocol["no_req"] = False
if line[1] != "_PH":
cur_protocol["req"].append({"type": line[1], "name": line[2], "desc": line[3]})
if line[0] == "resp":
cur_protocol["resp"].append({"type": line[1], "name": line[2], "desc": line[3]})
if line[0] == "END":
protocols.append(cur_protocol)
cur_protocol = {"pid": "", "name": "", "desc": "", "req": [], "resp": [], "no_req": True}
return protocols
def get_structure_data():
filename = "structure.csv"
raw_data = []
try:
inp = open(filename, mode='r', encoding='utf-8')
lines = inp.readlines()
for line in lines:
fields = line.rstrip('\n').split(',')
raw_data.append(fields)
inp.close()
except FileNotFoundError:
print("File not found: " + filename)
except Exception as e:
print("Exception:%s\n%s" % (type(e), e))
structures = []
cur_item = {"name": "", "desc": "", "fields": []}
for line in raw_data:
if line[0] == "name":
cur_item["name"] = line[1]
elif line[0] == "desc":
cur_item["desc"] = line[1]
elif line[0] == "END":
structures.append(cur_item)
cur_item = {"name": "", "desc": "", "fields": []}
else:
cur_item["fields"].append({"type": line[0], "name": line[1], "desc": line[2]})
return structures
def write_client_protool(protocols):
filename = "SFMsgClass.protocol.cs"
file = open(filename, mode="wt", encoding="utf-8")
template = open("clientProtocolTemplate.txt", mode="r", encoding="utf-8")
res = template.readlines()
template.close()
for item in protocols:
# {"pid", "name", "desc", "req": [], "resp": [], "no_req": True}
pid = item["pid"]
name = item["name"]
desc = item["desc"]
req = item["req"]
resp = item["resp"]
no_req = item["no_req"]
res += "\n" \
" #region %s-%s\n" % (pid, desc)
# request
if not no_req:
res += " /// <summary>\n"
res += " /// [Req]%s\n" % desc
res += " /// </summary>\n"
res += " [Serializable]\n"
res += " public class SFRequestMsg%s : SFBaseRequestMessage\n" % name
res += " {\n" \
" public SFRequestMsg%s()\n" \
" {\n" \
" pid = %s;\n" \
" }\n" % (name, pid)
for req_item in req:
if req_item["desc"] != "":
res += "\n" \
" /// <summary>\n" \
" /// %s\n" \
" /// </summary>\n" % req_item["desc"]
res += " public %s %s;\n" % (req_item["type"], req_item["name"])
# end for
pass
res += " };\n\n"
# end if
pass
# response
prefix = "[Resp]"
if no_req:
prefix = "[Resp][Notify]"
res += " /// <summary>\n" \
" /// %s%s\n" \
" /// </summary>\n" % (prefix, desc)
res += " [Serializable]\n" \
" public class SFResponseMsg%s : SFBaseResponseMessage\n" \
" {\n" % name
res += " public const string pName = \"socket_%s\";\n" % pid
res += " public SFResponseMsg%s()\n" \
" {\n" \
" pid = %s;\n" \
" }\n" % (name, pid)
for resp_item in resp:
if resp_item["desc"] != "":
res += "\n" \
" /// <summary>\n" \
" /// %s\n" \
" /// </summary>\n" % resp_item["desc"]
# end if
pass
res += " public %s %s;\n" % (resp_item["type"], resp_item["name"])
# end for
pass
res += " };\n" \
" #endregion\n"
# end for
pass
res += "}\n" \
"// Last Update: %s\n" % time.strftime("%Y/%m/%d")
file.writelines(res)
file.close()
print("Write client protool completed.")
def write_client_structure(structures):
filename = "SFMsgData.protocol.cs"
file = open(filename, mode="wt", encoding="utf-8")
res = "using System;\n" \
"using System.Collections;\n" \
"using System.Collections.Generic;\n" \
"using UnityEngine;\n" \
"\n" \
"namespace SF\n" \
"{\n"
for item in structures:
# {"name", "desc", "fields":[]}
name = item["name"]
desc = item["desc"]
fields = item["fields"]
res += " /// <summary>\n" \
" /// %s\n" \
" /// </summary>\n" % desc
res += " [Serializable]\n" \
" public struct SFMsgData%s\n" \
" {\n" % name
for field in fields:
if field["desc"] != "":
res += "\n" \
" /// <summary>\n" \
" /// %s\n" \
" /// <summary>\n" % field["desc"]
# end if
pass
res += " public %s %s;\n" % (field["type"], field["name"])
# end for
pass
res += " };\n\n"
# end for
pass
res += "}\n"
file.writelines(res)
file.close()
print("Write client structure completed.")
def write_server(protocols, structures):
filename = "SFProtocolMessage.protocol.js"
file = open(filename, mode="wt", encoding="utf-8")
res = "/**\n" \
" * Last Update: %s\n" \
" */\n" \
"\"use strict\";\n" \
"\n" % time.strftime("%Y/%m/%d")
# structures
for item in structures:
# {"name", "desc", "fields":[]}
name = item["name"]
desc = item["desc"]
fields = item["fields"]
res += "/**\n" \
" * %s\n" % desc
str_fields = ""
for field in fields:
str_type = field["type"]
if str_type == "int" or str_type == "float":
str_type = "number"
if str_type.startswith("List<"):
str_type = str_type.replace("List<", "list<")
str_desc = ""
if field["desc"] != "":
str_desc = " - %s" % field["desc"]
res += " * @param {%s} %s%s\n" % (str_type, field["name"], str_desc)
str_default_value = "[]"
if str_type == "string":
str_default_value = "\"\""
if str_type == "number":
| str_default_value = "0"
str_fields += " this.%s = %s;\n" % (field["name"], str_default_value)
# end for
pass
res += " */\n" \
"class SFMsgData%s {\n" \
" constructor() {\n" \
| "%s" \
" }\n" \
"}\n" \
"exports.SFMsgData%s = SFMsgData%s;\n" \
"\n" % (name, str_fie |
the-zebulan/CodeWars | tests/kyu_5_tests/test_human_readable_time.py | Python | mit | 561 | 0 | import unittest
from katas.kyu_5.human_readable_time import make_readable
class HumanReadableTimeTestCase(unittest.TestCase):
def test_equals( | self):
self.assertEqual(make_readable(0), '00:00:00')
def test_equals_2(self):
self.assertEqual(make_readable(5), '00:00:05')
def test_equals_3(self):
self.assertEqual(make_readable(60), '00:01:00')
def test_equals_4(self):
self.assertEqual(make_readable(86399), '23:59:59') |
def test_equals_5(self):
self.assertEqual(make_readable(359999), '99:59:59')
|
Koheron/lase | examples/spectrum_analyzer.py | Python | mit | 1,482 | 0.006073 | #!/usr/bin/env python
# -* | - coding: utf-8 -*-
import os
| import time
import numpy as np
import matplotlib
matplotlib.use('GTKAgg')
from matplotlib import pyplot as plt
from koheron import connect
from drivers import Spectrum
from drivers import Laser
host = os.getenv('HOST','192.168.1.100')
client = connect(host, name='spectrum')
driver = Spectrum(client)
laser = Laser(client)
laser.start()
current = 30 # mA
laser.set_current(current)
# driver.reset_acquisition()
wfm_size = 4096
decimation_factor = 1
index_low = 0
index_high = wfm_size / 2
signal = driver.get_decimated_data(decimation_factor, index_low, index_high)
print('Signal')
print(signal)
mhz = 1e6
sampling_rate = 125e6
freq_min = 0
freq_max = sampling_rate / mhz / 2
# Plot parameters
fig = plt.figure()
ax = fig.add_subplot(111)
x = np.linspace(freq_min, freq_max, (wfm_size / 2))
print('X')
print(len(x))
y = 10*np.log10(signal)
print('Y')
print(len(y))
li, = ax.plot(x, y)
fig.canvas.draw()
ax.set_xlim((x[0],x[-1]))
ax.set_ylim((0,200))
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('Power spectral density (dB)')
while True:
try:
signal = driver.get_decimated_data(decimation_factor, index_low, index_high)
li.set_ydata(10*np.log10(signal))
fig.canvas.draw()
plt.pause(0.001)
except KeyboardInterrupt:
# Save last spectrum in a csv file
np.savetxt("psd.csv", signal, delimiter=",")
laser.stop()
driver.close()
break
|
runmyrobot/runmyrobot | telly.py | Python | apache-2.0 | 1,037 | 0.007715 |
import robot_util
de | f sendSettings(ser, args):
if args.right_wheel_forward_speed is not None:
robot_util.sendSerialCommand(ser, "rwfs " + str(args.right_wheel_forward_speed))
if args.right_wheel_backward_speed is not None:
robot_util.sendSerialCommand(ser, "rwbs " + str(args.right_wheel_backward_speed))
if args.left_wheel_forward_speed is not None:
robot_util.sendSerialCommand(ser, "lwfs " + str(args.left_wheel_forward_speed))
if args.left_wheel_backward_speed is not None:
| robot_util.sendSerialCommand(ser, "lwbs " + str(args.left_wheel_backward_speed))
if args.straight_delay is not None:
robot_util.sendSerialCommand(ser, "straight-distance " + str(int(args.straight_delay * 255)))
if args.turn_delay is not None:
robot_util.sendSerialCommand(ser, "turn-distance " + str(int(args.turn_delay * 255)))
if args.led_max_brightness is not None:
robot_util.sendSerialCommand(ser, "led-max-brightness " + str(args.led_max_brightness))
|
dcowden/cadquery-freecad-module | CadQuery/Examples/Ex011_Mirroring_Symmetric_Geometry.py | Python | lgpl-3.0 | 447 | 0.011186 | #This example is meant to be used from within t | he CadQuery module of FreeCAD.
import cadquery
import Part
#1.0 is the distance, not coordinate
r = cadquery.Workplane("front").hLine(1.0)
#hLineTo allows using xCoordinate not distance
r = r.vLine(0.5).hLine(-0.25).vLine(-0.25).hLineTo(0.0)
#Mirror the geometry and extrude
resu | lt = r.mirrorY().extrude(0.25)
#Boiler plate code to render our solid in FreeCAD's GUI
Part.show(result.toFreecad())
|
cansik/pg4nosql | pg4nosql/PostgresNoSQLTable.py | Python | mit | 4,608 | 0.00217 | import copy
import json
from psycopg2.extensions import AsIs
from psycopg2.extras import RealDictCursor
from pg4nosql import DEFAULT_JSON_COLUMN_NAME, DEFAULT_ROW_IDENTIFIER
from pg4nosql.PostgresNoSQLQueryStructure import PostgresNoSQLQueryStructure
from pg4nosql.PostgresNoSQLResultItem import PostgresNoSQLResultItem
from pg4nosql.PostgresNoSQLUtil import to_nullable_string
class PostgresNoSQLTable(PostgresNoSQLQueryStructure):
__SQL_INSERT_JSON = "INSERT INTO %s(" + DEFAULT_JSON_COLUMN_NAME + " %s) VALUES(%s %s) RETURNING " + DEFAULT_ROW_IDENTIFIER
__SQL_GET_JSON = 'SELECT * FROM %s WHERE ' + DEFAULT_ROW_IDENTIFIER + '=%s'
__SQL_GET_COLUMNS = 'select column_name from information_schema.columns where table_name = %s'
__SQL_DELETE_JSON = 'DELETE FROM %s WHERE ' + DEFAULT_ROW_IDENTIFIER + '=%s'
__SQL_UPDATE_JSON = 'UPDATE %s SET ' + DEFAULT_JSON_COLUMN_NAME + '=%s %s WHERE ' + DEFAULT_ROW_IDENTIFIER + '=%s;'
__SQL_INSERT = "INSERT INTO %s(%s) VALUES(%s) RETURNING " + DEFAULT_ROW_IDENTIFIER
__SQL_UPDATE = 'UPDATE %s SET %s WHERE ' + DEFAULT_ROW_IDENTIFIER + '=%s;'
__SQL_QUERY_WITH_JOIN = 'SELECT %s FROM %s AS a JOIN %s AS b ON %s WHERE %s'
def __init__(self, name, connection):
super(PostgresNoSQLTable, self).__init__(name, connection)
self.super = super(PostgresNoSQLTable, self)
def commit(self):
"""
Use commit only if auto_commit in put or save are disabled!
:return: None
"""
self.connection.commit()
def insert(self, auto_commit=True, **data):
relational_data = data
relational_data_columns = ''
relational_data_values = ''
if relational_data:
relational_data_columns = ",".join(relational_data.keys())
data_list = map(str, map(to_nullable_string, relational_data.values()))
relational_data_values = ",".join(data_list)
self.cursor.execute(self.__SQL_INSERT, (AsIs(self.name),
AsIs(relational_data_columns),
AsIs(relational_data_values)))
if auto_commit:
self.commit()
return self.cursor.fetchone()[DEFAULT_ROW_IDENTIFIER]
def update(self, object_id, auto_commit=True, **relational_data):
relational_data_sql = ','.join(
"%s=%s" % (key, str(to_nullable_string(val))) for (key, val) in relational_data.items())
self.cursor.execute(self.__SQL_UPDATE, (AsIs(self.name),
AsIs(relational_data_sql), object_id))
if auto_commit:
self.commit()
def put(self, json_data, auto_commit=True, **relational_data):
relational_data.update({DEFAULT_JSON_COLUMN_NAME: json_data})
return self.insert(auto_commit=auto_commit, **relational_data)
def save(self, record, auto_commit=True):
data = copy.deepcopy(record.get_record())
object_id = data.pop(DEFAULT_ROW_IDENTIFIER)
self.update(object_id, auto_commit=auto_commit, **data)
def get(self, object_id):
self.cursor.execute(self.__SQL_GET_JSON, (AsIs(self.name), object_id))
record = self.cursor.fetchone()
if record is None:
return record
return PostgresNoSQLResultItem(record, self)
def query_join(self, table_name, on_statement, query='True', columns='*'):
self.cursor.execute(self.__SQL_QUERY_WITH_JOIN, (AsIs(columns),
AsIs(self.name),
| AsIs(table_name),
AsIs(on_statement),
| AsIs(query)))
rows = [item for item in self.cursor.fetchall()]
items = map(lambda r: PostgresNoSQLResultItem(r, self), rows)
return items
def query_one(self, query='True', columns='*'):
result = self.query(query, columns)
if not result:
return None
return result[0]
def get_columns(self):
self.cursor.execute(self.__SQL_GET_COLUMNS, (self.name,))
columns = map(lambda m: m['column_name'], self.cursor.fetchall())
return columns
def delete(self, object_id, auto_commit=True):
self.cursor.execute(self.__SQL_DELETE_JSON, (AsIs(self.name), object_id))
if auto_commit:
self.commit()
def execute(self, sql_query):
self.cursor.execute(sql_query)
return self.cursor.fetchall()
|
plotly/python-api | packages/python/plotly/plotly/validators/treemap/marker/colorbar/_ticklen.py | Python | mit | 515 | 0.001942 | import _plotly_utils.basevalidators
class TicklenValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="ticklen", parent_name="treemap.marker.colorbar", **kwargs
):
super(Tickle | nValidator, self).__init__(
plot | ly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
|
Havate/havate-openstack | proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/usage/views.py | Python | apache-2.0 | 2,020 | 0 | from horizon import tables
from openstack_dashboard.usage import base
class UsageView(tables.DataTableView):
usage_class = None
show_terminated = True
def __init__(self, *args, **kwargs):
super(UsageView, self).__init__(*args, **kwargs)
if not issubclass(self.usage | _class, base.BaseUsage):
raise AttributeError("You must specify a usage_class attribute "
"which is a subclass of BaseUsage.")
def get_template_names(self):
if self.request.GET.get('format', 'html') == 'csv':
return ".".join((self.template_name.rsplit('.', 1)[0], 'csv'))
return self.template_name
def get_con | tent_type(self):
if self.request.GET.get('format', 'html') == 'csv':
return "text/csv"
return "text/html"
def get_data(self):
project_id = self.kwargs.get('project_id', self.request.user.tenant_id)
self.usage = self.usage_class(self.request, project_id)
self.usage.summarize(*self.usage.get_date_range())
self.usage.get_limits()
self.kwargs['usage'] = self.usage
return self.usage.usage_list
def get_context_data(self, **kwargs):
context = super(UsageView, self).get_context_data(**kwargs)
context['table'].kwargs['usage'] = self.usage
context['form'] = self.usage.form
context['usage'] = self.usage
return context
def render_to_response(self, context, **response_kwargs):
if self.request.GET.get('format', 'html') == 'csv':
render_class = self.csv_response_class
response_kwargs.setdefault("filename", "usage.csv")
else:
render_class = self.response_class
resp = render_class(request=self.request,
template=self.get_template_names(),
context=context,
content_type=self.get_content_type(),
**response_kwargs)
return resp
|
srowen/spark | python/pyspark/mllib/classification.py | Python | apache-2.0 | 33,064 | 0.001331 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import exp
import sys
import warnings
from typing import Any, Iterable, Optional, Union, overload, TYPE_CHECKING
import numpy
from pyspark import RDD, SparkContext, since
from pyspark.streaming.dstream import DStream
from pyspark.mllib.common import callMLlibFunc, _py2java, _java2py
from pyspark.mllib.linalg import _convert_to_vector
from pyspark.mllib.regression import ( # type: ignore[attr-defined]
LabeledPoint,
LinearModel,
_regression_train_wrapper,
StreamingLinearAlgorithm,
)
from pyspark.mllib.util import Saveable, Loader, inherit_doc # type: ignore[attr-defined]
from pyspark.mllib.linalg import Vector
from pyspark.mllib.regression import LabeledPoint
if | TYPE_CHECKING:
from pyspark.mllib._typing import VectorLike
__all__ = [
"LogisticRegressionModel",
"LogisticRegressionWithSGD",
"Log | isticRegressionWithLBFGS",
"SVMModel",
"SVMWithSGD",
"NaiveBayesModel",
"NaiveBayes",
"StreamingLogisticRegressionWithSGD",
]
class LinearClassificationModel(LinearModel):
"""
A private abstract class representing a multiclass classification
model. The categories are represented by int values: 0, 1, 2, etc.
"""
def __init__(self, weights: Vector, intercept: float) -> None:
super(LinearClassificationModel, self).__init__(weights, intercept)
self._threshold: Optional[float] = None
@since("1.4.0")
def setThreshold(self, value: float) -> None:
"""
Sets the threshold that separates positive predictions from
negative predictions. An example with prediction score greater
than or equal to this threshold is identified as a positive,
and negative otherwise. It is used for binary classification
only.
"""
self._threshold = value
@property # type: ignore[misc]
@since("1.4.0")
def threshold(self) -> Optional[float]:
"""
Returns the threshold (if any) used for converting raw
prediction scores into 0/1 predictions. It is used for
binary classification only.
"""
return self._threshold
@since("1.4.0")
def clearThreshold(self) -> None:
"""
Clears the threshold so that `predict` will output raw
prediction scores. It is used for binary classification only.
"""
self._threshold = None
@overload
def predict(self, test: "VectorLike") -> Union[int, float]:
...
@overload
def predict(self, test: RDD["VectorLike"]) -> RDD[Union[int, float]]:
...
def predict(
self, test: Union["VectorLike", RDD["VectorLike"]]
) -> Union[RDD[Union[int, float]], Union[int, float]]:
"""
Predict values for a single data point or an RDD of points
using the model trained.
.. versionadded:: 1.4.0
"""
raise NotImplementedError
class LogisticRegressionModel(LinearClassificationModel):
"""
Classification model trained using Multinomial/Binary Logistic
Regression.
.. versionadded:: 0.9.0
Parameters
----------
weights : :py:class:`pyspark.mllib.linalg.Vector`
Weights computed for every feature.
intercept : float
Intercept computed for this model. (Only used in Binary Logistic
Regression. In Multinomial Logistic Regression, the intercepts will
not be a single value, so the intercepts will be part of the
weights.)
numFeatures : int
The dimension of the features.
numClasses : int
The number of possible outcomes for k classes classification problem
in Multinomial Logistic Regression. By default, it is binary
logistic regression so numClasses will be set to 2.
Examples
--------
>>> from pyspark.mllib.linalg import SparseVector
>>> data = [
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(data), iterations=10)
>>> lrm.predict([1.0, 0.0])
1
>>> lrm.predict([0.0, 1.0])
0
>>> lrm.predict(sc.parallelize([[1.0, 0.0], [0.0, 1.0]])).collect()
[1, 0]
>>> lrm.clearThreshold()
>>> lrm.predict([0.0, 1.0])
0.279...
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(sparse_data), iterations=10)
>>> lrm.predict(numpy.array([0.0, 1.0]))
1
>>> lrm.predict(numpy.array([1.0, 0.0]))
0
>>> lrm.predict(SparseVector(2, {1: 1.0}))
1
>>> lrm.predict(SparseVector(2, {0: 1.0}))
0
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> lrm.save(sc, path)
>>> sameModel = LogisticRegressionModel.load(sc, path)
>>> sameModel.predict(numpy.array([0.0, 1.0]))
1
>>> sameModel.predict(SparseVector(2, {0: 1.0}))
0
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except BaseException:
... pass
>>> multi_class_data = [
... LabeledPoint(0.0, [0.0, 1.0, 0.0]),
... LabeledPoint(1.0, [1.0, 0.0, 0.0]),
... LabeledPoint(2.0, [0.0, 0.0, 1.0])
... ]
>>> data = sc.parallelize(multi_class_data)
>>> mcm = LogisticRegressionWithLBFGS.train(data, iterations=10, numClasses=3)
>>> mcm.predict([0.0, 0.5, 0.0])
0
>>> mcm.predict([0.8, 0.0, 0.0])
1
>>> mcm.predict([0.0, 0.0, 0.3])
2
"""
def __init__(
self, weights: Vector, intercept: float, numFeatures: int, numClasses: int
) -> None:
super(LogisticRegressionModel, self).__init__(weights, intercept)
self._numFeatures = int(numFeatures)
self._numClasses = int(numClasses)
self._threshold = 0.5
if self._numClasses == 2:
self._dataWithBiasSize = None
self._weightsMatrix = None
else:
self._dataWithBiasSize = self._coeff.size // ( # type: ignore[attr-defined]
self._numClasses - 1
)
self._weightsMatrix = self._coeff.toArray().reshape(
self._numClasses - 1, self._dataWithBiasSize
)
@property # type: ignore[misc]
@since("1.4.0")
def numFeatures(self) -> int:
"""
Dimension of the features.
"""
return self._numFeatures
@property # type: ignore[misc]
@since("1.4.0")
def numClasses(self) -> int:
"""
Number of possible outcomes for k classes classification problem
in Multinomial Logistic Regression.
"""
return self._numClasses
@overload
def predict(self, x: "VectorLike") -> Union[int, float]:
...
@overload
def predict(self, x: RDD["VectorLike"]) -> RDD[Union[int, float]]:
...
def predict(
self, x: Union["VectorLike", RDD["VectorLike"]]
) -> Union[RDD[Union[int, float]], Union[int, float]]:
"""
Predict values for a single data point or an RDD of points
using the model trained.
.. versionadded:: 0.9.0
"""
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
x = _co |
cedricmenec/mysql-backup-service | main.py | Python | mit | 626 | 0.004792 | from app import app
from flask import jsonify
from backup.api import api as api_backup
from backup.api import api_restore
# Blueprints registration
app.register_blueprint(api_backup, url_prefix='/api/backup')
app.register_blueprint(api_restore, url_prefix='/api/restore')
@ap | p.route('/api/help', methods = ['GET'])
def help():
"""Print available functions."""
func_list = {}
for rule in app.url_map.iter_rules():
if rule.endpoint != 'static':
func_list[rule.rule] = app.view_functions[ru | le.endpoint].__doc__
return jsonify(func_list)
if __name__ == '__main__':
app.run('0.0.0.0') |
morepath/morepath | morepath/tests/fixtures/config/settings.py | Python | bsd-3-clause | 1,065 | 0 | """Example settings python dictionary for Morepath.
It contains also a helper function to create a JSON config file
from this dictionary.
"""
import json
settings = {
"chameleon": {"debug": True},
"jinja2": {
"auto_reload": False,
"autoescape": True,
"extensions": ["jinja2.ext.autoescape", "jinja2.ext.i18n"],
},
"jwtauth": {
"algorithm": "ES256",
"leeway": 20,
"public_key": "MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQBWcJwPEAnS/k4kFgUhxNF7J0SQQhZG" # noqa: E501
"+nNgy+/mXwhQ5PZIUmId1a1TjkNXiKzv6DpttBqduHbz/V0EtH+QfWy0B4BhZ5MnT"
"yDGjcz1DQqKdexebhzobbhSIZjpYd5aU48o9rXp/OnAnrajddpGsJ0bNf4rtMLBqF"
"YJN6LOslAB7xTBRg=",
},
"sqlalchemy": {"url": "sqlite:///morepath.db"},
"transaction": {"attempts" | : 2},
}
def create_json_config():
stream = open("settings.json", "w")
json.dump(
settings, stream, sort_keys=True, indent=4, separators=(",", ": ")
)
print(
json.dumps(settings, sort_ke | ys=True, indent=4, separators=(",", ": "))
)
|
douglaskastle/AcraNetwork | AcraNetwork/protocols/network/BaseAddress.py | Python | gpl-2.0 | 1,174 | 0.013629 | class BaseAddress():
_str = ''
@property
def str(self):
if None == self.int:
return None
else:
return self.intToAddr(self.int)
def __init__(self, i=None):
if None == i:
self.int = None
elif isinstance(i, int):
self.int = i
elif isinstance(i, str):
self.int = self.addrToInt(i)
def set(self, i):
return self.__init__(i)
def __str__(self):
return '{:s}'.format(self.str)
def __eq__(self, o):
| if o == None and self.int == o:
return True
elif isinstance(o, int) and self.int == o:
return True
elif isinstance(o, str) and self.str == o:
return True
return False
def __ne__(self, o):
return not self.__eq__(o)
def __add__(self, o):
if isinstance(o, int) :
self.int += o
return self
def __sub__(self, o):
return self.__add__(o*-1)
def intToAddr(self, i):
| raise NotImplementedError
def addrToInt(self, i):
raise NotImplementedError
|
realer01/aiohttp-debugtoolbar | examples/simple.py | Python | apache-2.0 | 1,624 | 0 | import asyncio
import jinja2
import aiohttp_debugtoolbar
import aiohttp_jinja2
from aiohttp import web
@aiohttp_jinja2.template('index.html')
def basic_handler(request):
return {'title': 'example aiohttp_debugtoolbar!',
'text': 'Hello aiohttp_debugtoolbar!',
'app': request.app}
@asyncio.coroutine
def exception_handler(request):
raise NotImplementedError
@asyncio.coroutine
def init(loop):
# add aiohttp_debugtoolbar middleware to you application
app = web.Application(loop=loop)
# install aiohttp_debugtoolbar
aiohttp_debugtoolbar.setup(app)
template = """
<html>
<head>
<title>{{ title }}</title>
</head>
<body>
<h1>{{ text }}</h1>
<p>
<a href="{{ app.router['exc_example'].url() }}">
Exception example</a>
</p>
</body>
</html>
"""
# install jinja2 templates
loader = jinja2.DictLoader({'index.html': template})
aiohttp_jinja2.setup(app, loader=loader)
# init routes for index page, and page with error
app.router.add_route('GET', '/', basic_handler, name='index')
app.router.add_route('GET', '/exc', exception_handler, name='exc_example')
handler = app.make_handler()
srv = yield from loop.create_server(handler, '127.0.0.1', 9000)
print("Server started at http://127.0.0.1:9000")
return srv, handler
loop = asyncio.get_event_loop()
srv, handler = loop.run_until_complete(init(loop))
try:
loop.run_forever()
except Keyboar | dInter | rupt:
loop.run_until_complete(handler.finish_connections())
|
erudit/zenon | tests/unit/apps/public/book/test_views.py | Python | gpl-3.0 | 771 | 0.001297 | import pytest
from apps.public.book.test.factories import BookCollectionFactory, BookFactory
from apps.public.book.views import BookListView
@pytest.mark.django_db
class TestBookListView:
def test_books_order(self):
collection = BookCo | llectionFactory()
book_1 = BookFactory(collection=collection, year='2000', title='A')
book_2 = BookFactory(collection=collection, year='2000', title='B')
book_3 = BookFactory(collection=collec | tion, year='2002', title='D')
book_4 = BookFactory(collection=collection, year='2001', title='C')
view = BookListView()
view.object_list = None
context = view.get_context_data()
assert list(context['collections'][0].books.all()) == [book_3, book_4, book_1, book_2]
|
pierrejean-coudert/winlibre_pm | package_manager/smart/media.py | Python | gpl-2.0 | 12,238 | 0.001879 | #
# Copyright (c) 2005 Canonical
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <niemeyer@conectiva.com>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.util.filetools import compareFiles
from smart import *
import commands
import stat
import os
class MediaSet(object):
def __init__(self):
self._medias = []
self._processcache = {}
self.discover()
def discover(self):
self.restoreState()
del self._medias[:]
self._processcache.clear()
mountpoints = {}
for lst in hooks.call("discover-medias"):
for media in lst:
mountpoint = media.getMountPoint()
if mountpoint not in mountpoints:
mountpoints[mountpoint] = media
self._medias.append(media)
self._medias.sort()
def resetState(self):
for media in self._medias:
media.resetState()
def restoreState(self):
for media in self._medias:
media.restoreState()
def mountAll(self):
for media in self._medias:
media.mount()
def umountAll(self):
for media in self._medias:
media.umount()
def findMountPoint(self, path, subpath=False):
path = os.path.normpath(path)
for media in self._medias:
mountpoint = media.getMountPoint()
if (mountpoint == path or
subpath and path.startswith(mountpoint+"/")):
return media
return None
def findDevice(self, path, subpath=False):
path = os.path.normpath(path)
for media in self._medias:
device = media.getDevice()
if device and \
(device == path or subpath and path.startswith(device+"/")):
return media
return None
def findFile(self, path, comparepath=None):
if path.startswith("localmedia:"):
path = path[11:]
while path[:2] == "//":
path = path[1:]
for media in self._medias:
if media.isMounted():
filepath = media.joinPath(path)
if (os.path.isfile(filepath) and
not comparepath or compareFiles(filepath, comparepath)):
return media
return None
def processFilePath(self, filepath):
dirname = os.path.dirname(filepath)
if dirname in self._processcache:
media = self._processcache.get(dirname)
if media:
filepath = media.convertDevicePath(filepath)
else:
media = self.findMountPoint(filepath, subpath=True)
if not media:
media = self.findDevice(filepath, subpath=True)
if media:
media.mount()
filepath = media.convertDevicePath(filepath)
self._processcache[dirname] = media
else:
isfile = os.path.isfile
paths = []
path = dirname
while path != os.sep:
paths.append(path)
if isfile(path):
for media in hooks.call("discover-device-media", path):
if media:
media.mount()
self._medias.append(media)
filepath = media.convertDevicePath(filepath)
self._processcache.update(
dict.fromkeys(paths, media))
break
if media:
break
path = os.path.dirname(path)
else:
self._processcache.update(dict.fromkeys(paths, None))
return filepath, media
def getDefault(self):
default = sysconf.get("default-localmedia")
if default:
return self.findMountPoint(default, subpath=True)
return None
def __iter__(self):
return iter(self._medias)
class Media(object):
order = 1000
def __init__(self, mountpoint, device=None,
type=None, options=None, removable=False):
self._mountpoint = os.path.normpath(mountpoint)
self._device = device
self._type = type
self._options = options
self._removable = removable
self.resetState()
def resetState(self):
self._wasmounted = self.isMounted()
def restoreState(self):
if self._wasmounted:
self.mount()
else:
self.umount()
def getMountPoint(self):
return self._mountpoint
def getDevice(self):
return self._device
def getType(self):
return self._type
def getOptions(self):
return self._options
def isRemovable(self):
return self._removable
def wasMounted(self):
return self._wasmounted
def isMounted(self):
if not os.path.isfile("/proc/mounts"):
raise Error, _("/proc/mounts not found")
for line in open("/proc/mounts"):
device, mountpoint, type = line.split()[:3]
if mountpoint == self._mountpoint:
return True
return False
def mount(self):
return True
def umount(self):
return True
def eject(self):
if self._device:
status, output = commands.getstatusoutput("eject %s" %
self._device)
if status == 0:
return True
return False
def joinPath(self, path):
if path.startswith("localmedia:/"):
path = path[12:]
while path and path[0] == "/":
path = path[1:]
return os.path.join(self._mountpoint, path)
def joinURL(self, path):
if path.startswith("localmedia:/"):
path = path[12:]
while path and path[0] == "/":
path = path[1:]
return os.path.join("file://"+self._mountpoint, path)
def convertDevicePath(self, path):
if path.startswith(self._device):
path = path[len(self._device):]
while path and path[0] == "/":
path = path[1:]
path = os.path.join(self._mountpoint, path)
return path
def hasFile(self, path, comparepath=None):
if self.isMounted():
filepath = self.joinPath(path)
if (os.path.isfile(filepath) and
not comparepath or compareFile | s(path, comparepath)):
return True
return False
def __lt__(self, other):
return self.order < other.order
class MountMedia(Media):
def mount(self):
if self.isMounted():
return True
if self._device:
cmd = "mount %s %s" % (self._device, self._mountpoint)
if self._type:
cmd += " -t %s" % self._type
else:
cmd = "mount %s" % self._mountpoint
| if self._options:
cmd += " -o %s" % self._options
status, output = commands.getstatusoutput(cmd)
if status != 0:
iface.debug(output)
return False
return True
class UmountMedia(Media):
def umount(self):
if not self.isMounted():
return True
status, output = commands.getsta |
JulyKikuAkita/PythonPrac | cs15211/Leaf-SimilarTrees.py | Python | apache-2.0 | 3,524 | 0.00454 | __source__ = 'https://leetcode.com/problems/leaf-similar-trees/'
# Time: O(N + M)
# Space: O(N + M)
#
# Description: Leetcode # 872. Leaf-Similar Trees
#
# Consider all the leaves of a binary tree.
# From left to right order, the values of those leaves form a leaf value sequence.
#
# For example, in the given tree above, the leaf value sequence is (6, 7, 4, 9, 8).
#
# Two binary trees are considered leaf-similar if their leaf value sequence is the same.
#
# Return true if and only if the two given trees with head nodes root1 and root2 are leaf-similar.
#
# Note:
#
# Both of the given trees will have between 1 and 100 nodes.
#
import unittest
#20ms 100%
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def leafSimilar(self, root1, root2):
"""
:type root1: TreeNode
:type root2: TreeNode
:rtype: bool
"""
def TreeOne(root,r):
if root is None:
return None
if root.left is not None:
TreeOne(root.left,r)
if root.right is not None:
TreeOne(root.right,r)
if root.left is None and root.right is None:
r.append(root.val)
return r
r1 = TreeOne(root1,[])
r2 = TreeOne(root2,[])
return r1 == r2
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
#Thought: https://leetcode.com/problems/leaf-similar-trees/solution/
Approach 1: Depth First Search
Complexity Analysis
Time Complexity: O(T_1 + T_2), where T1, T2 are the lengths of the given trees.
Space Complexity: O(T_1 + T_2)O, the space used in storing the leaf values.
/**
* Definition for a binary tree node.
* public class TreeNode {
* int val;
* TreeNode left;
* TreeNode right;
* TreeNode(int x) { val = x; }
* }
*/
# DFS
# 2ms 99.33%
class Solution {
public boolean leafSimilar(TreeNode root1, TreeNode root2) {
List<Integer> leaves1 = new ArrayList();
List<Integer> leaves2 = new ArrayList();
dfs(root1, leaves1);
dfs(root2, leaves2);
return leaves1.equals(leaves2);
}
private void dfs(TreeNode node, List<Integer> leafValues) {
if (node == null) return;
if (node.left == null && node.right == null) leafValues.add(node.val);
dfs(node.left, leafValues);
dfs(node.right, leafValues);
}
}
# Queue:
# 2ms 99.33%
class Solution {
| public boolean leafSimilar(TreeNode root1, TreeNode root2) {
Queue<Inte | ger> q = new LinkedList<Integer>();
buildLeafNodeSeq(root1, q, true);
buildLeafNodeSeq(root2, q, false);
if (q.isEmpty()) return true;
return false;
}
private void buildLeafNodeSeq(TreeNode node, Queue<Integer> q, boolean add) {
if (node == null) {
return;
}
if (node.left == null && node.right == null) {
if (add) {
q.add(node.val);
} else {
if (q.peek() == node.val) {
q.remove(node.val);
}
}
}
if (node.left != null) {
buildLeafNodeSeq(node.left, q, add);
}
if (node.right != null) {
buildLeafNodeSeq(node.right, q, add);
}
}
}
'''
|
marcb1/groupme_bot | src/groupme_bot.py | Python | gpl-2.0 | 891 | 0.042649 | import os
from flask import Flask
from flask import request
import requests
import ran | dom
import codecs
#API id
#move this to a config file
bot_id = ''
app = Flask(__name__)
#encode string as ASCII
def stripped(text):
text = text.lower()
return text.encode('ascii','replace_spc')
def send(text):
message = {
'text' : text,
'bot_id' : bot_id
}
r = requests.post("https://api.groupme.com/v3/bots/post", params = message)
@app.route('/', methods=['POST'])
def message():
if not request.json or not 'text' in request.json:
return |
user_id = request.json['user_id']
nick = request.json['name'].lower()
message = request.json['text'].lower()
message = stripped(message).strip()
print 'Got message' + message
message_callback.got_message(message, nick);
return ''
if __name__ == "__main__":
app.run(port = 8080, host = '0.0.0.0', debug = True)
|
flipcoder/bitplanes | sg.py | Python | mit | 80 | 0.0125 | # sgmake | script to use clang instead of gcc
makefile_params=["CXX=\'clang+ | +\'"]
|
jiaphuan/models | research/object_detection/utils/shape_utils_test.py | Python | apache-2.0 | 13,659 | 0.006003 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.shape_utils."""
import numpy as np
import tensorflow as tf
from object_detection.utils import shape_utils
class UtilTest(tf.test.TestCase):
def test_pad_tensor_using_integer_input(self):
t1 = tf.constant([1], dtype=tf.int32)
pad_t1 = shape_utils.pad_tensor(t1, 2)
t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)
| pad_t2 = shape_utils.pad_tensor(t2, 2)
self.assertEqual(2, pad_t1.get_shape()[0])
self.assertEqual(2, pad_t2.get_shape()[0])
with self.test_session() as sess:
pad_t1_result, pad_ | t2_result = sess.run([pad_t1, pad_t2])
self.assertAllEqual([1, 0], pad_t1_result)
self.assertAllClose([[0.1, 0.2], [0, 0]], pad_t2_result)
def test_pad_tensor_using_tensor_input(self):
t1 = tf.constant([1], dtype=tf.int32)
pad_t1 = shape_utils.pad_tensor(t1, tf.constant(2))
t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)
pad_t2 = shape_utils.pad_tensor(t2, tf.constant(2))
with self.test_session() as sess:
pad_t1_result, pad_t2_result = sess.run([pad_t1, pad_t2])
self.assertAllEqual([1, 0], pad_t1_result)
self.assertAllClose([[0.1, 0.2], [0, 0]], pad_t2_result)
def test_clip_tensor_using_integer_input(self):
t1 = tf.constant([1, 2, 3], dtype=tf.int32)
clip_t1 = shape_utils.clip_tensor(t1, 2)
t2 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)
clip_t2 = shape_utils.clip_tensor(t2, 2)
self.assertEqual(2, clip_t1.get_shape()[0])
self.assertEqual(2, clip_t2.get_shape()[0])
with self.test_session() as sess:
clip_t1_result, clip_t2_result = sess.run([clip_t1, clip_t2])
self.assertAllEqual([1, 2], clip_t1_result)
self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], clip_t2_result)
def test_clip_tensor_using_tensor_input(self):
t1 = tf.constant([1, 2, 3], dtype=tf.int32)
clip_t1 = shape_utils.clip_tensor(t1, tf.constant(2))
t2 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)
clip_t2 = shape_utils.clip_tensor(t2, tf.constant(2))
with self.test_session() as sess:
clip_t1_result, clip_t2_result = sess.run([clip_t1, clip_t2])
self.assertAllEqual([1, 2], clip_t1_result)
self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], clip_t2_result)
def test_pad_or_clip_tensor_using_integer_input(self):
t1 = tf.constant([1], dtype=tf.int32)
tt1 = shape_utils.pad_or_clip_tensor(t1, 2)
t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)
tt2 = shape_utils.pad_or_clip_tensor(t2, 2)
t3 = tf.constant([1, 2, 3], dtype=tf.int32)
tt3 = shape_utils.clip_tensor(t3, 2)
t4 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)
tt4 = shape_utils.clip_tensor(t4, 2)
self.assertEqual(2, tt1.get_shape()[0])
self.assertEqual(2, tt2.get_shape()[0])
self.assertEqual(2, tt3.get_shape()[0])
self.assertEqual(2, tt4.get_shape()[0])
with self.test_session() as sess:
tt1_result, tt2_result, tt3_result, tt4_result = sess.run(
[tt1, tt2, tt3, tt4])
self.assertAllEqual([1, 0], tt1_result)
self.assertAllClose([[0.1, 0.2], [0, 0]], tt2_result)
self.assertAllEqual([1, 2], tt3_result)
self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], tt4_result)
def test_pad_or_clip_tensor_using_tensor_input(self):
t1 = tf.constant([1], dtype=tf.int32)
tt1 = shape_utils.pad_or_clip_tensor(t1, tf.constant(2))
t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)
tt2 = shape_utils.pad_or_clip_tensor(t2, tf.constant(2))
t3 = tf.constant([1, 2, 3], dtype=tf.int32)
tt3 = shape_utils.clip_tensor(t3, tf.constant(2))
t4 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)
tt4 = shape_utils.clip_tensor(t4, tf.constant(2))
with self.test_session() as sess:
tt1_result, tt2_result, tt3_result, tt4_result = sess.run(
[tt1, tt2, tt3, tt4])
self.assertAllEqual([1, 0], tt1_result)
self.assertAllClose([[0.1, 0.2], [0, 0]], tt2_result)
self.assertAllEqual([1, 2], tt3_result)
self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], tt4_result)
def test_combines_static_dynamic_shape(self):
tensor = tf.placeholder(tf.float32, shape=(None, 2, 3))
combined_shape = shape_utils.combined_static_and_dynamic_shape(
tensor)
self.assertTrue(tf.contrib.framework.is_tensor(combined_shape[0]))
self.assertListEqual(combined_shape[1:], [2, 3])
class StaticOrDynamicMapFnTest(tf.test.TestCase):
def test_with_dynamic_shape(self):
def fn(input_tensor):
return tf.reduce_sum(input_tensor)
input_tensor = tf.placeholder(tf.float32, shape=(None, 2))
map_fn_output = shape_utils.static_or_dynamic_map_fn(fn, input_tensor)
op_names = [op.name for op in tf.get_default_graph().get_operations()]
self.assertTrue(any(['map' == op_name[:3] for op_name in op_names]))
with self.test_session() as sess:
result1 = sess.run(
map_fn_output, feed_dict={
input_tensor: [[1, 2], [3, 1], [0, 4]]})
result2 = sess.run(
map_fn_output, feed_dict={
input_tensor: [[-1, 1], [0, 9]]})
self.assertAllEqual(result1, [3, 4, 4])
self.assertAllEqual(result2, [0, 9])
def test_with_static_shape(self):
def fn(input_tensor):
return tf.reduce_sum(input_tensor)
input_tensor = tf.constant([[1, 2], [3, 1], [0, 4]], dtype=tf.float32)
map_fn_output = shape_utils.static_or_dynamic_map_fn(fn, input_tensor)
op_names = [op.name for op in tf.get_default_graph().get_operations()]
self.assertTrue(all(['map' != op_name[:3] for op_name in op_names]))
with self.test_session() as sess:
result = sess.run(map_fn_output)
self.assertAllEqual(result, [3, 4, 4])
def test_with_multiple_dynamic_shapes(self):
def fn(elems):
input_tensor, scalar_index_tensor = elems
return tf.reshape(tf.slice(input_tensor, scalar_index_tensor, [1]), [])
input_tensor = tf.placeholder(tf.float32, shape=(None, 3))
scalar_index_tensor = tf.placeholder(tf.int32, shape=(None, 1))
map_fn_output = shape_utils.static_or_dynamic_map_fn(
fn, [input_tensor, scalar_index_tensor], dtype=tf.float32)
op_names = [op.name for op in tf.get_default_graph().get_operations()]
self.assertTrue(any(['map' == op_name[:3] for op_name in op_names]))
with self.test_session() as sess:
result1 = sess.run(
map_fn_output, feed_dict={
input_tensor: [[1, 2, 3], [4, 5, -1], [0, 6, 9]],
scalar_index_tensor: [[0], [2], [1]],
})
result2 = sess.run(
map_fn_output, feed_dict={
input_tensor: [[-1, 1, 0], [3, 9, 30]],
scalar_index_tensor: [[1], [0]]
})
self.assertAllEqual(result1, [1, -1, 6])
self.assertAllEqual(result2, [1, 3])
def test_with_multiple_static_shapes(self):
def fn(elems):
input_tensor, scalar_index_tensor = elems
return tf.reshape(tf.slice(input_tensor, scalar_index_tensor, [1]), [])
input_tensor = tf.constant([[1, 2, 3], [4, 5, -1], [0, 6, 9]],
dtype=tf.float32)
scalar_index_tensor = tf.constant([[0], [2], [1]], dtype=tf.int32)
map_fn_output = shape_utils.static_or_dynamic_map_fn(
fn, [input_tensor, scalar_index_tensor], dtype=tf.float32)
op_names = [op.name for op in tf.get_default_graph().get_operations()]
self.assertTrue(all(['m |
tonybeltramelli/Deep-Spying | server/analytics/modules/label/Label.py | Python | apache-2.0 | 725 | 0.002759 | __author__ = 'Tony Beltramelli www.tonybeltramelli.com - 06/09/2015'
import numpy as np
class Label:
def __init__(self, path):
file_path = "{}labels.csv".format(path)
try:
data = np.genfromtxt(file_ | path, delimiter=',', skip_header=1,
names=['timestamp', 'label'], dtype=[("timestamp", long), ('label', int)])
self.has_label = True
except IOError:
self.has_label = False
return
self.timestamp = data['timestamp']
label = data['label']
self.label = []
for i in range(0, len(label)):
self.label | .append(chr(int(label[i])))
self.diff = np.diff(self.timestamp)
|
knoppo/tcprocd | tcprocd/client.py | Python | mit | 7,172 | 0.000697 | """tcprocd client."""
from __future__ import unicode_literals, print_function, absolute_import
from tcprocd.protocol import Protocol
import socket
import select
import sys
if sys.version_info[0] < 3:
str_types = (str, unicode) # noqa
else:
str_types = (str, bytes) # noqa
class SocketShell(object):
"""
A class to connect to a process's thread.
Adds a line buffer for the socket and passes received messages to ``on_receive``.
Messages by the user are passed to ``on_stdin``.
:param client: :class:`tcprocd.client.Client` - The client to use for the connection.
"""
def __init__(self, client):
"""Initialize shell."""
self.client = client
self.sockets = [sys.stdin, client.socket]
self._do_stop = False
def on_stdin_ready(self):
"""Called when some input is ready."""
line = sys.stdin.readline().strip()
if line == 'exit':
return True
self.client.protocol.sendline(line + '\n')
def on_socket_ready(self):
"""Called when receiving some process output."""
line = self.client.protocol.readline()
if line == 'exit':
return True
sys.stdout.write(line + '\n')
sys.stdout.flush()
def run(self):
"""Start waiting for input/output."""
try:
while not self._do_stop:
ready = select.select(self.sockets, [], [])[0]
for s in ready:
if s == self.client.socket: # message by server
if self.on_socket_ready():
self._do_stop = True
else: # message by user
if self.on_stdin_ready():
self._do_stop = True
finally:
self.client.socket.close()
class AuthenticationError(Exception):
"""Exception raised when authentication fails."""
pass
class ServerError(Exception):
"""Exception raised when the server answers with an error."""
pass
class Client(object):
"""
A class to connect to a tcprocd server.
:param server_address: tuple of host | and port or the path to the socket file
"""
def __init__(self, server_address):
"""Initialize client."""
self.server_address = server_address
if isinstance(server_address, str_types):
self.is_unix_domain = True
sock_type = socket.AF_UNIX
else:
self.is_unix_domain = False
sock_type = socket.AF_INET
self.socket = socket.socket(sock_type, sock | et.SOCK_STREAM)
self.protocol = Protocol(self.socket)
self.server_version = None
self.attached_to = None
def connect(self, username=None, password=None, username_callback=None, password_callback=None):
"""Connect to the server."""
if self.is_unix_domain:
try:
SO_PASSCRED = socket.SO_PASSCRED
except AttributeError:
SO_PASSCRED = 16
self.socket.setsockopt(socket.SOL_SOCKET, SO_PASSCRED, 1)
self.socket.connect(self.server_address)
self.server_version = self.protocol.recv_part(3)
answer = self.protocol.recv_part(2)
# TCP connections always require username and password.
# A unix domain socket does not accept an username and
# only requires a password if the connecting user has one.
# TODO: return 'authentication required' and let the caller authenticate on its own
if answer == self.protocol.AUTHENTICATION_REQUIRED:
if not self.is_unix_domain:
if username is None:
username = username_callback()
self.protocol.send_part(2, username)
if password is None:
password = password_callback()
self.protocol.send_part(2, password)
answer = self.protocol.recv_part(2)
if answer != self.protocol.OK:
raise AuthenticationError()
elif answer != self.protocol.OK:
raise ServerError(answer)
def close(self):
"""Close the connection."""
self.socket.close()
def __enter__(self):
"""Connect when used as context manager."""
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Disconnect afterwards, when used as context manager."""
self.close()
def list(self):
"""List servers."""
self.protocol.send_part(2, 'list')
answer = self.protocol.recv_part(2)
data = []
if answer == self.protocol.OK:
data = self.protocol.recv_part(6).split('\n')
return answer, data
def cat(self, name, start=0):
"""Get output of given process.
:param name: :class:`str` - Name of the process.
:param start: :class:`int` - Start at this line. (Default: ``0``)
:return: :class:`str` - Multi-line output of the process.
"""
self.protocol.send_part(2, 'cat')
self.protocol.send_part(2, name)
self.protocol.send_part(1, str(start))
answer = self.protocol.recv_part(2)
data = []
if answer == self.protocol.OK:
data = self.protocol.recv_part(6).split('\n')
return answer, data
def start(self, name, command, path=''):
"""Create a new process with the given ``name`` and ``command``.
:param name: :class:`str` - Name of the process.
:param command: :class:`str` - The command to run the process.
:param path: :class:`str` - The (remote) path to execute the
command in. (Default: ``None``)
:return: :class:`str` - Status message
"""
self.protocol.send_part(2, 'start')
self.protocol.send_part(2, name)
self.protocol.send_part(3, command)
self.protocol.send_part(3, path)
return self.protocol.recv_part(2)
def kill(self, name):
"""Kill the given process.
:param name: :class:`str` - Name of the process.
:return: :class:`str` - Status message
"""
self.protocol.send_part(2, 'kill')
self.protocol.send_part(2, name)
return self.protocol.recv_part(2)
def command(self, name, command):
"""Write the given command to the given process's stdin.
.. Note: Use ``cat`` to see the process's stdout!
:param name: :class:`str` - Name of the process.
:param command: :class:`str` - The command to send to the process.
:return: :class:`str` - Status message
"""
self.protocol.send_part(2, 'command')
self.protocol.send_part(2, name)
self.protocol.send_part(3, command)
return self.protocol.recv_part(2)
def attach(self, name):
"""
Attach to the given process's shell.
:param name: :class:`str` - Name of the process.
:return: :class:`str` - Status message
"""
self.protocol.send_part(2, 'attach')
self.protocol.send_part(2, name)
return self.protocol.recv_part(2)
|
jjst/rbpriority | setup.py | Python | mit | 601 | 0 | from reviewboard.extensions.packaging import setup
PACKAGE = "rbpriori | ty"
VERSION = "0.1"
setup(
name=PACKAGE,
version=VERSION,
description="Extension rbpriority",
author="Jeremie Jost",
packages=["r | bpriority"],
entry_points={
'reviewboard.extensions':
'%s = rbpriority.extension:PriorityExtension' % PACKAGE,
},
package_data={
'rbpriority': [
'templates/rbpriority/*.txt',
'templates/rbpriority/*.html',
],
},
scripts=['bin/rb-priority-alert'],
install_requires=[
'enum34'
]
)
|
irinabov/debian-qpid-cpp-1.35.0 | src/tests/acl_1.py | Python | apache-2.0 | 17,802 | 0.00573 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from qpid.tests.messaging.implementation import *
from qpid.tests.messaging import VersionTest
from mgmt_1 import Mgmt
class Policy(object):
def __init__(self):
self.lines = []
def str(self):
return '\n'.join(lines)
class AclCtrl(object):
def __init__(self, broker, path):
self.policy = path
self.original = self.read()
conn = Connection.establish(broker, protocol='amqp0-10', username='admin', password='admin', sasl_mechanism='PLAIN')
self.agent = Mgmt(conn)
self.agent.create('queue', 'acl_test_queue')
self.lines = []
def deny(self, user=None, *args):
self._add_rule('deny', user, *args)
return self
def allow(self, user=None, *args):
self._add_rule('allow', user, *args)
return self
def apply(self, allow_admin=True):
if allow_admin:
# admin users needs permission to send a qmf message to
# reload policy
self.lines.insert(0, 'acl allow admin@QPID all all')
self.specify("\n".join(self.lines))
return self
def dump(self):
print "\n".join(self.lines)
return self
def clear(self):
self.lines = []
return self
def _add_rule(self, deny_or_allow, user=None, *args):
elements = ['acl', deny_or_allow]
if user:
elements.append("%s@QPID" % user)
else:
elements.append('all')
if len(args) > 0:
for a in args:
elements.append(a)
else:
elements.append('all')
self.lines.append(' '.join(elements))
def read(self):
f = open(self.policy,'r')
content = f.read()
f.close()
return content
def specify(self, acl):
f = open(self.policy,'w')
f.write(acl)
f.close()
self.agent.reload_acl_file()
def restore(self):
self.agent.delete('queue', 'acl_test_queue')
self.specify(self.original)
self.agent.close()
class Acl_AMQP1_Tests (VersionTest):
"""
Tests for acl when accessing qpidd via AMQP 1.0
"""
def auth_session(self, user):
conn = Connection.establish(self.config.broker, protocol='amqp1.0', username=user, password=user, sasl_mechanism='PLAIN', container_id=user)
return conn.session()
def setUp(self):
VersionTest.setup(self)
self.acl = AclCtrl(self.config.broker, self.config.defines.get("policy_file"))
self.alice = self.auth_session('alice')
self.bob = self.auth_session('bob')
def tearDown(self):
self.bob.connection.close()
self.alice.connection.close()
self.acl.restore()
VersionTest.teardown(self)
def test_deny_sender_to_exchange(self):
self.acl.allow('alice').deny().apply()
try:
self.ssn.sender("amq.topic")
assert False, "anonymous should not be allowed to create sender to amq.topic"
except UnauthorizedAccess: pass
try:
self.bob.sender("amq.topic")
assert False, "bob should not be allowed to create sender to amq.topic"
except UnauthorizedAccess: pass
self.alice.sender("amq.topic")
def test_deny_sender_to_queue(self):
self.acl.allow('alice').deny().apply()
try:
self.ssn.sender("acl_test_queue")
assert False, "anonymous shound not be allowed to create sender to acl_test_queue"
except UnauthorizedAccess: pass
try:
self.bob.sender("acl_test_queue")
assert False, "bob should not be allowed to create sender to acl_test_queue"
except UnauthorizedAccess: pass
self.alice.sender("acl_test_queue")
def test_deny_sender_to_unknown(self):
self.acl.allow('alice').deny().apply()
try:
self.ssn.sender("unknown")
assert False, "anonymous should not be allowed to create sender to non-existent node"
except UnauthorizedAccess: pass
try:
self.bob.sender("unknown")
assert False, "bob should not be allowed to create sender to unknown"
except UnauthorizedAccess: pass
try:
self.alice.sender("unknown")
except NotFound: pass
def test_deny_receiver_to_exchange(self):
self.acl.allow('alice').deny().apply()
try:
self. | ssn.receiver("amq.topic")
assert False, "anonymous | should not be allowed to create receiver from amq.topic"
except UnauthorizedAccess: pass
try:
self.bob.receiver("amq.topic")
assert False, "bob should not be allowed to create receiver to amq.topic"
except UnauthorizedAccess: pass
self.alice.receiver("amq.topic")
def test_deny_receiver_to_queue(self):
self.acl.allow('alice').deny().apply()
try:
self.ssn.receiver("acl_test_queue")
assert False, "anonymous should not be allowed to create receiver from acl_test_queue"
except UnauthorizedAccess: pass
try:
self.bob.receiver("acl_test_queue")
assert False, "bob should not be allowed to create receiver to acl_test_queue"
except UnauthorizedAccess: pass
self.alice.receiver("acl_test_queue")
def test_deny_receiver_to_unknown(self):
self.acl.allow('alice').deny().apply()
try:
self.ssn.receiver("I_dont_exist")
assert False, "anonymous should not be allowed to create receiver from non-existent node"
except UnauthorizedAccess: pass
try:
self.bob.receiver("unknown")
assert False, "bob should not be allowed to create receiver to unknown"
except UnauthorizedAccess: pass
try:
self.alice.receiver("unknown")
except NotFound: pass
def test_create_for_receiver_from_exchange(self):
self.acl.allow('bob', 'access', 'exchange', 'name=amq.topic')
self.acl.allow('bob', 'access', 'queue', 'name=amq.topic')
self.acl.allow('alice').deny().apply()
try:
self.ssn.receiver("amq.topic")
assert False, "anonymous should not be allowed to create receiver from amq.topic"
except UnauthorizedAccess: pass
try:
self.bob.receiver("amq.topic")
assert False, "bob should not be allowed to create receiver from amq.topic without create permission"
except UnauthorizedAccess: pass
self.alice.receiver("amq.topic")
def test_bind_for_receiver_from_exchange(self):
self.acl.allow('bob', 'access', 'exchange', 'name=amq.topic')
self.acl.allow('bob', 'access', 'queue', 'name=amq.topic')
self.acl.allow('bob', 'create', 'queue', 'name=bob*')
self.acl.allow('alice').deny().apply()
try:
self.ssn.receiver("amq.topic")
assert False, "anonymous should not be allowed to create receiver from amq.topic"
except UnauthorizedAccess: pass
try:
self.bob.receiver("amq.topic")
assert False, "bob should not be allowed to create receiver from amq.topic without bind permission"
except UnauthorizedAccess: pass
self.alice.receiver("amq.topic")
def test_consume_for_receiver_from_exchange(self):
self.acl.allo |
axbaretto/beam | sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/test/functional/wrong_import_position9.py | Python | apache-2.0 | 202 | 0.00495 | """Checks | import position rule"""
# pylint: disable=unused-import,relative-import,ungrouped-imports,import-error,no-name-in-module
import y
try:
import x
except | ImportError:
pass
else:
pass
|
Geontech/docker-redhawk-ubuntu | Dockerfiles/files/build/ide-fetcher.py | Python | gpl-3.0 | 1,661 | 0.004816 | #!/usr/bin/python
# Author: Thomas Goodwin <btgoodwin@geontech.com>
import urllib2, json, os, sys, re
def download_asset(path, url):
asset_path = None
try:
file_name = os.path.basename(url)
asset_path = os.path.join(path, file_name)
if os.path.exists(asset_path):
# Skip downloading
asset_path = None
else:
if not os.path.exists(path):
os.makedirs(path)
f = urllib2.urlopen(url)
with open(asset_path, "wb") as local_file:
local_file.write(f.read())
except Exception as e:
sys.exit('Failed to fetch IDE. Error: {0}'.format(e))
finally:
re | turn asset_path
def handle_release_assets(ass | ets):
assets = [ asset for asset in assets if re.match(r'redhawk-ide.+?(?=x86_64)', asset['name'])]
if not assets:
sys.exit('Failed to find the IDE asset')
elif len(assets) > 1:
sys.exit('Found too many IDE assets matching that description...?')
return download_asset('downloads', assets[0]['browser_download_url'])
def run(pv):
RELEASES_URL = 'http://api.github.com/repos/RedhawkSDR/redhawk/releases'
ide_asset = ''
try:
releases = json.loads(urllib2.urlopen(RELEASES_URL).read())
releases = [r for r in releases if r['tag_name'] == pv]
if releases:
ide_asset = handle_release_assets(releases[0]['assets'])
else:
sys.exit('Failed to find the release: {0}'.format(pv))
finally:
return ide_asset
if __name__ == '__main__':
# First argument is the version
asset = run(sys.argv[1])
print asset |
pacoqueen/ginn | extra/install/ipython2/ipython-5.10.0/setup.py | Python | gpl-2.0 | 10,245 | 0.007516 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Setup script for IPython.
Under Posix environments it works like a typical setup.py script.
Under Windows, the command sdist is not supported, since IPython
requires utilities which are not available under Windows."""
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2011, IPython Development Team.
# Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
#
# Distributed under | the terms of the Modified BSD License.
#
# The full license is in the file COPYING.rst, distributed with this software.
#--------- | --------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Minimal Python version sanity check
#-----------------------------------------------------------------------------
from __future__ import print_function
import sys
# This check is also made in IPython/__init__, don't forget to update both when
# changing Python version requirements.
v = sys.version_info
if v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):
error = "ERROR: IPython requires Python version 2.7 or 3.3 or above."
print(error, file=sys.stderr)
sys.exit(1)
PY3 = (sys.version_info[0] >= 3)
# At least we're on the python version we need, move on.
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
# Stdlib imports
import os
from glob import glob
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
from distutils.core import setup
# Our own imports
from setupbase import target_update
from setupbase import (
setup_args,
find_packages,
find_package_data,
check_package_data_first,
find_entry_points,
build_scripts_entrypt,
find_data_files,
git_prebuild,
install_symlinked,
install_lib_symlink,
install_scripts_for_symlink,
unsymlink,
)
isfile = os.path.isfile
pjoin = os.path.join
#-------------------------------------------------------------------------------
# Handle OS specific things
#-------------------------------------------------------------------------------
if os.name in ('nt','dos'):
os_name = 'windows'
else:
os_name = os.name
# Under Windows, 'sdist' has not been supported. Now that the docs build with
# Sphinx it might work, but let's not turn it on until someone confirms that it
# actually works.
if os_name == 'windows' and 'sdist' in sys.argv:
print('The sdist command is not available under Windows. Exiting.')
sys.exit(1)
#-------------------------------------------------------------------------------
# Things related to the IPython documentation
#-------------------------------------------------------------------------------
# update the manuals when building a source dist
if len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):
# List of things to be updated. Each entry is a triplet of args for
# target_update()
to_update = [
('docs/man/ipython.1.gz',
['docs/man/ipython.1'],
'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'),
]
[ target_update(*t) for t in to_update ]
#---------------------------------------------------------------------------
# Find all the packages, package data, and data_files
#---------------------------------------------------------------------------
packages = find_packages()
package_data = find_package_data()
data_files = find_data_files()
setup_args['packages'] = packages
setup_args['package_data'] = package_data
setup_args['data_files'] = data_files
#---------------------------------------------------------------------------
# custom distutils commands
#---------------------------------------------------------------------------
# imports here, so they are after setuptools import if there was one
from distutils.command.sdist import sdist
from distutils.command.upload import upload
class UploadWindowsInstallers(upload):
description = "Upload Windows installers to PyPI (only used from tools/release_windows.py)"
user_options = upload.user_options + [
('files=', 'f', 'exe file (or glob) to upload')
]
def initialize_options(self):
upload.initialize_options(self)
meta = self.distribution.metadata
base = '{name}-{version}'.format(
name=meta.get_name(),
version=meta.get_version()
)
self.files = os.path.join('dist', '%s.*.exe' % base)
def run(self):
for dist_file in glob(self.files):
self.upload_file('bdist_wininst', 'any', dist_file)
setup_args['cmdclass'] = {
'build_py': \
check_package_data_first(git_prebuild('IPython')),
'sdist' : git_prebuild('IPython', sdist),
'upload_wininst' : UploadWindowsInstallers,
'symlink': install_symlinked,
'install_lib_symlink': install_lib_symlink,
'install_scripts_sym': install_scripts_for_symlink,
'unsymlink': unsymlink,
}
#---------------------------------------------------------------------------
# Handle scripts, dependencies, and setuptools specific things
#---------------------------------------------------------------------------
# For some commands, use setuptools. Note that we do NOT list install here!
# If you want a setuptools-enhanced install, just run 'setupegg.py install'
needs_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist', 'bdist_dumb', 'bdist_wininst', 'bdist_wheel',
'egg_info', 'easy_install', 'upload', 'install_egg_info',
))
if len(needs_setuptools.intersection(sys.argv)) > 0:
import setuptools
# This dict is used for passing extra arguments that are setuptools
# specific to setup
setuptools_extra_args = {}
# setuptools requirements
extras_require = dict(
parallel = ['ipyparallel'],
qtconsole = ['qtconsole'],
doc = ['Sphinx>=1.3'],
test = ['nose>=0.10.1', 'requests', 'testpath', 'pygments<2.6', 'nbformat', 'ipykernel'],
terminal = [],
kernel = ['ipykernel'],
nbformat = ['nbformat'],
notebook = ['notebook', 'ipywidgets'],
nbconvert = ['nbconvert'],
)
install_requires = [
'setuptools>=18.5',
'decorator',
'pickleshare',
'simplegeneric>0.8',
'traitlets>=4.2',
'prompt_toolkit>=1.0.4,<2.0.0',
'pygments<2.6',
]
# Platform-specific dependencies:
# This is the correct way to specify these,
# but requires pip >= 6. pip < 6 ignores these.
extras_require.update({
':python_version == "2.7"': ['backports.shutil_get_terminal_size'],
':python_version == "2.7" or python_version == "3.3"': ['pathlib2'],
'test:python_version >= "3.4"': ['numpy'],
':sys_platform != "win32"': ['pexpect'],
':sys_platform == "darwin"': ['appnope'],
':sys_platform == "win32"': ['colorama'],
':sys_platform == "win32" and python_version < "3.6"': ['win_unicode_console>=0.5'],
'test:python_version == "2.7"': ['mock'],
})
# FIXME: re-specify above platform dependencies for pip < 6
# These would result in non-portable bdists.
if not any(arg.startswith('bdist') for arg in sys.argv):
if sys.version_info < (3, 3):
extras_require['test'].append('mock')
if sys.platform == 'darwin':
install_requires.extend(['appnope'])
if not sys.platform.startswith('win'):
install_requires.append('pexpect')
# workaround pypa/setuptools#147, where setuptools misspells
# platform_python_implementation as python_implementation
if 'setuptools' in sys.modules:
for key in list(extras_require):
if 'platform_python_implementation' in key:
new_key = key.replace('platform_python_implementation', 'python_implementation')
extras_require[new_key] = extras_require.pop(key)
everything = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.