| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable ⌀) |
|---|---|---|---|---|
paplorinc/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyPackageRequirementsInspection/ImportsNotInRequirementsTxt/test1.py
|
18
|
import pip
import <weak_warning descr="Package containing module 'opster' is not listed in project requirements">opster</weak_warning>
from <weak_warning descr="Package containing module 'clevercss' is not listed in project requirements">clevercss</weak_warning> import convert
import <weak_warning descr="Package containing module 'django' is not listed in project requirements">django</weak_warning>.conf
import httplib
import <weak_warning descr="Package containing module 'test3' is not listed in project requirements">test3</weak_warning>
print('Hello, World!')
|
joaduo/mepinta
|
refs/heads/master
|
plugins/c_and_cpp/k3dv1/plugins_tests/c_and_cpp/processors/k3dv1/__init__.py
|
12133432
| |
trondhindenes/ansible-modules-extras
|
refs/heads/devel
|
__init__.py
|
12133432
| |
mbareta/edx-platform-ft
|
refs/heads/open-release/eucalyptus.master
|
openedx/core/djangoapps/__init__.py
|
12133432
| |
astaff/ansible
|
refs/heads/devel
|
lib/ansible/utils/module_docs_fragments/__init__.py
|
12133432
| |
HeraldStudio/webservice-py
|
refs/heads/master
|
mod/library/__init__.py
|
12133432
| |
UltrosBot/Ultros
|
refs/heads/master
|
system/commands/__init__.py
|
13
|
# coding=utf-8
__author__ = 'Gareth Coles'
|
beatrizjesus/my-first-blog
|
refs/heads/master
|
pasta/Lib/site-packages/pip/operations/freeze.py
|
84
|
from __future__ import absolute_import
import logging
import re
import pip
from pip.compat import stdlib_pkgs
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions
from pip._vendor import pkg_resources
logger = logging.getLogger(__name__)
# packages to exclude from freeze output
freeze_excludes = stdlib_pkgs + ['setuptools', 'pip', 'distribute']
def freeze(
requirement=None,
find_links=None, local_only=None, user_only=None, skip_regex=None,
find_tags=False,
default_vcs=None,
isolated=False):
find_links = find_links or []
skip_match = None
if skip_regex:
skip_match = re.compile(skip_regex)
dependency_links = []
for dist in pkg_resources.working_set:
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt')
)
for link in find_links:
if '#egg=' in link:
dependency_links.append(link)
for link in find_links:
yield '-f %s' % link
installations = {}
for dist in get_installed_distributions(local_only=local_only,
skip=freeze_excludes,
user_only=user_only):
req = pip.FrozenRequirement.from_dist(
dist,
dependency_links,
find_tags=find_tags,
)
installations[req.name] = req
if requirement:
with open(requirement) as req_file:
for line in req_file:
if (not line.strip()
or line.strip().startswith('#')
or (skip_match and skip_match.search(line))
or line.startswith((
'-r', '--requirement',
'-Z', '--always-unzip',
'-f', '--find-links',
'-i', '--index-url',
'--extra-index-url'))):
yield line.rstrip()
continue
if line.startswith('-e') or line.startswith('--editable'):
if line.startswith('-e'):
line = line[2:].strip()
else:
line = line[len('--editable'):].strip().lstrip('=')
line_req = InstallRequirement.from_editable(
line,
default_vcs=default_vcs,
isolated=isolated,
)
else:
line_req = InstallRequirement.from_line(
line,
isolated=isolated,
)
if not line_req.name:
logger.info(
"Skipping line because it's not clear what it "
"would install: %s",
line.strip(),
)
logger.info(
" (add #egg=PackageName to the URL to avoid"
" this warning)"
)
elif line_req.name not in installations:
logger.warning(
"Requirement file contains %s, but that package is"
" not installed",
line.strip(),
)
else:
yield str(installations[line_req.name]).rstrip()
del installations[line_req.name]
yield(
'## The following requirements were added by '
'pip freeze:'
)
for installation in sorted(
installations.values(), key=lambda x: x.name.lower()):
yield str(installation).rstrip()
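# Illustrative usage sketch (not part of the original file; assumes an
# installed environment to inspect): freeze() is a generator of
# requirements.txt-style lines, e.g.
#     for line in freeze(local_only=True):
#         print(line)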
|
selkhateeb/tic
|
refs/heads/master
|
src/tic/db/__init__.py
|
1
|
# To change this template, choose Tools | Templates
# and open the template in the editor.
__author__="selkhateeb"
__date__ ="$Jul 17, 2010 6:50:52 PM$"
if __name__ == "__main__":
print "Hello World"
|
akhmadMizkat/odoo
|
refs/heads/master
|
addons/website_crm_partner_assign/controllers/__init__.py
|
7372
|
import main
|
gsnbng/erpnext
|
refs/heads/develop
|
erpnext/patches/v4_2/toggle_rounded_total.py
|
120
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
global_defaults = frappe.get_doc("Global Defaults", "Global Defaults")
global_defaults.toggle_rounded_total()
|
pjuu/pjuu
|
refs/heads/master
|
pjuu/auth/backend.py
|
1
|
# -*- coding: utf8 -*-
"""Simple auth functions with access to the databases for use in the views.
:license: AGPL v3, see LICENSE for more details
:copyright: 2014-2021 Joe Doherty
"""
# Stdlib imports
from datetime import datetime
import re
# 3rd party imports
from flask import session
from pymongo.errors import DuplicateKeyError
from werkzeug.security import (generate_password_hash as generate_password,
check_password_hash as check_password)
# Pjuu imports
from pjuu import mongo as m, redis as r, storage
from pjuu.auth.utils import get_user
from pjuu.lib import keys as k, timestamp, get_uuid
from pjuu.posts.backend import delete_post
# Username & E-mail checker re patterns
USERNAME_PATTERN = r'^\w{3,16}$'
EMAIL_PATTERN = r'^[^@%!/|`#&?]+@[^.@%!/|`#&?][^@%!/|`#&?]*\.[a-z]{2,10}$'
# Usable regular expression objects
USERNAME_RE = re.compile(USERNAME_PATTERN)
EMAIL_RE = re.compile(EMAIL_PATTERN)
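# Illustrative matches (added for clarity, not in the original source):
#     USERNAME_RE.match('joe_123')          # matches: 3-16 word characters
#     EMAIL_RE.match('user@example.com')    # matches the e-mail pattern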
# TODO: Come up with a better solution for this.
# Reserved names
# Before adding a name here ensure that no one is using it.
# Names here DO NOT have to match the pattern for usernames as these may change
# in the future. We need to protect endpoints which we need and can not afford
# to give to users.
RESERVED_NAMES = [
'about', 'about_us', 'aboutus', 'access', 'account', 'accounts',
'activate', 'add', 'address', 'adm', 'admin', 'administration', 'ajax',
'alert', 'alerts', 'analytics', 'api', 'app', 'apps', 'archive', 'auth',
'authentication', 'avatar', 'billing', 'bin', 'blog', 'blogs', 'cache',
'calendar', 'careers', 'cgi', 'chat', 'client', 'code', 'config',
'connect', 'contact', 'contest', 'create', 'css', 'dashboard', 'data',
'db', 'delete', 'design', 'dev', 'devel', 'dir', 'directory', 'doc',
'docs', 'domain', 'download', 'downloads', 'downvote', 'ecommerce', 'edit',
'editor', 'email', 'faq', 'favorite', 'feed', 'feedback', 'file', 'files',
'find', 'flog', 'follow', 'followers', 'following', 'forgot', 'forum',
'forums', 'group', 'groups', 'has-alerts', 'hasalerts', 'help', 'home',
'homepage', 'host', 'hosting', 'hostname', 'hpg', 'html', 'http', 'httpd',
'https', 'i-has-alerts', 'ihasalerts', 'image', 'images', 'imap', 'img',
'index', 'info', 'information', 'invite', 'java', 'javascript', 'job',
'jobs', 'js', 'list', 'lists', 'log', 'login', 'logout', 'logs', 'mail',
'master', 'media', 'message', 'messages', 'name', 'net', 'network', 'new',
'news', 'newsletter', 'nick', 'nickname', 'notes', 'order', 'orders',
'page', 'pager', 'pages', 'password', 'photo', 'photos', 'php', 'pic',
'pics', 'pjuu', 'plugin', 'plugins', 'post', 'posts', 'privacy',
'privacy_policy', 'privacypolicy', 'profile', 'project', 'projects', 'pub',
'public', 'random', 'recover', 'register', 'registration', 'report',
'reset', 'root', 'rss', 'script', 'scripts', 'search', 'secure',
'security', 'send', 'service', 'setting', 'settings', 'setup', 'signin',
    'signup', 'signout', 'site', 'sitemap', 'sites', 'ssh', 'stage', 'staging',
'start', 'stat', 'static', 'stats', 'status', 'store', 'stores',
'subdomain', 'subscribe', 'support', 'system', 'tablet', 'talk', 'task',
    'tasks', 'template', 'templates', 'test', 'terms', 'terms_and_conditions',
'terms_of_service', 'termsandconditions', 'termsofservice', 'tests',
'theme', 'themes', 'tmp', 'todo', 'tools', 'unfollow', 'update', 'upload',
'upvote', 'url', 'usage', 'user', 'username', 'video', 'videos', 'web',
'webmail']
def create_account(username, email, password):
"""Creates a new user account.
    :param username: The new user's username
    :type username: str
    :param email: The new user's e-mail address
    :type email: str
    :param password: The new user's password, un-hashed
    :type password: str
:returns: The UID of the new user
:rtype: str or None
"""
username = username.lower()
email = email.lower()
try:
if check_username(username) and check_username_pattern(username) and \
check_email(email) and check_email_pattern(email):
# Get a new UUID for the user
uid = get_uuid()
user = {
'_id': uid,
'username': username.lower(),
'email': email.lower(),
'password': generate_password(password,
method='pbkdf2:sha256:2000',
salt_length=20),
'created': timestamp(),
'last_login': -1,
'active': False,
'banned': False,
'op': False,
'muted': False,
'about': "",
'score': 0,
'alerts_last_checked': -1,
# Set the TTL for a newly created user, this has to be Datetime
# object for MongoDB to recognise it. This is removed on
# activation.
'ttl': datetime.utcnow()
}
# Set all the tips for new users
for tip_name in k.VALID_TIP_NAMES:
user['tip_{}'.format(tip_name)] = True
# Insert the new user in to Mongo. If this fails a None will be
# returned
result = m.db.users.insert(user)
return uid if result else None
except DuplicateKeyError: # pragma: no cover
# Oh no something went wrong. Pass over it. A None will be returned.
pass
return None
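# Illustrative usage (added for clarity; assumes a live MongoDB connection):
#     uid = create_account('joe', 'joe@example.com', 's3cret')  # UID or None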
def check_username_pattern(username):
"""Check that username matches what we class as a username
:param username: The username to test the pattern of
:type username: str
:returns: True if successful match, False otherwise
:rtype: bool
"""
# Check the username is valid
return bool(USERNAME_RE.match(username.lower()))
def check_username(username):
"""Check for username availability
:param username: The username to check for existence
:type username: str
    :returns: True if the username does NOT exist, False otherwise
:rtype: bool
"""
return username not in RESERVED_NAMES and \
not bool(m.db.users.find_one({'username': username.lower()}, {}))
def check_email_pattern(email):
"""Checks that email matches what we class as an email address
:param email: The email to test the pattern of
:type email: str
:returns: True if successful match, False otherwise
:rtype: bool
"""
return bool(EMAIL_RE.match(email.lower()))
def check_email(email):
"""Check an e-mail addresses availability
:param email: The email to check for existence
:type email: str
:returns: True if the email does NOT exist, False otherwise
:rtype: bool
"""
return not bool(m.db.users.find_one({'email': email.lower()}, {}))
def user_exists(user_id):
"""Is there a user object with `user_id`?
"""
return bool(m.db.users.find_one({'_id': user_id}, {}))
def authenticate(username, password):
"""Authenticate a username/password combination.
"""
# Case-insensitive login
username = username.lower()
if '@' in username:
user = m.db.users.find_one({'email': username})
else:
user = m.db.users.find_one({'username': username})
# Check that we got a result and that the password matches the stored one
if user and check_password(user.get('password'), password):
# If it matched return the document
return user
# Oh no, something went wrong
return None
def signin(user_id):
"""Logs the user with uid in by adding the uid to the session.
"""
session['user_id'] = user_id
# update last login
m.db.users.update({'_id': user_id}, {'$set': {'last_login': timestamp()}})
def signout():
"""Removes the user id from the session.
"""
session.pop('user_id', None)
def activate(user_id, action=True):
"""Activates a user account and removes 'ttl' key from Mongo
"""
return m.db.users.update(
{'_id': user_id},
{'$set': {'active': action}, '$unset': {'ttl': None}}
).get('updatedExisting')
def ban(user_id, action=True):
"""Ban a user.
By passing False as action this will unban the user
"""
return m.db.users.update(
{'_id': user_id},
{'$set': {'banned': action}}
).get('updatedExisting')
def bite(user_id, action=True):
"""Bite a user (think spideman), makes them op
By passing False as action this will unbite the user
"""
return m.db.users.update(
{'_id': user_id},
{'$set': {'op': action}}
).get('updatedExisting')
def mute(user_id, action=True):
"""Mutes a user, this stops them from posting, commenting or following
users.
By passing False as action this will un-mute the user
"""
return m.db.users.update(
{'_id': user_id},
{'$set': {'muted': action}}
).get('updatedExisting')
def change_password(user_id, password):
"""Changes user with ``user_id``s password.
Checking of the old password MUST be done before you run this! This is a
an unsafe function. You will also need to apply sanity (length etc.) checks
outside this function.
"""
# Create the password hash from the plain-text password
password = generate_password(password,
method='pbkdf2:sha256:2000',
salt_length=20)
return m.db.users.update({'_id': user_id},
{'$set': {'password': password}})
def change_email(user_id, new_email):
"""Changes the user with ``user_id``'s e-mail address.
    Please ensure that it is a valid e-mail address outside of this function.
This function is unsafe and provides NO sanity checking.
"""
return m.db.users.update({'_id': user_id},
{'$set': {'email': new_email.lower()}})
def delete_account(user_id):
"""Will delete a users account.
This **REMOVES ALL** details, posts, replies, etc. Not votes though.
.. note: Ensure the user has authenticated this request. This is going to
be the most *expensive* task in Pjuu, be warned.
:param user_id: The `user_id` of the user to delete
:type user_id: str
"""
# Get the user object we will need this to remove the avatar
user = get_user(user_id)
# Delete the user from MongoDB
m.db.users.remove({'_id': user_id})
# If the user has an avatar remove it
if user.get('avatar'):
storage.delete(user.get('avatar'))
# Remove all posts a user has ever made. This includes all votes
# on the posts and all comments of the posts.
# This calls the backend function from posts to do the deed
posts_cursor = m.db.posts.find({'user_id': user_id}, {})
for post in posts_cursor:
delete_post(post.get('_id'))
# Remove all the following relationships from Redis
# Delete all references to followers of the user.
# This will remove the user from the other users following list
# TODO Replace with ZSCAN
follower_cursor = r.zrange(k.USER_FOLLOWERS.format(user_id), 0, -1)
for follower_id in follower_cursor:
# Clear the followers following list of the uid
r.zrem(k.USER_FOLLOWING.format(follower_id), user_id)
# Delete the followers list
r.delete(k.USER_FOLLOWERS.format(user_id))
# Delete all references to the users the user is following
# This will remove the user from the others users followers list
# TODO Replace with ZSCAN
followee_cursor = r.zrange(k.USER_FOLLOWING.format(user_id), 0, -1)
for followee_id in followee_cursor:
# Clear the followers list of people uid is following
r.zrem(k.USER_FOLLOWERS.format(followee_id), user_id)
# Delete the following list
r.delete(k.USER_FOLLOWING.format(user_id))
    # Delete the user's feed; this may have been added to during this process.
    # Probably not, but let's be on the safe side.
r.delete(k.USER_FEED.format(user_id))
# Delete the users alert list
# DO NOT DELETE ANY ALERTS AS THESE ARE GENERIC
r.delete(k.USER_ALERTS.format(user_id))
# All done. This code may need making SAFER in case there are issues
# elsewhere in the code base.
def dump_account(user_id):
"""Dump a users entire account; details, posts and comments to a dict.
This WILL dump everything about the user. There is SOME caveats to this.
It will not list all voters on posts or comments as this is just meta data
on the posts. It will also not show your PASSWORD HASH as this probably
isn't a danger factor but lets not test that.
Your followers and following lists will also not be shown as these IDs are
related to a user which IS NOT the user dumping the data.
    This will not list all comments underneath a post as this IS NOT the
    user's data either.
    At the moment this WILL just dump the account, posts and comments: all
    that you have not deleted.
.. note: This will need to become streaming or a background process one
day. This will be incredibly resource intensive.
"""
# Attempt to get the users account
user = m.db.users.find_one({'_id': user_id})
if user:
# We are going to remove the uid and the password hash as this may
# lead to some security issues
user['_id'] = '<UID>'
user['password'] = '<PASSWORD HASH>'
else:
# If there is no user then we will just stop this here. The account has
# gone, there is no data anyway
return None
# Place to store our posts
posts = []
# Mongo cursor for all of our posts
posts_cursor = m.db.posts.find({'user_id': user_id}).sort('created', -1)
for post in posts_cursor:
# Hide the uid from the post. The pid is okay to add as this is part of
# the URL anyway
post['user_id'] = '<UID>'
# Clear the user_id's from mentions
if post.get('mentions'):
for i in range(len(post['mentions'])):
post['mentions'][i]['user_id'] = '<UID>'
posts.append(post)
# Return the dict of the above, this will be turned in to JSON by the view
return {
'user': user,
'posts': posts,
}
|
piranna/pyfilesystem
|
refs/heads/master
|
fs/tests/test_importhook.py
|
12
|
import sys
import unittest
import marshal
import imp
import struct
from textwrap import dedent
from fs.expose.importhook import FSImportHook
from fs.tempfs import TempFS
from fs.zipfs import ZipFS
from six import b
class TestFSImportHook(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
for mph in list(sys.meta_path):
if isinstance(mph,FSImportHook):
sys.meta_path.remove(mph)
for ph in list(sys.path_hooks):
if issubclass(ph,FSImportHook):
                sys.path_hooks.remove(ph)
for (k,v) in sys.modules.items():
if k.startswith("fsih_"):
del sys.modules[k]
elif hasattr(v,"__loader__"):
if isinstance(v.__loader__,FSImportHook):
del sys.modules[k]
sys.path_importer_cache.clear()
def _init_modules(self,fs):
fs.setcontents("fsih_hello.py",b(dedent("""
message = 'hello world!'
""")))
fs.makedir("fsih_pkg")
fs.setcontents("fsih_pkg/__init__.py",b(dedent("""
a = 42
""")))
fs.setcontents("fsih_pkg/sub1.py",b(dedent("""
import fsih_pkg
from fsih_hello import message
a = fsih_pkg.a
""")))
fs.setcontents("fsih_pkg/sub2.pyc",self._getpyc(b(dedent("""
import fsih_pkg
from fsih_hello import message
a = fsih_pkg.a * 2
"""))))
def _getpyc(self,src):
"""Get the .pyc contents to match th given .py source code."""
code = imp.get_magic() + struct.pack("<i",0)
code += marshal.dumps(compile(src,__file__,"exec"))
return code
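    # Note (added for clarity): this mirrors the CPython 2 .pyc layout: a
    # 4-byte magic number, a 4-byte source mtime (zeroed here), then the
    # marshalled code object.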
def test_loader_methods(self):
t = TempFS()
self._init_modules(t)
ih = FSImportHook(t)
sys.meta_path.append(ih)
try:
self.assertEquals(ih.find_module("fsih_hello"),ih)
self.assertEquals(ih.find_module("fsih_helo"),None)
self.assertEquals(ih.find_module("fsih_pkg"),ih)
self.assertEquals(ih.find_module("fsih_pkg.sub1"),ih)
self.assertEquals(ih.find_module("fsih_pkg.sub2"),ih)
self.assertEquals(ih.find_module("fsih_pkg.sub3"),None)
m = ih.load_module("fsih_hello")
self.assertEquals(m.message,"hello world!")
self.assertRaises(ImportError,ih.load_module,"fsih_helo")
ih.load_module("fsih_pkg")
m = ih.load_module("fsih_pkg.sub1")
self.assertEquals(m.message,"hello world!")
self.assertEquals(m.a,42)
m = ih.load_module("fsih_pkg.sub2")
self.assertEquals(m.message,"hello world!")
self.assertEquals(m.a,42 * 2)
self.assertRaises(ImportError,ih.load_module,"fsih_pkg.sub3")
finally:
sys.meta_path.remove(ih)
t.close()
def _check_imports_are_working(self):
try:
import fsih_hello
self.assertEquals(fsih_hello.message,"hello world!")
try:
import fsih_helo
except ImportError:
pass
else:
assert False, "ImportError not raised"
import fsih_pkg
import fsih_pkg.sub1
self.assertEquals(fsih_pkg.sub1.message,"hello world!")
self.assertEquals(fsih_pkg.sub1.a,42)
import fsih_pkg.sub2
self.assertEquals(fsih_pkg.sub2.message,"hello world!")
self.assertEquals(fsih_pkg.sub2.a,42 * 2)
try:
import fsih_pkg.sub3
except ImportError:
pass
else:
assert False, "ImportError not raised"
finally:
for k in sys.modules.keys():
if k.startswith("fsih_"):
del sys.modules[k]
def test_importer_on_meta_path(self):
t = TempFS()
self._init_modules(t)
ih = FSImportHook(t)
sys.meta_path.append(ih)
try:
self._check_imports_are_working()
finally:
sys.meta_path.remove(ih)
t.close()
def test_url_on_sys_path(self):
t = TempFS()
zpath = t.getsyspath("modules.zip")
z = ZipFS(zpath,"w")
self._init_modules(z)
z.close()
z = ZipFS(zpath,"r")
assert z.isfile("fsih_hello.py")
z.close()
sys.path.append("zip://" + zpath)
FSImportHook.install()
try:
self._check_imports_are_working()
finally:
sys.path_hooks.remove(FSImportHook)
sys.path.pop()
t.close()
|
antoinecarme/pyaf
|
refs/heads/master
|
tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_Lag1Trend_BestCycle_ARX.py
|
1
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model(['Quantization'], ['Lag1Trend'], ['BestCycle'], ['ARX'])
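# Illustrative reading (assumption, not from the original file): the four
# lists pick one transformation, trend, cycle and AR component for the
# generated ozone test model.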
|
UT-CHG/PolyADCIRC
|
refs/heads/master
|
examples/run_framework/poly_walls/concatenate_many.py
|
1
|
#! /usr/bin/env python
# import necessary modules
import polyadcirc.run_framework.random_wall as rmw
base_dir = '/h1/lgraham/workspace'
grid_dir = base_dir + '/ADCIRC_landuse/Inlet/inputs/poly_walls'
save_dir = base_dir + '/ADCIRC_landuse/Inlet/runs/poly_wall'
basis_dir = base_dir +'/ADCIRC_landuse/Inlet/landuse_basis/gap/beach_walls_2lands'
# set up saving
save_file = 'py_save_file'
main_run, domain, mann_pts, wall_pts, points = rmw.loadmat(save_file+'0',
base_dir, grid_dir, save_dir+'_0', basis_dir)
for i in xrange(1, 7):
save_file2 = save_file+str(i)
save_dir2 = save_dir+'_'+str(i)
other_run, domain, mann_pts2, wall_pts2, points2 = rmw.loadmat(save_file2,
base_dir, grid_dir, save_dir2, basis_dir)
run, points = main_run.concatenate(other_run, points, points2)
mdat = dict()
mdat['points'] = points
main_run.update_mdict(mdat)
main_run.save(mdat, 'poly7_file')
|
johnboyington/homework
|
refs/heads/master
|
ne737/final_project/extract.py
|
1
|
# ne737 final project
import matplotlib.pyplot as plt
import numpy as np
def Extract(name):
F = open('data/data0/{}.Spe'.format(name), 'r').readlines()
for ii in range(len(F)):
if '0 1023' in F[ii]:
line = np.array(F[ii + 1:ii + 1025])
data = np.loadtxt(line)
return data
return
#creates lists that contain strings of all of the names of each detector and position
fd1n = []
for ii in (range(5)):
for jj in (range(4)):
fd1n.append('d1r{}c{}'.format(ii+1, jj+1))
fd2n = []
for ii in (range(5)):
for jj in (range(4)):
fd2n.append('d2r{}c{}'.format(ii+1, jj+1))
fd3n = []
for ii in (range(5)):
for jj in (range(4)):
fd3n.append('d3r{}c{}'.format(ii+1, jj+1))
ed1n = []
for ii in [1, 3, 5]:
for jj in (range(4)):
ed1n.append('D1ER{}C{}'.format(ii, jj+1))
ed2n = []
for ii in [1, 3, 5]:
for jj in (range(4)):
ed2n.append('D2ER{}C{}'.format(ii, jj+1))
ed3n = []
for ii in [1, 3, 5]:
for jj in (range(4)):
ed3n.append('D3ER{}C{}'.format(ii, jj+1))
#extracts the corresponding data from the file and then sums all of the counts within that file
fd1sum = []
for nn in fd1n:
fd1sum.append(np.sum(Extract(nn)))
fd1sum = np.array(fd1sum).reshape(5,4)
fd2sum = []
for nn in fd2n:
fd2sum.append(np.sum(Extract(nn)))
fd2sum = np.array(fd2sum).reshape(5,4)
fd3sum = []
for nn in fd3n:
fd3sum.append(np.sum(Extract(nn)))
fd3sum = np.array(fd3sum).reshape(5,4)
ed1sum = []
for nn in ed1n:
ed1sum.append(np.sum(Extract(nn)))
ed1sum = np.array(ed1sum).reshape(3,4)
ed2sum = []
for nn in ed2n:
ed2sum.append(np.sum(Extract(nn)))
ed2sum = np.array(ed2sum).reshape(3,4)
ed3sum = []
for nn in ed3n:
ed3sum.append(np.sum(Extract(nn)))
ed3sum = np.array(ed3sum).reshape(3,4)
print fd1sum
print fd2sum
print fd3sum
print ed1sum
print ed2sum
print ed3sum
#print np.sum(Extract('d1r4c3'))
|
klunwebale/odoo
|
refs/heads/8.0
|
openerp/netsvc.py
|
220
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import logging.handlers
import os
import platform
import pprint
import release
import sys
import threading
import psycopg2
import openerp
import sql_db
import tools
_logger = logging.getLogger(__name__)
def log(logger, level, prefix, msg, depth=None):
indent=''
indent_after=' '*len(prefix)
for line in (prefix + pprint.pformat(msg, depth=depth)).split('\n'):
logger.log(level, indent+line)
indent=indent_after
def LocalService(name):
"""
The openerp.netsvc.LocalService() function is deprecated. It still works
in two cases: workflows and reports. For workflows, instead of using
LocalService('workflow'), openerp.workflow should be used (better yet,
methods on openerp.osv.orm.Model should be used). For reports,
openerp.report.render_report() should be used (methods on the Model should
be provided too in the future).
"""
assert openerp.conf.deprecation.allow_local_service
    _logger.warning("LocalService() is deprecated since March 2013 (it was called with '%s')." % name)
if name == 'workflow':
return openerp.workflow
if name.startswith('report.'):
report = openerp.report.interface.report_int._reports.get(name)
if report:
return report
else:
dbname = getattr(threading.currentThread(), 'dbname', None)
if dbname:
registry = openerp.modules.registry.RegistryManager.get(dbname)
with registry.cursor() as cr:
return registry['ir.actions.report.xml']._lookup_report(cr, name[len('report.'):])
path_prefix = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))
class PostgreSQLHandler(logging.Handler):
""" PostgreSQL Loggin Handler will store logs in the database, by default
the current database, can be set using --log-db=DBNAME
"""
def emit(self, record):
ct = threading.current_thread()
ct_db = getattr(ct, 'dbname', None)
dbname = tools.config['log_db'] if tools.config['log_db'] and tools.config['log_db'] != '%d' else ct_db
if not dbname:
return
with tools.ignore(Exception), tools.mute_logger('openerp.sql_db'), sql_db.db_connect(dbname, allow_uri=True).cursor() as cr:
cr.autocommit(True)
msg = tools.ustr(record.msg)
if record.args:
msg = msg % record.args
traceback = getattr(record, 'exc_text', '')
if traceback:
msg = "%s\n%s" % (msg, traceback)
# we do not use record.levelname because it may have been changed by ColoredFormatter.
levelname = logging.getLevelName(record.levelno)
val = ('server', ct_db, record.name, levelname, msg, record.pathname[len(path_prefix)+1:], record.lineno, record.funcName)
cr.execute("""
INSERT INTO ir_logging(create_date, type, dbname, name, level, message, path, line, func)
VALUES (NOW() at time zone 'UTC', %s, %s, %s, %s, %s, %s, %s, %s)
""", val)
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, _NOTHING, DEFAULT = range(10)
# The background is set with 40 plus the number of the color, and the foreground with 30.
# These are the escape sequences needed to get colored output.
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLOR_PATTERN = "%s%s%%s%s" % (COLOR_SEQ, COLOR_SEQ, RESET_SEQ)
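# Illustrative expansion (added for clarity, not in the original source):
#     COLOR_PATTERN % (30 + GREEN, 40 + DEFAULT, 'INFO')
# yields '\033[1;32m\033[1;49mINFO\033[0m' (bold green on default background).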
LEVEL_COLOR_MAPPING = {
logging.DEBUG: (BLUE, DEFAULT),
logging.INFO: (GREEN, DEFAULT),
logging.WARNING: (YELLOW, DEFAULT),
logging.ERROR: (RED, DEFAULT),
logging.CRITICAL: (WHITE, RED),
}
class DBFormatter(logging.Formatter):
def format(self, record):
record.pid = os.getpid()
record.dbname = getattr(threading.currentThread(), 'dbname', '?')
return logging.Formatter.format(self, record)
class ColoredFormatter(DBFormatter):
def format(self, record):
fg_color, bg_color = LEVEL_COLOR_MAPPING.get(record.levelno, (GREEN, DEFAULT))
record.levelname = COLOR_PATTERN % (30 + fg_color, 40 + bg_color, record.levelname)
return DBFormatter.format(self, record)
_logger_init = False
def init_logger():
global _logger_init
if _logger_init:
return
_logger_init = True
logging.addLevelName(25, "INFO")
from tools.translate import resetlocale
resetlocale()
# create a format for log messages and dates
format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s'
if tools.config['syslog']:
# SysLog Handler
if os.name == 'nt':
handler = logging.handlers.NTEventLogHandler("%s %s" % (release.description, release.version))
elif platform.system() == 'Darwin':
handler = logging.handlers.SysLogHandler('/var/run/log')
else:
handler = logging.handlers.SysLogHandler('/dev/log')
format = '%s %s' % (release.description, release.version) \
+ ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'
elif tools.config['logfile']:
# LogFile Handler
logf = tools.config['logfile']
try:
# We check we have the right location for the log files
dirname = os.path.dirname(logf)
if dirname and not os.path.isdir(dirname):
os.makedirs(dirname)
if tools.config['logrotate'] is not False:
handler = logging.handlers.TimedRotatingFileHandler(filename=logf, when='D', interval=1, backupCount=30)
elif os.name == 'posix':
handler = logging.handlers.WatchedFileHandler(logf)
else:
handler = logging.FileHandler(logf)
except Exception:
sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")
handler = logging.StreamHandler(sys.stdout)
else:
# Normal Handler on standard output
handler = logging.StreamHandler(sys.stdout)
# Check that handler.stream has a fileno() method: when running OpenERP
# behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
# which has no fileno() method. (mod_wsgi.Log is what is being bound to
# sys.stderr when the logging.StreamHandler is being constructed above.)
def is_a_tty(stream):
return hasattr(stream, 'fileno') and os.isatty(stream.fileno())
if os.name == 'posix' and isinstance(handler, logging.StreamHandler) and is_a_tty(handler.stream):
formatter = ColoredFormatter(format)
else:
formatter = DBFormatter(format)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
if tools.config['log_db']:
db_levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
postgresqlHandler = PostgreSQLHandler()
postgresqlHandler.setLevel(int(db_levels.get(tools.config['log_db_level'], tools.config['log_db_level'])))
logging.getLogger().addHandler(postgresqlHandler)
# Configure loggers levels
pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])
logconfig = tools.config['log_handler']
logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
for logconfig_item in logging_configurations:
loggername, level = logconfig_item.split(':')
level = getattr(logging, level, logging.INFO)
logger = logging.getLogger(loggername)
logger.setLevel(level)
for logconfig_item in logging_configurations:
_logger.debug('logger level set: "%s"', logconfig_item)
DEFAULT_LOG_CONFIGURATION = [
'openerp.workflow.workitem:WARNING',
'openerp.http.rpc.request:INFO',
'openerp.http.rpc.response:INFO',
'openerp.addons.web.http:INFO',
'openerp.sql_db:INFO',
':INFO',
]
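# Each entry is of the form 'logger:LEVEL'; the empty logger name in ':INFO'
# configures the root logger.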
PSEUDOCONFIG_MAPPER = {
'debug_rpc_answer': ['openerp:DEBUG','openerp.http.rpc.request:DEBUG', 'openerp.http.rpc.response:DEBUG'],
'debug_rpc': ['openerp:DEBUG','openerp.http.rpc.request:DEBUG'],
'debug': ['openerp:DEBUG'],
'debug_sql': ['openerp.sql_db:DEBUG'],
'info': [],
'warn': ['openerp:WARNING', 'werkzeug:WARNING'],
'error': ['openerp:ERROR', 'werkzeug:ERROR'],
'critical': ['openerp:CRITICAL', 'werkzeug:CRITICAL'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
iWuzHere/node-gyp
|
refs/heads/master
|
gyp/pylib/gyp/generator/gypd.py
|
1824
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files, such as "included_files" and "*_excluded", will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
'CONFIGURATION_NAME',
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'LIB_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
'SHARED_LIB_DIR',
'SHARED_LIB_PREFIX',
'SHARED_LIB_SUFFIX',
'STATIC_LIB_PREFIX',
'STATIC_LIB_SUFFIX',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
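# e.g. after this loop generator_default_variables['PRODUCT_DIR'] is the
# literal string '<(PRODUCT_DIR)', so the variable is emitted as a reference
# rather than expanded.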
def GenerateOutput(target_list, target_dicts, data, params):
output_files = {}
for qualified_target in target_list:
[input_file, target] = \
gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
if input_file[-4:] != '.gyp':
continue
input_file_stem = input_file[:-4]
output_file = input_file_stem + params['options'].suffix + '.gypd'
if not output_file in output_files:
output_files[output_file] = input_file
for output_file, input_file in output_files.iteritems():
output = open(output_file, 'w')
pprint.pprint(data[input_file], output)
output.close()
|
hammerhead-dev/android_kernel_lge_hammerhead
|
refs/heads/cm-13.0
|
scripts/rt-tester/rt-tester.py
|
11005
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
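# Illustrative call (added for clarity): analyse("4", ["M", "eq", 4], "0")
# inspects digit 0 of the mutex status value "4" and returns 1, since 4 == 4.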
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
GreenJoey/My-Simple-Programs
|
refs/heads/master
|
python/scrapy/OfficialSpider/tutorial/tutorial/spiders/__init__.py
|
2415
|
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
|
drtuxwang/system-config
|
refs/heads/mac-fix
|
bin/extfbfl.py
|
1
|
#!/usr/bin/env python3
"""
Extract Facebook friends list from saved HTML file.
"""
import argparse
import glob
import os
import re
import signal
import sys
import time
from typing import List
class Options:
"""
Options class
"""
def __init__(self) -> None:
self._args: argparse.Namespace = None
self.parse(sys.argv)
def get_file(self) -> str:
"""
Return html file.
"""
return self._args.file[0]
def _parse_args(self, args: List[str]) -> None:
parser = argparse.ArgumentParser(
description='Extract Facebook friends list from saved HTML file.',
)
parser.add_argument('file', nargs=1, help='HTML file.')
self._args = parser.parse_args(args)
def parse(self, args: List[str]) -> None:
"""
Parse arguments
"""
self._parse_args(args[1:])
class Profile:
"""
Profile class
"""
def __init__(self, name: str, url: str) -> None:
self._name = name
self._url = url
def get_name(self) -> str:
"""
Return name.
"""
return self._name
def get_url(self) -> str:
"""
Return url.
"""
return self._url
class Main:
"""
Main class
"""
def __init__(self) -> None:
try:
self.config()
sys.exit(self.run())
except (EOFError, KeyboardInterrupt):
sys.exit(114)
except SystemExit as exception:
sys.exit(exception)
@staticmethod
def config() -> None:
"""
Configure program
"""
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if os.name == 'nt':
argv = []
for arg in sys.argv:
files = glob.glob(arg) # Fixes Windows globbing bug
if files:
argv.extend(files)
else:
argv.append(arg)
sys.argv = argv
def _read_html(self, file: str) -> None:
isjunk = re.compile('(&|[?])ref=pb$|[?&]fref=.*|&.*')
try:
with open(file, errors='replace') as ifile:
for line in ifile:
for block in line.split('href="'):
if '://www.facebook.com/' in block:
if ('hc_location=friends_tab' in
block.split("'")[0]):
url = isjunk.sub('', block.split(
"'")[0]).replace(
'?hc_location=friend_browser', '')
uid = int(block.split('user.php?id=')[1].split(
'"')[0].split('&')[0])
name = block.split('>')[1].split('<')[0]
self._profiles[uid] = Profile(name, url)
except OSError as exception:
raise SystemExit(
sys.argv[0] + ': Cannot read "' + file + '" HTML file.'
) from exception
def run(self) -> int:
"""
Start program
"""
options = Options()
self._profiles: dict = {}
self._read_html(options.get_file())
file = time.strftime('facebook-%Y%m%d.csv', time.localtime())
print(
'Writing "' + file + '" with',
len(self._profiles.keys()),
"friends..."
)
try:
with open(file, 'w', newline='\n') as ofile:
print("uid,name,profile_url", file=ofile)
for uid, profile in sorted(self._profiles.items()):
if uid < 0:
print("???", end='', file=ofile)
else:
print(uid, end='', file=ofile)
if ' ' in profile.get_name():
print(',"{0:s}",{1:s}'.format(
profile.get_name(), profile.get_url()), file=ofile)
else:
print(",{0:s},{1:s}".format(
profile.get_name(), profile.get_url()), file=ofile)
except OSError as exception:
raise SystemExit(
sys.argv[0] + ': Cannot create "' + file + '" CSV file.'
) from exception
return 0
if __name__ == '__main__':
if '--pydoc' in sys.argv:
help(__name__)
else:
Main()
|
pyjs/pyjs
|
refs/heads/master
|
examples/misc/flaskexamples/flaskcors/Flask_JSONRPC_CORS.py
|
6
|
import pyjd # dummy in pyjs
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.TextArea import TextArea
from pyjamas.ui.Label import Label
from pyjamas.ui.Button import Button
from pyjamas.ui.HTML import HTML
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.ListBox import ListBox
from pyjamas.JSONService import JSONProxy
class JSONRPCExample:
def onModuleLoad(self):
self.TEXT_WAITING = "Waiting for response..."
self.TEXT_ERROR = "Server Error"
self.METHOD_ECHO = "Echo"
self.METHOD_REVERSE = "Reverse"
self.METHOD_UPPERCASE = "UPPERCASE"
self.METHOD_LOWERCASE = "lowercase"
self.METHOD_NONEXISTANT = "Non existant"
self.methods = [self.METHOD_ECHO, self.METHOD_REVERSE,
self.METHOD_UPPERCASE, self.METHOD_LOWERCASE,
self.METHOD_NONEXISTANT]
self.remote_php = EchoServicePHP()
self.remote_py = [
EchoServicePython(),
EchoServicePython(server="flask"),
]
self.status = Label()
self.text_area = TextArea()
self.text_area.setText("""{'Test'} [\"String\"]
\tTest Tab
Test Newline\n
after newline
""" + r"""Literal String:
{'Test'} [\"String\"]
""")
self.text_area.setCharacterWidth(80)
self.text_area.setVisibleLines(8)
self.method_list = ListBox()
self.method_list.setName("hello")
self.method_list.setVisibleItemCount(1)
for method in self.methods:
self.method_list.addItem(method)
self.method_list.setSelectedIndex(0)
method_panel = HorizontalPanel()
method_panel.add(HTML("Remote string method to call: "))
method_panel.add(self.method_list)
method_panel.setSpacing(8)
self.button_php = Button("Send to PHP Service", self)
python_buttons = [
Button("Send to Python Service", self),
Button("Send to Flask view function (localhost:5000)", self),
]
buttons = HorizontalPanel()
buttons.add(self.button_php)
self.python_buttons = {}
for i in range(len(python_buttons)):
buttons.add(python_buttons[i])
self.python_buttons[python_buttons[i]] = self.remote_py[i]
buttons.setSpacing(8)
info = """<h2>JSON-RPC Example</h2>
<p>This example demonstrates the calling of server services with
<a href="http://json-rpc.org/">JSON-RPC</a>.
</p>
<p>Enter some text below, and press a button to send the text
to an Echo service on your server. An echo service simply sends the exact same text back that it receives.
</p>"""
panel = VerticalPanel()
panel.add(HTML(info))
panel.add(self.text_area)
panel.add(method_panel)
panel.add(buttons)
panel.add(self.status)
RootPanel().add(panel)
def onClick(self, sender):
method = self.methods[self.method_list.getSelectedIndex()]
text = self.text_area.getText()
# demonstrate proxy & callMethod()
if sender == self.button_php:
if method == self.METHOD_ECHO:
id = self.remote_php.echo(text, self)
elif method == self.METHOD_REVERSE:
id = self.remote_php.callMethod("reverse", [text], self)
elif method == self.METHOD_UPPERCASE:
id = self.remote_php.uppercase(text, self)
elif method == self.METHOD_LOWERCASE:
id = self.remote_php.lowercase(self, msg=text)
elif method == self.METHOD_NONEXISTANT:
id = self.remote_php.nonexistant(text, self)
elif(sender in self.python_buttons):
remote_py = self.python_buttons[sender]
if method == self.METHOD_ECHO:
id = remote_py.echo(text, self)
elif method == self.METHOD_REVERSE:
id = remote_py.reverse(text, self)
elif method == self.METHOD_UPPERCASE:
id = remote_py.uppercase(text, self)
elif method == self.METHOD_LOWERCASE:
id = remote_py.lowercase(text, self)
elif method == self.METHOD_NONEXISTANT:
id = remote_py.nonexistant(text, self)
else:
self.status.setText(self.TEXT_WAITING + " unrecognized method")
# what should really be done here?
pass
def onRemoteResponse(self, response, request_info):
self.status.setText(response)
def onRemoteError(self, code, errobj, request_info):
# onRemoteError gets the HTTP error code or 0 and
# errobj is an jsonrpc 2.0 error dict:
# {
# 'code': jsonrpc-error-code (integer) ,
# 'message': jsonrpc-error-message (string) ,
# 'data' : extra-error-data
# }
message = errobj['message']
if code != 0:
self.status.setText("HTTP error %d: %s" %
(code, message))
else:
code = errobj['code']
self.status.setText("JSONRPC Error %s: %s" %
(code, message))
class EchoServicePHP(JSONProxy):
def __init__(self):
JSONProxy.__init__(self, "services/EchoService.php", ["echo", "reverse", "uppercase", "lowercase", "nonexistant"])
class EchoServicePython(JSONProxy):
def __init__(self, server="mod_python"):
methods = ["echo", "reverse", "uppercase", "lowercase", "nonexistant"]
if server == "mod_python":
JSONProxy.__init__(self, "services/EchoService.py", methods)
elif server == "flask":
JSONProxy.__init__(
self, "http://localhost:5000/json_echo/", methods)
if __name__ == '__main__':
# for pyjd, set up a web server and load the HTML from there:
# this convinces the browser engine that the AJAX will be loaded
# from the same URI base as the URL, it's all a bit messy...
pyjd.setup("http://127.0.0.1:8000/public/JSONRPCExample.html")
app = JSONRPCExample()
app.onModuleLoad()
pyjd.run()
|
gangadharkadam/smrterp
|
refs/heads/develop
|
erpnext/hr/report/monthly_salary_register/__init__.py
|
12133432
| |
otron/zenodo
|
refs/heads/master
|
zenodo/modules/quotas/upgrades/__init__.py
|
12133432
| |
TiagoBras/audio-clip-extractor
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
kayako/python-api-library
|
refs/heads/master
|
src/kayako/tests/core/__init__.py
|
12133432
| |
gangadhar-kadam/verve_erp
|
refs/heads/v5.0
|
erpnext/hr/report/monthly_salary_register/__init__.py
|
12133432
| |
sunnyzwh/readthedocs.org
|
refs/heads/master
|
readthedocs/builds/models.py
|
15
|
import logging
import re
import os.path
from shutil import rmtree
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext
from guardian.shortcuts import assign
from taggit.managers import TaggableManager
from readthedocs.privacy.loader import (VersionManager, RelatedProjectManager,
RelatedBuildManager)
from readthedocs.projects.models import Project
from readthedocs.projects.constants import (PRIVACY_CHOICES, REPO_TYPE_GIT,
REPO_TYPE_HG, GITHUB_URL,
GITHUB_REGEXS, BITBUCKET_URL,
BITBUCKET_REGEXS)
from .constants import (BUILD_STATE, BUILD_TYPES, VERSION_TYPES,
LATEST, NON_REPOSITORY_VERSIONS, STABLE,
BUILD_STATE_FINISHED)
from .version_slug import VersionSlugField
DEFAULT_VERSION_PRIVACY_LEVEL = getattr(settings, 'DEFAULT_VERSION_PRIVACY_LEVEL', 'public')
log = logging.getLogger(__name__)
class Version(models.Model):
"""
Attributes
----------
``identifier``
    The identifier is the ID of the revision this version is for. This
    might be the revision number (e.g. in SVN) or the commit hash (e.g. in
    Git). If this version is pointing to a branch, then ``identifier``
    will contain the branch name.
``verbose_name``
This is the actual name that we got for the commit stored in
``identifier``. This might be the tag or branch name like ``"v1.0.4"``.
However this might also hold special version names like ``"latest"``
and ``"stable"``.
``slug``
The slug is the slugified version of ``verbose_name`` that can be used
in the URL to identify this version in a project. It's also used in the
filesystem to determine how the paths for this version are called. It
must not be used for any other identifying purposes.
"""
project = models.ForeignKey(Project, verbose_name=_('Project'),
related_name='versions')
type = models.CharField(
_('Type'), max_length=20,
choices=VERSION_TYPES, default='unknown',
)
# used by the vcs backend
identifier = models.CharField(_('Identifier'), max_length=255)
verbose_name = models.CharField(_('Verbose Name'), max_length=255)
slug = VersionSlugField(_('Slug'), max_length=255,
populate_from='verbose_name')
supported = models.BooleanField(_('Supported'), default=True)
active = models.BooleanField(_('Active'), default=False)
built = models.BooleanField(_('Built'), default=False)
uploaded = models.BooleanField(_('Uploaded'), default=False)
privacy_level = models.CharField(
_('Privacy Level'), max_length=20, choices=PRIVACY_CHOICES,
default=DEFAULT_VERSION_PRIVACY_LEVEL, help_text=_("Level of privacy for this Version.")
)
tags = TaggableManager(blank=True)
machine = models.BooleanField(_('Machine Created'), default=False)
objects = VersionManager()
class Meta:
unique_together = [('project', 'slug')]
ordering = ['-verbose_name']
permissions = (
# Translators: Permission around whether a user can view the
# version
('view_version', _('View Version')),
)
def __unicode__(self):
return ugettext(u"Version %(version)s of %(project)s (%(pk)s)" % {
'version': self.verbose_name,
'project': self.project,
'pk': self.pk
})
@property
def commit_name(self):
"""Return the branch name, the tag name or the revision identifier."""
if self.type == 'branch':
return self.identifier
if self.verbose_name in NON_REPOSITORY_VERSIONS:
return self.identifier
return self.verbose_name
def get_absolute_url(self):
if not self.built and not self.uploaded:
return reverse('project_version_detail', kwargs={
'project_slug': self.project.slug,
'version_slug': self.slug,
})
return self.project.get_docs_url(version_slug=self.slug)
def save(self, *args, **kwargs):
"""
Add permissions to the Version for all owners on save.
"""
obj = super(Version, self).save(*args, **kwargs)
for owner in self.project.users.all():
assign('view_version', owner, self)
self.project.sync_supported_versions()
return obj
@property
def remote_slug(self):
if self.slug == LATEST:
if self.project.default_branch:
return self.project.default_branch
else:
return self.project.vcs_repo().fallback_branch
else:
return self.slug
@property
def identifier_friendly(self):
'''Return display friendly identifier'''
re_sha = re.compile(r'^[0-9a-f]{40}$', re.I)
if re_sha.match(str(self.identifier)):
return self.identifier[:8]
return self.identifier
def get_subdomain_url(self):
use_subdomain = getattr(settings, 'USE_SUBDOMAIN', False)
if use_subdomain:
return "/%s/%s/" % (
self.project.language,
self.slug,
)
else:
return reverse('docs_detail', kwargs={
'project_slug': self.project.slug,
'lang_slug': self.project.language,
'version_slug': self.slug,
'filename': ''
})
def get_subproject_url(self):
return "/projects/%s/%s/%s/" % (
self.project.slug,
self.project.language,
self.slug,
)
def get_downloads(self, pretty=False):
project = self.project
data = {}
if pretty:
if project.has_pdf(self.slug):
data['PDF'] = project.get_production_media_url('pdf', self.slug)
if project.has_htmlzip(self.slug):
data['HTML'] = project.get_production_media_url('htmlzip', self.slug)
if project.has_epub(self.slug):
data['Epub'] = project.get_production_media_url('epub', self.slug)
else:
if project.has_pdf(self.slug):
data['pdf'] = project.get_production_media_url('pdf', self.slug)
if project.has_htmlzip(self.slug):
data['htmlzip'] = project.get_production_media_url('htmlzip', self.slug)
if project.has_epub(self.slug):
data['epub'] = project.get_production_media_url('epub', self.slug)
return data
def get_conf_py_path(self):
conf_py_path = self.project.conf_file(self.slug)
conf_py_path = conf_py_path.replace(
self.project.checkout_path(self.slug), '')
return conf_py_path.replace('conf.py', '')
def get_build_path(self):
'''Return version build path if path exists, otherwise `None`'''
path = self.project.checkout_path(version=self.slug)
if os.path.exists(path):
return path
return None
def clean_build_path(self):
'''Clean build path for project version
Ensure build path is clean for project version. Used to ensure stale
build checkouts for each project version are removed.
'''
try:
path = self.get_build_path()
if path is not None:
log.debug('Removing build path {0} for {1}'.format(
path, self))
rmtree(path)
except OSError:
log.error('Build path cleanup failed', exc_info=True)
def get_vcs_slug(self):
slug = None
if self.slug == LATEST:
if self.project.default_branch:
slug = self.project.default_branch
else:
slug = self.project.vcs_repo().fallback_branch
elif self.slug == STABLE:
return self.identifier
else:
slug = self.slug
# https://github.com/rtfd/readthedocs.org/issues/561
# version identifiers with / characters in branch name need to un-slugify
# the branch name for remote links to work
if slug.replace('-', '/') in self.identifier:
slug = slug.replace('-', '/')
return slug
def get_github_url(self, docroot, filename, source_suffix='.rst', action='view'):
repo_url = self.project.repo
if 'github' not in repo_url:
return ''
if not docroot:
return ''
else:
if docroot[0] != '/':
docroot = "/%s" % docroot
if docroot[-1] != '/':
docroot = "%s/" % docroot
if action == 'view':
action_string = 'blob'
elif action == 'edit':
action_string = 'edit'
for regex in GITHUB_REGEXS:
match = regex.search(repo_url)
if match:
user, repo = match.groups()
break
else:
return ''
repo = repo.rstrip('/')
return GITHUB_URL.format(
user=user,
repo=repo,
version=self.remote_slug,
docroot=docroot,
path=filename,
source_suffix=source_suffix,
action=action_string,
)
def get_bitbucket_url(self, docroot, filename, source_suffix='.rst'):
repo_url = self.project.repo
if 'bitbucket' not in repo_url:
return ''
if not docroot:
return ''
for regex in BITBUCKET_REGEXS:
match = regex.search(repo_url)
if match:
user, repo = match.groups()
break
else:
return ''
repo = repo.rstrip('/')
return BITBUCKET_URL.format(
user=user,
repo=repo,
version=self.remote_slug,
docroot=docroot,
path=filename,
source_suffix=source_suffix,
)
class VersionAlias(models.Model):
project = models.ForeignKey(Project, verbose_name=_('Project'),
related_name='aliases')
from_slug = models.CharField(_('From slug'), max_length=255, default='')
to_slug = models.CharField(_('To slug'), max_length=255, default='',
blank=True)
largest = models.BooleanField(_('Largest'), default=False)
def __unicode__(self):
return ugettext(u"Alias for %(project)s: %(from)s -> %(to)s" % {
'project': self.project,
'from': self.from_slug,
'to': self.to_slug,
})
class Build(models.Model):
project = models.ForeignKey(Project, verbose_name=_('Project'),
related_name='builds')
version = models.ForeignKey(Version, verbose_name=_('Version'), null=True,
related_name='builds')
type = models.CharField(_('Type'), max_length=55, choices=BUILD_TYPES,
default='html')
state = models.CharField(_('State'), max_length=55, choices=BUILD_STATE,
default='finished')
date = models.DateTimeField(_('Date'), auto_now_add=True)
success = models.BooleanField(_('Success'), default=True)
setup = models.TextField(_('Setup'), null=True, blank=True)
setup_error = models.TextField(_('Setup error'), null=True, blank=True)
output = models.TextField(_('Output'), default='', blank=True)
error = models.TextField(_('Error'), default='', blank=True)
exit_code = models.IntegerField(_('Exit code'), null=True, blank=True)
commit = models.CharField(_('Commit'), max_length=255, null=True, blank=True)
length = models.IntegerField(_('Build Length'), null=True, blank=True)
builder = models.CharField(_('Builder'), max_length=255, null=True, blank=True)
# Manager
objects = RelatedProjectManager()
class Meta:
ordering = ['-date']
get_latest_by = 'date'
index_together = [
['version', 'state', 'type']
]
def __unicode__(self):
return ugettext(u"Build %(project)s for %(usernames)s (%(pk)s)" % {
'project': self.project,
'usernames': ' '.join(self.project.users.all()
.values_list('username', flat=True)),
'pk': self.pk,
})
@models.permalink
def get_absolute_url(self):
return ('builds_detail', [self.project.slug, self.pk])
@property
def finished(self):
'''Return if build has a finished state'''
return self.state == BUILD_STATE_FINISHED
class BuildCommandResultMixin(object):
'''Mixin for common command result methods/properties
    Shared methods between the database model :py:class:`BuildCommandResult`
    and non-model representations of build command results from the API
'''
@property
def successful(self):
'''Did the command exit with a successful exit code'''
return self.exit_code == 0
@property
def failed(self):
'''Did the command exit with a failing exit code
Helper for inverse of :py:meth:`successful`'''
return not self.successful
class BuildCommandResult(BuildCommandResultMixin, models.Model):
build = models.ForeignKey(Build, verbose_name=_('Build'),
related_name='commands')
command = models.TextField(_('Command'))
description = models.TextField(_('Description'), blank=True)
output = models.TextField(_('Command output'), blank=True)
exit_code = models.IntegerField(_('Command exit code'))
start_time = models.DateTimeField(_('Start time'))
end_time = models.DateTimeField(_('End time'))
class Meta:
ordering = ['start_time']
get_latest_by = 'start_time'
objects = RelatedBuildManager()
def __unicode__(self):
return (ugettext(u'Build command {pk} for build {build}')
.format(pk=self.pk, build=self.build))
@property
def run_time(self):
"""Total command runtime in seconds"""
if self.start_time is not None and self.end_time is not None:
diff = self.end_time - self.start_time
            # total_seconds() also counts whole days, unlike timedelta.seconds
            return int(diff.total_seconds())
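# Illustrative sketch, not part of the original module: how the mixin
# properties above compose on a fetched row (`cmd` is a hypothetical name).
#
#   cmd = BuildCommandResult.objects.latest()   # Meta.get_latest_by = 'start_time'
#   if cmd.failed:                              # inverse of cmd.successful
#       print(cmd.command, cmd.exit_code)
#   print(cmd.run_time)                         # seconds from start to end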
|
mjirayu/sit_academy
|
refs/heads/master
|
common/lib/xmodule/xmodule/modulestore/tests/test_xml.py
|
119
|
"""
Tests around our XML modulestore, including importing
well-formed and not-well-formed XML.
"""
import os.path
import unittest
from glob import glob
from mock import patch, Mock
from xmodule.modulestore.xml import XMLModuleStore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.x_module import XModuleMixin
from xmodule.tests import DATA_DIR
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.tests.test_modulestore import check_has_course_method
def glob_tildes_at_end(path):
"""
    A wrapper for the `glob.glob` function that moves any files ending
    in a tilde (~) to the end of the list of results.
"""
result = glob(path)
with_tildes = [f for f in result if f.endswith("~")]
no_tildes = [f for f in result if not f.endswith("~")]
return no_tildes + with_tildes
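# Hedged example of the reordering (file names are hypothetical):
#
#   glob("course/*")                -> ["a.xml", "b.xml~", "c.xml"]
#   glob_tildes_at_end("course/*")  -> ["a.xml", "c.xml", "b.xml~"]
#
# The test below patches `glob.glob` with this wrapper so editor backup
# files ending in "~" are encountered last and can be verified as ignored.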
class TestXMLModuleStore(unittest.TestCase):
"""
Test around the XML modulestore
"""
def test_xml_modulestore_type(self):
store = XMLModuleStore(DATA_DIR, source_dirs=[])
self.assertEqual(store.get_modulestore_type(), ModuleStoreEnum.Type.xml)
@patch('xmodule.tabs.CourseTabList.initialize_default', Mock())
def test_unicode_chars_in_xml_content(self):
# edX/full/6.002_Spring_2012 has non-ASCII chars, and during
# uniquification of names, would raise a UnicodeError. It no longer does.
# Ensure that there really is a non-ASCII character in the course.
with open(os.path.join(DATA_DIR, "toy/sequential/vertical_sequential.xml")) as xmlf:
xml = xmlf.read()
with self.assertRaises(UnicodeDecodeError):
xml.decode('ascii')
# Load the course, but don't make error modules. This will succeed,
# but will record the errors.
modulestore = XMLModuleStore(
DATA_DIR,
source_dirs=['toy'],
xblock_mixins=(XModuleMixin,),
load_error_modules=False)
# Look up the errors during load. There should be none.
errors = modulestore.get_course_errors(SlashSeparatedCourseKey("edX", "toy", "2012_Fall"))
assert errors == []
@patch("xmodule.modulestore.xml.glob.glob", side_effect=glob_tildes_at_end)
def test_tilde_files_ignored(self, _fake_glob):
modulestore = XMLModuleStore(DATA_DIR, source_dirs=['tilde'], load_error_modules=False)
about_location = SlashSeparatedCourseKey('edX', 'tilde', '2012_Fall').make_usage_key(
'about', 'index',
)
about_module = modulestore.get_item(about_location)
self.assertIn("GREEN", about_module.data)
self.assertNotIn("RED", about_module.data)
def test_get_courses_for_wiki(self):
"""
Test the get_courses_for_wiki method
"""
store = XMLModuleStore(DATA_DIR, source_dirs=['toy', 'simple'])
for course in store.get_courses():
course_locations = store.get_courses_for_wiki(course.wiki_slug)
self.assertEqual(len(course_locations), 1)
self.assertIn(course.location.course_key, course_locations)
course_locations = store.get_courses_for_wiki('no_such_wiki')
self.assertEqual(len(course_locations), 0)
# now set toy course to share the wiki with simple course
toy_course = store.get_course(SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'))
toy_course.wiki_slug = 'simple'
course_locations = store.get_courses_for_wiki('toy')
self.assertEqual(len(course_locations), 0)
course_locations = store.get_courses_for_wiki('simple')
self.assertEqual(len(course_locations), 2)
for course_number in ['toy', 'simple']:
self.assertIn(SlashSeparatedCourseKey('edX', course_number, '2012_Fall'), course_locations)
def test_has_course(self):
"""
Test the has_course method
"""
check_has_course_method(
XMLModuleStore(DATA_DIR, source_dirs=['toy', 'simple']),
SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'),
locator_key_fields=SlashSeparatedCourseKey.KEY_FIELDS
)
def test_branch_setting(self):
"""
Test the branch setting context manager
"""
store = XMLModuleStore(DATA_DIR, source_dirs=['toy'])
course = store.get_courses()[0]
# XML store allows published_only branch setting
with store.branch_setting(ModuleStoreEnum.Branch.published_only, course.id):
store.get_item(course.location)
# XML store does NOT allow draft_preferred branch setting
with self.assertRaises(ValueError):
with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
# verify that the above context manager raises a ValueError
pass # pragma: no cover
@patch('xmodule.modulestore.xml.log')
def test_dag_course(self, mock_logging):
"""
Test a course whose structure is not a tree.
"""
store = XMLModuleStore(
DATA_DIR,
source_dirs=['xml_dag'],
xblock_mixins=(XModuleMixin,),
)
course_key = store.get_courses()[0].id
mock_logging.warning.assert_called_with(
"%s has more than one definition", course_key.make_usage_key('discussion', 'duplicate_def')
)
shared_item_loc = course_key.make_usage_key('html', 'toyhtml')
shared_item = store.get_item(shared_item_loc)
parent = shared_item.get_parent()
self.assertIsNotNone(parent, "get_parent failed to return a value")
parent_loc = course_key.make_usage_key('vertical', 'vertical_test')
self.assertEqual(parent.location, parent_loc)
self.assertIn(shared_item, parent.get_children())
        # ensure it's still a child of the other parent even though it doesn't claim the other parent as its parent
other_parent_loc = course_key.make_usage_key('vertical', 'zeta')
other_parent = store.get_item(other_parent_loc)
# children rather than get_children b/c the instance returned by get_children != shared_item
self.assertIn(shared_item_loc, other_parent.children)
|
DiginessForever/machineLearning
|
refs/heads/master
|
chromedriver.py
|
1
|
import os, urllib, time, sys, requests
from PIL import Image
from selenium import webdriver
chromedriverpath = "/home/diginess/code/machineLearning/chromedriver"
browser = webdriver.Chrome(chromedriverpath)
#searchterm = "cat"
searchterm = sys.argv[1]
#Current search URL is unwieldy, check out this link for some ideas in order to break up the search options:
#https://stackoverflow.com/questions/18387598/selenium-webdriver-click-google-search
url = "https://www.google.com/search?q=" + searchterm + "&safe=on&espv=2&biw=1599&bih=726&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiI56_7isXSAhXC4iYKHcbZCLEQAUIBigB#q=" + searchterm + "&safe=off&tbm=isch&tbs=sur:fc&*"
browser.get(url)
number_of_scrolls = 2
#TO DO here: Figure out how to time it or keep testing it so that the show more button is actually clicked.
#Maybe a time.sleep(1) would work...
'''
for _ in range(30):
browser.execute_script("window.scrollBy(0,10000)")
time.sleep(.1)
showmore = browser.find_elements_by_id('ksb_kvc')
for button in showmore:
button.click()
'''
for i in range(number_of_scrolls):
for j in range(30):
browser.execute_script("window.scrollBy(0,10000)")
time.sleep(.2)
time.sleep(0.5)
#showmore = browser.find_elements_by_id('ksb_kvc')
#for button in showmore:
# button.click()
try:
browser.find_element_by_xpath("//input[@value='Show more results']").click()
except Exception as e:
print("Less images found: " + str(e))
#print "Unexpected error:", sys.exc_info()[0] #if the above doesn't work on python2
break
images = browser.find_elements_by_class_name('rg_l')
if not os.path.exists("./images/" + searchterm):
os.makedirs("./images/" + searchterm)
count = 1
for image in images:
href = image.get_attribute('href')
'''
#This was the initial code I used, about 50 percent of the images were coming down
#completely corrupted / not able to be opened. After switching to requests, it works.
if '?imgurl=' in href: # Do this because often times the last result will not be an actual image
image_url_raw = href.split('?imgurl=')[1].split('&imgrefurl=')[0]
image_url = urllib.unquote(urllib.unquote(image_url_raw))
#image_url = requests.get(image_url_raw)
#print image_url
savePath = "./images/" + searchterm + "/" + "image" + str(count) + ".jpg"
image_url = urllib.quote(image_url.encode('utf8'), ':/')
#urllib.urlretrieve(image_url, savePath)
'''
if '?imgurl=' in href:
image_url_raw = href.split('?imgurl=')[1].split('&imgrefurl=')[0]
image_url = urllib.unquote(urllib.unquote(image_url_raw))
print(image_url)
with open('./images/' + searchterm + "/" + "image" + str(count) + '.jpg', 'wb') as handle:
response = requests.get(image_url, stream=True)
if not response.ok:
                print(response)
for block in response.iter_content(1024):
if not block:
break
handle.write(block)
count += 1
print(count - 1)
#Note: In order to call this program from
#imageCoverter.py or the neural net code,
#use:
#os.system("chromedriver.py <arguments here>")
#The above will wait for the process to finish before
#continuing. If you want to do threads, you can use
#the multiprocessing module: https://docs.python.org/3/library/subprocess.html#module-subprocess
#Can also import the other script:
#import script1
#script.myFunction(i)
|
CuonDeveloper/cuon
|
refs/heads/master
|
cuon_client/cuon/bin/cuon/Addresses/SingleScheduling.py
|
5
|
# -*- coding: utf-8 -*-
##Copyright (C) [2003] [Jürgen Hamel, D-32584 Löhne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from cuon.Databases.SingleData import SingleData
import logging
import threading
import string
class SingleScheduling(SingleData):
def __init__(self, allTables):
SingleData.__init__(self)
# tables.dbd and address
self.sNameOfTable = "partner_schedul"
self.xmlTableDef = 0
self.loadTable(allTables)
#self.athread = threading.Thread(target = self.loadTable())
#self.athread.start()
self.listHeader['names'] = ['name', 'zip', 'city', 'Street', 'ID']
self.listHeader['size'] = [25,10,25,25,10]
self.out( "number of Columns ")
self.out( len(self.table.Columns))
#
self.partnerId = 0
self.liSchedulTime = None
def readNonWidgetEntries(self, dicValues):
dicValues['partnerid'] = [self.partnerId, 'int']
# print 'dicValues Schedul = ', dicValues['schedul_time_begin'], dicValues['schedul_time_end']
# print '2-->', self.liSchedulTime
# sSql = "select fct_getSchedulTime(" + `dicValues['schedul_time_begin'][0]` + ", " + `dicValues['schedul_time_end'][0]`+ ", array "+`self.liSchedulTime`+" )"
# result = self.rpc.callRP('Database.executeNormalQuery', sSql, self.dicUser)
return dicValues
def getPartnerID(self):
id = 0
if self.firstRecord.has_key('partnerid'):
id = self.firstRecord['partnerid']
return id
    def getShortRemark(self):
        s = None
        if self.firstRecord.has_key('short_remark'):
            s = self.firstRecord['short_remark']
        return s
def getNotes(self):
s = None
if self.firstRecord.has_key('notes'):
s = self.firstRecord['notes']
return s
def fillExtraEntries(self, oneRecord):
if oneRecord.has_key('schedul_datetime'):
print '-----------------------------------------------------'
print 'Schedul-Time: ', oneRecord['schedul_datetime']
liDate = string.split(oneRecord['schedul_datetime'])
if liDate:
try:
assert len(liDate) == 2
eDate = self.getWidget('eDate')
eTime = self.getWidget('eTime')
eDate.set_text(liDate[0])
eTime.set_text(liDate[1])
#(liDate[1])
except:
print 'error in Date'
            else:
print `oneRecord`
|
ar7z1/ansible
|
refs/heads/devel
|
test/units/modules/remote_management/oneview/test_oneview_fc_network_facts.py
|
47
|
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from units.compat import unittest
from oneview_module_loader import FcNetworkFactsModule
from hpe_test_utils import FactsParamsTestCase
ERROR_MSG = 'Fake message error'
PARAMS_GET_ALL = dict(
config='config.json',
name=None
)
PARAMS_GET_BY_NAME = dict(
config='config.json',
name="Test FC Network"
)
PRESENT_NETWORKS = [{
"name": "Test FC Network",
"uri": "/rest/fc-networks/c6bf9af9-48e7-4236-b08a-77684dc258a5"
}]
class FcNetworkFactsSpec(unittest.TestCase,
FactsParamsTestCase):
def setUp(self):
self.configure_mocks(self, FcNetworkFactsModule)
self.fc_networks = self.mock_ov_client.fc_networks
FactsParamsTestCase.configure_client_mock(self, self.fc_networks)
def test_should_get_all_fc_networks(self):
self.fc_networks.get_all.return_value = PRESENT_NETWORKS
self.mock_ansible_module.params = PARAMS_GET_ALL
FcNetworkFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(fc_networks=PRESENT_NETWORKS)
)
def test_should_get_fc_network_by_name(self):
self.fc_networks.get_by.return_value = PRESENT_NETWORKS
self.mock_ansible_module.params = PARAMS_GET_BY_NAME
FcNetworkFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(fc_networks=PRESENT_NETWORKS)
)
if __name__ == '__main__':
unittest.main()
|
ShassAro/ShassAro
|
refs/heads/master
|
DockerAdmin/dockerVirtualEnv/lib/python2.7/site-packages/django/contrib/staticfiles/apps.py
|
101
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class StaticFilesConfig(AppConfig):
name = 'django.contrib.staticfiles'
verbose_name = _("Static Files")
|
ya7lelkom/googleads-python-lib
|
refs/heads/master
|
examples/dfp/v201502/creative_set_service/get_creative_sets_by_statement.py
|
3
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all creative sets for a master creative.
To create creative sets, run create_creative_sets.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CreativeSetService.getCreativeSetsByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
MASTER_CREATIVE_ID = 'INSERT_MASTER_CREATIVE_ID_HERE'
def main(client, master_creative_id):
# Initialize appropriate service.
creative_set_service = client.GetService('CreativeSetService',
version='v201502')
# Create statement object to only select creative sets that have the given
# master creative.
values = [{
'key': 'masterCreativeId',
'value': {
'xsi_type': 'NumberValue',
'value': master_creative_id
}
}]
query = 'WHERE masterCreativeId = :masterCreativeId'
statement = dfp.FilterStatement(query, values)
# Get creative sets by statement.
while True:
response = creative_set_service.getCreativeSetsByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for creative_set in response['results']:
print (('Creative set with ID \'%s\', master creative ID \'%s\', and '
'companion creative IDs {%s} was found.')
% (creative_set['id'], creative_set['masterCreativeId'],
','.join(creative_set['companionCreativeIds'])))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, MASTER_CREATIVE_ID)
|
jerli/sympy
|
refs/heads/master
|
examples/advanced/hydrogen.py
|
119
|
#!/usr/bin/env python
"""
This example shows how to work with the Hydrogen radial wavefunctions.
"""
from sympy import Eq, Integral, oo, pprint, symbols
from sympy.physics.hydrogen import R_nl
def main():
print("Hydrogen radial wavefunctions:")
a, r = symbols("a r")
print("R_{21}:")
pprint(R_nl(2, 1, a, r))
print("R_{60}:")
pprint(R_nl(6, 0, a, r))
print("Normalization:")
i = Integral(R_nl(1, 0, 1, r)**2 * r**2, (r, 0, oo))
pprint(Eq(i, i.doit()))
i = Integral(R_nl(2, 0, 1, r)**2 * r**2, (r, 0, oo))
pprint(Eq(i, i.doit()))
i = Integral(R_nl(2, 1, 1, r)**2 * r**2, (r, 0, oo))
pprint(Eq(i, i.doit()))
if __name__ == '__main__':
main()
|
neilLasrado/erpnext
|
refs/heads/develop
|
erpnext/education/doctype/course_topic/course_topic.py
|
8
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class CourseTopic(Document):
pass
|
rec/BiblioPixel
|
refs/heads/dev
|
bibliopixel/animation/game.py
|
2
|
from . matrix import Matrix
class Game(Matrix):
def __init__(self, layout, inputDev):
super().__init__(layout)
self._input_dev = inputDev
self._keys = None
self._lastKeys = None
self._speedStep = 0
self._speeds = {}
self._keyfuncs = {}
def _exit(self, type, value, traceback):
if hasattr(self._input_dev, 'setLightsOff'):
self._input_dev.setLightsOff(5)
self._input_dev.close()
def setSpeed(self, name, speed):
self._speeds[name] = speed
def getSpeed(self, name):
return self._speeds.get(name)
def _checkSpeed(self, speed):
return not (self._speedStep % speed)
def checkSpeed(self, name):
return name in self._speeds and self._checkSpeed(self._speeds[name])
def addKeyFunc(self, key, func, speed=1, hold=True):
if not isinstance(key, list):
key = [key]
for k in key:
self._keyfuncs[k] = {
"func": func,
"speed": speed,
"hold": hold,
"last": False,
"inter": False
}
    def handleKeys(self):
        for key in self._keys:
            val = self._keys[key]
            if key in self._keyfuncs:
                # _keyfuncs stores plain dicts, so index by key rather than
                # attribute access (cfg.speed would raise AttributeError).
                cfg = self._keyfuncs[key]
                speed_pass = self._checkSpeed(cfg["speed"])
                if cfg["hold"]:
                    if speed_pass:
                        if val or cfg["inter"]:
                            cfg["func"]()
                        else:
                            cfg["inter"] = cfg["last"] = val
                elif speed_pass:
                    if (val or cfg["inter"]) and not cfg["last"]:
                        cfg["func"]()
                    cfg["inter"] = cfg["last"] = val
                else:
                    cfg["inter"] |= val
        self._lastKeys = self._keys
def step(self, amt):
self._keys = self._input_dev.getKeys()
self._speedStep += 1
from .. util import deprecated
if deprecated.allowed():
BaseGameAnim = Game
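# Minimal usage sketch (illustrative; `layout`, `input_dev` and `fire` are
# hypothetical objects, with input_dev exposing a getKeys() mapping):
#
#   game = Game(layout, input_dev)
#   game.setSpeed('tick', 4)                      # gate: every 4th step
#   game.addKeyFunc('A', fire, speed=2, hold=False)
#   # per frame: step() refreshes key state, then handleKeys() dispatches
#   # any registered callbacks whose speed gate passes on this step.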
|
umitproject/openmonitor-aggregator
|
refs/heads/master
|
django/db/__init__.py
|
60
|
from django.conf import settings
from django.core import signals
from django.core.exceptions import ImproperlyConfigured
from django.db.utils import (ConnectionHandler, ConnectionRouter,
load_backend, DEFAULT_DB_ALIAS, DatabaseError, IntegrityError)
__all__ = ('backend', 'connection', 'connections', 'router', 'DatabaseError',
'IntegrityError', 'DEFAULT_DB_ALIAS')
if DEFAULT_DB_ALIAS not in settings.DATABASES:
raise ImproperlyConfigured("You must define a '%s' database" % DEFAULT_DB_ALIAS)
connections = ConnectionHandler(settings.DATABASES)
router = ConnectionRouter(settings.DATABASE_ROUTERS)
# `connection`, `DatabaseError` and `IntegrityError` are convenient aliases
# for backend bits.
# DatabaseWrapper.__init__() takes a dictionary, not a settings module, so
# we manually create the dictionary from the settings, passing only the
# settings that the database backends care about. Note that TIME_ZONE is used
# by the PostgreSQL backends.
# We load all these up for backwards compatibility, you should use
# connections['default'] instead.
class DefaultConnectionProxy(object):
"""
Proxy for accessing the default DatabaseWrapper object's attributes. If you
need to access the DatabaseWrapper object itself, use
connections[DEFAULT_DB_ALIAS] instead.
"""
def __getattr__(self, item):
return getattr(connections[DEFAULT_DB_ALIAS], item)
def __setattr__(self, name, value):
return setattr(connections[DEFAULT_DB_ALIAS], name, value)
connection = DefaultConnectionProxy()
backend = load_backend(connection.settings_dict['ENGINE'])
# Register an event that closes the database connection
# when a Django request is finished.
def close_connection(**kwargs):
for conn in connections.all():
conn.close()
signals.request_finished.connect(close_connection)
# Register an event that resets connection.queries
# when a Django request is started.
def reset_queries(**kwargs):
for conn in connections.all():
conn.queries = []
signals.request_started.connect(reset_queries)
# Register an event that rolls back the connections
# when a Django request has an exception.
def _rollback_on_exception(**kwargs):
from django.db import transaction
for conn in connections:
try:
transaction.rollback_unless_managed(using=conn)
except DatabaseError:
pass
signals.got_request_exception.connect(_rollback_on_exception)
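# Usage sketch (illustrative, not part of this module): `connection` proxies
# attribute access to connections[DEFAULT_DB_ALIAS], so both calls below hit
# the same DatabaseWrapper:
#
#   from django.db import connection, connections, DEFAULT_DB_ALIAS
#   connection.cursor()
#   connections[DEFAULT_DB_ALIAS].cursor()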
|
stochastic-technologies/impactstory-buildpack
|
refs/heads/master
|
vendor/distribute-0.6.36/setuptools/__init__.py
|
132
|
"""Extensions to the 'distutils' for large or complex distributions"""
from setuptools.extension import Extension, Library
from setuptools.dist import Distribution, Feature, _get_unpatched
import distutils.core, setuptools.command
from setuptools.depends import Require
from distutils.core import Command as _Command
from distutils.util import convert_path
import os
import sys
__version__ = '0.6'
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages'
]
# This marker is used to simplify the process that checks whether the
# setuptools package was installed by the Setuptools project
# or by the Distribute project, in case Setuptools creates
# a distribution with the same version.
#
# The distribute_setup script for instance, will check if this
# attribute is present to decide whether to reinstall the package
# or not.
_distribute = True
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
def find_packages(where='.', exclude=()):
"""Return a list all Python packages found within directory 'where'
'where' should be supplied as a "cross-platform" (i.e. URL-style) path; it
will be converted to the appropriate local path syntax. 'exclude' is a
sequence of package names to exclude; '*' can be used as a wildcard in the
names, such that 'foo.*' will exclude all subpackages of 'foo' (but not
'foo' itself).
"""
out = []
stack=[(convert_path(where), '')]
while stack:
where,prefix = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where,name)
if ('.' not in name and os.path.isdir(fn) and
os.path.isfile(os.path.join(fn,'__init__.py'))
):
out.append(prefix+name); stack.append((fn,prefix+name+'.'))
for pat in list(exclude)+['ez_setup', 'distribute_setup']:
from fnmatch import fnmatchcase
out = [item for item in out if not fnmatchcase(item,pat)]
return out
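# Hedged example of the wildcard exclusion described above (package names
# are hypothetical):
#
#   find_packages(exclude=('foo.*',))  # keeps 'foo', drops 'foo.bar' etc.
#   find_packages('src')               # search under ./src instead of '.'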
setup = distutils.core.setup
_Command = _get_unpatched(_Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
# Add support for keyword arguments
_Command.__init__(self,dist)
for k,v in kw.items():
setattr(self,k,v)
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
for k,v in kw.items():
setattr(cmd,k,v) # update command with keywords
return cmd
import distutils.core
distutils.core.Command = Command # we can't patch distutils.cmd, alas
def findall(dir = os.curdir):
"""Find all files under 'dir' and return the list of full filenames
(relative to 'dir').
"""
all_files = []
for base, dirs, files in os.walk(dir):
if base==os.curdir or base.startswith(os.curdir+os.sep):
base = base[2:]
if base:
files = [os.path.join(base, f) for f in files]
all_files.extend(filter(os.path.isfile, files))
return all_files
import distutils.filelist
distutils.filelist.findall = findall # fix findall bug in distutils.
# sys.dont_write_bytecode was introduced in Python 2.6.
if ((hasattr(sys, "dont_write_bytecode") and sys.dont_write_bytecode) or
(not hasattr(sys, "dont_write_bytecode") and os.environ.get("PYTHONDONTWRITEBYTECODE"))):
_dont_write_bytecode = True
else:
_dont_write_bytecode = False
|
kelseyoo14/Wander
|
refs/heads/master
|
venv_2_7/lib/python2.7/site-packages/numpy/linalg/info.py
|
264
|
"""\
Core Linear Algebra Tools
-------------------------
Linear algebra basics:
- norm Vector or matrix norm
- inv Inverse of a square matrix
- solve Solve a linear system of equations
- det Determinant of a square matrix
- lstsq Solve linear least-squares problem
- pinv Pseudo-inverse (Moore-Penrose) calculated using a singular
value decomposition
- matrix_power Integer power of a square matrix
Eigenvalues and decompositions:
- eig Eigenvalues and vectors of a square matrix
- eigh Eigenvalues and eigenvectors of a Hermitian matrix
- eigvals Eigenvalues of a square matrix
- eigvalsh Eigenvalues of a Hermitian matrix
- qr QR decomposition of a matrix
- svd Singular value decomposition of a matrix
- cholesky Cholesky decomposition of a matrix
Tensor operations:
- tensorsolve Solve a linear tensor equation
- tensorinv Calculate an inverse of a tensor
Exceptions:
- LinAlgError Indicates a failed linear algebra operation
"""
from __future__ import division, absolute_import, print_function
depends = ['core']
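# Illustrative sketch of a few of the tools listed above (not part of this
# module; `a` and `b` are made-up data):
#
#   import numpy as np
#   a = np.array([[3., 1.], [1., 2.]])
#   b = np.array([9., 8.])
#   x = np.linalg.solve(a, b)             # solve a . x = b
#   w, v = np.linalg.eigh(a)              # a is symmetric, so eigh applies
#   r = np.linalg.norm(np.dot(a, x) - b)  # residual, ~0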
|
lowitty/sendtrap
|
refs/heads/master
|
lib/pysnmp/proto/acmod/void.py
|
6
|
# Void Access Control Model
from pysnmp.proto import errind, error
from pysnmp import debug
# rfc3415 3.2
class Vacm:
accessModelID = 0
def isAccessAllowed(self,
snmpEngine,
securityModel,
securityName,
securityLevel,
viewType,
contextName,
variableName):
debug.logger & debug.flagACL and debug.logger('isAccessAllowed: viewType %s for variableName %s - OK' % (viewType, variableName))
# rfc3415 3.2.5c
return error.StatusInformation(errorIndication=errind.accessAllowed)
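# Behavioural sketch (illustrative arguments): this void model never denies
# access, so every call yields errind.accessAllowed:
#
#   vacm = Vacm()
#   info = vacm.isAccessAllowed(None, 3, 'usr-md5-des', 'authPriv',
#                               'read', '', (1, 3, 6, 1, 2, 1, 1, 1, 0))
#   # info is a StatusInformation carrying errorIndication=errind.accessAllowed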
|
eayunstack/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/admin/metering/tables.py
|
37
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.contrib.humanize.templatetags import humanize
from django.utils import text
from django.utils.translation import ugettext_lazy as _
from horizon import tables
def show_date(datum):
return datum.split('T')[0]
class ModifyUsageReportParameters(tables.LinkAction):
name = "create"
verbose_name = _("Modify Usage Report Parameters")
url = "horizon:admin:metering:create"
classes = ("ajax-modal",)
icon = "edit"
class CreateCSVUsageReport(tables.LinkAction):
name = "csv"
verbose_name = _("Download CSV Summary")
url = "horizon:admin:metering:csvreport"
classes = ("btn-create",)
icon = "download"
class ReportTable(tables.DataTable):
project = tables.Column('project', verbose_name=_('Project'))
service = tables.Column('service', verbose_name=_('Service'))
meter = tables.Column('meter', verbose_name=_('Meter'))
description = tables.Column('description', verbose_name=_('Description'))
time = tables.Column('time', verbose_name=_('Day'),
filters=[show_date])
value = tables.Column('value', verbose_name=_('Value (Avg)'),
filters=[humanize.intcomma])
unit = tables.Column('unit', verbose_name=_('Unit'))
def get_object_id(self, obj):
return "%s-%s-%s" % (obj['project'], obj['service'], obj['meter'])
class Meta(object):
name = 'report_table'
verbose_name = _("Daily Usage Report")
table_actions = (ModifyUsageReportParameters, CreateCSVUsageReport)
multi_select = False
class UsageTable(tables.DataTable):
service = tables.Column('service', verbose_name=_('Service'))
meter = tables.Column('meter', verbose_name=_('Meter'))
description = tables.Column('description', verbose_name=_('Description'))
time = tables.Column('time', verbose_name=_('Day'),
filters=[show_date])
value = tables.Column('value', verbose_name=_('Value (Avg)'),
filters=[humanize.intcomma])
def __init__(self, request, *args, **kwargs):
super(UsageTable, self).__init__(request, *args, **kwargs)
self.title = getattr(self, 'title', None)
def get_object_id(self, datum):
return datum['time'] + datum['meter']
# since these tables are dynamically created and named, we use title
@property
def name(self):
# slugify was introduced in Django 1.5
if hasattr(text, 'slugify'):
return text.slugify(unicode(self.title))
else:
return self.title
def __unicode__(self):
return self.title
class Meta(object):
name = 'daily'
|
blancltd/django-latest-tweets
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
from codecs import open
from setuptools import find_packages, setup
with open('README.rst', 'r', 'utf-8') as f:
readme = f.read()
setup(
name='django-latest-tweets',
version='0.4.6',
description='Latest Tweets for Django',
long_description=readme,
url='https://github.com/developersociety/django-latest-tweets',
maintainer='The Developer Society',
maintainer_email='studio@dev.ngo',
platforms=['any'],
install_requires=[
'twitter>=1.9.1',
'requests>=2.0',
],
packages=find_packages(),
include_package_data=True,
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.2',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
license='BSD',
)
|
Tabea-K/csvkit
|
refs/heads/master
|
csvkit/grep.py
|
21
|
#!/usr/bin/env python
import six
from csvkit.exceptions import ColumnIdentifierError
class FilteringCSVReader(six.Iterator):
"""
Given any row iterator, only return rows which pass the filter.
If 'header' is False, then all rows must pass the filter; by default, the first row will be passed
through untested.
The value of patterns may be either a sequence or a dictionary. Items in the sequence and values in the
dictionary may be strings, regular expressions, or functions. For each row in the wrapped iterator,
these values will be used as tests, and the row will only be yielded by the filter if all values pass
their corresponding tests. This behavior can be toggled so that all rows which pass any of the tests
will be yielded by specifying "any_match=True" in the constructor.
    Empty values (the blank string or None) will not be tested; the value in that position will not affect whether
    or not the filtering reader yields a prospective row. To test for an explicitly blank value, use a regular
    expression such as "^$" or "^\s*$"
If patterns is a dictionary, the keys can be integers identifying indices in the input rows, or, if 'header'
is True (as it is by default), they can be strings matching column names in the first row of the reader.
If patterns is a sequence, then it is assumed that they will be applied to the
equivalently positioned values in the test rows.
By specifying 'inverse=True', only rows which do not match the patterns will be passed by the filter. The header,
if there is one, will always be returned regardless of the value for 'inverse'.
"""
returned_header = False
column_names = None
def __init__(self, reader, patterns, header=True, any_match=False, inverse=False):
super(FilteringCSVReader, self).__init__()
self.reader = reader
self.header = header
if self.header:
self.column_names = next(reader)
self.any_match = any_match
self.inverse = inverse
self.patterns = standardize_patterns(self.column_names,patterns)
def __iter__(self):
return self
def __next__(self):
if self.column_names and not self.returned_header:
self.returned_header = True
return self.column_names
while True:
row = next(self.reader)
if self.test_row(row):
return row
raise StopIteration()
def test_row(self, row):
for idx, test in self.patterns.items():
if self.any_match and test(row[idx]):
return not self.inverse # True
if not self.any_match and not test(row[idx]):
return self.inverse # False
return not self.inverse # True
def standardize_patterns(column_names, patterns):
"""
    Given patterns in any of the permitted input forms, return a dict whose keys
    are column indices and whose values are functions which return a boolean indicating whether the value passes.
If patterns is a dictionary and any of its keys are values in column_names, the returned dictionary will
have those keys replaced with the integer position of that value in column_names
"""
try:
# Dictionary of patterns
patterns = dict((k, pattern_as_function(v)) for k, v in patterns.items() if v)
if not column_names: return patterns
p2 = {}
for k in patterns:
if k in column_names:
idx = column_names.index(k)
if idx in patterns:
raise ColumnIdentifierError("Column %s has index %i which already has a pattern." % (k,idx))
p2[idx] = patterns[k]
else:
p2[k] = patterns[k]
return p2
except AttributeError:
# Sequence of patterns
return dict((i, pattern_as_function(x)) for i, x in enumerate(patterns))
def pattern_as_function(obj):
# obj is function
if hasattr(obj, '__call__'):
return obj
# obj is regex object
if hasattr(obj, 'match'):
return regex_callable(obj)
# obj is string
return lambda x: obj in x
class regex_callable(object):
def __init__(self, pattern):
self.pattern = pattern
def __call__(self, arg):
return self.pattern.match(arg)
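# Usage sketch (illustrative; the file and column names are hypothetical):
#
#   import csv, re
#   with open('data.csv') as f:
#       reader = FilteringCSVReader(
#           csv.reader(f),
#           patterns={'state': 'NY', 'zip': re.compile(r'^1\d{4}$')})
#       for row in reader:   # header first, then only matching rows
#           print(row)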
|
EdLogan18/logan-repository
|
refs/heads/master
|
plugin.video.MediaPlay-TV/tv5.py
|
67
|
import base64
import zlib, urllib,urllib2,re
key=base64.b64decode("ZXQgb3VhaSBtZWMh")
def getUrl(url, cookieJar=None,post=None, timeout=20, headers=None):
cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
#opener = urllib2.install_opener(opener)
req = urllib2.Request(url)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
if headers:
for h,hv in headers:
req.add_header(h,hv)
response = opener.open(req,post,timeout=timeout)
link=response.read()
response.close()
return link;
def decode_base64_and_inflate( b64string ):
decoded_data = base64.b64decode( b64string )
# print ord(decoded_data[0])
return zlib.decompress( decoded_data , 15)
def deflate_and_base64_encode( string_val ):
zlibbed_str = zlib.compress( string_val )
compressed_string =zlibbed_str## zlibbed_str[2:-4]
return base64.b64encode( compressed_string )
def decode(param1, param2):
param1dec=decode_base64_and_inflate(param1)
_loc3_ = bytearray()
_loc3_.extend(param1dec)
_loc4_ = 0;
_loc5_ = len(param1dec);
_loc6_ = 0;
while _loc6_ < _loc5_:
_loc3_[_loc6_] = _loc3_[_loc6_] ^ ord(param2[_loc4_]);
_loc4_+=1;
if(_loc4_ >= len(param2)):
_loc4_ = 0;
_loc6_+=1;
return _loc3_
def encode(param1, param2):
param1dec=param1
_loc3_ = bytearray()
_loc3_.extend(param1dec)
_loc4_ = 0;
_loc5_ = len(param1dec);
_loc6_ = 0;
while _loc6_ < _loc5_:
_loc3_[_loc6_] = _loc3_[_loc6_] ^ ord(param2[_loc4_]);
_loc4_+=1;
if(_loc4_ >= len(param2)):
_loc4_ = 0;
_loc6_+=1;
return deflate_and_base64_encode(_loc3_.decode("utf-8"))
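# Round-trip sketch (illustrative): encode() and decode() invert each other,
# since both apply the same repeating-key XOR and encode adds deflate+base64
# on top while decode strips them, so for suitable text t:
#
#   decode(encode(t, key), key) == bytearray(t)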
def extractUrl(uid):
#enc="eNrjYnGVFRFl8GeOYHERtPTnZuDlYZPgYZdhkfXlCgjR9+XhZAlmCBTVlBRhYI1QFhAMAbIFBKMkPAJURcOcxWNcwwEd4gnn"
# eJzjYnGVFRFl8GeOYHERtPTnZuDlYZPgYZdhkfXlCgjR9+XhZAlmCBTVlBRhYI1QFhAMAbIFBKMkPAJURcOcxWNcwwEd4gnn
str="operation=getPlaylist&uid=%s"%urllib.quote_plus(uid)
str=encode(str,key)
s=getUrl("http://www.tv5mondeplusafrique.com/html/servicesV2/getPlaylist.xml?BulkLoaderNoCache=2_2&",post=str)
s=decode(s,key)
print "returned", repr(s.decode("unicode-escape"))
from BeautifulSoup import BeautifulStoneSoup, BeautifulSoup, BeautifulSOAP
xmlobj= BeautifulSOAP(s.decode("unicode-escape"), convertEntities=BeautifulStoneSoup.XML_ENTITIES)
vurl=xmlobj("video")[0];
# print vurl
su=vurl("secureurl")[0].string
    # Strip the CDATA wrapper; the old pattern '[\[CDATA\]]' was a character
    # class that also deleted the letters C, D, A and T from the URL itself.
    su = re.sub(r'<!\[CDATA\[|\]\]>', '', su)
#print su
# print 'yyyyyyyyyyyy',vurl
if 'manifest.f4m?' in su:
su='plugin://plugin.video.f4mTester/?url='+urllib.quote_plus(su)
return su
#print extractUrl("aime_malgre_lui_s01_ep42_4377314")
#d1= decode(enc,key)
#print d1
#d2=encode(d1,key)
#print d2
#print d1==d2
#print 1/0
#print decode(enc, key)
#print decode("eNrjYnGVFRFl8GeOYHERVPHh4+FlCJZ0FvNg9HRkk2DxEdXmYPJlYnP1clBnDWVxCEsXcxEEsnkC4mLdRJQdhCOcgiLFAFb/C4g=",key)
#print decode("eJytV2l328iVLSwsgmABRBEEQWIhZVuW1ZKiSHa3bKslmSBRJCFiITaS4CK2J1YnfWKPM3b6W4711wOJcttJus9JZuZrnaq33Hffe7ey0YxuWm3SrvN+2aGvFuGoHb9fxwAEWtEynNLM6qAmOWqAwqANzjzJvDHI+ISYk8WncKDO+uGa9njZVpUEapWE5oH6PyUlY4yqSceZMqRwEDqcScBBsR1azVl/1XeaQPwTdWnohqKTeHvv7Vby7vDlzvfX3jcnfyJPXu/sHD4By8xNEYj40UpLWIztxXc/PTyytl8/eHj8hrz4eWf2NMOdrWfV1bBiUJwZcYt1IDVJ26hCm1uE5qWW0hIGMU3BdiYRoy6DcROhYHE8hlAUiDdZ7L+/IJTOVUibSDaRCF2r5Od1ngINl08rPmnhXsgWAa/TyGEwpCgMHtdoWvSe8HY5CbWohNI2qEcgIVRKw9QsQYFgl5EsjXglelgvkqCGmxENy7aajMWi9ZDoSKiQbYcvygiUgNgpSyQAFSfh6uqt/XZNQRGD1GIfXU98K2CUrVgjL7KywQ7AhUAHZd0PIOv06HJhCBLoCtuZ7n1TJzztoMWf//Bh1NffLAMrx59LWF6g1skbiDkUeftFuyxbwerZrc25plBdxpAM4kEYYGiVihxvsWVIXSY/mrUKTywS4qRfuj6rmKSt4Nt4pr6CYm/d+4mtO8ED0Yg2vsDgJVe0W6xGPYQOXasJJYscd4zkA6+Khut533RgGzySKtBpuwH1XPEMqepw5wjGKv5y/ihi1KNZ5LKQT3vT2Z47d/5Mk9HPwub8ztcqiPl0R3cAWyfuDOw1uALxtim/XgwYzdru5vc/8jIx0z1ujiiec+5zaUGaelgPWK3mcI97nWzL05m4i6/TK8EmLwilcXoe8/MhrYDBK7boswyMc/sHJtBdzuh9gIYHseJwh0LF38TWQd6jiq9iX0+/o4gOBB8n3LakAt97RC8Qjvo3O8hvg55O1IC+9ZVcFfN62WYN5XWp1e33w0pZqvIRq6hDXmvFuKZbd/y0pXQwjfEic4kELd0Qi60QiR0atxb9yZW8ir2RIhsdA3GDnPNdFduQqk6hbz7R3j8P2kgGgwLuleCquiaZ8xadoC7aYJhs6vUXUysYvabKd0F+fjz/Aye6IU49dFvT2KOwj0AcFAeXs93lLAJw4kyaB81JvfeOfRIgY3N+X6O9ew5Yes4fEl6CGEiODtdpUlNgW5D8I0YUjC7WQnBpGBJtRNwR9hZD6tuTx7t/Pbgop5jVOnOlrZilesRWI8jWkKALXkdUPNEoCSIDVLG96DO1KlXRAJXK+7dcbaTcpSJfduRZN8rqT58cC8ms2ut7K9TjZGWkxrQh9D1U5gegmFhyyrYLFMjrZS4Xk2BYKsPOrJhEdrqUF+FcXllXvIATnI3H6rhbm3baXI+auWXd8MT6ctxdQFBdSl2hnqKGGE9zlpBZLcz6pXDRWdnNqxrD4+UqxWPVWkrjcRUlE3mikCVh+T5KL/62yGfUl9xTiNecA8CZyzYGpnRRSNlTCSf7pN3PTmpXUVJNu0YHcEIrrPNpHr9Irat0U564vGbzM4HzeQky60n/ltsWW7eEAZjMG2LR5oaCz2UxpNiRFmClxul+WXK5fhtVEzBBHqUuPbpctIcga0su8nWE/bJGaKli0tOBNMSY7k3zWgBeICBLKUNFUWuQMVXc1Zlfs1MMNOgaPivDuD7RvKk3SvSxmDslXaqEPIT5sYTpdNojX+KvGmOTx+lQckpyTXPosqUPsN80ZOgzQLTLQNCZhA0YCjLOJGl2EPRLwHf53GaNzns5j4fO+wJwJm0N1Om4GVCYtX2a8+0y7ZhapUgAENKBTxuGgoTPfguCESMGD2sU8bKeOZlxKH8oNCX5NoZ/eduQGHPSYQsxKqT8TAVE4wUkD77GZw501h5wPBuz7PCWtzmG9EA1Q036h3xNqympCOo+JTVBwOTx5zO8WxeLPgdVezCjP+eLsDDmZzjv6/yOogz4r/HURVbgOD6qmwE1XYwsloOIU1yuyNpuA/kRa8b5tJrRUuZGCBCDAmbOHzUGt/O2opG4ZQX1/Vjfw/ptnEMOC3RNECNuWElQFov53vQ7Sc4lLT6CysmYymNgTY+fIc7lVYhKlNOFIN+nZV/KY6bghZY8BXLOQws2WNtDJTYGmasZriYthzCcuDlWFe2mhc+6aMMNv3Uqpacg84JeOygNAqZcvDGoC3YcrK1BSYmLv9yRkvzOQlrGsTTtfI15r1jj8qW6tlLZ8oBeQF9zwCkNJVs1Sf52hL7wH9gdIA/rus3nuN3hgyERG1Xkt8DKVMwZ3/z2+7fx4G4WPZuf9/vt0W0vxHpNGfRo7pXI4v6z+S8x3M+fl2401yxaWmqkjWDlt7AaKQ4tuaxF0dgeNlI4q+a6BcjV1cToFDhMqKPjd4uhzUu44DDiysFgnM791lQbfc6r6Tbq97wK+ABc3mmqoxrHkqcc7dGQHDVr4KaVRp8AK0/30rk6n8TqyEjo5qpvc0D2kDYKHVywaiU+wkk+f5ajCVuqdE6bVp4jd5OFCznsYC/lWIsHV0zYaaysdCYWFZYhkDSKnOkw9ElccuimIRSs3G/KAuvISsNqFkbMYphASkCt7w9+/24713IPts5fh5fqEDdI6FRNp1mC7Wfnbx99m+u3l9vPf8gWca79CiTOgMOaPj96CPzNHvFEx+eHCAl2jolWBGbemyuc+PIsSNZGl6rHLHCAvk4rZomelcxYH62aLjIqwzoAmlFI6JZU/AT1hN/L9Vgbn9lKV17vIXhDizDktRtmj82lTFn9VNcjZLD6TTZZNMOO9CX3QWNqpdmv5W6AwkmPxjC3n5rUBRovJvEnTr8BLbYZMKeVT0Dri7pyEtMqGAJ4s+hnYS1LrXaept5kpx9fPOj9197WdXByePbh5a2+siorMs7zwnxawq0YKiChwOrD0RE4OP3uABzunXw8zq5f9aJM2Nqz//vw+C9b31uPi7v7j3/Y3wJnp8/2ftp9vhbrG/uGKNDDMm4dGJWNxuMUo1MvcQ6T85yiIpHioB+nsT4l2mqQc6Oh0inS/dDxP2PYigsCl1FISgnllIe9Ammd8i5n5nuqutH8k74874Rz2rvTJKbXj+fKLB1XKuzAS2U0bSN1WJZ/NpvLkcvVz4Zc08lnlHbDKePm6aC67CSZHCzWubZ3+dHvDj56B4dvD5592NuxTl9vnxy8J8To5HrezbVNhM2EkdaPry/I3tvd8z+eHz21r88nC/OyuOFbCMsdpJFwZ6NJYoR/I19LW3v/Vr6s9ZShXZTr/71qVs3nm5LuJ2atdNvLZyELEyhdzOSnYT6XurVsagEP0dAYdKSrwXRV71Xk/iCJeFoGCVtm/6o2TK8BTvqI470Szv9NcY5hGH/K9Xbxdt4+VegYm+TIjk5qqyRUwy4dOhsOvN1Ofjx8sfv47e7dP8i9/wdV7MXL18Gzre2fHnyzg07savfoeCV1jo9/+N35x5wbL1+/Ot57Q4gcz+/7LlOH
olFBrYuN7jUNNq3c8+SfNWd1/bXmvJQ9VnAhVKbG515La8Ae3+2Om83uOM13x368tvNeyJIzZdJTl3Y8AixH94FD3+nV5fRWr9JBkW1FLOYD+kHEDHu/rn+s8Ub/JP1fcPjh4OV18GLr8R9fbfXeHZ2Ds293T98/IpF5ec/51qZ3Ds8ODsDW9u9/fH4MHm3t2h9Xif/muw7+z3FIYpz+H3C4ijY4kPQfcViF/z4O/cm9Duz8P+EwUP9XOCx+A4euzBOgt+K5jJL8k91+/3etec8W",key)
|
ifduyue/django
|
refs/heads/master
|
django/conf/locale/zh_Hant/__init__.py
|
12133432
| |
d8c/Mobile-Security-Framework-MobSF
|
refs/heads/master
|
MobSF/__init__.py
|
12133432
| |
denisenkom/django-sqlserver
|
refs/heads/master
|
broken-tests/expressions_case/__init__.py
|
12133432
| |
pzajda/eloquence
|
refs/heads/master
|
scons-local-2.5.0/SCons/Tool/dvipdf.py
|
3
|
"""SCons.Tool.dvipdf
Tool-specific initialization for dvipdf.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/dvipdf.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import SCons.Action
import SCons.Defaults
import SCons.Scanner.LaTeX
import SCons.Tool.pdf
import SCons.Tool.tex
import SCons.Util
_null = SCons.Scanner.LaTeX._null
def DviPdfPsFunction(XXXDviAction, target = None, source= None, env=None):
"""A builder for DVI files that sets the TEXPICTS environment
variable before running dvi2ps or dvipdf."""
try:
abspath = source[0].attributes.path
except AttributeError :
abspath = ''
saved_env = SCons.Scanner.LaTeX.modify_env_var(env, 'TEXPICTS', abspath)
result = XXXDviAction(target, source, env)
if saved_env is _null:
try:
del env['ENV']['TEXPICTS']
except KeyError:
pass # was never set
else:
env['ENV']['TEXPICTS'] = saved_env
return result
def DviPdfFunction(target = None, source= None, env=None):
result = DviPdfPsFunction(PDFAction,target,source,env)
return result
def DviPdfStrFunction(target = None, source= None, env=None):
"""A strfunction for dvipdf that returns the appropriate
command string for the no_exec options."""
if env.GetOption("no_exec"):
result = env.subst('$DVIPDFCOM',0,target,source)
else:
result = ''
return result
PDFAction = None
DVIPDFAction = None
def PDFEmitter(target, source, env):
"""Strips any .aux or .log files from the input source list.
These are created by the TeX Builder that in all likelihood was
used to generate the .dvi file we're using as input, and we only
care about the .dvi file.
"""
def strip_suffixes(n):
return not SCons.Util.splitext(str(n))[1] in ['.aux', '.log']
source = list(filter(strip_suffixes, source))
return (target, source)
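# Hedged example of the emitter's filtering (node names are hypothetical):
#
#   target, source = PDFEmitter(['doc.pdf'],
#                               ['doc.dvi', 'doc.aux', 'doc.log'], env)
#   # source is reduced to ['doc.dvi']; only the DVI is fed to dvipdf.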
def generate(env):
"""Add Builders and construction variables for dvipdf to an Environment."""
global PDFAction
if PDFAction is None:
PDFAction = SCons.Action.Action('$DVIPDFCOM', '$DVIPDFCOMSTR')
global DVIPDFAction
if DVIPDFAction is None:
DVIPDFAction = SCons.Action.Action(DviPdfFunction, strfunction = DviPdfStrFunction)
    SCons.Tool.pdf.generate(env)
bld = env['BUILDERS']['PDF']
bld.add_action('.dvi', DVIPDFAction)
bld.add_emitter('.dvi', PDFEmitter)
env['DVIPDF'] = 'dvipdf'
env['DVIPDFFLAGS'] = SCons.Util.CLVar('')
env['DVIPDFCOM'] = 'cd ${TARGET.dir} && $DVIPDF $DVIPDFFLAGS ${SOURCE.file} ${TARGET.file}'
# Deprecated synonym.
env['PDFCOM'] = ['$DVIPDFCOM']
def exists(env):
SCons.Tool.tex.generate_darwin(env)
return env.Detect('dvipdf')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
dkieffer/timeblob
|
refs/heads/master
|
timeblobsite/settings/base.py
|
1
|
"""
Django settings for timeblobsite project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import sys
TESTING = len(sys.argv) > 1 and sys.argv[1] == 'test'
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e1$qdwc0_09z^xt@2e%0_n9*7!i)nb&)zp5hwn@)v(k#x326_z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'timeblob',
'django_sandstorm'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.RemoteUserBackend',
'django.contrib.auth.backends.ModelBackend'
)
ROOT_URLCONF = 'timeblobsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'timeblobsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = "static/"
SANDSTORM = False
|
Kagami/kisa
|
refs/heads/master
|
lib/twisted/persisted/journal/rowjournal.py
|
19
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Journal using twisted.enterprise.row RDBMS support.
You're going to need the following table in your database::
| CREATE TABLE journalinfo
| (
| commandIndex int
| );
| INSERT INTO journalinfo VALUES (0);
"""
from __future__ import nested_scopes
# twisted imports
from twisted.internet import defer
# sibling imports
import base
# constants for command list
INSERT, DELETE, UPDATE = range(3)
class RowJournal(base.Journal):
"""Journal that stores data 'snapshot' in using twisted.enterprise.row.
Use this as the reflector instead of the original reflector.
It may block on creation, if it has to run recovery.
"""
def __init__(self, log, journaledService, reflector):
self.reflector = reflector
self.commands = []
self.syncing = 0
base.Journal.__init__(self, log, journaledService)
    def updateRow(self, obj):
        """Mark an object for updating when sync()ing."""
        self.commands.append((UPDATE, obj))
    def insertRow(self, obj):
        """Mark an object for inserting when sync()ing."""
        self.commands.append((INSERT, obj))
    def deleteRow(self, obj):
        """Mark an object for deleting when sync()ing."""
        self.commands.append((DELETE, obj))
def loadObjectsFrom(self, tableName, parentRow=None, data=None, whereClause=None, forceChildren=0):
"""Flush all objects to the database and then load objects."""
d = self.sync()
d.addCallback(lambda result: self.reflector.loadObjectsFrom(
tableName, parentRow=parentRow, data=data, whereClause=whereClause,
forceChildren=forceChildren))
return d
def sync(self):
"""Commit changes to database."""
if self.syncing:
raise ValueError, "sync already in progress"
comandMap = {INSERT : self.reflector.insertRowSQL,
UPDATE : self.reflector.updateRowSQL,
DELETE : self.reflector.deleteRowSQL}
sqlCommands = []
for kind, obj in self.commands:
sqlCommands.append(comandMap[kind](obj))
self.commands = []
if sqlCommands:
self.syncing = 1
d = self.reflector.dbpool.runInteraction(self._sync, self.latestIndex, sqlCommands)
d.addCallback(self._syncDone)
return d
else:
return defer.succeed(1)
def _sync(self, txn, index, commands):
"""Do the actual database synchronization."""
for c in commands:
txn.execute(c)
txn.update("UPDATE journalinfo SET commandIndex = %d" % index)
def _syncDone(self, result):
self.syncing = 0
return result
def getLastSnapshot(self):
"""Return command index of last snapshot."""
conn = self.reflector.dbpool.connect()
cursor = conn.cursor()
cursor.execute("SELECT commandIndex FROM journalinfo")
return cursor.fetchall()[0][0]
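# Flow sketch (illustrative; construction of `journal` elided): mutations
# queue in memory and only reach the database on sync():
#
#   journal.insertRow(new_row)
#   journal.updateRow(changed_row)
#   d = journal.sync()   # one transaction: queued SQL + journalinfo update
#   d.addCallback(lambda _: journal.loadObjectsFrom("sometable"))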
|
qiwihui/rss.daozhang.info
|
refs/heads/master
|
diy/handlers/zhihu.py
|
1
|
#_*_ coding:utf-8 _*_
import tornado.web
import tornado.gen
import tornado.httpclient
import json
ZHIHU_URL = 'http://news.at.zhihu.com/api/1.2/news/latest'
headers = {
'User-Agent':"ZhihuNotMoe/2333",
}
class ZhihuHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
client = tornado.httpclient.AsyncHTTPClient()
response = yield client.fetch(ZHIHU_URL, headers=headers)
if response.code == 200:
news = json.loads(response.body.decode('utf-8'))
entrys = news['news']
mc = self.application.mc
cache = mc.get('zhihu')
if cache:
for e in entrys:
if e['url'] in cache:
e['body'] = cache[e['url']][1]
e['share_url'] = cache[e['url']][0]
            no_content = [e for e in entrys if 'body' not in e]
if no_content:
responses = yield [client.fetch(x['url'], headers=headers) for x in no_content]
for i, response in enumerate(responses):
if response.code == 200:
entry = json.loads(response.body.decode('utf-8'))
no_content[i]['body'] = entry['body']
no_content[i]['share_url'] = entry['share_url']
else:
entrys.remove(no_content[i])
continue
mc.set('zhihu', dict([ (e['url'], (e['share_url'], e['body'])) for e in entrys if 'body' in e ]), 604800)
self.set_header("Content-Type", "application/xml; charset=UTF-8")
self.render("zhihu.xml", entrys=entrys)
else:
raise tornado.web.HTTPError(response.code)
|
w1ll1am23/home-assistant
|
refs/heads/dev
|
homeassistant/components/mqtt/vacuum/__init__.py
|
7
|
"""Support for MQTT vacuums."""
import functools
import voluptuous as vol
from homeassistant.components.vacuum import DOMAIN
from homeassistant.helpers.reload import async_setup_reload_service
from .. import DOMAIN as MQTT_DOMAIN, PLATFORMS
from ..mixins import async_setup_entry_helper
from .schema import CONF_SCHEMA, LEGACY, MQTT_VACUUM_SCHEMA, STATE
from .schema_legacy import PLATFORM_SCHEMA_LEGACY, async_setup_entity_legacy
from .schema_state import PLATFORM_SCHEMA_STATE, async_setup_entity_state
def validate_mqtt_vacuum(value):
"""Validate MQTT vacuum schema."""
schemas = {LEGACY: PLATFORM_SCHEMA_LEGACY, STATE: PLATFORM_SCHEMA_STATE}
return schemas[value[CONF_SCHEMA]](value)
PLATFORM_SCHEMA = vol.All(
MQTT_VACUUM_SCHEMA.extend({}, extra=vol.ALLOW_EXTRA), validate_mqtt_vacuum
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up MQTT vacuum through configuration.yaml."""
await async_setup_reload_service(hass, MQTT_DOMAIN, PLATFORMS)
await _async_setup_entity(async_add_entities, config)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT vacuum dynamically through MQTT discovery."""
setup = functools.partial(
_async_setup_entity, async_add_entities, config_entry=config_entry
)
await async_setup_entry_helper(hass, DOMAIN, setup, PLATFORM_SCHEMA)
async def _async_setup_entity(
async_add_entities, config, config_entry=None, discovery_data=None
):
"""Set up the MQTT vacuum."""
setup_entity = {LEGACY: async_setup_entity_legacy, STATE: async_setup_entity_state}
await setup_entity[config[CONF_SCHEMA]](
config, async_add_entities, config_entry, discovery_data
)
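# Illustrative configuration.yaml sketch. Only the `schema` key (validated by
# validate_mqtt_vacuum above) is taken from this module; the name and topic
# values are made up:
#
#     vacuum:
#       - platform: mqtt
#         name: "Study vacuum"
#         schema: state            # or "legacy"
#         command_topic: "vacuum/command"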
|
lbt/boss-launcher-webhook
|
refs/heads/master
|
src/participants/auto_promote.py
|
1
|
# Copyright (C) 2013 Jolla Ltd.
# Contact: Islam Amer <islam.amer@jollamobile.com>
# All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Used to autopromote a just triggered service:
:term:`Workitem` fields IN:
:Parameters:
:ev.namespace (string):
Used to contact the right OBS instance.
:project (string):
Project where service was triggered
    :package (string):
        Package name that was triggered
    :gated_project (string):
        Project to which gated promotion should happen
:term:`Workitem` fields OUT:
:Returns:
:result (Boolean):
        True if everything went OK, False otherwise
"""
from boss.obs import BuildServiceParticipant
import osc
from urlparse import urlparse
import os
from lxml import etree
from boss.bz.config import parse_bz_config
from boss.bz.rest import BugzillaError
os.environ['DJANGO_SETTINGS_MODULE'] = 'webhook_launcher.settings'
import django
django.setup()
from webhook_launcher.app.models import WebHookMapping, Project, get_or_none
class ParticipantHandler(BuildServiceParticipant):
""" Participant class as defined by the SkyNET API """
def handle_wi_control(self, ctrl):
""" job control thread """
pass
@BuildServiceParticipant.get_oscrc
def handle_lifecycle_control(self, ctrl):
""" participant control thread """
if ctrl.message == "start":
self.setup_config(ctrl.config)
def setup_config(self, config):
"""
:param config: ConfigParser instance with the bugzilla configuration
"""
self.bzs = parse_bz_config(config)
# If there are any auth errors in the config, find out now.
for bzconfig in self.bzs.values():
bzconfig['interface'].login()
@BuildServiceParticipant.setup_obs
def handle_wi(self, wid):
""" Workitem handling function """
wid.result = True
f = wid.fields
project = f.project
package = f.package
gated_project = f.gated_project
if not project or not gated_project:
wid.result = True
return
# events for official projects that are gated get diverted to a side project
prjobj = get_or_none(Project, name=gated_project, obs__apiurl=self.obs.apiurl)
if prjobj and prjobj.gated:
webhook = get_or_none(WebHookMapping, pk=f.pk)
actions = [{"action" : "submit", "src_project" : project, "src_package" : package,
"tgt_project" : gated_project, "tgt_package" : package}]
description = "%s @ %s" % (webhook.tag or webhook.rev_or_head, str(webhook))
comment = ""
result = self.obs.createRequest(options_list=actions, description=description, comment=comment, supersede=True, opt_sourceupdate="cleanup")
if not result:
            raise RuntimeError("Something went wrong while creating promotion request from %s to %s" % (project, gated_project))
wid.result = True
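# Sketch of the workitem fields this participant consumes. The field names
# are taken from the docstring and handle_wi above; the values are made up:
#
#     wid.fields.project       = "home:webhook:build"
#     wid.fields.package       = "example-pkg"
#     wid.fields.gated_project = "devel:gated"
#     wid.fields.pk            = 42   # WebHookMapping primary key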
|
toshywoshy/ansible
|
refs/heads/devel
|
lib/ansible/modules/windows/win_say.py
|
21
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This is a Windows documentation stub. The actual code lives in the .ps1
# file of the same name.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_say
version_added: "2.3"
short_description: Text to speech module for Windows to speak messages and optionally play sounds
description:
- Uses .NET libraries to convert text to speech and optionally play .wav sounds. Audio Service needs to be running and some kind of speakers or
headphones need to be attached to the windows target(s) for the speech to be audible.
options:
msg:
description:
- The text to be spoken.
- Use either C(msg) or C(msg_file).
- Optional so that you can use this module just to play sounds.
type: str
msg_file:
description:
- Full path to a windows format text file containing the text to be spoken.
- Use either C(msg) or C(msg_file).
- Optional so that you can use this module just to play sounds.
type: path
voice:
description:
- Which voice to use. See notes for how to discover installed voices.
- If the requested voice is not available the default voice will be used.
Example voice names from Windows 10 are C(Microsoft Zira Desktop) and C(Microsoft Hazel Desktop).
type: str
speech_speed:
description:
- How fast or slow to speak the text.
- Must be an integer value in the range -10 to 10.
- -10 is slowest, 10 is fastest.
type: int
default: 0
start_sound_path:
description:
- Full path to a C(.wav) file containing a sound to play before the text is spoken.
- Useful on conference calls to alert other speakers that ansible has something to say.
type: path
end_sound_path:
description:
- Full path to a C(.wav) file containing a sound to play after the text has been spoken.
- Useful on conference calls to alert other speakers that ansible has finished speaking.
type: path
notes:
- Needs speakers or headphones to do anything useful.
- |
To find which voices are installed, run the following Powershell commands.
Add-Type -AssemblyName System.Speech
$speech = New-Object -TypeName System.Speech.Synthesis.SpeechSynthesizer
$speech.GetInstalledVoices() | ForEach-Object { $_.VoiceInfo }
$speech.Dispose()
- Speech can be surprisingly slow, so it's best to keep message text short.
seealso:
- module: win_msg
- module: win_toast
author:
- Jon Hawkesworth (@jhawkesworth)
'''
EXAMPLES = r'''
- name: Warn of impending deployment
win_say:
msg: Warning, deployment commencing in 5 minutes, please log out.
- name: Using a different voice and a start sound
win_say:
start_sound_path: C:\Windows\Media\ding.wav
msg: Warning, deployment commencing in 5 minutes, please log out.
voice: Microsoft Hazel Desktop
- name: With start and end sound
win_say:
start_sound_path: C:\Windows\Media\Windows Balloon.wav
msg: New software installed
end_sound_path: C:\Windows\Media\chimes.wav
- name: Text from file example
win_say:
start_sound_path: C:\Windows\Media\Windows Balloon.wav
msg_file: AppData\Local\Temp\morning_report.txt
end_sound_path: C:\Windows\Media\chimes.wav
'''
RETURN = r'''
message_text:
description: The text that the module attempted to speak.
returned: success
type: str
sample: "Warning, deployment commencing in 5 minutes."
voice:
description: The voice used to speak the text.
returned: success
type: str
sample: Microsoft Hazel Desktop
voice_info:
description: The voice used to speak the text.
returned: when requested voice could not be loaded
type: str
sample: Could not load voice TestVoice, using system default voice
'''
|
abutcher/openshift-ansible
|
refs/heads/master
|
roles/lib_utils/src/test/unit/test_repoquery.py
|
63
|
'''
Unit tests for repoquery
'''
import os
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error,wrong-import-position
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
from repoquery import Repoquery # noqa: E402
class RepoQueryTest(unittest.TestCase):
'''
Test class for RepoQuery
'''
@mock.patch('repoquery._run')
def test_querying_a_package(self, mock_cmd):
''' Testing querying a package '''
# Arrange
# run_ansible input parameters
params = {
'state': 'list',
'name': 'bash',
'query_type': 'repos',
'verbose': False,
'show_duplicates': False,
'match_version': None,
'ignore_excluders': False,
}
valid_stderr = '''Repo rhel-7-server-extras-rpms forced skip_if_unavailable=True due to: /etc/pki/entitlement/3268107132875399464-key.pem
Repo rhel-7-server-rpms forced skip_if_unavailable=True due to: /etc/pki/entitlement/4128505182875899164-key.pem''' # not real
# Return values of our mocked function call. These get returned once per call.
mock_cmd.side_effect = [
(0, b'4.2.46|21.el7_3|x86_64|rhel-7-server-rpms|4.2.46-21.el7_3', valid_stderr), # first call to the mock
]
# Act
results = Repoquery.run_ansible(params, False)
# Assert
self.assertEqual(results['state'], 'list')
self.assertFalse(results['changed'])
self.assertTrue(results['results']['package_found'])
self.assertEqual(results['results']['returncode'], 0)
self.assertEqual(results['results']['package_name'], 'bash')
self.assertEqual(results['results']['versions'], {'latest_full': '4.2.46-21.el7_3',
'available_versions': ['4.2.46'],
'available_versions_full': ['4.2.46-21.el7_3'],
'latest': '4.2.46'})
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
mock.call(['/usr/bin/repoquery', '--plugins', '--quiet', '--pkgnarrow=repos', '--queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}', 'bash']),
])
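# Note on the mocking pattern above: mock.side_effect accepts a list and
# returns one element per call, so a second _run() invocation in the code
# under test would consume a second tuple. A minimal sketch:
#
#     m = mock.Mock(side_effect=[(0, b'out', ''), (1, b'', 'err')])
#     m()   # -> (0, b'out', '')
#     m()   # -> (1, b'', 'err')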
|
CeltonMcGrath/TACTIC
|
refs/heads/master
|
src/tactic/protocol/rest_test.py
|
6
|
#!/usr/bin/python
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
import tacticenv
from pyasm.common import Container, jsonloads, Environment, Xml
from pyasm.security import Batch
from pyasm.search import Search, SearchType
from pyasm.unittest import UnittestEnvironment
import unittest
import urllib2
class RestTest(unittest.TestCase):
def test_all(my):
test_env = UnittestEnvironment()
test_env.create()
try:
my._setup()
print
print
print
my._test_accept()
my._test_method()
my._test_custom_handler()
print
print
print
finally:
test_env.delete()
    def send_request(my, url, headers, data=None):
        # avoid the shared mutable default-argument pitfall
        if data is None:
            data = {}
        ticket = Environment.get_ticket()
        method = headers.get("Method")
        if method == 'POST':
            data['login_ticket'] = ticket
import urllib
data = urllib.urlencode(data)
request = urllib2.Request(url, data)
else:
url = "%s?login_ticket=%s" % (url, ticket)
request = urllib2.Request(url)
for key,value in headers.items():
request.add_header(key,value)
try:
response = urllib2.urlopen(request)
except Exception, e:
# try again
print "WARNING: ", e
response = urllib2.urlopen(request)
#print response.info().headers
value = response.read()
accept = headers.get("Accept")
if accept == "application/json":
value = jsonloads(value)
return value
def _setup(my):
url = SearchType.create("config/url")
url.set_value("url", "/rest/{code}")
url.set_value('widget', '''
<element>
<display class='tactic.protocol.PythonRestHandler'>
<script_path>rest/test</script_path>
</display>
</element>
''')
url.commit()
url = SearchType.create("config/url")
url.set_value("url", "/rest2")
url.set_value('widget', '''
<element>
<display class='tactic.protocol.TestCustomRestHandler'>
</display>
</element>
''')
url.commit()
url = SearchType.create("config/url")
url.set_value("url", "/rest3/{method}/{data}")
url.set_value('widget', '''
<element>
<display class='tactic.protocol.SObjectRestHandler'>
</display>
</element>
''')
url.commit()
script = SearchType.create("config/custom_script")
script.set_value("folder", "rest")
script.set_value("title", "test")
script.set_value("script", """
from pyasm.common import Xml
accept = kwargs.get("Accept")
method = kwargs.get("Method")
print "kwargs: ", kwargs
code = kwargs.get("code")
if code == "CODE0123":
return "OK"
if method == "POST":
return "Method is POST"
if accept == "application/json":
return [3,2,1]
else:
return Xml('''
<arr>
<int>1</int>
<int>2</int>
<int>3</int>
</arr>
''')
""")
script.commit()
def _test_accept(my):
# try json
url = "http://localhost/tactic/unittest/rest"
headers = {
"Accept": "application/json"
}
ret_val = my.send_request(url, headers)
my.assertEquals( [3,2,1], ret_val)
# try xml
url = "http://localhost/tactic/unittest/rest"
headers = {
"Accept": "application/xml"
}
ret_val = my.send_request(url, headers)
xml = Xml(ret_val)
values = xml.get_values("arr/int")
my.assertEquals( ['1','2','3'], values)
# try json
url = "http://localhost/tactic/unittest/rest/CODE0123"
headers = {
"Accept": "application/json"
}
ret_val = my.send_request(url, headers)
my.assertEquals( "OK", ret_val)
def _test_method(my):
# try json
url = "http://localhost/tactic/unittest/rest"
headers = {
"Accept": "application/json",
"Method": "POST"
}
ret_val = my.send_request(url, headers)
my.assertEquals( "Method is POST", ret_val)
def _test_custom_handler(my):
# try json
url = "http://localhost/tactic/unittest/rest2"
headers = {
"Accept": "application/json",
"Method": "POST"
}
ret_val = my.send_request(url, headers)
my.assertEquals( "Test Custom POST", ret_val)
# try json
url = "http://localhost/tactic/unittest/rest3/expression"
headers = {
"Accept": "application/json",
"Method": "POST"
}
data = {
'expression': '@SOBJECT(unittest/person)'
}
ret_val = my.send_request(url, headers, data)
print ret_val
def _test_update(my):
# try json
url = "http://localhost/tactic/unittest/rest3/person/CODE0123"
headers = {
"Accept": "application/json",
"Method": "PUT"
}
data = {
'description': 'abcdefg'
}
        ret_val = my.send_request(url, headers, data)
if __name__ == "__main__":
Batch()
unittest.main()
|
vishnubob/snowflake
|
refs/heads/master
|
src/movie.py
|
1
|
import os
import re
import bisect
import pickle
import operator

# NOTE: CrystalLattice is provided by the snowflake package itself; the exact
# import path below is an assumption.
from snowflake import CrystalLattice

class RenderMovie(object):
def __init__(self, name):
self.name = name
self.replay = LatticeReplay(name)
def run(self):
if not os.path.exists("frames"):
os.mkdir("frames")
        for (idx, frame) in enumerate(self.replay):
fn = "frames/%s_%09d.png" % (self.name, idx + 1)
frame.save_image(fn)
class LatticeReplay(object):
class ReplayIterator(object):
def __init__(self, replay):
self.replay = replay
self.idx = 0
def next(self):
try:
lattice = self.replay.get_lattice(self.idx)
self.idx += 1
return lattice
except IndexError:
raise StopIteration
def __init__(self, name):
self.name = name
self.current_frame = None
self.current_replay = None
pfn = "%s.pickle" % self.name
self.lattice = CrystalLattice.load_lattice(pfn)
self.scan_replays()
def __iter__(self):
return self.ReplayIterator(self)
def get_lattice(self, step):
(step, dm, cm) = self.get_step(step)
for (idx, cell) in enumerate(zip(dm, cm)):
self.lattice.cells[idx].diffusive_mass = cell[0]
self.lattice.cells[idx].crystal_mass = cell[1]
self.lattice.cells[idx].attached = bool(cell[1])
for cell in self.lattice.cells:
cell.update_boundary()
return self.lattice
def get_step(self, step):
idx = bisect.bisect_left(self.replay_map, step + 1)
if self.current_frame != idx or not self.current_replay:
self.current_frame = idx
fn = self.replays[self.current_frame]
print "loading", fn
            f = open(fn, "rb")
            self.current_replay = pickle.load(f)
            f.close()
offset = self.current_replay[0][0]
return self.current_replay[step - offset]
def scan_replays(self):
replays = []
        fn_re = re.compile(r"cell_log_(\d+)\.pickle")
for fn in os.listdir('.'):
m = fn_re.search(fn)
if m:
step = int(m.group(1))
replays.append((fn, step))
replays.sort(key=operator.itemgetter(1))
self.replays = [rp[0] for rp in replays]
self.replay_map = [rp[1] for rp in replays]
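# Illustrative entry point (assumes a finished snowflake run left
# "<name>.pickle" and cell_log_*.pickle files in the working directory):
#
#     if __name__ == '__main__':
#         RenderMovie("snowflake").run()   # writes frames/snowflake_*.png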
|
arasuarun/shogun
|
refs/heads/develop
|
examples/undocumented/python_modular/transfer_multitask_clustered_logistic_regression.py
|
17
|
#!/usr/bin/env python
from numpy import array,hstack,sin,cos
from numpy.random import seed, rand
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
parameter_list = [[traindat,testdat,label_traindat]]
def transfer_multitask_clustered_logistic_regression (fm_train=traindat,fm_test=testdat,label_train=label_traindat):
from modshogun import BinaryLabels, RealFeatures, Task, TaskGroup, MultitaskClusteredLogisticRegression, MSG_DEBUG
    features = RealFeatures(hstack((fm_train, sin(fm_train), cos(fm_train))))
labels = BinaryLabels(hstack((label_train,label_train,label_train)))
n_vectors = features.get_num_vectors()
task_one = Task(0,n_vectors//3)
task_two = Task(n_vectors//3,2*n_vectors//3)
task_three = Task(2*n_vectors//3,n_vectors)
task_group = TaskGroup()
task_group.append_task(task_one)
task_group.append_task(task_two)
task_group.append_task(task_three)
mtlr = MultitaskClusteredLogisticRegression(1.0,100.0,features,labels,task_group,2)
#mtlr.io.set_loglevel(MSG_DEBUG)
    mtlr.set_tolerance(1e-3) # use 1e-3 tolerance
mtlr.set_max_iter(100)
mtlr.train()
mtlr.set_current_task(0)
#print mtlr.get_w()
out = mtlr.apply_regression().get_labels()
return out
if __name__=='__main__':
print('TransferMultitaskClusteredLogisticRegression')
transfer_multitask_clustered_logistic_regression(*parameter_list[0])
|
0sw4l/villas-de-san-pablo
|
refs/heads/master
|
apps/habilidades_blandas/views.py
|
1
|
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import render
from . import models
from . import forms
from apps.utils import views
class CapacitacionBaseView(object):
model = models.Capacitacion
form_class = forms.CapacitacionForm
success_url = reverse_lazy('habilidades_blandas:lista_capacitacion')
class CapacitacionCreateView(CapacitacionBaseView, views.BaseCreateView):
pass
class CapacitacionUpdateView(CapacitacionBaseView, views.BaseUpdateView):
pass
class CapacitacionListView(views.BaseListViewDinamicHeader):
HEADER = ('id', 'Nombre',)
model = models.Capacitacion
template_name = 'apps/habilidades_blandas/capacitacion_list.html'
class HabilidadesBlandasBaseView(object):
model = models.HabilidadBlanda
form_class = forms.HabilidadBlandaForm
success_url = reverse_lazy('habilidades_blandas:lista_habilidades_blandas')
class HabilidadesBlandasCreateView(HabilidadesBlandasBaseView, views.BaseCreateView):
pass
class HabilidadesBlandasUpdateView(HabilidadesBlandasBaseView, views.BaseUpdateView):
pass
class HabilidadesBlandasListView(views.BaseListViewDinamicHeader):
HEADER = ('Persona', 'Capacitacion', 'Estado Certificado', 'Tipo Alerta', 'Test', 'Observaciones')
model = models.HabilidadBlanda
template_name = 'apps/habilidades_blandas/habilidades_blandas_list.html'
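# Hypothetical URLconf sketch showing how these CBVs could be wired. The URL
# names match the success_url reversals above, but the path patterns are
# assumptions, and this module would need to be included under the
# 'habilidades_blandas' namespace:
#
#     from django.conf.urls import url
#     from . import views
#
#     urlpatterns = [
#         url(r'^capacitacion/lista/$',
#             views.CapacitacionListView.as_view(),
#             name='lista_capacitacion'),
#         url(r'^habilidades/lista/$',
#             views.HabilidadesBlandasListView.as_view(),
#             name='lista_habilidades_blandas'),
#     ]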
|
arju88nair/projectCulminate
|
refs/heads/master
|
venv/lib/python3.5/site-packages/pkg_resources/__init__.py
|
59
|
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
from pkg_resources.extern import six
from pkg_resources.extern.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from pkg_resources.extern import packaging
__import__('pkg_resources.extern.packaging.version')
__import__('pkg_resources.extern.packaging.specifiers')
__import__('pkg_resources.extern.packaging.requirements')
__import__('pkg_resources.extern.packaging.markers')
if (3, 0) < sys.version_info < (3, 3):
msg = (
"Support for Python 3.0-3.2 has been dropped. Future versions "
"will fail here."
)
warnings.warn(msg)
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
class _SetuptoolsVersionMixin(object):
def __hash__(self):
return super(_SetuptoolsVersionMixin, self).__hash__()
def __lt__(self, other):
if isinstance(other, tuple):
return tuple(self) < other
else:
return super(_SetuptoolsVersionMixin, self).__lt__(other)
def __le__(self, other):
if isinstance(other, tuple):
return tuple(self) <= other
else:
return super(_SetuptoolsVersionMixin, self).__le__(other)
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
else:
return super(_SetuptoolsVersionMixin, self).__eq__(other)
def __ge__(self, other):
if isinstance(other, tuple):
return tuple(self) >= other
else:
return super(_SetuptoolsVersionMixin, self).__ge__(other)
def __gt__(self, other):
if isinstance(other, tuple):
return tuple(self) > other
else:
return super(_SetuptoolsVersionMixin, self).__gt__(other)
def __ne__(self, other):
if isinstance(other, tuple):
return tuple(self) != other
else:
return super(_SetuptoolsVersionMixin, self).__ne__(other)
def __getitem__(self, key):
return tuple(self)[key]
def __iter__(self):
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
# pad for numeric comparison
yield part.zfill(8)
else:
yield '*'+part
# ensure that alpha/beta/candidate are before final
yield '*final'
def old_parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
# remove '-' before a prerelease tag
if part < '*final':
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
# Warn for use of this function
warnings.warn(
"You have iterated over the result of "
"pkg_resources.parse_version. This is a legacy behavior which is "
"inconsistent with the new version class introduced in setuptools "
"8.0. In most cases, conversion to a tuple is unnecessary. For "
"comparison of versions, sort the Version instances directly. If "
"you have another use case requiring the tuple, please file a "
"bug with the setuptools project describing that need.",
RuntimeWarning,
stacklevel=1,
)
for part in old_parse_version(str(self)):
yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
packaging.version.LegacyVersion):
pass
def parse_version(v):
try:
return SetuptoolsVersion(v)
except packaging.version.InvalidVersion:
return SetuptoolsLegacyVersion(v)
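# Illustrative behaviour: parse_version returns comparable objects with
# PEP 440 ordering, so plain comparison operators do the right thing:
#
#     >>> parse_version('1.0') < parse_version('1.1')
#     True
#     >>> parse_version('1.0.dev1') < parse_version('1.0')
#     True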
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_'+v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_'+_state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided==required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
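# Illustrative calls (the platform strings are examples):
#
#     >>> compatible_platforms('macosx-10.3-fat', 'macosx-10.4-fat')
#     True    # egg built on an older minor OS version still runs
#     >>> compatible_platforms('macosx-10.5-fat', 'macosx-10.4-fat')
#     False   # egg needs a newer OS than the required platform has
#     >>> compatible_platforms(None, 'linux-x86_64')
#     True    # platform-independent distribution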
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, six.string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
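# Illustrative use of the entry-point helpers; the distribution and entry
# point names here are assumptions:
#
#     >>> ep = get_entry_info('pip', 'console_scripts', 'pip')
#     >>> main = load_entry_point('pip', 'console_scripts', 'pip')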
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key]=1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry, replace=replace)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
req_extras = _ReqExtras()
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
if not req_extras.markers_pass(req):
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
processed[req] = True
# return list of distros to activate
return to_activate
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ())
)
return not req.marker or any(extra_evals) or req.marker.evaluate()
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version==self.python) \
and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
tmpl = textwrap.dedent("""
Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
{old_exc}
The Python egg cache directory is currently set to:
{cache_path}
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""").lstrip()
err = ExtractionError(tmpl.format(**locals()))
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
if os.name!='nt':
return os.path.expanduser('~/.python-eggs')
# XXX this may be locale-specific!
app_data = 'Application Data'
app_homes = [
# best option, should be locale-safe
(('APPDATA',), None),
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
# 95/98/ME
(('WINDIR',), app_data),
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname, subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
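# Illustrative behavior of the helpers above (example values only):
#     safe_name('foo bar!baz')     # -> 'foo-bar-baz'
#     safe_version('1.0 alpha')    # -> '1.0.alpha' (after Version() rejects it)
#     safe_extra('Feature-X')      # -> 'feature_x'
#     to_filename('my-project')    # -> 'my_project'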
def invalid_marker(text):
"""
    Validate text as a PEP 508 environment marker; return a SyntaxError
    instance if invalid, or False otherwise.
"""
try:
evaluate_marker(text)
except SyntaxError as e:
e.filename = None
e.lineno = None
return e
return False
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'pyparsing' module.
"""
try:
marker = packaging.markers.Marker(text)
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e)
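# Illustrative usage (results depend on the running interpreter):
#     evaluate_marker('python_version >= "2.6"')  # -> True on CPython 2.6+
#     invalid_marker('python_version >>> "2.6"')  # -> a SyntaxError instance
#     invalid_marker('os_name == "posix"')        # -> False (marker is valid)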
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info, name))
if sys.version_info <= (3,):
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name))
else:
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name)).decode("utf-8")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/'+script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename,'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path!=old:
if _is_unpacked_egg(path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls):
loader_cls = getattr(importlib_machinery, 'SourceFileLoader',
type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self, path: False
_get = lambda self, path: ''
_listdir = lambda self, path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with ContextualZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
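# Illustrative manifest shape (hypothetical archive path): on POSIX,
# ZipManifests.build('/tmp/demo.egg') would map keys such as
# 'EGG-INFO/PKG-INFO' to their zipfile.ZipInfo entries.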
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive+os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre+zip_path
if fspath.startswith(self.egg_root+os.sep):
return fspath[len(self.egg_root)+1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name=='nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size!=size or stat.st_mtime!=timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def has_metadata(self, name):
return name=='PKG-INFO' and os.path.isfile(self.path)
def get_metadata(self, name):
if name=='PKG-INFO':
with io.open(self.path, encoding='utf-8') as f:
try:
metadata = f.read()
except UnicodeDecodeError as exc:
# add path context to error message
tmpl = " in {self.path}"
exc.reason += tmpl.format(self=self)
raise
return metadata
raise KeyError("No metadata except PKG-INFO is available")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive+os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if _is_unpacked_egg(subitem):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item,'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
for entry in os.listdir(path_item):
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item, entry, metadata, precedence=DEVELOP_DIST
)
elif not only and _is_unpacked_egg(entry):
dists = find_distributions(os.path.join(path_item, entry))
for dist in dists:
yield dist
elif not only and lower.endswith('.egg-link'):
with open(os.path.join(path_item, entry)) as entry_file:
entry_lines = entry_file.readlines()
for line in entry_lines:
if not line.strip():
continue
path = os.path.join(path_item, line.rstrip())
dists = find_distributions(path)
for item in dists:
yield item
break
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, 'FileFinder'):
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module,'__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module):
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return sys_path.index(_normalize_cached(os.sep.join(parts)))
orig_path.sort(key=position_in_sys_path)
module.__path__[:] = [_normalize_cached(p) for p in orig_path]
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent,[]).append(packageName)
_namespace_packages.setdefault(packageName,[])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent,()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item)==normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return (
path.lower().endswith('.egg')
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, six.string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
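# Illustrative EGG_NAME match (hypothetical basename):
#     m = EGG_NAME('FooBar-1.0-py2.7-linux-x86_64')
#     m.group('name', 'ver', 'pyver', 'plat')
#     # -> ('FooBar', '1.0', '2.7', 'linux-x86_64')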
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
DeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
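    # Illustrative parse (hypothetical names):
    #     ep = EntryPoint.parse('main = mypkg.cli:run [extra1]')
    #     ep.name, ep.module_name, ep.attrs, ep.extras
    #     # -> ('main', 'mypkg.cli', ('run',), ('extra1',))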
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
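    # Illustrative entry_points.txt content accepted by parse_map
    # (hypothetical names):
    #     [console_scripts]
    #     mytool = mypkg.cli:run
    # which yields {'console_scripts': {'mytool': <EntryPoint>}}.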
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
is_version_line = lambda line: line.lower().startswith('version:')
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), '')
_, _, value = line.partition(':')
return safe_version(value.strip()) or None
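# Illustrative:
#     _version_from_file(['Name: demo', 'Version: 1.2'])  # -> '1.2'
#     _version_from_file(['Name: demo'])                   # -> None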
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
        # While an empty version is technically a legacy version and is
        # not a valid PEP 440 version, it is unlikely to come from a real
        # project; more likely setuptools produced it while attempting to
        # parse a filename. So we gate this warning on whether the version
        # is non-empty.
if not self.version:
return
tmpl = textwrap.dedent("""
            '{project_name} ({version})' is being parsed as a legacy,
            non-PEP 440 version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
version = _version_from_file(self._get_metadata(self.PKG_INFO))
if version is None:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
return version
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':', 1)
if invalid_marker(marker):
# XXX warn
reqs=[]
elif not evaluate_marker(marker):
reqs=[]
extra = safe_extra(extra) or None
dm.setdefault(extra,[]).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=True)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc=None, replace=False):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath= [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
break
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self,**kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
def _reload_version(self):
"""
        Packages installed by distutils (e.g. numpy or scipy) use an old
        safe_version, so their version numbers can get mangled when
        converted to filenames (e.g., 1.11.0.dev0+2329eae becomes
        1.11.0.dev0_2329eae). Such distributions will not be parsed
        properly downstream by Distribution and safe_version, so take an
        extra step and try to get the version number from the metadata
        file itself instead of the filename.
"""
md_version = _version_from_file(self._get_metadata(self.PKG_INFO))
if md_version:
self._version = md_version
return self
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
reqs.extend(parse_requirements(req))
def reqs_for_extra(extra):
for req in reqs:
if not req.marker or req.marker.evaluate({'extra': extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': EggInfoDistribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args,**kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if ' #' in line:
line = line[:line.find(' #')]
# If there is a line continuation, drop it, and append the next line.
if line.endswith('\\'):
line = line[:-2].strip()
line += next(lines)
yield Requirement(line)
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
try:
super(Requirement, self).__init__(requirement_string)
except packaging.requirements.InvalidRequirement as e:
raise RequirementParseError(str(e))
self.unsafe_name = self.name
project_name = safe_name(self.name)
self.project_name, self.key = project_name, project_name.lower()
self.specs = [
(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
str(self.marker) if self.marker else None,
)
self.__hash = hash(self.hashCmp)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
    def __repr__(self):
        return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
req, = parse_requirements(s)
return req
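    # Illustrative (hypothetical project name):
    #     req = Requirement.parse('foo>=1.0  # inline comments are dropped')
    #     req.key, req.specs  # -> ('foo', [('>=', '1.0')])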
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, 0o755)
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
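# Illustrative:
#     list(split_sections(['top', '[sec]', 'a', 'b']))
#     # -> [(None, ['top']), ('sec', ['a', 'b'])]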
def _mkstemp(*args,**kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args,**kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
for name in dir(manager):
if not name.startswith('_'):
g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
|
naresh21/synergetics-edx-platform
|
refs/heads/oxa/master.fic
|
lms/djangoapps/instructor_task/models.py
|
2
|
"""
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py schemamigration instructor_task --auto description_of_your_change
3. Add the migration file created in edx-platform/lms/djangoapps/instructor_task/migrations/
ASSUMPTIONS: modules have unique IDs, even across different module_types
"""
from uuid import uuid4
import csv
import json
import hashlib
import os.path
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files.base import ContentFile
from django.db import models, transaction
from openedx.core.storage import get_storage
from openedx.core.djangoapps.xmodule_django.models import CourseKeyField
# define custom states used by InstructorTask
QUEUING = 'QUEUING'
PROGRESS = 'PROGRESS'
class InstructorTask(models.Model):
"""
Stores information about background tasks that have been submitted to
perform work by an instructor (or course staff).
Examples include grading and rescoring.
`task_type` identifies the kind of task being performed, e.g. rescoring.
`course_id` uses the course run's unique id to identify the course.
    `task_key` stores the relevant input arguments encoded into a key value, used
    (together with task_type and course_id) to check whether the task is already running.
`task_input` stores input arguments as JSON-serialized dict, for reporting purposes.
    Examples include the url of the problem being rescored, or the id of the
    student if only one student is being rescored.
`task_id` stores the id used by celery for the background task.
`task_state` stores the last known state of the celery task
`task_output` stores the output of the celery task.
Format is a JSON-serialized dict. Content varies by task_type and task_state.
`requester` stores id of user who submitted the task
`created` stores date that entry was first created
`updated` stores date that entry was last modified
"""
class Meta(object):
app_label = "instructor_task"
task_type = models.CharField(max_length=50, db_index=True)
course_id = CourseKeyField(max_length=255, db_index=True)
task_key = models.CharField(max_length=255, db_index=True)
task_input = models.CharField(max_length=255)
task_id = models.CharField(max_length=255, db_index=True) # max_length from celery_taskmeta
task_state = models.CharField(max_length=50, null=True, db_index=True) # max_length from celery_taskmeta
task_output = models.CharField(max_length=1024, null=True)
requester = models.ForeignKey(User, db_index=True)
created = models.DateTimeField(auto_now_add=True, null=True)
updated = models.DateTimeField(auto_now=True)
subtasks = models.TextField(blank=True) # JSON dictionary
def __repr__(self):
return 'InstructorTask<%r>' % ({
'task_type': self.task_type,
'course_id': self.course_id,
'task_input': self.task_input,
'task_id': self.task_id,
'task_state': self.task_state,
'task_output': self.task_output,
},)
def __unicode__(self):
return unicode(repr(self))
@classmethod
def create(cls, course_id, task_type, task_key, task_input, requester):
"""
Create an instance of InstructorTask.
"""
# create the task_id here, and pass it into celery:
task_id = str(uuid4())
json_task_input = json.dumps(task_input)
        # check length of task_input, and raise an exception if it's too long:
if len(json_task_input) > 255:
fmt = 'Task input longer than 255: "{input}" for "{task}" of "{course}"'
msg = fmt.format(input=json_task_input, task=task_type, course=course_id)
raise ValueError(msg)
# create the task, then save it:
instructor_task = cls(
course_id=course_id,
task_type=task_type,
task_id=task_id,
task_key=task_key,
task_input=json_task_input,
task_state=QUEUING,
requester=requester
)
instructor_task.save_now()
return instructor_task
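    # Illustrative call (values are hypothetical):
    #     InstructorTask.create(course_key, 'rescore_problem', task_key,
    #                           {'problem_url': problem_url}, request.user)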
@transaction.atomic
def save_now(self):
"""
Writes InstructorTask immediately, ensuring the transaction is committed.
"""
self.save()
@staticmethod
def create_output_for_success(returned_result):
"""
Converts successful result to output format.
Raises a ValueError exception if the output is too long.
"""
        # In future, there should be a check here that the resulting JSON
        # will fit in the column. In the meantime, just raise an exception.
json_output = json.dumps(returned_result)
if len(json_output) > 1023:
raise ValueError("Length of task output is too long: {0}".format(json_output))
return json_output
@staticmethod
def create_output_for_failure(exception, traceback_string):
"""
Converts failed result information to output format.
Traceback information is truncated or not included if it would result in an output string
that would not fit in the database. If the output is still too long, then the
exception message is also truncated.
Truncation is indicated by adding "..." to the end of the value.
"""
tag = '...'
task_progress = {'exception': type(exception).__name__, 'message': unicode(exception.message)}
if traceback_string is not None:
# truncate any traceback that goes into the InstructorTask model:
task_progress['traceback'] = traceback_string
json_output = json.dumps(task_progress)
# if the resulting output is too long, then first shorten the
# traceback, and then the message, until it fits.
too_long = len(json_output) - 1023
if too_long > 0:
if traceback_string is not None:
if too_long >= len(traceback_string) - len(tag):
# remove the traceback entry entirely (so no key or value)
del task_progress['traceback']
too_long -= (len(traceback_string) + len('traceback'))
else:
# truncate the traceback:
task_progress['traceback'] = traceback_string[:-(too_long + len(tag))] + tag
too_long = 0
if too_long > 0:
# we need to shorten the message:
task_progress['message'] = task_progress['message'][:-(too_long + len(tag))] + tag
json_output = json.dumps(task_progress)
return json_output
@staticmethod
def create_output_for_revoked():
"""Creates standard message to store in output format for revoked tasks."""
return json.dumps({'message': 'Task revoked before running'})
class ReportStore(object):
"""
Simple abstraction layer that can fetch and store CSV files for reports
    download. This should probably be refactored later into a ReportFile
    object that can be appended to incrementally, for the sake of memory
    efficiency, rather than passing in the whole dataset; for now we pass
    the whole dataset because it's simpler.
"""
@classmethod
def from_config(cls, config_name):
"""
Return one of the ReportStore subclasses depending on django
configuration. Look at subclasses for expected configuration.
"""
# Convert old configuration parameters to those expected by
# DjangoStorageReportStore for backward compatibility
config = getattr(settings, config_name, {})
storage_type = config.get('STORAGE_TYPE', '').lower()
if storage_type == 's3':
return DjangoStorageReportStore(
storage_class='openedx.core.storage.S3ReportStorage',
storage_kwargs={
'bucket': config['BUCKET'],
'location': config['ROOT_PATH'],
'custom_domain': config.get("CUSTOM_DOMAIN", None),
'querystring_expire': 300,
'gzip': True,
},
)
if storage_type == 'azure':
return DjangoStorageReportStore(
storage_class='openedx.core.storage.AzureStorageExtended',
storage_kwargs={
'container': config['CONTAINER'],
'url_expiry_secs': config.get('URL_EXPIRY_SECS', 300)
}
)
elif storage_type == 'localfs':
return DjangoStorageReportStore(
storage_class='django.core.files.storage.FileSystemStorage',
storage_kwargs={
'location': config['ROOT_PATH'],
},
)
return DjangoStorageReportStore.from_config(config_name)
def _get_utf8_encoded_rows(self, rows):
"""
Given a list of `rows` containing unicode strings, return a
new list of rows with those strings encoded as utf-8 for CSV
compatibility.
"""
for row in rows:
yield [unicode(item).encode('utf-8') for item in row]
class DjangoStorageReportStore(ReportStore):
"""
ReportStore implementation that delegates to django's storage api.
"""
def __init__(self, storage_class=None, storage_kwargs=None):
if storage_kwargs is None:
storage_kwargs = {}
self.storage = get_storage(storage_class, **storage_kwargs)
@classmethod
def from_config(cls, config_name):
"""
By default, the default file storage specified by the `DEFAULT_FILE_STORAGE`
setting will be used. To configure the storage used, add a dict in
settings with the following fields::
STORAGE_CLASS : The import path of the storage class to use. If
not set, the DEFAULT_FILE_STORAGE setting will be used.
STORAGE_KWARGS : An optional dict of kwargs to pass to the storage
constructor. This can be used to specify a
different S3 bucket or root path, for example.
Reference the setting name when calling `.from_config`.
"""
return cls(
getattr(settings, config_name).get('STORAGE_CLASS'),
getattr(settings, config_name).get('STORAGE_KWARGS'),
)
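    # Illustrative settings entry read by from_config (name and values are
    # hypothetical):
    #     REPORT_STORE_CONFIG = {
    #         'STORAGE_CLASS': 'django.core.files.storage.FileSystemStorage',
    #         'STORAGE_KWARGS': {'location': '/tmp/reports'},
    #     }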
def store(self, course_id, filename, buff):
"""
Store the contents of `buff` in a directory determined by hashing
`course_id`, and name the file `filename`. `buff` can be any file-like
object, ready to be read from the beginning.
"""
path = self.path_to(course_id, filename)
self.storage.save(path, buff)
def store_rows(self, course_id, filename, rows):
"""
Given a course_id, filename, and rows (each row is an iterable of
strings), write the rows to the storage backend in csv format.
"""
output_buffer = ContentFile('')
csvwriter = csv.writer(output_buffer)
csvwriter.writerows(self._get_utf8_encoded_rows(rows))
output_buffer.seek(0)
self.store(course_id, filename, output_buffer)
def links_for(self, course_id):
"""
For a given `course_id`, return a list of `(filename, url)` tuples.
Calls the `url` method of the underlying storage backend. Returned
        urls can be plugged straight into an href.
"""
course_dir = self.path_to(course_id)
try:
_, filenames = self.storage.listdir(course_dir)
except OSError:
# Django's FileSystemStorage fails with an OSError if the course
# dir does not exist; other storage types return an empty list.
return []
files = [(filename, os.path.join(course_dir, filename)) for filename in filenames]
files.sort(key=lambda f: self.storage.modified_time(f[1]), reverse=True)
return [
(filename, self.storage.url(full_path))
for filename, full_path in files
]
def path_to(self, course_id, filename=''):
"""
Return the full path to a given file for a given course.
"""
hashed_course_id = hashlib.sha1(course_id.to_deprecated_string()).hexdigest()
return os.path.join(hashed_course_id, filename)
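    # Illustrative: path_to(course_id, 'grades.csv') returns
    # '<sha1 hex digest of the course id>/grades.csv'.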
|
isghe/cjdns
|
refs/heads/master
|
node_build/dependencies/libuv/build/gyp/test/mac/gyptest-objc-arc.py
|
249
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that ARC objc settings are handled correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
# set |match| to ignore build stderr output.
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'],
match = lambda a, b: True)
CHDIR = 'objc-arc'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', 'arc_enabled', chdir=CHDIR)
test.build('test.gyp', 'arc_disabled', chdir=CHDIR)
test.pass_test()
|
ingokegel/intellij-community
|
refs/heads/master
|
python/testData/types/ConditionImportOuterScope/m2.py
|
39
|
foo = 0
""":type: int"""
|
zdary/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyPep8NamingInspection/tmp1.py
|
83
|
class X:
pass
|
fscherf/django-cms-extensions
|
refs/heads/master
|
cms_extensions/__init__.py
|
12133432
| |
ZENGXH/scikit-learn
|
refs/heads/master
|
sklearn/tree/tests/__init__.py
|
12133432
| |
claudep/pootle
|
refs/heads/master
|
tests/pootle_language/__init__.py
|
12133432
| |
paulocheque/python-django-bootstrap
|
refs/heads/master
|
toolbox/management/__init__.py
|
12133432
| |
tgenin/botstral
|
refs/heads/master
|
save_manager.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Botstral - a bot
# Copyright (C) 2014 Thomas Génin
#
# This file is part of Botstral.
#
# Botstral is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# Botstral is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import pickle
def textload(name):
    """Read the first line of `name` and return a dict mapping each
    whitespace-separated token to 1."""
    try:
        with open(name, 'rb') as openfile:
            line = openfile.readline()
        lst = line.split()
    except Exception as e:
        print('error in opening {} file'.format(name), e)
        lst = []
    return dict([(element, 1) for element in lst])
def load(name, return_type='array'):
    """Unpickle and return the object stored in `name`.pkl; on failure,
    return an empty dict or list depending on `return_type`."""
    try:
        with open(name + '.pkl', 'rb') as openfile:
            obj = pickle.load(openfile)
    except Exception as e:
        print('error in opening {}.pkl file'.format(name), e)
        obj = {} if return_type == 'hash' else []
    return obj
def save(obj, name):
    """Pickle `obj` to `name`.pkl using the highest available protocol."""
    with open(name + '.pkl', 'wb') as output:
        pickle.dump(obj, output, -1)
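# A round-trip sketch (hypothetical names; `save` writes state.pkl, which
# `load` then reads back):
#
#     save({'answer': 42}, 'state')
#     restored = load('state', return_type='hash')
#     assert restored == {'answer': 42}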
|
caotianwei/django
|
refs/heads/master
|
tests/admin_scripts/management/commands/__init__.py
|
12133432
| |
xwolf12/django
|
refs/heads/master
|
tests/i18n/other/locale/de/formats.py
|
12133432
| |
RPi-Distro/python-gpiozero
|
refs/heads/master
|
gpiozero/devices.py
|
1
|
# GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins
# Copyright (c) 2015-2019 Dave Jones <dave@waveform.org.uk>
# Copyright (c) 2015-2019 Ben Nuttall <ben@bennuttall.com>
# Copyright (c) 2016 Andrew Scheller <github@loowis.durge.org>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division,
)
nstr = str      # keep a reference to the native str type (bytes on Python 2)
str = type('')  # make "str" mean the unicode text type on Python 2 and 3
import os
import atexit
import weakref
import warnings
from collections import namedtuple, OrderedDict
from itertools import chain
from types import FunctionType
from threading import Lock
from .pins import Pin
from .threads import _threads_shutdown
from .mixins import (
ValuesMixin,
SharedMixin,
)
from .exc import (
BadPinFactory,
DeviceClosed,
CompositeDeviceBadName,
CompositeDeviceBadOrder,
CompositeDeviceBadDevice,
GPIOPinMissing,
GPIOPinInUse,
GPIODeviceClosed,
PinFactoryFallback,
)
from .compat import frozendict
class GPIOMeta(type):
# NOTE Yes, this is a metaclass. Don't be scared - it's a simple one.
def __new__(mcls, name, bases, cls_dict):
# Construct the class as normal
cls = super(GPIOMeta, mcls).__new__(mcls, name, bases, cls_dict)
# If there's a method in the class which has no docstring, search
# the base classes recursively for a docstring to copy
for attr_name, attr in cls_dict.items():
if isinstance(attr, FunctionType) and not attr.__doc__:
for base_cls in cls.__mro__:
if hasattr(base_cls, attr_name):
base_fn = getattr(base_cls, attr_name)
if base_fn.__doc__:
attr.__doc__ = base_fn.__doc__
break
return cls
def __call__(cls, *args, **kwargs):
# Make sure cls has GPIOBase somewhere in its ancestry (otherwise
# setting __attrs__ below will be rather pointless)
assert issubclass(cls, GPIOBase)
if issubclass(cls, SharedMixin):
# If SharedMixin appears in the class' ancestry, convert the
# constructor arguments to a key and check whether an instance
# already exists. Only construct the instance if the key's new.
key = cls._shared_key(*args, **kwargs)
try:
self = cls._instances[key]
self._refs += 1
except (KeyError, ReferenceError) as e:
self = super(GPIOMeta, cls).__call__(*args, **kwargs)
self._refs = 1
# Replace the close method with one that merely decrements
# the refs counter and calls the original close method when
# it reaches zero
old_close = self.close
def close():
self._refs = max(0, self._refs - 1)
if not self._refs:
try:
old_close()
finally:
try:
del cls._instances[key]
except KeyError:
# If the _refs go negative (too many closes)
# just ignore the resulting KeyError here -
# it's already gone
pass
self.close = close
cls._instances[key] = weakref.proxy(self)
else:
# Construct the instance as normal
self = super(GPIOMeta, cls).__call__(*args, **kwargs)
# At this point __new__ and __init__ have all been run. We now fix the
# set of attributes on the class by dir'ing the instance and creating a
# frozenset of the result called __attrs__ (which is queried by
# GPIOBase.__setattr__). An exception is made for SharedMixin devices
# which can be constructed multiple times, returning the same instance
if not issubclass(cls, SharedMixin) or self._refs == 1:
self.__attrs__ = frozenset(dir(self))
return self
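    # A sketch of the SharedMixin branch above (hypothetical subclass name):
    #
    #     a = SomeSharedDevice(1)   # new key -> real construction, _refs == 1
    #     b = SomeSharedDevice(1)   # same key -> same instance, _refs == 2
    #     a.close()                 # decrements _refs only
    #     b.close()                 # _refs reaches 0 -> original close() runs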
# Cross-version compatible method of using a metaclass
class GPIOBase(GPIOMeta(nstr('GPIOBase'), (), {})):
def __setattr__(self, name, value):
# This overridden __setattr__ simply ensures that additional attributes
# cannot be set on the class after construction (it manages this in
# conjunction with the meta-class above). Traditionally, this is
# managed with __slots__; however, this doesn't work with Python's
# multiple inheritance system which we need to use in order to avoid
# repeating the "source" and "values" property code in myriad places
if hasattr(self, '__attrs__') and name not in self.__attrs__:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, name))
return super(GPIOBase, self).__setattr__(name, value)
def __del__(self):
self.close()
def close(self):
"""
Shut down the device and release all associated resources. This method
can be called on an already closed device without raising an exception.
This method is primarily intended for interactive use at the command
line. It disables the device and releases its pin(s) for use by another
device.
You can attempt to do this simply by deleting an object, but unless
you've cleaned up all references to the object this may not work (even
if you've cleaned up all references, there's still no guarantee the
garbage collector will actually delete the object at that point). By
contrast, the close method provides a means of ensuring that the object
is shut down.
For example, if you have a breadboard with a buzzer connected to pin
16, but then wish to attach an LED instead:
>>> from gpiozero import *
>>> bz = Buzzer(16)
>>> bz.on()
>>> bz.off()
>>> bz.close()
>>> led = LED(16)
>>> led.blink()
:class:`Device` descendents can also be used as context managers using
the :keyword:`with` statement. For example:
>>> from gpiozero import *
>>> with Buzzer(16) as bz:
... bz.on()
...
>>> with LED(16) as led:
... led.on()
...
"""
# This is a placeholder which is simply here to ensure close() can be
# safely called from subclasses without worrying whether super-classes
# have it (which in turn is useful in conjunction with the SourceMixin
# class).
pass
@property
def closed(self):
"""
Returns :data:`True` if the device is closed (see the :meth:`close`
method). Once a device is closed you can no longer use any other
methods or properties to control or query the device.
"""
raise NotImplementedError
def _check_open(self):
if self.closed:
raise DeviceClosed(
'%s is closed or uninitialized' % self.__class__.__name__)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
class Device(ValuesMixin, GPIOBase):
"""
Represents a single device of any type; GPIO-based, SPI-based, I2C-based,
etc. This is the base class of the device hierarchy. It defines the basic
services applicable to all devices (specifically the :attr:`is_active`
property, the :attr:`value` property, and the :meth:`close` method).
.. attribute:: pin_factory
This attribute exists at both a class level (representing the default
pin factory used to construct devices when no *pin_factory* parameter
is specified), and at an instance level (representing the pin factory
that the device was constructed with).
The pin factory provides various facilities to the device including
allocating pins, providing low level interfaces (e.g. SPI), and clock
facilities (querying and calculating elapsed times).
"""
pin_factory = None # instance of a Factory sub-class
def __init__(self, **kwargs):
# Force pin_factory to be keyword-only, even in Python 2
pin_factory = kwargs.pop('pin_factory', None)
if pin_factory is None:
if Device.pin_factory is None:
Device.pin_factory = Device._default_pin_factory()
self.pin_factory = Device.pin_factory
else:
self.pin_factory = pin_factory
if kwargs:
raise TypeError("Device.__init__() got unexpected keyword "
"argument '%s'" % kwargs.popitem()[0])
super(Device, self).__init__()
@staticmethod
def _default_pin_factory():
# We prefer RPi.GPIO here as it supports PWM, and all Pi revisions. If
# no third-party libraries are available, however, we fall back to a
# pure Python implementation which supports platforms like PyPy
#
# NOTE: If the built-in pin factories are expanded, the dict must be
# updated along with the entry-points in setup.py.
default_factories = OrderedDict((
('rpigpio', 'gpiozero.pins.rpigpio:RPiGPIOFactory'),
('rpio', 'gpiozero.pins.rpio:RPIOFactory'),
('pigpio', 'gpiozero.pins.pigpio:PiGPIOFactory'),
('native', 'gpiozero.pins.native:NativeFactory'),
))
name = os.environ.get('GPIOZERO_PIN_FACTORY')
if name is None:
# If no factory is explicitly specified, try various names in
# "preferred" order. For speed, we select from the dictionary above
# rather than importing pkg_resources and using load_entry_point
for name, entry_point in default_factories.items():
try:
mod_name, cls_name = entry_point.split(':', 1)
module = __import__(mod_name, fromlist=(cls_name,))
return getattr(module, cls_name)()
except Exception as e:
warnings.warn(
PinFactoryFallback(
'Falling back from %s: %s' % (name, str(e))))
raise BadPinFactory('Unable to load any default pin factory!')
elif name in default_factories:
# As above, this is a fast-path optimization to avoid loading
# pkg_resources (which it turns out was 80% of gpiozero's import
# time!)
mod_name, cls_name = default_factories[name].split(':', 1)
module = __import__(mod_name, fromlist=(cls_name,))
return getattr(module, cls_name)()
else:
# Slow path: load pkg_resources and try and find the specified
# entry-point. Try with the name verbatim first. If that fails,
# attempt with the lower-cased name (this ensures compatibility
# names work but we're still case insensitive for all factories)
import pkg_resources
group = 'gpiozero_pin_factories'
for factory in pkg_resources.iter_entry_points(group, name):
return factory.load()()
for factory in pkg_resources.iter_entry_points(group, name.lower()):
return factory.load()()
raise BadPinFactory('Unable to find pin factory "%s"' % name)
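    # For example, the search above can be short-circuited from the shell;
    # the pure-Python "native" factory needs no third-party package:
    #
    #     $ GPIOZERO_PIN_FACTORY=native python my_script.py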
def __repr__(self):
return "<gpiozero.%s object>" % (self.__class__.__name__)
def _conflicts_with(self, other):
"""
Called by :meth:`Factory.reserve_pins` to test whether the *other*
:class:`Device` using a common pin conflicts with this device's intent
to use it. The default is :data:`True` indicating that all devices
conflict with common pins. Sub-classes may override this to permit
more nuanced replies.
"""
return True
@property
def value(self):
"""
Returns a value representing the device's state. Frequently, this is a
        boolean value, or a number between 0 and 1, but some devices use larger
ranges (e.g. -1 to +1) and composite devices usually use tuples to
return the states of all their subordinate components.
"""
raise NotImplementedError
@property
def is_active(self):
"""
Returns :data:`True` if the device is currently active and
:data:`False` otherwise. This property is usually derived from
:attr:`value`. Unlike :attr:`value`, this is *always* a boolean.
"""
return bool(self.value)
class CompositeDevice(Device):
"""
Extends :class:`Device`. Represents a device composed of multiple devices
like simple HATs, H-bridge motor controllers, robots composed of multiple
motors, etc.
The constructor accepts subordinate devices as positional or keyword
arguments. Positional arguments form unnamed devices accessed by treating
the composite device as a container, while keyword arguments are added to
the device as named (read-only) attributes.
For example:
.. code-block:: pycon
>>> from gpiozero import *
>>> d = CompositeDevice(LED(2), LED(3), LED(4), btn=Button(17))
>>> d[0]
<gpiozero.LED object on pin GPIO2, active_high=True, is_active=False>
>>> d[1]
<gpiozero.LED object on pin GPIO3, active_high=True, is_active=False>
>>> d[2]
<gpiozero.LED object on pin GPIO4, active_high=True, is_active=False>
>>> d.btn
<gpiozero.Button object on pin GPIO17, pull_up=True, is_active=False>
>>> d.value
CompositeDeviceValue(device_0=False, device_1=False, device_2=False, btn=False)
:param Device \\*args:
The un-named devices that belong to the composite device. The
:attr:`value` attributes of these devices will be represented within
the composite device's tuple :attr:`value` in the order specified here.
:type _order: list or None
:param _order:
If specified, this is the order of named items specified by keyword
arguments (to ensure that the :attr:`value` tuple is constructed with a
specific order). All keyword arguments *must* be included in the
collection. If omitted, an alphabetically sorted order will be selected
for keyword arguments.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
:param Device \\*\\*kwargs:
The named devices that belong to the composite device. These devices
will be accessible as named attributes on the resulting device, and
their :attr:`value` attributes will be accessible as named elements of
the composite device's tuple :attr:`value`.
"""
def __init__(self, *args, **kwargs):
self._all = ()
self._named = frozendict({})
self._namedtuple = None
self._order = kwargs.pop('_order', None)
pin_factory = kwargs.pop('pin_factory', None)
try:
if self._order is None:
self._order = sorted(kwargs.keys())
else:
for missing_name in set(kwargs.keys()) - set(self._order):
raise CompositeDeviceBadOrder(
'%s missing from _order' % missing_name)
self._order = tuple(self._order)
for name in set(self._order) & set(dir(self)):
raise CompositeDeviceBadName(
'%s is a reserved name' % name)
for dev in chain(args, kwargs.values()):
if not isinstance(dev, Device):
raise CompositeDeviceBadDevice(
"%s doesn't inherit from Device" % dev)
self._named = frozendict(kwargs)
self._namedtuple = namedtuple(
'%sValue' % self.__class__.__name__, chain(
('device_%d' % i for i in range(len(args))), self._order))
except:
for dev in chain(args, kwargs.values()):
if isinstance(dev, Device):
dev.close()
raise
self._all = args + tuple(kwargs[v] for v in self._order)
super(CompositeDevice, self).__init__(pin_factory=pin_factory)
def __getattr__(self, name):
# if _named doesn't exist yet, pretend it's an empty dict
if name == '_named':
return frozendict({})
try:
return self._named[name]
except KeyError:
raise AttributeError("no such attribute %s" % name)
def __setattr__(self, name, value):
# make named components read-only properties
if name in self._named:
raise AttributeError("can't set attribute %s" % name)
return super(CompositeDevice, self).__setattr__(name, value)
def __repr__(self):
try:
self._check_open()
named = len(self._named)
unnamed = len(self) - len(self._named)
if named > 0 and unnamed > 0:
return "<gpiozero.%s object containing %d devices: %s and %d unnamed>" % (
self.__class__.__name__,
len(self), ', '.join(self._order),
len(self) - len(self._named)
)
elif named > 0:
return "<gpiozero.%s object containing %d devices: %s>" % (
self.__class__.__name__,
len(self),
', '.join(self._order)
)
else:
return "<gpiozero.%s object containing %d unnamed devices>" % (
self.__class__.__name__,
len(self)
)
except DeviceClosed:
return "<gpiozero.%s object closed>" % (self.__class__.__name__)
def __len__(self):
return len(self._all)
def __getitem__(self, index):
return self._all[index]
def __iter__(self):
return iter(self._all)
@property
def all(self):
# XXX Deprecate this in favour of using the instance as a container
return self._all
def close(self):
if getattr(self, '_all', None):
for device in self._all:
if isinstance(device, Device):
device.close()
self._all = ()
@property
def closed(self):
return all(device.closed for device in self)
@property
def namedtuple(self):
"""
The :func:`~collections.namedtuple` type constructed to represent the
value of the composite device. The :attr:`value` attribute returns
values of this type.
"""
return self._namedtuple
@property
def value(self):
"""
A :func:`~collections.namedtuple` containing a value for each
subordinate device. Devices with names will be represented as named
elements. Unnamed devices will have a unique name generated for them,
and they will appear in the position they appeared in the constructor.
"""
return self.namedtuple(*(device.value for device in self))
@property
def is_active(self):
"""
Composite devices are considered "active" if any of their constituent
devices have a "truthy" value.
"""
return any(self.value)
class GPIODevice(Device):
"""
Extends :class:`Device`. Represents a generic GPIO device and provides
the services common to all single-pin GPIO devices (like ensuring two
    GPIO devices do not share a :attr:`pin`).
:type pin: int or str
:param pin:
The GPIO pin that the device is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised. If the pin is already in use by another device,
:exc:`GPIOPinInUse` will be raised.
"""
def __init__(self, pin=None, **kwargs):
super(GPIODevice, self).__init__(**kwargs)
# self._pin must be set before any possible exceptions can be raised
# because it's accessed in __del__. However, it mustn't be given the
# value of pin until we've verified that it isn't already allocated
self._pin = None
if pin is None:
raise GPIOPinMissing('No pin given')
# Check you can reserve *before* constructing the pin
self.pin_factory.reserve_pins(self, pin)
pin = self.pin_factory.pin(pin)
self._pin = pin
self._active_state = True
self._inactive_state = False
def _state_to_value(self, state):
return int(state == self._active_state)
def _read(self):
try:
return self._state_to_value(self.pin.state)
except (AttributeError, TypeError):
self._check_open()
raise
def close(self):
super(GPIODevice, self).close()
if getattr(self, '_pin', None) is not None:
self.pin_factory.release_pins(self, self._pin.number)
self._pin.close()
self._pin = None
@property
def closed(self):
return self._pin is None
def _check_open(self):
try:
super(GPIODevice, self)._check_open()
except DeviceClosed as e:
# For backwards compatibility; GPIODeviceClosed is deprecated
raise GPIODeviceClosed(str(e))
@property
def pin(self):
"""
The :class:`Pin` that the device is connected to. This will be
:data:`None` if the device has been closed (see the
:meth:`~Device.close` method). When dealing with GPIO pins, query
``pin.number`` to discover the GPIO pin (in BCM numbering) that the
device is connected to.
"""
return self._pin
@property
def value(self):
return self._read()
def __repr__(self):
try:
return "<gpiozero.%s object on pin %r, is_active=%s>" % (
self.__class__.__name__, self.pin, self.is_active)
except DeviceClosed:
return "<gpiozero.%s object closed>" % self.__class__.__name__
def _devices_shutdown():
if Device.pin_factory is not None:
with Device.pin_factory._res_lock:
reserved_devices = {
dev
for ref_list in Device.pin_factory._reservations.values()
for ref in ref_list
for dev in (ref(),)
if dev is not None
}
for dev in reserved_devices:
dev.close()
Device.pin_factory.close()
Device.pin_factory = None
def _shutdown():
_threads_shutdown()
_devices_shutdown()
atexit.register(_shutdown)
|
perrys/WSRC
|
refs/heads/master
|
modules/wsrc/site/competitions/migrations/0003_auto_20180626_0934.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-26 08:34
from __future__ import unicode_literals
from django.db import migrations
def migrate_comp_type(apps, schema_editor):
from wsrc.site.competitions.models import CompetitionType, CompetitionGroup
old_groups = [ CompetitionType(id="squash_boxes", legacy_name="wsrc_boxes", name="Main Squash Boxes", is_knockout_comp=False),
CompetitionType(id="tournaments", legacy_name="wsrc_tournaments", name="Tournaments", is_knockout_comp=True),
CompetitionType(id="tournament_qualifiers", legacy_name="wsrc_qualifiers", name="Tournament Qualifier Boxes", is_knockout_comp=False) ]
for g in old_groups:
g.save()
old_groups = dict([(g.legacy_name, g) for g in old_groups])
# CompetitionGroup = apps.get_model('competitions', 'CompetitionGroup')
for group in CompetitionGroup.objects.all():
group.competition_type = old_groups[group.comp_type]
group.save()
def unmigrate_comp_type(apps, schema_editor):
from wsrc.site.competitions.models import CompetitionType, CompetitionGroup
for group in CompetitionGroup.objects.all():
group.competition_type = None
group.save()
CompetitionType.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('competitions', '0002_auto_20180626_0933'),
]
operations = [
migrations.RunPython(migrate_comp_type, unmigrate_comp_type),
]
|
ytjiang/django
|
refs/heads/master
|
django/contrib/gis/utils/ogrinfo.py
|
564
|
"""
This module includes some utility functions for inspecting the layout
of a GDAL data source -- the functionality is analogous to the output
produced by the `ogrinfo` utility.
"""
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.gdal.geometries import GEO_CLASSES
def ogrinfo(data_source, num_features=10):
"""
Walks the available layers in the supplied `data_source`, displaying
the fields for the first `num_features` features.
"""
# Checking the parameters.
if isinstance(data_source, str):
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise Exception('Data source parameter must be a string or a DataSource object.')
for i, layer in enumerate(data_source):
print("data source : %s" % data_source.name)
print("==== layer %s" % i)
print(" shape type: %s" % GEO_CLASSES[layer.geom_type.num].__name__)
print(" # features: %s" % len(layer))
print(" srs: %s" % layer.srs)
extent_tup = layer.extent.tuple
print(" extent: %s - %s" % (extent_tup[0:2], extent_tup[2:4]))
print("Displaying the first %s features ====" % num_features)
        width = max(map(len, layer.fields))
fmt = " %%%ss: %%s" % width
for j, feature in enumerate(layer[:num_features]):
print("=== Feature %s" % j)
for fld_name in layer.fields:
type_name = feature[fld_name].type_name
output = fmt % (fld_name, type_name)
val = feature.get(fld_name)
if val:
if isinstance(val, str):
val_fmt = ' ("%s")'
else:
val_fmt = ' (%s)'
output += val_fmt % val
else:
output += ' (None)'
print(output)
# For backwards compatibility.
sample = ogrinfo
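# A usage sketch (hypothetical data source path):
#
#     >>> from django.contrib.gis.utils.ogrinfo import ogrinfo
#     >>> ogrinfo('/data/world/cities.shp', num_features=3)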
|
oxyrox/PokemonGo-Map
|
refs/heads/master
|
pogom/utils.py
|
18
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import getpass
import argparse
import re
import uuid
import os
import json
from datetime import datetime, timedelta
import ConfigParser
from . import config
from exceptions import APIKeyException
def parse_unicode(bytestring):
decoded_string = bytestring.decode(sys.getfilesystemencoding())
return decoded_string
def parse_config(args):
Config = ConfigParser.ConfigParser()
Config.read(os.path.join(os.path.dirname(__file__), '../config/config.ini'))
args.auth_service = Config.get('Authentication', 'Service')
args.username = Config.get('Authentication', 'Username')
args.password = Config.get('Authentication', 'Password')
args.location = Config.get('Search_Settings', 'Location')
args.step_limit = int(Config.get('Search_Settings', 'Steps'))
args.scan_delay = int(Config.get('Search_Settings', 'Scan_delay'))
    if Config.get('Misc', 'Google_Maps_API_Key'):
args.gmaps_key = Config.get('Misc', 'Google_Maps_API_Key')
args.host = Config.get('Misc', 'Host')
args.port = Config.get('Misc', 'Port')
return args
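# A config.ini sketch matching the sections and keys read above (the values
# are placeholders):
#
#     [Authentication]
#     Service = ptc
#     Username = someuser
#     Password = secret
#
#     [Search_Settings]
#     Location = Some Town, Somewhere
#     Steps = 5
#     Scan_delay = 1
#
#     [Misc]
#     Google_Maps_API_Key = YOUR-KEY-HERE
#     Host = 127.0.0.1
#     Port = 5000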
def get_args():
# fuck PEP8
parser = argparse.ArgumentParser()
parser.add_argument('-se', '--settings',action='store_true',default=False)
parser.add_argument('-a', '--auth-service', type=str.lower, help='Auth Service', default='ptc')
parser.add_argument('-u', '--username', help='Username', required=False)
parser.add_argument('-p', '--password', help='Password', required=False)
parser.add_argument('-l', '--location', type=parse_unicode, help='Location, can be an address or coordinates', required=False)
parser.add_argument('-st', '--step-limit', help='Steps', required=False, type=int)
parser.add_argument('-sd', '--scan-delay', help='Time delay before beginning new scan', required=False, type=int, default=1)
parser.add_argument('-dc','--display-in-console',help='Display Found Pokemon in Console',action='store_true',default=False)
parser.add_argument('-H', '--host', help='Set web server listening host', default='127.0.0.1')
parser.add_argument('-P', '--port', type=int, help='Set web server listening port', default=5000)
    parser.add_argument('-L', '--locale', help='Locale for Pokemon names: default en, check '
'locale folder for more options', default='en')
parser.add_argument('-c', '--china', help='Coordinates transformer for China', action='store_true')
parser.add_argument('-d', '--debug', help='Debug Mode', action='store_true')
parser.add_argument('-m', '--mock', help='Mock mode. Starts the web server but not the background thread.', action='store_true', default=False)
parser.add_argument('-ns', '--no-server', help='No-Server Mode. Starts the searcher but not the Webserver.', action='store_true', default=False, dest='no_server')
parser.add_argument('-k', '--google-maps-key', help='Google Maps Javascript API Key', default=None, dest='gmaps_key')
parser.add_argument('-C', '--cors', help='Enable CORS on web server', action='store_true', default=False)
parser.add_argument('-D', '--db', help='Database filename', default='pogom.db')
parser.add_argument('-t', '--threads', help='Number of search threads', required=False, type=int, default=5, dest='num_threads')
parser.set_defaults(DEBUG=False)
args = parser.parse_args()
if (args.settings):
args = parse_config(args)
else:
if (args.username is None or args.location is None or args.step_limit is None):
parser.print_usage()
print sys.argv[0] + ': error: arguments -u/--username, -l/--location, -st/--step-limit are required'
            sys.exit(1)
if args.password is None:
args.password = getpass.getpass()
return args
def insert_mock_data():
num_pokemon = 6
num_pokestop = 6
num_gym = 6
from .models import Pokemon, Pokestop, Gym
from .search import generate_location_steps
latitude, longitude = float(config['ORIGINAL_LATITUDE']), float(config['ORIGINAL_LONGITUDE'])
locations = [l for l in generate_location_steps((latitude, longitude), num_pokemon)]
disappear_time = datetime.now() + timedelta(hours=1)
detect_time = datetime.now()
for i in xrange(num_pokemon):
Pokemon.create(encounter_id=uuid.uuid4(),
spawnpoint_id='sp{}'.format(i),
pokemon_id=(i+1) % 150,
latitude=locations[i][0],
longitude=locations[i][1],
disappear_time=disappear_time,
detect_time=detect_time)
for i in range(num_pokestop):
Pokestop.create(pokestop_id=uuid.uuid4(),
enabled=True,
latitude=locations[i+num_pokemon][0],
longitude=locations[i+num_pokemon][1],
last_modified=datetime.now(),
                        # Every other pokestop gets lured
lure_expiration=disappear_time if (i % 2 == 0) else None
)
for i in range(num_gym):
Gym.create(gym_id=uuid.uuid4(),
team_id=i % 3,
guard_pokemon_id=(i+1) % 150,
latitude=locations[i + num_pokemon + num_pokestop][0],
longitude=locations[i + num_pokemon + num_pokestop][1],
last_modified=datetime.now(),
enabled=True,
gym_points=1000
)
def get_pokemon_name(pokemon_id):
if not hasattr(get_pokemon_name, 'names'):
file_path = os.path.join(
config['ROOT_PATH'],
config['LOCALES_DIR'],
'pokemon.{}.json'.format(config['LOCALE']))
with open(file_path, 'r') as f:
get_pokemon_name.names = json.loads(f.read())
return get_pokemon_name.names[str(pokemon_id)]
def load_credentials(filepath):
try:
        with open(os.path.join(filepath, 'config', 'credentials.json')) as f:
            creds = json.load(f)
except IOError:
creds = {}
if not creds.get('gmaps_key'):
        raise APIKeyException(
            "No Google Maps Javascript API key entered in config/credentials.json file!"
" Please take a look at the wiki for instructions on how to generate this key,"
" then add that key to the file!")
return creds
|
raimohanska/Arduino
|
refs/heads/esp8266
|
arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/util.py
|
189
|
# urllib3/util.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from base64 import b64encode
from collections import namedtuple
from socket import error as SocketError
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
try: # Test for SSL features
SSLContext = None
HAS_SNI = False
import ssl
from ssl import wrap_socket, CERT_NONE, SSLError, PROTOCOL_SSLv23
from ssl import SSLContext # Modern SSL?
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
from .packages import six
from .exceptions import LocationParseError
class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
    __slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None):
return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example: ::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx+1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example: ::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
    # Additionally, this implementation does some silly things in order
    # to be optimal on CPython.
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
auth, url = url.split('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url[1:].split(']', 1)
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if not port.isdigit():
raise LocationParseError("Failed to parse: %s" % url)
port = int(port)
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
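# A quick example of the tuple this returns:
#
#     >>> get_host('https://example.com:8080/mail')
#     ('https', 'example.com', 8080)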
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
Example: ::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = 'gzip,deflate'
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(six.b(basic_auth)).decode('utf-8')
return headers
def is_connection_dropped(conn):
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if not sock: # Platform-specific: AppEngine
return False
if not poll: # Platform-specific
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except SocketError:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
            return True
    return False
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbreviation
    (so you can specify `REQUIRED` instead of `CERT_REQUIRED`).
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
def resolve_ssl_version(candidate):
"""
    Like :func:`resolve_cert_reqs`, but resolves SSL protocol versions.
    Defaults to :data:`ssl.PROTOCOL_SSLv23`.
"""
if candidate is None:
return PROTOCOL_SSLv23
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'PROTOCOL_' + candidate)
return res
return candidate
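# For example (both helpers accept the full or abbreviated constant name):
#
#     resolve_cert_reqs('REQUIRED')  == ssl.CERT_REQUIRED
#     resolve_cert_reqs('CERT_NONE') == ssl.CERT_NONE
#     resolve_ssl_version('SSLv23')  == ssl.PROTOCOL_SSLv23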
if SSLContext is not None: # Python 3.2+
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
"""
All arguments except `server_hostname` have the same meaning as for
:func:`ssl.wrap_socket`
:param server_hostname:
Hostname of the expected certificate
"""
context = SSLContext(ssl_version)
context.verify_mode = cert_reqs
if ca_certs:
try:
context.load_verify_locations(ca_certs)
# Py32 raises IOError
# Py33 raises FileNotFoundError
except Exception as e: # Reraise as SSLError
raise SSLError(e)
if certfile:
# FIXME: This block needs a test.
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
return context.wrap_socket(sock, server_hostname=server_hostname)
return context.wrap_socket(sock)
else: # Python 3.1 and earlier
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
return wrap_socket(sock, keyfile=keyfile, certfile=certfile,
ca_certs=ca_certs, cert_reqs=cert_reqs,
ssl_version=ssl_version)
|
wiltonlazary/arangodb
|
refs/heads/devel
|
3rdParty/V8/V8-5.0.71.39/build/gyp/test/mac/gyptest-copy-dylib.py
|
349
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that dylibs can be copied into app bundles.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='copy-dylib')
test.build('test.gyp', 'test_app', chdir='copy-dylib')
test.built_file_must_exist(
'Test App.app/Contents/Resources/libmy_dylib.dylib', chdir='copy-dylib')
test.pass_test()
|
metadave/mongrel2
|
refs/heads/master
|
examples/python/tests/mongrel2_org.py
|
98
|
from mongrel2.config import *
main = Server(
uuid="2f62bd5-9e59-49cd-993c-3b6013c28f05",
access_log="/logs/access.log",
error_log="/logs/error.log",
chroot="./",
pid_file="/run/mongrel2.pid",
default_host="mongrel2.org",
name="main",
port=6767
)
test_directory = Dir(base='tests/',
index_file='index.html',
default_ctype='text/plain')
web_app_proxy = Proxy(addr='127.0.0.1', port=80)
chat_demo_dir = Dir(base='examples/chat/static/',
index_file='index.html',
default_ctype='text/plain')
chat_demo = Handler(send_spec='tcp://127.0.0.1:9999',
send_ident='54c6755b-9628-40a4-9a2d-cc82a816345e',
recv_spec='tcp://127.0.0.1:9998', recv_ident='')
handler_test = Handler(send_spec='tcp://127.0.0.1:9997',
send_ident='34f9ceee-cd52-4b7f-b197-88bf2f0ec378',
recv_spec='tcp://127.0.0.1:9996', recv_ident='')
# the r'' (raw string) syntax leaves backslashes uninterpreted, which suits regexes
mongrel2 = Host(name="mongrel2.org", routes={
r'@chat': chat_demo,
r'/handlertest': handler_test,
r'/chat/': web_app_proxy,
r'/': web_app_proxy,
r'/tests/': test_directory,
r'/testsmulti/(.*.json)': test_directory,
r'/chatdemo/': chat_demo_dir,
r'/static/': chat_demo_dir,
r'/mp3stream': Handler(
send_spec='tcp://127.0.0.1:9995',
send_ident='53f9f1d1-1116-4751-b6ff-4fbe3e43d142',
recv_spec='tcp://127.0.0.1:9994', recv_ident='')
})
main.hosts.add(mongrel2)
settings = {"zeromq.threads": 1}
commit([main], settings=settings)
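# A note on how this takes effect: running this script with the mongrel2
# python config package on the path makes commit() persist the objects above
# into mongrel2's SQLite config database; the exact load/restart step depends
# on your local m2sh setup.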
|
kittolau/selepy
|
refs/heads/master
|
web_helper/name_generator/korean_name_generator.py
|
1
|
from web_helper.name_generator.abstract_name_generator import AbstractNameGenerator
class KoreanNameGenerator(AbstractNameGenerator):
#name pool from http://fantasynamegenerators.com/chinese_names.php
namesMale = ["Bae","Byeong Cheol","Byeong Ho","Byung Chul","Byung Ho","Byung Hoon","Chang Min","Chang Woo","Chi Hun","Chi Won","Chihu","Chihun","Chin Ho","Chong Ho","Chong Hun","Chong Su","Chong Yol","Chul Soo","Chun Ho","Chun Yong","Chung Hee","Chung Ho","Chunho","Chunso","Chunyong","Chuwon","Dae Ho","Dae Hyun","Dae Jung","Dae Won","Do Hyeon","Do Hyun","Do Yeon","Dong Gun","Dong Hyeon","Dong Hyun","Dong Jun","Dong Min","Dong Sun","Dong Wook","Du Ho","Duck Young","Eun Soo","Geon U","Gun","Gyeong Su","Ha Sun","Hae Il","Hae Seong","Hee Chul","Ho Jin","Ho Sung","Hoon","Hyeon Jun","Hyeon U","Hyo","Hyon U","Hyonjun","Hyonu","Hyuk","Hyun","Hyun Jun","Hyun Ki","Hyun Seok","Hyun Shik","Hyun Su","Hyun Woo","Hyung Joon","Il Seong","Il Song","Il Sung","In Ho","In Su","Ja Kyung","Jae","Jae Hui","Jae Hwa","Jae Sun","Jae Wook","Jae Yong","Jeong Ho","Jeong Hun","Jeong Mun","Ji Hae","Ji Hoon","Ji Hu","Ji Hun","Ji Tae","Ji Won","Jin Hee","Jin Ho","Jin Hwan","Jin Sang","Jin Young","Jong Soo","Jong Su","Jong Yeol","Jong Yul","Joo Won","Joon Ho","Ju Won","Jun Ho","Jun Seo","Jun Yeong","Jun Young","Jung","Jung Eun","Jung Hee","Jung Ho","Jung Hoon","Jung Hwa","Jung Hwan","Jung Min","Jung Nam","Jung Su","Jung Woo","Kang Dae","Ki Nam","Konu","Kun Woo","Kwan","Kwang","Kwang Ho","Kwang Hoon","Kwang Hyok","Kwang Hyun","Kwang Jo","Kwang Min","Kwang Seok","Kwang Seon","Kwang Su","Kwang Sun ","Kyong Su","Kyu Bok","Kyu Bong","Kyung","Kyung Gu","Kyung Ho","Kyung Jae","Kyung Min","Kyung Mo","Kyung Sam","Kyung Soo","Min Gyu","Min Ho","Min Hyuk","Min Jae","Min Jun","Min Ki","Min Kyu","Min Kyung","Min Soo","Min Su","Min'gyu","Minjae","Minjun","Minsu","Mun Hee","Myung Dae","Myung Hee","Myung Ki","Nam Gi","Nam Il","Nam Kyu","Nam Seon","Nam Sun","Pyong Chol","Pyong Ho","Sang Chol","Sang Chul","Sang Hoon","Sang Hun","Sang Jun","Sang Ki","Sang Kyu","Sang Min","Se Yeon","Se Yoon","Seo Jun","Seon","Seong","Seong Gi","Seong Ho","Seong Hun","Seong Hyeon","Seong Jin","Seong Min","Seong Su","Seung Eun","Seung Gi","Seung Hee","Seung Heon","Seung Ho","Seung Hoon","Seung Hyeon","Seung Hyun","Seung Min","Seung Won","Seung Woo","Shi Won","Shi Woo","Shin","Shin Il","Shin Young","Si U","Si'u","Sochun","Song Gi","Song Ho","Song Hun","Song Jin","Song Min","Song Su","Songhyon","Songmin","Soo Hyun","Soo Yeon","Suk Chul","Sun Woo","Sung Chul","Sung Ho","Sung Hoon","Sung Hyun","Sung Jin","Sung Ki","Sung Min","Sung Nam","Sung Soo","Sunghyon","Tae Hee","Tae Hyun","Tae Won","Tae Woo","Tae Woong","Tae Yeon","Tae Young","Tohyon","Tong Hyon","Tonghyon","U Jin","Ujin","Woo Jin","Woo Sung","Ye Jun","Yejun","Yeon Seok","Yeon Woo","Yeong Cheol","Yeong Gi","Yeong Ho","Yeong Hwan","Yeong Jin","Yeong Sik","Yeong Su","Yo Han","Yong Chol","Yong Gi","Yong Ho","Yong Hwan","Yong Jin","Yong Joon","Yong Sik","Yong Sook","Yong Su","Yong Sun","Young","Young Chul","Young Gi","Young Ho","Young Hwan","Young Il","Young Ja","Young Jae","Young Jin","Young Min","Young Nam","Young Nam ","Young Shik","Young Soo","Young Su"];
namesFemale = ["Ae Ra","Ae Ri","Ae","Ah Hyun","Ah Joong","Ah Ra","Bit Na","Bo Hee","Bo Kyung","Bo Ra","Bo Young","Bo Yun","Ch'un Ja","Chae Young","Chi Hye","Chi U","Chi Un","Chi Yon","Chi Yong","Chi'u","Chi'un","Chihye","Chihyon","Chimin","Chiyong","Chiyun","Chong Hui","Chong Ja","Chong Suk","Chong Sun","Chun Hwa","Chun Ja","Chung Ah","Da Bin","Da Hae","Da Hee","Do Yeon","Doo Na","Eon Jeong","Eul Dong","Eun Ah","Eun Bi","Eun Chae","Eun Gyung","Eun Ha","Eun Hee","Eun Hye","Eun Ji","Eun Jin","Eun Joo","Eun Ju","Eun Jung","Eun Kyeong","Eun Kyung","Eun Seo","Eun Song","Eun Soo","Eun Sook","Eun Young","Eun","Ga In","Ga Yun","Geum Suk","Geun Young","Go Eun","Gri Na","Ha Eun","Ha Na","Ha Neul","Ha Sun","Ha'un","Hae Sook","Hae Young","Han Bi","Han Byul","Hee Ae","Hee Bon","Hee Jin","Hee Ra","Hee Sun","Hee Yun","Hee Yung","Ho Jung","Hwa Young","Hwi Hyang","Hye Bin","Hye Gyo","Hye Ja","Hye Jin","Hye Jung","Hye Kyung","Hye Ok","Hye Rim","Hye Soo","Hye Sook","Hye Sun","Hye Young","Hyejin","Hyo Jin","Hyo Joo","Hyo Ju","Hyo Jung","Hyo Ri","Hyo Rin","Hyon Jong","Hyon Ju","Hyon Suk","Hyun Ah","Hyun Joo","Hyun Ju","Hyun Jung","Hyun Sook","In Hye","In Sook","In Suk","In Young","Ja Hye","Ja Hyun","Ja Kyung","Ja Ok","Jae Yun","Jeong Ja","Ji Ae","Ji Eun","Ji Hae","Ji Hee","Ji Ho","Ji Hye","Ji Hyo","Ji Hyun","Ji Min","Ji Na","Ji Soo","Ji Su","Ji Sun","Ji Won","Ji Woo","Ji Yong","Ji Yoon","Ji Young","Ji Yun","Ji Yung","Jin Hee","Jin Ju","Jin Shil","Jin Young","Jin Yung","Jin","Jiyeon","Joo Eun","Ju Ah","Ju Hee","Ju Hyun","Jung Ah","Jung Ahn","Jung Eum","Jung Eun","Jung Hee","Jung Hwa","Jung Hyun","Jung Ok","Jung Soo","Jung Sook","Jung Soon","Jung Won","Jung Yoon","Jung","Kang Hee","Kyong Hui","Kyong Ja","Kyong Ok","Kyong Suk","Kyu Ri","Kyung Hee","Kyung Ja","Kyung Jin","Kyung Min","Kyung Ok","Kyung Sook","Li Na","Mi Gyong","Mi Hyun","Mi Kyung","Mi Ri","Mi Ryung","Mi So","Mi Sook","Mi Suk","Mi Yeon","Mi Yong","Mi Young","Mi Yun","Mi Yung","Min Ah","Min Hee","Min Ji","Min Joo","Min Ju","Min Jung","Min Kyung","Min Seo","Min Sun","Min Yung","Min","Minji","Minso","Moon Hee","Myong Suk","Myung Hee","Myung Sook","Na Rae","Na Woon","Na Young","Nam Joo","Nam Seon","Nam Sun","Nara","Ok Bin","Ok Sook","Ran","Ri Na","Rim","Ryu Won","Sa Rang","San Ha","Sang Hee","Se Ah","Se Bin","Se Eun","Se Jung","Se Yeon","Seo Hee","Seo Hyeon","Seo Yeon","Seo Yun","Seong Eon","Seong Ja","Seong","Seul Gi","Seul Ki","Seung Eun","Seung Hee","Seung Hyun","Seung Min","Seung Yun","Shi Eun","Shi Won","Shin Ae","Shin Hye","Shin Young","Si Yeon","So Hee","So Hyon","So Ra","So Ri","So Yeon","So Yi","So Young","So Yun","So Yung","Sohyon","Sol Bi","Sol Mi","Son Ha","Son Yong","Song Hee","Soo Ah","Soo Hyun","Soo Jin","Soo Jung","Soo Kyung","Soo Yeon","Sook Ja","Soon Hee","Soon Ja","Soyon","Soyun","Su Bin","Su Hwa","Su Hyun","Su Ji","Su Jin","Su Jung","Su Mi","Su Min","Su Yun","Su Yung","Subin","Suh Hyung","Sujin","Suk Ja","Sulgi","Sun Ah","Sun Hi","Sun Hwa","Sun Ja","Sun Jung","Sun Mi","Sun Young","Sun Yung","Sun","Sung Eun","Sung Ryung","Sung Sook","Sung Yun","Tae Hee","Tae Ran","Tae Yeon","Tae Young","Tae Yun","Tam Hee","Un Gyong","Un Jong","Un Ju","Un Yong","Unji","Unso","Won Sook","Woon Kye","Ye Eun","Ye Hee","Ye Jin","Ye Seul","Ye Won","Ye'un","Yeh Jin","Yeo Jin","Yeo Jung","Yeo Woon","Yeon Hee","Yeon Hong","Yeon Joo","Yeon Seo","Yeon Woo","Yeong Hee","Yeong","Yi Hyun","Yi Jae","Yi Jin","Yi","Yo Won","Yong Hui","Yong Ja","Yong Mi","Yong Suk","Yoo Jin","Yoo Mi","Yoo Ri","Yoo Sun","Yoon Ah","Yoon Hee","Yoon Ji","Yoon Jin","Yoon Jung","Yoon Mi","Yoon 
Sook","Yoon Young","Young Ae","Young Ah","Young Eun","Young Hee","Young Ja","Young Mi","Young Nam","Young Ok","Young Ran","Young Sook","Yu Jin","Yu Ni","Yu Ri","Yujin","Yun Ji","Yun Ju","Yun Seo","Yun Soo","Yung Hee","Yunso"]
namesFamily = ["Ae","Ah","An","Ch'a","Ch'ae","Ch'ang","Ch'o","Ch'oe","Ch'on","Ch'u","Cha","Chang","Changgok","Che","Chegal","Chi","Chin","Cho","Chom","Chon","Chong","Chu","Chun","Chung","Chup","Chwa","Eoh","Ha","Hae","Hak","Ham","Han","Ho","Hong","Hu","Hung","Hwa","Hwan","Hwang","Hwangbo","Hyon","Hyong","Im","In","Ka","Kae","Kal","Kam","Kan","Kang","Kangjon","Ki","Kil","Kim","Ko","Kok","Kong","Ku","Kuk","Kum","Kun","Kung","Kwak","Kwok","Kwon","Kye","Kyo","Kyon","Kyong","Ma","Mae","Maeng","Man","Mangjol","Mi","Min","Mo","Mok","Muk","Mun","Myo","Myong","Na","Nae","Nam","Namgung","Nan","Nang","No","Noe","Nu","Ogum","Oh","Ok","Om","On","Ong","P'aeng","P'an","P'i","P'il","P'o","P'ung","P'yo","P'yon","P'yong","Pae","Paek","Pak","Pan","Pang","Pi","Pin","Ping","Pok","Pom","Pong","Pu","Pyon","Ra","Ran","Rang","Ri","Rim","Ro","Roe","Ru","Ryang","Ryo","Ryom","Ryon","Ryong","Ryu","Ryuk","Sa","Sagong","Sam","Sang","Si","Sim","Sin","Sip","So","Sobong","Sok","Sol","Somun","Son","Song","Sonu","Sop","Su","Sun","Sung","T'ae","T'ak","T'an","Tae","Tam","Tan","Tang","To","Tokko","Ton","Tong","Tongbang","Tu","Uh","Um","Un","Wang","Wi","Won","Wu","Ya","Yang","Ye","Yi","Yo","Yom","Yon","Yong","Yop","Yu","Yuk","Yun"];
def __init__(self):
super(KoreanNameGenerator, self).__init__()
def getMaleName(self):
return self.nameGen(self.namesMale,self.namesFamily)
def getFemaleName(self):
return self.nameGen(self.namesFemale,self.namesFamily)
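# A minimal usage sketch (assuming AbstractNameGenerator.nameGen combines a
# given name from the first pool with a family name from the second, as the
# two calls above imply):
#
#     gen = KoreanNameGenerator()
#     print(gen.getMaleName())
#     print(gen.getFemaleName())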
|
blacktear23/django
|
refs/heads/master
|
django/contrib/admin/actions.py
|
160
|
"""
Built-in, globally-available admin actions.
"""
from django import template
from django.core.exceptions import PermissionDenied
from django.contrib.admin import helpers
from django.contrib.admin.util import get_deleted_objects, model_ngettext
from django.db import router
from django.shortcuts import render_to_response
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy, ugettext as _
def delete_selected(modeladmin, request, queryset):
"""
Default action which deletes the selected objects.
    This action first displays a confirmation page which shows all the
    deletable objects or, if the user lacks delete permission for one of the
    related child objects (foreign keys), a "permission denied" message.
    Next, it deletes all selected objects and redirects back to the change list.
"""
opts = modeladmin.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not modeladmin.has_delete_permission(request):
raise PermissionDenied
using = router.db_for_write(modeladmin.model)
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
deletable_objects, perms_needed, protected = get_deleted_objects(
queryset, opts, request.user, modeladmin.admin_site, using)
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list view again.
if request.POST.get('post'):
if perms_needed:
raise PermissionDenied
n = queryset.count()
if n:
for obj in queryset:
obj_display = force_unicode(obj)
modeladmin.log_deletion(request, obj, obj_display)
queryset.delete()
modeladmin.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
"count": n, "items": model_ngettext(modeladmin.opts, n)
})
# Return None to display the change list page again.
return None
if len(queryset) == 1:
objects_name = force_unicode(opts.verbose_name)
else:
objects_name = force_unicode(opts.verbose_name_plural)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": objects_name}
else:
title = _("Are you sure?")
context = {
"title": title,
"objects_name": objects_name,
"deletable_objects": [deletable_objects],
'queryset': queryset,
"perms_lacking": perms_needed,
"protected": protected,
"opts": opts,
"root_path": modeladmin.admin_site.root_path,
"app_label": app_label,
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
}
# Display the confirmation page
return render_to_response(modeladmin.delete_selected_confirmation_template or [
"admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.object_name.lower()),
"admin/%s/delete_selected_confirmation.html" % app_label,
"admin/delete_selected_confirmation.html"
], context, context_instance=template.RequestContext(request))
delete_selected.short_description = ugettext_lazy("Delete selected %(verbose_name_plural)s")
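# Note: Django registers this as the stock "delete selected" admin action, so
# it is available on every ModelAdmin by default unless explicitly disabled.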
|
adammaikai/OmicsPipe2.0
|
refs/heads/master
|
build/lib.linux-x86_64-2.7/omics_pipe/Tumorseq_MUTECT.py
|
2
|
#!/usr/bin/env python
#from sumatra.projects import load_project
#from sumatra.parameters import build_parameters
#from sumatra.decorators import capture
from ruffus import *
import sys
import os
import time
import datetime
import drmaa
from omics_pipe.utils import *
from omics_pipe.parameters.default_parameters import default_parameters
from omics_pipe.modules.fastqc import fastqc
from omics_pipe.modules.bwa import bwa_mem
from omics_pipe.modules.picard_mark_duplicates import picard_mark_duplicates
from omics_pipe.modules.GATK_preprocessing_WES import GATK_preprocessing_WES
from omics_pipe.modules.GATK_variant_filtering import GATK_variant_filtering
from omics_pipe.modules.mutect import mutect
p = Bunch(default_parameters)
os.chdir(p.WORKING_DIR)
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d %H:%M")
print p
for step in p.STEPS:
vars()['inputList_' + step] = []
for sample in p.SAMPLE_LIST:
vars()['inputList_' + step].append([sample, "%s/%s_%s_completed.flag" % (p.FLAG_PATH, step, sample)])
print vars()['inputList_' + step]
#FASTQC
@parallel(inputList_fastqc)
@check_if_uptodate(check_file_exists)
def run_fastqc(sample, fastqc_flag):
fastqc(sample, fastqc_flag)
return
#BWA
@parallel(inputList_bwa_mem)
@check_if_uptodate(check_file_exists)
def run_bwa_mem(sample, bwa_mem_flag):
bwa_mem(sample, bwa_mem_flag)
return
#picard_mark_duplicates
@parallel(inputList_picard_mark_duplicates)
@check_if_uptodate(check_file_exists)
@follows(run_bwa_mem)
def run_picard_mark_duplicates(sample, picard_mark_duplicates_flag):
picard_mark_duplicates(sample, picard_mark_duplicates_flag)
return
#GATK_preprocessing
@parallel(inputList_GATK_preprocessing_WES)
@check_if_uptodate(check_file_exists)
@follows(run_picard_mark_duplicates)
def run_GATK_preprocessing_WES(sample, GATK_preprocessing_WES_flag):
GATK_preprocessing_WES(sample, GATK_preprocessing_WES_flag)
return
#Mutect_Tumor/Normal
@parallel([["mutect", "%s/mutect_completed.flag" % (p.FLAG_PATH)]])
@check_if_uptodate(check_file_exists)
@follows(run_GATK_preprocessing_WES)
def run_mutect(sample, mutect_flag):
mutect(sample, mutect_flag)
return
#GATK_filter_variants
@parallel([["mutect", "%s/GATK_variant_filtering_completed.flag" % (p.FLAG_PATH)]])
@check_if_uptodate(check_file_exists)
@follows(run_mutect)
def run_GATK_variant_filtering(sample, GATK_variant_filtering_flag):
GATK_variant_filtering(sample, GATK_variant_filtering_flag)
return
@parallel([["last_function", "%s/last_function_completed.flag" % (p.FLAG_PATH)]])
@check_if_uptodate(check_file_exists)
@follows(run_GATK_variant_filtering, run_fastqc)
def last_function(sample, last_function_flag):
print "PIPELINE HAS FINISHED SUCCESSFULLY!!! YAY!"
pipeline_graph_output = p.FLAG_PATH + "/pipeline_" + sample + "_" + str(date) + ".pdf"
#pipeline_printout_graph (pipeline_graph_output,'pdf', step, no_key_legend=False)
stage = "last_function"
flag_file = "%s/%s_%s_completed.flag" % (p.FLAG_PATH, stage, sample)
open(flag_file, 'w').close()
return
if __name__ == '__main__':
pipeline_run(p.STEP, multiprocess = p.PIPE_MULTIPROCESS, verbose = p.PIPE_VERBOSE, gnu_make_maximal_rebuild_mode = p.PIPE_REBUILD)
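# pipeline_run() drives the ruffus task graph up to the target step(s) in
# p.STEP; the flag files written under p.FLAG_PATH (checked by the
# @check_if_uptodate decorators above) let completed stages be skipped on
# re-runs.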
|
jjmleiro/hue
|
refs/heads/master
|
desktop/core/ext-py/Mako-0.8.1/mako/__init__.py
|
38
|
# mako/__init__.py
# Copyright (C) 2006-2012 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
__version__ = '0.8.1'
|
sgraham/nope
|
refs/heads/master
|
third_party/webdriver/pylib/test/selenium/webdriver/support/event_firing_webdriver_tests.py
|
19
|
#!/usr/bin/python
# Copyright 2011 Software Freedom Conservancy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from cStringIO import StringIO
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.events import EventFiringWebDriver, \
AbstractEventListener
class EventFiringWebDriverTests(unittest.TestCase):
def setup_method(self, method):
self.log = StringIO()
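    # Note: `self.driver` and `self._pageURL` are not defined in this module;
    # the surrounding test harness is expected to inject a live WebDriver and
    # the test-page URL helper before these tests run.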
def test_should_fire_navigation_events(self):
log = self.log
class TestListener(AbstractEventListener):
def before_navigate_to(self, url, driver):
log.write("before_navigate_to %s" % url.split("/")[-1])
def after_navigate_to(self, url, driver):
log.write("after_navigate_to %s" % url.split("/")[-1])
def before_navigate_back(self, driver):
log.write("before_navigate_back")
def after_navigate_back(self, driver):
log.write("after_navigate_back")
def before_navigate_forward(self, driver):
log.write("before_navigate_forward")
def after_navigate_forward(self, driver):
log.write("after_navigate_forward")
ef_driver = EventFiringWebDriver(self.driver, TestListener())
ef_driver.get(self._pageURL("formPage"))
ef_driver.find_element(by=By.ID, value="imageButton").submit()
self.assertEqual(ef_driver.title, "We Arrive Here")
ef_driver.back()
self.assertEqual(ef_driver.title, "We Leave From Here")
ef_driver.forward()
self.assertEqual(ef_driver.title, "We Arrive Here")
self.assertEqual("before_navigate_to formPage.html" \
+ "after_navigate_to formPage.html" \
+ "before_navigate_back" \
+ "after_navigate_back" \
+ "before_navigate_forward" \
+ "after_navigate_forward", log.getvalue())
def test_should_fire_click_event(self):
log = self.log
class TestListener(AbstractEventListener):
def before_click(self, element, driver):
log.write("before_click")
def after_click(self, element, driver):
log.write("after_click")
ef_driver = EventFiringWebDriver(self.driver, TestListener())
ef_driver.get(self._pageURL("clicks"))
ef_driver.find_element(By.ID, "overflowLink").click()
self.assertEqual(ef_driver.title, "XHTML Test Page")
self.assertEqual("before_click" + "after_click", log.getvalue())
def test_should_fire_change_value_event(self):
log = self.log
class TestListener(AbstractEventListener):
def before_change_value_of(self, element, driver):
log.write("before_change_value_of")
def after_change_value_of(self, element, driver):
log.write("after_change_value_of")
ef_driver = EventFiringWebDriver(self.driver, TestListener())
ef_driver.get(self._pageURL("readOnlyPage"))
element = ef_driver.find_element_by_id("writableTextInput")
element.clear()
self.assertEqual("", element.get_attribute("value"))
ef_driver.get(self._pageURL("javascriptPage"))
keyReporter = ef_driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("abc def")
self.assertEqual(keyReporter.get_attribute("value"), "abc def")
self.assertEqual("before_change_value_of" \
+ "after_change_value_of" \
+ "before_change_value_of" \
+ "after_change_value_of", log.getvalue())
def test_should_fire_find_event(self):
log = self.log
class TestListener(AbstractEventListener):
def before_find(self, by, value, driver):
log.write("before_find by %s %s" % (by, value))
def after_find(self, by, value, driver):
log.write("after_find by %s %s" % (by, value))
ef_driver = EventFiringWebDriver(self.driver, TestListener())
ef_driver.get(self._pageURL("simpleTest"))
e = ef_driver.find_element_by_id("oneline")
self.assertEqual("A single line of text", e.text)
e = ef_driver.find_element_by_xpath("/html/body/p[1]")
self.assertEqual("A single line of text", e.text)
ef_driver.get(self._pageURL("frameset"))
elements = ef_driver.find_elements_by_css_selector("frame#sixth")
self.assertEqual(1, len(elements))
self.assertEqual("frame", elements[0].tag_name.lower())
self.assertEqual("sixth", elements[0].get_attribute("id"))
self.assertEqual("before_find by id oneline" \
+ "after_find by id oneline" \
+ "before_find by xpath /html/body/p[1]" \
+ "after_find by xpath /html/body/p[1]" \
+ "before_find by css selector frame#sixth" \
+ "after_find by css selector frame#sixth" , log.getvalue())
def test_should_call_listener_when_an_exception_is_thrown(self):
log = self.log
class TestListener(AbstractEventListener):
def on_exception(self, exception, driver):
if isinstance(exception, NoSuchElementException):
log.write("NoSuchElementException is thrown")
ef_driver = EventFiringWebDriver(self.driver, TestListener())
ef_driver.get(self._pageURL("simpleTest"))
try:
ef_driver.find_element(By.ID, "foo")
self.fail("Expected exception to be propagated")
except NoSuchElementException:
pass
self.assertEqual("NoSuchElementException is thrown", log.getvalue())
def test_should_unwrap_element_args_when_calling_scripts(self):
ef_driver = EventFiringWebDriver(self.driver, AbstractEventListener())
ef_driver.get(self._pageURL("javascriptPage"))
button = ef_driver.find_element_by_id("plainButton")
value = ef_driver.execute_script(
"arguments[0]['flibble'] = arguments[0].getAttribute('id'); return arguments[0]['flibble']",
button)
self.assertEqual("plainButton", value)
def test_should_be_able_to_access_wrapped_instance_from_event_calls(self):
driver = self.driver
class TestListener(AbstractEventListener):
def before_navigate_to(self, url, d):
assert driver is d
ef_driver = EventFiringWebDriver(driver, TestListener())
wrapped_driver = ef_driver.wrapped_driver
assert driver is wrapped_driver
ef_driver.get(self._pageURL("simpleTest"))
def teardown_method(self, method):
self.log.close()
def _pageURL(self, name):
return "http://localhost:%d/%s.html" % (self.webserver.port, name)
|
mpasternak/michaldtz-fixes-518-522
|
refs/heads/master
|
pyglet/input/darwin_hid.py
|
4
|
# Uses the HID API introduced in Mac OS X version 10.5
# http://developer.apple.com/library/mac/#technotes/tn2007/tn2187.html
from ctypes import *
from ctypes import util
# Load frameworks
iokit = cdll.LoadLibrary(util.find_library('IOKit'))
cf = cdll.LoadLibrary(util.find_library('CoreFoundation'))
# Core Foundation constants
kCFStringEncodingASCII = 0x0600
kCFStringEncodingUnicode = 0x0100
kCFStringEncodingUTF8 = 0x08000100
kCFNumberIntType = 9
kCFRunLoopDefaultMode = c_void_p.in_dll(iokit, 'kCFRunLoopDefaultMode')
# IOKit constants from
# /System/Library/Frameworks/IOKit.framework/Headers/hid/IOHIDKeys.h
kIOHIDOptionsTypeNone = 0x00
kIOHIDOptionsTypeSeizeDevice = 0x01
kIOHIDElementTypeInput_Misc = 1
kIOHIDElementTypeInput_Button = 2
kIOHIDElementTypeInput_Axis = 3
kIOHIDElementTypeInput_ScanCodes = 4
kIOHIDElementTypeOutput = 129
kIOHIDElementTypeFeature = 257
kIOHIDElementTypeCollection = 513
# /System/Library/Frameworks/IOKit.framework/Headers/hid/IOHIDUsageTables.h
kHIDPage_GenericDesktop = 0x01
kHIDPage_Consumer = 0x0C
kHIDUsage_GD_SystemSleep = 0x82
kHIDUsage_GD_SystemWakeUp = 0x83
kHIDUsage_GD_SystemAppMenu = 0x86
kHIDUsage_GD_SystemMenu = 0x89
kHIDUsage_GD_SystemMenuRight = 0x8A
kHIDUsage_GD_SystemMenuLeft = 0x8B
kHIDUsage_GD_SystemMenuUp = 0x8C
kHIDUsage_GD_SystemMenuDown = 0x8D
kHIDUsage_Csmr_Menu = 0x40
kHIDUsage_Csmr_FastForward = 0xB3
kHIDUsage_Csmr_Rewind = 0xB4
kHIDUsage_Csmr_Eject = 0xB8
kHIDUsage_Csmr_Mute = 0xE2
kHIDUsage_Csmr_VolumeIncrement = 0xE9
kHIDUsage_Csmr_VolumeDecrement = 0xEA
# Setup return types for functions that return pointers.
# (Otherwise ctypes returns 32-bit int which breaks on 64-bit systems.)
# Note that you must also wrap the return value with c_void_p before
# you use it as an argument to another function, otherwise ctypes will
# automatically convert it back to a 32-bit int again.
cf.CFStringCreateWithCString.restype = c_void_p
cf.CFArrayGetValueAtIndex.restype = c_void_p
cf.CFRunLoopGetCurrent.restype = c_void_p
cf.CFRunLoopGetMain.restype = c_void_p
iokit.IOHIDDeviceGetProperty.restype = c_void_p
iokit.IOHIDDeviceCopyMatchingElements.restype = c_void_p
iokit.IOHIDValueGetElement.restype = c_void_p
iokit.IOHIDElementGetName.restype = c_void_p
iokit.IOHIDManagerCreate.restype = c_void_p
iokit.IOHIDManagerCopyDevices.restype = c_void_p
# Callback function types
HIDManagerCallback = CFUNCTYPE(None, c_void_p, c_int, c_void_p, c_void_p)
HIDDeviceCallback = CFUNCTYPE(None, c_void_p, c_int, c_void_p)
HIDDeviceValueCallback = CFUNCTYPE(None, c_void_p, c_int, c_void_p, c_void_p)
######################################################################
# Core Foundation type to Python type conversion functions
def CFSTR(text):
return c_void_p(cf.CFStringCreateWithCString(None, text.encode('utf8'), kCFStringEncodingUTF8))
def cfstring_to_string(cfstring):
length = cf.CFStringGetLength(cfstring)
size = cf.CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8)
buffer = c_buffer(size + 1)
result = cf.CFStringGetCString(cfstring, buffer, len(buffer), kCFStringEncodingUTF8)
if result:
return buffer.value
def cfnumber_to_int(cfnumber):
result = c_int()
if cf.CFNumberGetValue(cfnumber, kCFNumberIntType, byref(result)):
return result.value
def cfset_to_set(cfset):
count = cf.CFSetGetCount(cfset)
buffer = (c_void_p * count)()
cf.CFSetGetValues(cfset, byref(buffer))
return set([ cftype_to_value(c_void_p(buffer[i])) for i in range(count) ])
def cfarray_to_list(cfarray):
count = cf.CFArrayGetCount(cfarray)
return [ cftype_to_value(c_void_p(cf.CFArrayGetValueAtIndex(cfarray, i)))
for i in range(count) ]
def cftype_to_value(cftype):
if not cftype:
return None
typeID = cf.CFGetTypeID(cftype)
if typeID == cf.CFStringGetTypeID():
return cfstring_to_string(cftype)
elif typeID == cf.CFNumberGetTypeID():
return cfnumber_to_int(cftype)
elif typeID == iokit.IOHIDDeviceGetTypeID():
return HIDDevice.get_device(cftype)
elif typeID == iokit.IOHIDElementGetTypeID():
return HIDDeviceElement.get_element(cftype)
else:
return cftype
######################################################################
# HID Class Wrappers
# Lookup tables cache python objects for the devices and elements so that
# we can avoid creating multiple wrapper objects for the same device.
_device_lookup = {} # IOHIDDeviceRef to python HIDDevice object
_element_lookup = {} # IOHIDElementRef to python HIDDeviceElement object
class HIDValue:
def __init__(self, valueRef):
# Check that this is a valid IOHIDValue.
assert(valueRef)
assert(cf.CFGetTypeID(valueRef) == iokit.IOHIDValueGetTypeID())
self.valueRef = valueRef
self.timestamp = iokit.IOHIDValueGetTimeStamp(valueRef)
self.intvalue = iokit.IOHIDValueGetIntegerValue(valueRef)
elementRef = c_void_p(iokit.IOHIDValueGetElement(valueRef))
self.element = HIDDeviceElement.get_element(elementRef)
class HIDDevice:
@classmethod
def get_device(cls, deviceRef):
# deviceRef is a c_void_p pointing to an IOHIDDeviceRef
if deviceRef.value in _device_lookup:
return _device_lookup[deviceRef.value]
else:
device = HIDDevice(deviceRef)
return device
def __init__(self, deviceRef):
# Check that we've got a valid IOHIDDevice.
assert(deviceRef)
assert(cf.CFGetTypeID(deviceRef) == iokit.IOHIDDeviceGetTypeID())
_device_lookup[deviceRef.value] = self
self.deviceRef = deviceRef
# Set attributes from device properties.
self.transport = self.get_property("Transport")
self.vendorID = self.get_property("VendorID")
self.vendorIDSource = self.get_property("VendorIDSource")
self.productID = self.get_property("ProductID")
self.versionNumber = self.get_property("VersionNumber")
self.manufacturer = self.get_property("Manufacturer")
self.product = self.get_property("Product")
self.serialNumber = self.get_property("SerialNumber") # always returns None; apple bug?
self.locationID = self.get_property("LocationID")
self.primaryUsage = self.get_property("PrimaryUsage")
self.primaryUsagePage = self.get_property("PrimaryUsagePage")
# Populate self.elements with our device elements.
self.get_elements()
# Set up callback functions.
self.value_observers = set()
self.removal_observers = set()
self.register_removal_callback()
self.register_input_value_callback()
def dump_info(self):
for x in ('manufacturer', 'product', 'transport', 'vendorID', 'vendorIDSource', 'productID',
'versionNumber', 'serialNumber', 'locationID', 'primaryUsage', 'primaryUsagePage'):
value = getattr(self, x)
print x + ":", value
def unique_identifier(self):
# Since we can't rely on the serial number, create our own identifier.
# Can use this to find devices when they are plugged back in.
return (self.manufacturer, self.product, self.vendorID, self.productID,
self.versionNumber, self.primaryUsage, self.primaryUsagePage)
def get_property(self, name):
cfvalue = c_void_p(iokit.IOHIDDeviceGetProperty(self.deviceRef, CFSTR(name)))
return cftype_to_value(cfvalue)
def open(self, exclusive_mode=False):
if exclusive_mode: options = kIOHIDOptionsTypeSeizeDevice
else: options = kIOHIDOptionsTypeNone
return bool(iokit.IOHIDDeviceOpen(self.deviceRef, options))
def close(self):
return bool(iokit.IOHIDDeviceClose(self.deviceRef, kIOHIDOptionsTypeNone))
def schedule_with_run_loop(self):
iokit.IOHIDDeviceScheduleWithRunLoop(
self.deviceRef,
c_void_p(cf.CFRunLoopGetCurrent()),
kCFRunLoopDefaultMode)
def unschedule_from_run_loop(self):
iokit.IOHIDDeviceUnscheduleFromRunLoop(
self.deviceRef,
c_void_p(cf.CFRunLoopGetCurrent()),
kCFRunLoopDefaultMode)
def get_elements(self):
cfarray = c_void_p(iokit.IOHIDDeviceCopyMatchingElements(self.deviceRef, None, 0))
self.elements = cfarray_to_list(cfarray)
cf.CFRelease(cfarray)
# Page and usage IDs are from the HID usage tables located at
# http://www.usb.org/developers/devclass_docs/Hut1_12.pdf
def conforms_to(self, page, usage):
return bool(iokit.IOHIDDeviceConformsTo(self.deviceRef, page, usage))
def is_pointer(self): return self.conforms_to(0x01, 0x01)
def is_mouse(self): return self.conforms_to(0x01, 0x02)
def is_joystick(self): return self.conforms_to(0x01, 0x04)
def is_gamepad(self): return self.conforms_to(0x01, 0x05)
def is_keyboard(self): return self.conforms_to(0x01, 0x06)
def is_keypad(self): return self.conforms_to(0x01, 0x07)
def is_multi_axis(self): return self.conforms_to(0x01, 0x08)
def py_removal_callback(self, context, result, sender):
self = _device_lookup[sender] # avoid wonky python context issues
# Dispatch removal message to all observers.
for x in self.removal_observers:
if hasattr(x, 'device_removed'):
x.device_removed(self)
# Remove self from device lookup table.
del _device_lookup[sender]
# Remove device elements from lookup table.
for key, value in _element_lookup.items():
if value in self.elements:
del _element_lookup[key]
def register_removal_callback(self):
self.removal_callback = HIDDeviceCallback(self.py_removal_callback)
iokit.IOHIDDeviceRegisterRemovalCallback(
self.deviceRef,
self.removal_callback,
None)
def add_removal_observer(self, observer):
self.removal_observers.add(observer)
def py_value_callback(self, context, result, sender, value):
v = HIDValue(c_void_p(value))
# Dispatch value changed message to all observers.
for x in self.value_observers:
if hasattr(x, 'device_value_changed'):
x.device_value_changed(self, v)
def register_input_value_callback(self):
self.value_callback = HIDDeviceValueCallback(self.py_value_callback)
iokit.IOHIDDeviceRegisterInputValueCallback(
self.deviceRef,
self.value_callback,
None)
def add_value_observer(self, observer):
self.value_observers.add(observer)
def get_value(self, element):
        # If the device is not open, this returns None.
valueRef = c_void_p()
iokit.IOHIDDeviceGetValue(self.deviceRef, element.elementRef, byref(valueRef))
if valueRef:
return HIDValue(valueRef)
else:
return None
class HIDDeviceElement:
@classmethod
def get_element(cls, elementRef):
# elementRef is a c_void_p pointing to an IOHIDDeviceElementRef
if elementRef.value in _element_lookup:
return _element_lookup[elementRef.value]
else:
element = HIDDeviceElement(elementRef)
return element
def __init__(self, elementRef):
# Check that we've been passed a valid IOHIDElement.
assert(elementRef)
assert(cf.CFGetTypeID(elementRef) == iokit.IOHIDElementGetTypeID())
_element_lookup[elementRef.value] = self
self.elementRef = elementRef
# Set element properties as attributes.
self.cookie = iokit.IOHIDElementGetCookie(elementRef)
self.type = iokit.IOHIDElementGetType(elementRef)
if self.type == kIOHIDElementTypeCollection:
self.collectionType = iokit.IOHIDElementGetCollectionType(elementRef)
else:
self.collectionType = None
self.usagePage = iokit.IOHIDElementGetUsagePage(elementRef)
self.usage = iokit.IOHIDElementGetUsage(elementRef)
self.isVirtual = bool(iokit.IOHIDElementIsVirtual(elementRef))
self.isRelative = bool(iokit.IOHIDElementIsRelative(elementRef))
self.isWrapping = bool(iokit.IOHIDElementIsWrapping(elementRef))
self.isArray = bool(iokit.IOHIDElementIsArray(elementRef))
self.isNonLinear = bool(iokit.IOHIDElementIsNonLinear(elementRef))
self.hasPreferredState = bool(iokit.IOHIDElementHasPreferredState(elementRef))
self.hasNullState = bool(iokit.IOHIDElementHasNullState(elementRef))
self.name = cftype_to_value(iokit.IOHIDElementGetName(elementRef))
self.reportID = iokit.IOHIDElementGetReportID(elementRef)
self.reportSize = iokit.IOHIDElementGetReportSize(elementRef)
self.reportCount = iokit.IOHIDElementGetReportCount(elementRef)
self.unit = iokit.IOHIDElementGetUnit(elementRef)
self.unitExponent = iokit.IOHIDElementGetUnitExponent(elementRef)
self.logicalMin = iokit.IOHIDElementGetLogicalMin(elementRef)
self.logicalMax = iokit.IOHIDElementGetLogicalMax(elementRef)
self.physicalMin = iokit.IOHIDElementGetPhysicalMin(elementRef)
self.physicalMax = iokit.IOHIDElementGetPhysicalMax(elementRef)
class HIDManager:
def __init__(self):
# Create the HID Manager.
self.managerRef = c_void_p(iokit.IOHIDManagerCreate(None, kIOHIDOptionsTypeNone))
assert(self.managerRef)
assert cf.CFGetTypeID(self.managerRef) == iokit.IOHIDManagerGetTypeID()
self.schedule_with_run_loop()
self.matching_observers = set()
self.register_matching_callback()
self.get_devices()
def get_devices(self):
# Tell manager that we are willing to match *any* device.
# (Alternatively, we could restrict by device usage, or usage page.)
iokit.IOHIDManagerSetDeviceMatching(self.managerRef, None)
# Copy the device set and convert it to python.
cfset = c_void_p(iokit.IOHIDManagerCopyDevices(self.managerRef))
self.devices = cfset_to_set(cfset)
cf.CFRelease(cfset)
def open(self):
iokit.IOHIDManagerOpen(self.managerRef, kIOHIDOptionsTypeNone)
def close(self):
iokit.IOHIDManagerClose(self.managerRef, kIOHIDOptionsTypeNone)
def schedule_with_run_loop(self):
iokit.IOHIDManagerScheduleWithRunLoop(
self.managerRef,
c_void_p(cf.CFRunLoopGetCurrent()),
kCFRunLoopDefaultMode)
def unschedule_from_run_loop(self):
iokit.IOHIDManagerUnscheduleFromRunLoop(
self.managerRef,
c_void_p(cf.CFRunLoopGetCurrent()),
kCFRunLoopDefaultMode)
def py_matching_callback(self, context, result, sender, device):
d = HIDDevice.get_device(c_void_p(device))
if d not in self.devices:
self.devices.add(d)
for x in self.matching_observers:
if hasattr(x, 'device_discovered'):
x.device_discovered(d)
def register_matching_callback(self):
self.matching_callback = HIDManagerCallback(self.py_matching_callback)
iokit.IOHIDManagerRegisterDeviceMatchingCallback(
self.managerRef,
self.matching_callback,
None)
######################################################################
# Pyglet interface to HID
from base import Device, Control, AbsoluteAxis, RelativeAxis, Button
from base import Joystick, AppleRemote
from base import DeviceExclusiveException
_axis_names = {
(0x01, 0x30): 'x',
(0x01, 0x31): 'y',
(0x01, 0x32): 'z',
(0x01, 0x33): 'rx',
(0x01, 0x34): 'ry',
(0x01, 0x35): 'rz',
(0x01, 0x38): 'wheel',
(0x01, 0x39): 'hat',
}
_button_names = {
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemSleep): 'sleep',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemWakeUp): 'wakeup',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemAppMenu): 'menu',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemMenu): 'select',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemMenuRight): 'right',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemMenuLeft): 'left',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemMenuUp): 'up',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemMenuDown): 'down',
(kHIDPage_Consumer, kHIDUsage_Csmr_FastForward): 'right_hold',
(kHIDPage_Consumer, kHIDUsage_Csmr_Rewind): 'left_hold',
(kHIDPage_Consumer, kHIDUsage_Csmr_Menu): 'menu_hold',
(0xff01, 0x23): 'select_hold',
(kHIDPage_Consumer, kHIDUsage_Csmr_Eject): 'eject',
(kHIDPage_Consumer, kHIDUsage_Csmr_Mute): 'mute',
(kHIDPage_Consumer, kHIDUsage_Csmr_VolumeIncrement): 'volume_up',
(kHIDPage_Consumer, kHIDUsage_Csmr_VolumeDecrement): 'volume_down'
}
class PygletDevice(Device):
def __init__(self, display, device, manager):
super(PygletDevice, self).__init__(display, device.product)
self.device = device
self.device_identifier = self.device.unique_identifier()
self.device.add_value_observer(self)
self.device.add_removal_observer(self)
manager.matching_observers.add(self)
self._create_controls()
self._is_open = False
self._is_exclusive = False
def open(self, window=None, exclusive=False):
super(PygletDevice, self).open(window, exclusive)
self.device.open(exclusive)
self.device.schedule_with_run_loop()
self._is_open = True
self._is_exclusive = exclusive
self._set_initial_control_values()
def close(self):
super(PygletDevice, self).close()
self.device.close()
self._is_open = False
def get_controls(self):
return self._controls.values()
def device_removed(self, hid_device):
# Called by device when it is unplugged.
        # Set device to None, but keep self._controls around
# in case device is plugged back in.
self.device = None
def device_discovered(self, hid_device):
# Called by HID manager when new device is found.
# If our device was disconnected, reconnect when it is plugged back in.
if not self.device and self.device_identifier == hid_device.unique_identifier():
self.device = hid_device
self.device.add_value_observer(self)
self.device.add_removal_observer(self)
# Don't need to recreate controls since this is same device.
# They are indexed by cookie, which is constant.
if self._is_open:
self.device.open(self._is_exclusive)
self.device.schedule_with_run_loop()
def device_value_changed(self, hid_device, hid_value):
# Called by device when input value changes.
control = self._controls[hid_value.element.cookie]
control._set_value(hid_value.intvalue)
def _create_controls(self):
self._controls = {}
for element in self.device.elements:
raw_name = element.name or '0x%x:%x' % (element.usagePage, element.usage)
if element.type in (kIOHIDElementTypeInput_Misc, kIOHIDElementTypeInput_Axis):
name = _axis_names.get((element.usagePage, element.usage))
if element.isRelative:
control = RelativeAxis(name, raw_name)
else:
control = AbsoluteAxis(name, element.logicalMin, element.logicalMax, raw_name)
elif element.type == kIOHIDElementTypeInput_Button:
name = _button_names.get((element.usagePage, element.usage))
control = Button(name, raw_name)
else:
continue
control._cookie = element.cookie
self._controls[control._cookie] = control
def _set_initial_control_values(self):
# Must be called AFTER the device has been opened.
for element in self.device.elements:
if element.cookie in self._controls:
control = self._controls[element.cookie]
hid_value = self.device.get_value(element)
if hid_value:
control._set_value(hid_value.intvalue)
######################################################################
_manager = HIDManager()
def get_devices(display=None):
return [ PygletDevice(display, device, _manager) for device in _manager.devices ]
def get_joysticks(display=None):
return [ Joystick(PygletDevice(display, device, _manager)) for device in _manager.devices
if device.is_joystick() or device.is_gamepad() or device.is_multi_axis() ]
def get_apple_remote(display=None):
for device in _manager.devices:
if device.product == 'Apple IR':
return AppleRemote(PygletDevice(display, device, _manager))
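# Hedged demo (not part of the original module): dump info for every HID
# device the manager discovered at import time. Requires Mac OS X 10.5+ and
# produces output that depends entirely on the attached hardware.
if __name__ == '__main__':
    for _device in _manager.devices:
        _device.dump_info()
        print '-' * 40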
|
myselfHimanshu/Udacity-DataML
|
refs/heads/master
|
Intro-To-Data-Science/Lesson2/PS2_9.py
|
2
|
import pandas
def get_hourly_exits(df):
'''
    The MTA Subway Turnstile data reports the cumulative
    number of entries and exits per row. Assume that you have a dataframe
called df that contains only the rows for a particular turnstile machine
(i.e., unique SCP, C/A, and UNIT). This function should change
these cumulative exit numbers to a count of exits since the last reading
(i.e., exits since the last row in the dataframe).
More specifically, you want to do two things:
1) Create a new column called EXITSn_hourly
2) Assign to the column the difference between EXITSn of the current row
and the previous row. If there is any NaN, fill/replace it with 0.
You may find the pandas functions shift() and fillna() to be helpful in this exercise.
Example dataframe below:
Unnamed: 0 C/A UNIT SCP DATEn TIMEn DESCn ENTRIESn EXITSn ENTRIESn_hourly EXITSn_hourly
0 0 A002 R051 02-00-00 05-01-11 00:00:00 REGULAR 3144312 1088151 0 0
1 1 A002 R051 02-00-00 05-01-11 04:00:00 REGULAR 3144335 1088159 23 8
2 2 A002 R051 02-00-00 05-01-11 08:00:00 REGULAR 3144353 1088177 18 18
3 3 A002 R051 02-00-00 05-01-11 12:00:00 REGULAR 3144424 1088231 71 54
4 4 A002 R051 02-00-00 05-01-11 16:00:00 REGULAR 3144594 1088275 170 44
5 5 A002 R051 02-00-00 05-01-11 20:00:00 REGULAR 3144808 1088317 214 42
6 6 A002 R051 02-00-00 05-02-11 00:00:00 REGULAR 3144895 1088328 87 11
7 7 A002 R051 02-00-00 05-02-11 04:00:00 REGULAR 3144905 1088331 10 3
8 8 A002 R051 02-00-00 05-02-11 08:00:00 REGULAR 3144941 1088420 36 89
9 9 A002 R051 02-00-00 05-02-11 12:00:00 REGULAR 3145094 1088753 153 333
'''
#your code here
df['EXITSn_hourly'] = df['EXITSn'] - df['EXITSn'].shift()
df = df.fillna(0)
return df
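# Hedged usage sketch (not part of the original exercise solution): a tiny
# single-turnstile frame run through the function above; the values are
# illustrative.
if __name__ == '__main__':
    example = pandas.DataFrame({'EXITSn': [1088151, 1088159, 1088177, 1088231]})
    print(get_hourly_exits(example))
    # expected EXITSn_hourly column: 0.0, 8.0, 18.0, 54.0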
|
roberzguerra/scout
|
refs/heads/master
|
core/views.py
|
1
|
# -*- coding:utf-8 -*-
from django.shortcuts import render, render_to_response
from django.template import RequestContext
def error404(request):
"""
    404 error view
"""
return render_to_response('core/error404.html', RequestContext(request, {}))
def error500(request):
"""
    500 error view
"""
return render_to_response('core/error500.html', RequestContext(request, {}))
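# Illustrative wiring (assumed, not part of this file): Django only calls
# these views when the root URLconf assigns them to the error handlers, e.g.:
#
#     handler404 = 'core.views.error404'
#     handler500 = 'core.views.error500'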
|
mbauskar/erpnext
|
refs/heads/develop
|
erpnext/healthcare/doctype/normal_test_template/normal_test_template.py
|
30
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, ESS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
class NormalTestTemplate(Document):
pass
|
blackzw/openwrt_sdk_dev1
|
refs/heads/master
|
staging_dir/target-mips_r2_uClibc-0.9.33.2/usr/lib/python2.7/lib2to3/fixes/fix_methodattrs.py
|
326
|
"""Fix bound method attributes (method.im_? -> method.__?__).
"""
# Author: Christian Heimes
# Local imports
from .. import fixer_base
from ..fixer_util import Name
MAP = {
"im_func" : "__func__",
"im_self" : "__self__",
"im_class" : "__self__.__class__"
}
class FixMethodattrs(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
"""
def transform(self, node, results):
attr = results["attr"][0]
new = unicode(MAP[attr.value])
attr.replace(Name(new, prefix=attr.prefix))
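# Hedged before/after sketch (illustrative input, not shipped with lib2to3):
# given Python 2 source such as
#
#     m = obj.method
#     m.im_func, m.im_self, m.im_class
#
# this fixer rewrites the attribute accesses to
#
#     m.__func__, m.__self__, m.__self__.__class__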
|
MSeifert04/astropy
|
refs/heads/master
|
setup.py
|
3
|
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# NOTE: The configuration for the package, including the name, version, and
# other information are set in the setup.cfg file. Here we mainly set up
# setup_requires and install_requires since these are determined
# programmatically.
import os
import builtins
import ah_bootstrap # noqa
from astropy_helpers.distutils_helpers import is_distutils_display_option
from astropy_helpers.setup_helpers import setup
from setuptools.config import read_configuration
# We set up the following variable because we then use this in astropy/__init__.py
# to make sure that we aren't importing astropy during the setup process (we used
# to do this)
builtins._ASTROPY_CORE_SETUP_ = True
if is_distutils_display_option():
# Avoid installing setup_requires dependencies if the user just
# queries for information
setup_requires = []
else:
setup_requires = read_configuration('setup.cfg')['options']['setup_requires']
# Make sure we have the packages needed for building astropy, but do not
# require them when installing from an sdist as the c files are included.
if not os.path.exists(os.path.join(os.path.dirname(__file__), 'PKG-INFO')):
setup_requires.extend(['cython>=0.29.13', 'jinja2>=2.7'])
setup(setup_requires=setup_requires)
|
MasterScrat/PostMonitor
|
refs/heads/master
|
server.py
|
1
|
#!/usr/bin/python
import json
import itertools
import datetime
from flask import Flask, request, redirect, url_for
from tinydb import TinyDB, where
db = TinyDB('data/records.json')
app = Flask(__name__, static_url_path='')
@app.route('/')
def root():
return app.send_static_file('www/index.html')
@app.route('/all')
def all():
return json.dumps(db.all(), sort_keys=True, indent=4, separators=(',', ': '))
@app.route('/projects')
def projects():
data = db.all()
data = sorted(data, key=lambda r: r['timestamp'])
# TODO should output series as 3 concatenated levels
grouped = {}
for record in data:
record_values = dict((key,value) for key, value in record.iteritems() if key in ('timestamp', 'score', 'num_comments'))
grouped.setdefault(to_event_url(record), []).append(record_values)
return print_json(grouped)
# Dygraphs specific format
@app.route('/dygraphs')
@app.route('/dygraphs/')
@app.route('/dygraphs/<project>')
def dygraphs(project = None):
# TODO shouldn't the DB do most of that stuff for us?! use ElasticSearch!
db.clear_cache()
if project is None:
data = db.all()
else:
data = db.search(where('project') == project)
data = sorted(data, key=lambda r: r['timestamp'])
# first need ordered full lists of timestamps and URLs
# plus build cache of all values
all_timestamps = []
all_event_urls = []
event_url_score = {}
event_url_num_comments = {}
for record in data:
timestamp = format_timestamp(record['timestamp'])
if timestamp not in all_timestamps:
all_timestamps.append(timestamp)
event_url = to_event_url(record)
if event_url not in all_event_urls:
all_event_urls.append(event_url)
# cache
        if event_url not in event_url_score:
            event_url_score[event_url] = {}
            event_url_num_comments[event_url] = {}
event_url_score[event_url][timestamp] = record['score']
event_url_num_comments[event_url][timestamp] = record['num_comments']
# then for each timestamp, for each URL: check if there's a value
# if yes put it, if not put null
formatted_scores = []
formatted_num_comments = []
for timestamp in all_timestamps:
timestamp_scores = [timestamp]
timestamp_num_comments = [timestamp]
for event_url in all_event_urls:
if timestamp in event_url_score[event_url]:
timestamp_scores.append(event_url_score[event_url][timestamp])
timestamp_num_comments.append(event_url_num_comments[event_url][timestamp])
else:
timestamp_scores.append(None)
timestamp_num_comments.append(None)
formatted_scores.append(timestamp_scores)
formatted_num_comments.append(timestamp_num_comments)
return print_json({'score': formatted_scores, 'num_comments': formatted_num_comments, 'labels': ['x']+all_event_urls})
def format_timestamp(timestamp):
return int(timestamp*1000)
def to_event_url(record):
return record['project'] +' - '+ record['event'] +' - '+ record['url']
def print_json(obj):
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
@app.route('/clear')
def clear():
db.purge_tables()
return 'ok'
if __name__ == "__main__":
app.debug = True
    app.run(host='0.0.0.0', port=8080)
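# Hedged note on the record shape the handlers above assume (inferred from
# the code, not documented in the repo): each TinyDB record carries at least
#
#     {"project": "...", "event": "...", "url": "http://...",
#      "timestamp": 1420070400.0, "score": 42, "num_comments": 7}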
|
uranusjr/django
|
refs/heads/master
|
tests/backends/oracle/__init__.py
|
12133432
| |
acshi/osf.io
|
refs/heads/develop
|
api/collections/__init__.py
|
12133432
| |
unho/pootle
|
refs/heads/master
|
pootle/apps/pootle_data/receivers.py
|
6
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import logging
from django.db.models.signals import post_save
from django.dispatch import receiver
from pootle.core.delegate import crud, data_tool, data_updater
from pootle.core.signals import create, delete, update, update_data
from pootle_store.models import Store
from pootle_translationproject.models import TranslationProject
from .models import StoreChecksData, StoreData, TPChecksData, TPData
logger = logging.getLogger(__name__)
@receiver(create, sender=StoreData)
def handle_store_data_obj_create(**kwargs):
crud.get(StoreData).create(**kwargs)
@receiver(create, sender=TPData)
def handle_tp_data_obj_create(**kwargs):
crud.get(TPData).create(**kwargs)
@receiver(update, sender=StoreData)
def handle_store_data_obj_update(**kwargs):
crud.get(StoreData).update(**kwargs)
@receiver(update, sender=TPData)
def handle_tp_data_obj_update(**kwargs):
crud.get(TPData).update(**kwargs)
@receiver(delete, sender=StoreChecksData)
def handle_store_checks_data_delete(**kwargs):
crud.get(StoreChecksData).delete(**kwargs)
@receiver(create, sender=StoreChecksData)
def handle_store_checks_data_create(**kwargs):
crud.get(StoreChecksData).create(**kwargs)
@receiver(update, sender=StoreChecksData)
def handle_store_checks_data_update(**kwargs):
crud.get(StoreChecksData).update(**kwargs)
@receiver(update, sender=TPChecksData)
def handle_tp_checks_data_update(**kwargs):
crud.get(TPChecksData).update(**kwargs)
@receiver(delete, sender=TPChecksData)
def handle_tp_checks_data_delete(**kwargs):
crud.get(TPChecksData).delete(**kwargs)
@receiver(create, sender=TPChecksData)
def handle_tp_checks_data_create(**kwargs):
crud.get(TPChecksData).create(**kwargs)
@receiver(post_save, sender=StoreData)
def handle_storedata_save(**kwargs):
tp = kwargs["instance"].store.translation_project
update_data.send(tp.__class__, instance=tp)
@receiver(update_data, sender=Store)
def handle_store_data_update(**kwargs):
store = kwargs.get("instance")
data_tool.get(Store)(store).update()
@receiver(update_data, sender=TranslationProject)
def handle_tp_data_update(**kwargs):
tp = kwargs["instance"]
if "object_list" in kwargs:
data_updater.get(TranslationProject)(
tp,
object_list=kwargs["object_list"]).update()
else:
data_tool.get(TranslationProject)(tp).update()
@receiver(post_save, sender=Store)
def handle_store_data_create(sender, instance, created, **kwargs):
if created:
data_updater.get(instance.data_tool.__class__)(instance.data_tool).update()
@receiver(post_save, sender=TranslationProject)
def handle_tp_data_create(sender, instance, created, **kwargs):
if created:
update_data.send(instance.__class__, instance=instance)
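# Hedged usage sketch (an assumption, not part of this module): callers
# trigger the receivers above by sending the custom signals, e.g. to force a
# recount of a Store's data after a bulk change:
#
#     from pootle.core.signals import update_data
#     update_data.send(Store, instance=store)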
|
naresh21/synergetics-edx-platform
|
refs/heads/oxa/master.fic
|
openedx/core/djangoapps/credentials/tests/test_utils.py
|
14
|
"""Tests covering Credentials utilities."""
import unittest
from django.conf import settings
from django.core.cache import cache
from nose.plugins.attrib import attr
import httpretty
from edx_oauth2_provider.tests.factories import ClientFactory
from provider.constants import CONFIDENTIAL
from openedx.core.djangoapps.credentials.models import CredentialsApiConfig
from openedx.core.djangoapps.credentials.tests.mixins import CredentialsApiConfigMixin, CredentialsDataMixin
from openedx.core.djangoapps.credentials.utils import (
get_user_credentials,
get_user_program_credentials,
get_programs_credentials
)
from openedx.core.djangoapps.credentials.tests import factories
from openedx.core.djangoapps.programs.tests.mixins import ProgramsApiConfigMixin, ProgramsDataMixin
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import UserFactory
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@attr(shard=2)
class TestCredentialsRetrieval(ProgramsApiConfigMixin, CredentialsApiConfigMixin, CredentialsDataMixin,
ProgramsDataMixin, CacheIsolationTestCase):
""" Tests covering the retrieval of user credentials from the Credentials
service.
"""
ENABLED_CACHES = ['default']
def setUp(self):
super(TestCredentialsRetrieval, self).setUp()
ClientFactory(name=CredentialsApiConfig.OAUTH2_CLIENT_NAME, client_type=CONFIDENTIAL)
ClientFactory(name=ProgramsApiConfig.OAUTH2_CLIENT_NAME, client_type=CONFIDENTIAL)
self.user = UserFactory()
cache.clear()
    def _expected_program_credentials_data(self):
        """
        DRY helper returning the expected program credentials response data.
        """
return [
factories.UserCredential(
id=1,
username='test',
credential=factories.ProgramCredential(),
certificate_url=self.CREDENTIALS_API_RESPONSE['results'][0]['certificate_url'],
),
factories.UserCredential(
id=2,
username='test',
credential=factories.ProgramCredential(),
certificate_url=self.CREDENTIALS_API_RESPONSE['results'][1]['certificate_url'],
)
]
def expected_credentials_display_data(self):
""" Returns expected credentials data to be represented. """
        program_credentials_data = self._expected_program_credentials_data()
        return [
            {
                'display_name': self.PROGRAMS_API_RESPONSE['results'][0]['name'],
                'subtitle': self.PROGRAMS_API_RESPONSE['results'][0]['subtitle'],
                'credential_url': program_credentials_data[0]['certificate_url']
            },
            {
                'display_name': self.PROGRAMS_API_RESPONSE['results'][1]['name'],
                'subtitle': self.PROGRAMS_API_RESPONSE['results'][1]['subtitle'],
                'credential_url': program_credentials_data[1]['certificate_url']
            }
        ]
@httpretty.activate
def test_get_user_credentials(self):
"""Verify user credentials data can be retrieve."""
self.create_credentials_config()
self.mock_credentials_api(self.user)
actual = get_user_credentials(self.user)
self.assertEqual(actual, self.CREDENTIALS_API_RESPONSE['results'])
@httpretty.activate
def test_get_user_credentials_caching(self):
"""Verify that when enabled, the cache is used for non-staff users."""
self.create_credentials_config(cache_ttl=1)
self.mock_credentials_api(self.user)
# Warm up the cache.
get_user_credentials(self.user)
# Hit the cache.
get_user_credentials(self.user)
# Verify only one request was made.
self.assertEqual(len(httpretty.httpretty.latest_requests), 1)
staff_user = UserFactory(is_staff=True)
# Hit the Credentials API twice.
for _ in range(2):
get_user_credentials(staff_user)
# Verify that three requests have been made (one for student, two for staff).
self.assertEqual(len(httpretty.httpretty.latest_requests), 3)
def test_get_user_program_credentials_issuance_disable(self):
"""Verify that user program credentials cannot be retrieved if issuance is disabled."""
self.create_credentials_config(enable_learner_issuance=False)
actual = get_user_program_credentials(self.user)
self.assertEqual(actual, [])
@httpretty.activate
def test_get_user_program_credentials_no_credential(self):
"""Verify behavior if no credential exist."""
self.create_credentials_config()
self.mock_credentials_api(self.user, data={'results': []})
actual = get_user_program_credentials(self.user)
self.assertEqual(actual, [])
@httpretty.activate
def test_get_user_programs_credentials(self):
"""Verify program credentials data can be retrieved and parsed correctly."""
# create credentials and program configuration
self.create_credentials_config()
self.create_programs_config()
# Mocking the API responses from programs and credentials
self.mock_programs_api()
self.mock_credentials_api(self.user, reset_url=False)
actual = get_user_program_credentials(self.user)
        program_credentials_data = self._expected_program_credentials_data()
expected = self.PROGRAMS_API_RESPONSE['results'][:2]
expected[0]['credential_url'] = program_credentials_data[0]['certificate_url']
expected[1]['credential_url'] = program_credentials_data[1]['certificate_url']
# checking response from API is as expected
self.assertEqual(len(actual), 2)
self.assertEqual(actual, expected)
@httpretty.activate
def test_get_user_program_credentials_revoked(self):
"""Verify behavior if credential revoked."""
self.create_credentials_config()
credential_data = {"results": [
{
"id": 1,
"username": "test",
"credential": {
"credential_id": 1,
"program_id": 1
},
"status": "revoked",
"uuid": "dummy-uuid-1"
}
]}
self.mock_credentials_api(self.user, data=credential_data)
actual = get_user_program_credentials(self.user)
self.assertEqual(actual, [])
@httpretty.activate
def test_get_programs_credentials(self):
""" Verify that the program credentials data required for display can
be retrieved.
"""
# create credentials and program configuration
self.create_credentials_config()
self.create_programs_config()
# Mocking the API responses from programs and credentials
self.mock_programs_api()
self.mock_credentials_api(self.user, reset_url=False)
actual = get_programs_credentials(self.user)
expected = self.expected_credentials_display_data()
# Checking result is as expected
self.assertEqual(len(actual), 2)
self.assertEqual(actual, expected)
|
ace-han/quanquan
|
refs/heads/develop
|
quanquan/urls/__init__.py
|
1
|
from django.conf import settings
from django.conf.urls import url, include, patterns
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
import django.contrib.auth.urls
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from quanquan.views import index
admin.autodiscover()
urlpatterns = i18n_patterns('',
url(r'^auth/', include('authx.urls', namespace='auth')),
url(r'^authx/', include('authx.urls', namespace='authx')),
url(r'^social/', include('social.apps.django_app.urls', namespace='social')),
url(r'^admin/', include(admin.site.urls)),
url(r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap',),
    url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^$', index, name='index_page'),
) + patterns('', url(r'^jsi18n/(?P<packages>\S+?)/$', 'django.views.i18n.javascript_catalog'),)
urlpatterns += patterns('',
url(r'^api/', include('quanquan.urls.api', namespace='api')),
)
if settings.DEBUG:
urlpatterns = patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
) + staticfiles_urlpatterns() + urlpatterns
|
mozilla/fjord
|
refs/heads/master
|
vendor/packages/carrot/tests/test_with_statement.py
|
9
|
from __future__ import with_statement
import os
import sys
import unittest
sys.path.insert(0, os.pardir)
sys.path.append(os.getcwd())
from tests.utils import test_connection_args
from carrot.connection import BrokerConnection
from carrot.messaging import Consumer, Publisher
class TestTransactioned(unittest.TestCase):
def test_with_statement(self):
with BrokerConnection(**test_connection_args()) as conn:
self.assertFalse(conn._closed)
with Publisher(connection=conn, exchange="F", routing_key="G") \
as publisher:
self.assertFalse(publisher._closed)
self.assertTrue(conn._closed)
self.assertTrue(publisher._closed)
with BrokerConnection(**test_connection_args()) as conn:
self.assertFalse(conn._closed)
with Consumer(connection=conn, queue="E", exchange="F",
routing_key="G") as consumer:
self.assertFalse(consumer._closed)
self.assertTrue(conn._closed)
self.assertTrue(consumer._closed)
if __name__ == '__main__':
unittest.main()
|
sexroute/commandergenius
|
refs/heads/sdl_android
|
project/jni/python/src/Lib/test/test_ftplib.py
|
56
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class and IPv6 environment
import ftplib
import threading
import asyncore
import asynchat
import socket
import StringIO
from unittest import TestCase
from test import test_support
from test.test_support import HOST
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024)
def handle_close(self):
self.baseclass.push('226 transfer complete')
self.close()
class DummyFTPHandler(asynchat.async_chat):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator("\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = ''.join(self.in_buffer)
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data + '\r\n')
def cmd_port(self, arg):
addr = map(int, arg.split(','))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=2)
self.dtp = DummyDTPHandler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
sock = socket.socket()
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
ip, port = sock.getsockname()[:2]
ip = ip.replace('.', ','); p1 = port / 256; p2 = port % 256
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = DummyDTPHandler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=2)
self.dtp = DummyDTPHandler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
sock = socket.socket(socket.AF_INET6)
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = DummyDTPHandler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
self.dtp.push(RETR_DATA)
self.dtp.close_when_done()
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accept(self):
conn, addr = self.accept()
self.handler = self.handler(conn)
self.close()
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=2)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, IOError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_retrbinary(self):
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = StringIO.StringIO(RETR_DATA)
self.client.storbinary('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storlines(self):
f = StringIO.StringIO(RETR_DATA.replace('\r\n', '\n'))
self.client.storlines('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_makeport(self):
self.client.makeport()
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 2)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'pasv')
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP()
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
self.client.makeport()
self.assertEqual(self.server.handler.last_received_cmd, 'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 2)
conn.close()
self.assertEqual(self.server.handler.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = test_support.bind_port(self.sock)
threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
ftplib.FTP.port = self.port
def tearDown(self):
self.evt.wait()
def server(self, evt, serv):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
serv.listen(5)
# (1) Signal the caller that we are ready to accept the connection.
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
conn.send("1 Hola mundo\n")
# (2) Signal the caller that it is safe to close the socket.
evt.set()
conn.close()
finally:
serv.close()
# (3) Signal the caller that we are done.
evt.set()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assert_(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost")
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assert_(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost", timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(ftp.sock.gettimeout() is None)
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def test_main():
tests = [TestFTPClass, TestTimeouts]
if socket.has_ipv6:
try:
DummyFTPServer((HOST, 0), af=socket.AF_INET6)
except socket.error:
pass
else:
tests.append(TestIPv6Environment)
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
rbardaji/oceanobs
|
refs/heads/master
|
other_codes/map_plot.py
|
1
|
import mooda
import matplotlib.pyplot as plt
path = r"C:\Users\rbard\Desktop\MO_LATEST_TS_MO_OBSEA_20180406.nc"
wf = mooda.WaterFrame()
wf.from_netcdf(path)
for key in wf.metadata:
print(key, wf.metadata[key])
pm = mooda.PlotMap()
pm.map_mediterranean()
pm.add_point(lon=wf.metadata["last_longitude_observation"],
lat=wf.metadata["last_latitude_observation"],
label=wf.metadata["site_code"])
plt.show()
|
salvaorenick/django-cms-redirects
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup, find_packages
import os
version = __import__('cms_redirects').__version__
install_requires = [
'setuptools',
'django',
'django-cms',
]
setup(
name = "django-cms-redirects",
version = version,
url = 'http://github.com/salvaorenick/django-cms-redirects',
license = 'BSD',
platforms=['OS Independent'],
description = "",
author = "Andrew Schoen",
author_email = 'andrew.schoen@gmail.com',
packages=find_packages(),
install_requires = install_requires,
include_package_data=True,
zip_safe=False,
classifiers = [
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Site Management :: Link Checking',
],
package_dir={
'cms_redirects': 'cms_redirects',
},
)
|