repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
|---|---|---|---|---|---|---|---|---|
Debith/py2traits
|
src/pytraits/__init__.py
|
Python
|
apache-2.0
| 818
| 0.001224
|
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
'''
Copyright 2014-2015 Teppo Perä
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from pyt
|
raits.core import ndict, Singleton
from pytraits.combiner import combine_class
from pytraits.extendable import
|
extendable
from pytraits.trait_composer import add_traits
|
pdelsante/thug
|
thug/DOM/W3C/Events/DocumentEvent.py
|
Python
|
gpl-2.0
| 876
| 0.010274
|
#!/usr/bin/env python
from thug.DOM.W3C.Core.DOMException import DOMException
from .
|
HTMLEvent import HTMLEvent
from .MouseEvent import MouseEvent
from .MutationEvent import MutationEvent
from .StorageEvent import StorageEvent
from .UIEvent import UIEvent
EventMap = {
"HTMLEvent" : HTMLEvent,
"HTMLEvents" : HTMLEvent,
"MouseEvent" : MouseEvent,
"MouseEvents" : Mou
|
seEvent,
"MutationEvent" : MutationEvent,
"MutationEvents" : MutationEvent,
"StorageEvent" : StorageEvent,
"UIEvent" : UIEvent,
"UIEvents" : UIEvent
}
# Introduced in DOM Level 2
class DocumentEvent(object):
def __init__(self, doc):
self.doc = doc
def createEvent(self, eventType):
if eventType not in EventMap:
raise DOMException(DOMException.NOT_SUPPORTED_ERR)
return EventMap[eventType]()
|
tlpinney/geomakers
|
windwaker/skyshaker/migrations/0014_auto_20141011_2142.py
|
Python
|
apache-2.0
| 548
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('skyshaker', '0013_link_embed'),
]
|
operations = [
migrations.RemoveField(
model_name='link',
name='embed',
),
migrations.AddField(
model_name='video',
name='embed',
field=model
|
s.TextField(default=b'', null=True, blank=True),
preserve_default=True,
),
]
|
Kingclove/lab5info3180
|
run.py
|
Python
|
mit
| 10,013
| 0.010786
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
import argparse
import json
import os
import shutil
import sys
import time
import urllib2
from main import config
###############################################################################
# Options
##############################################
|
#################################
PARSER = argpars
|
e.ArgumentParser()
PARSER.add_argument(
'-w', '--watch', dest='watch', action='store_true',
help='watch files for changes when running the development web server',
)
PARSER.add_argument(
'-c', '--clean', dest='clean', action='store_true',
help='recompiles files when running the development web server',
)
PARSER.add_argument(
'-C', '--clean-all', dest='clean_all', action='store_true',
help='''Cleans all the Node & Bower related tools / libraries and updates
them to their latest versions''',
)
PARSER.add_argument(
'-m', '--minify', dest='minify', action='store_true',
help='compiles files into minified version before deploying'
)
PARSER.add_argument(
'-s', '--start', dest='start', action='store_true',
help='starts the dev_appserver.py with storage_path pointing to temp',
)
PARSER.add_argument(
'-o', '--host', dest='host', action='store', default='127.0.0.1',
help='the host to start the dev_appserver.py',
)
PARSER.add_argument(
'-p', '--port', dest='port', action='store', default='8080',
help='the port to start the dev_appserver.py',
)
PARSER.add_argument(
'-f', '--flush', dest='flush', action='store_true',
help='clears the datastore, blobstore, etc',
)
ARGS = PARSER.parse_args()
###############################################################################
# Directories
###############################################################################
DIR_BOWER_COMPONENTS = 'bower_components'
DIR_MAIN = 'main'
DIR_NODE_MODULES = 'node_modules'
DIR_STYLE = 'style'
DIR_SCRIPT = 'script'
DIR_TEMP = 'temp'
DIR_STATIC = os.path.join(DIR_MAIN, 'static')
DIR_SRC = os.path.join(DIR_STATIC, 'src')
DIR_SRC_SCRIPT = os.path.join(DIR_SRC, DIR_SCRIPT)
DIR_SRC_STYLE = os.path.join(DIR_SRC, DIR_STYLE)
DIR_DST = os.path.join(DIR_STATIC, 'dst')
DIR_DST_STYLE = os.path.join(DIR_DST, DIR_STYLE)
DIR_DST_SCRIPT = os.path.join(DIR_DST, DIR_SCRIPT)
DIR_MIN = os.path.join(DIR_STATIC, 'min')
DIR_MIN_STYLE = os.path.join(DIR_MIN, DIR_STYLE)
DIR_MIN_SCRIPT = os.path.join(DIR_MIN, DIR_SCRIPT)
DIR_LIB = os.path.join(DIR_MAIN, 'lib')
FILE_LIB = os.path.join(DIR_MAIN, 'lib.zip')
DIR_BIN = os.path.join(DIR_NODE_MODULES, '.bin')
FILE_COFFEE = os.path.join(DIR_BIN, 'coffee')
FILE_GRUNT = os.path.join(DIR_BIN, 'grunt')
FILE_LESS = os.path.join(DIR_BIN, 'lessc')
FILE_UGLIFYJS = os.path.join(DIR_BIN, 'uglifyjs')
DIR_STORAGE = os.path.join(DIR_TEMP, 'storage')
###############################################################################
# Helpers
###############################################################################
def print_out(script, filename=''):
timestamp = datetime.now().strftime('%H:%M:%S')
if not filename:
filename = '-' * 46
script = script.rjust(12, '-')
print '[%s] %12s %s' % (timestamp, script, filename)
def make_dirs(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def remove_dir(directory):
if os.path.isdir(directory):
shutil.rmtree(directory)
def clean_files():
bad_endings = ['pyc', 'pyo', '~']
print_out(
'CLEAN FILES',
'Removing files: %s' % ', '.join(['*%s' % e for e in bad_endings]),
)
for root, _, files in os.walk('.'):
for filename in files:
for bad_ending in bad_endings:
if filename.endswith(bad_ending):
os.remove(os.path.join(root, filename))
def merge_files(source, target):
fout = open(target, 'a')
for line in open(source):
fout.write(line)
fout.close()
def os_execute(executable, args, source, target, append=False):
operator = '>>' if append else '>'
os.system('"%s" %s %s %s %s' % (executable, args, source, operator, target))
def compile_script(source, target_dir):
if not os.path.isfile(source):
print_out('NOT FOUND', source)
return
target = source.replace(DIR_SRC_SCRIPT, target_dir).replace('.coffee', '.js')
if not is_dirty(source, target):
return
make_dirs(os.path.dirname(target))
if not source.endswith('.coffee'):
print_out('COPYING', source)
shutil.copy(source, target)
return
print_out('COFFEE', source)
os_execute(FILE_COFFEE, '-cp', source, target)
def compile_style(source, target_dir, check_modified=False):
if not os.path.isfile(source):
print_out('NOT FOUND', source)
return
if not source.endswith('.less'):
return
target = source.replace(DIR_SRC_STYLE, target_dir).replace('.less', '.css')
if check_modified and not is_style_modified(target):
return
minified = ''
if target_dir == DIR_MIN_STYLE:
minified = '-x'
target = target.replace('.css', '.min.css')
print_out('LESS MIN', source)
else:
print_out('LESS', source)
make_dirs(os.path.dirname(target))
os_execute(FILE_LESS, minified, source, target)
def make_lib_zip(force=False):
if force and os.path.isfile(FILE_LIB):
os.remove(FILE_LIB)
if not os.path.isfile(FILE_LIB):
print_out('ZIP', FILE_LIB)
shutil.make_archive(DIR_LIB, 'zip', DIR_LIB)
def is_dirty(source, target):
if not os.access(target, os.O_RDONLY):
return True
return os.stat(source).st_mtime - os.stat(target).st_mtime > 0
def is_style_modified(target):
for root, _, files in os.walk(DIR_SRC):
for filename in files:
path = os.path.join(root, filename)
if path.endswith('.less') and is_dirty(path, target):
return True
return False
def compile_all_dst():
for source in config.STYLES:
compile_style(os.path.join(DIR_STATIC, source), DIR_DST_STYLE, True)
for module, scripts in config.SCRIPTS:
for source in scripts:
compile_script(os.path.join(DIR_STATIC, source), DIR_DST_SCRIPT)
def update_path_separators():
def fixit(path):
return path.replace('\\', '/').replace('/', os.sep)
for idx in xrange(len(config.STYLES)):
config.STYLES[idx] = fixit(config.STYLES[idx])
for module, scripts in config.SCRIPTS:
for idx in xrange(len(scripts)):
scripts[idx] = fixit(scripts[idx])
def internet_on():
try:
urllib2.urlopen('http://74.125.228.100', timeout=1)
return True
except urllib2.URLError:
return False
def get_dependencies(file_name):
with open(file_name) as json_file:
json_data = json.load(json_file)
dependencies = json_data.get('dependencies', dict()).keys()
return dependencies + json_data.get('devDependencies', dict()).keys()
def install_dependencies():
if not internet_on():
print_out('NO INTERNET')
return
for dependency in get_dependencies('package.json'):
if not os.path.exists(os.path.join(DIR_NODE_MODULES, dependency)):
os.system('npm install')
break
for dependency in get_dependencies('bower.json'):
if not os.path.exists(os.path.join(DIR_BOWER_COMPONENTS, dependency)):
os.system('"%s" ext' % FILE_GRUNT)
break
def update_missing_args():
if ARGS.start or ARGS.clean_all:
ARGS.clean = True
def uniq(seq):
seen = set()
return [e for e in seq if e not in seen and not seen.add(e)]
###############################################################################
# Main
###############################################################################
def run_clean():
print_out('CLEAN')
clean_files()
make_lib_zip(force=True)
remove_dir(DIR_DST)
make_dirs(DIR_DST)
compile_all_dst()
print_out('DONE')
def run_clean_all():
print_out('CLEAN ALL')
remove_dir(DIR_BOWER_COMPONENTS)
remove_dir(DIR_NODE_MODULES)
def run_minify():
print_out('MINIFY')
clean_files()
make_lib_zip(force=True)
remove_dir(DIR_MIN)
make_dirs(DIR_MIN_SCRIPT)
for source in config.STYLES:
compile_style(os.path.join(DIR_STATIC, source), DIR_MIN_STYLE)
for module, scripts in config.SCRIPTS:
scripts = uniq(scripts)
coffees = ' '.join([
os.path.join(DIR_STATIC,
|
fpradah/change_data_structure
|
src/cyclomatic.py
|
Python
|
gpl-2.0
| 1,610
| 0.048447
|
import sys
import csv
def intersect(a, b):
""" return the intersection of two lists """
return list(set(a) & set(b))
def union(a, b):
""" return the union of two lists """
return list(set(a) | set(b))
def combinarA(x,y):
return (x,y,"A")
def combinarB(x,y):
return (x,y,"B")
def printCSV(list,file):
with open(file, 'wb') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(list)
#print sys.argv[1]
file_input = sys.argv[1]
file_output = sys.argv[2]
reader = csv.reader(open(file_input, 'rb'))
name_exist = []
ax = []
ay = []
bx = []
by = []
for index,row in enumerate(reader) :
if no
|
t row[0].isdigit() :
continue
if row[0] in name_exist :
break
name_exist.append(row[0])
row[1] = row[1].replace("TRUE","1")
row[2] = row[2].replace("TRUE"
|
,"1")
row[3] = row[3].replace("TRUE","1")
row[4] = row[4].replace("TRUE","1")
row[1] = row[1].replace("FALSE",row[1].replace("","0"))
row[2] = row[2].replace("FALSE",row[2].replace("","0"))
row[3] = row[3].replace("FALSE",row[3].replace("","0"))
row[4] = row[4].replace("FALSE",row[4].replace("","0"))
if (len(row[1])==0):
row[1] = "0"
if (len(row[2])==0):
row[2] = "0"
if (len(row[3])==0):
row[3] = "0"
if (len(row[4])==0):
row[4] = "0"
ax.append(row[0]*int(row[1]))
ay.append(row[0]*int(row[2]))
bx.append(row[0]*int(row[3]))
by.append(row[0]*int(row[4]))
ax = intersect(ax,name_exist)
ay = intersect(ay,name_exist)
bx = intersect(bx,name_exist)
by = intersect(by,name_exist)
a = map(combinarA,ax,ay)
b = map(combinarB,bx,by)
output = union(a,b)
printCSV(output,file_output)
|
erichschroeter/lieutenant
|
lieutenant/lieutenant/settings.py
|
Python
|
mit
| 4,114
| 0.002674
|
"""
Django settings for lieutenant project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'he&$oe$(3va(8la@_!8*3&b)t(3ry0k2bo*7$&$%p&qy&4fr#8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
from django.conf import global_settings
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
# Required by `allauth` template tags
'django.core.context_processors.request',
# `allauth` specific context processors
'allauth.account.context_processors.account',
'dealer.contrib.django.context_processor',
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'widget_tweaks',
'allauth',
'allauth.account',
'rest_fram
|
ework',
'taggit',
'favorites',
'taggit_serializer',
'randomslugfield',
'entries',
'tags',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.Authenti
|
cationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'dealer.contrib.django.Middleware',
)
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.logging.LoggingPanel',
]
SITE_ID = 1
ROOT_URLCONF = 'lieutenant.urls'
WSGI_APPLICATION = 'lieutenant.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
#'default': {
#'ENGINE': 'django.db.backends.postgresql_psycopg2',
#'NAME': os.environ.get("LIEUTENANT_DB_NAME", 'lieutenant'),
#'USER': os.environ.get("LIEUTENANT_DB_USER", 'django_lieutenant'),
#'PASSWORD': os.environ.get("LIEUTENANT_DB_PASSWORD", ''),
#'HOST': os.environ.get("LIEUTENANT_DB_HOST", 'localhost'),
#'PORT': os.environ.get("LIEUTENANT_DB_PORT", ''), # Set to empty string for default
#}
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.environ.get(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Location where the collectstatic command will place static files
STATIC_ROOT = '/home/lieutenant/static'
LOGIN_REDIRECT_URL = '/'
# Don't ask user to confirm signing out, just do it
ACCOUNT_LOGOUT_ON_GET = True
|
teracyhq/flask-boilerplate
|
app/blueprints.py
|
Python
|
bsd-3-clause
| 289
| 0
|
# -*- coding: utf-8 -*-
"""flask blueprints"""
from .main import main_bp
from .api_1_0 import api_bp as api_1_0_bp
__all__ = ['register_blueprints']
def register_blueprints(app):
"""register blueprints"""
a
|
pp.register_bluep
|
rint(main_bp)
app.register_blueprint(api_1_0_bp)
|
kurrik/github-recs
|
src/apriori/apriori.py
|
Python
|
apache-2.0
| 5,606
| 0.021584
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Arne Roomann-Kurrik <kurrik@gmail.com>'
import os
import sys
import math
import argparse
from subprocess import call
def ParseLine(line):
# Line format is <screen_name>,"<id1>,<id2>,..."\n
split_index = line.index(',')
screen_name = line[:split_index]
repo_string = line[split_index+1:-1]
if repo_string[0] == '"':
repo_string = repo_string[1:-1]
if repo_string.find('"') > -1:
# Shouldn't be any quotes in the line, but print in case something is weird.
print 'Found quote in line which should not have one: %s' % repo_string
return screen_name, [int(x) for x in repo_string.split(',')]
def Convert(inpath, outpath, minrepos, maxrepos):
count = 0
mindump = 0
maxdump = 0
with open(inpath, 'rb') as infile:
with open(outpath, 'w') as outfile:
infile.next() # Discard header.
for line in infile:
screen_name, repos = ParseLine(line)
if len(repos) > maxrepos:
# Some users touch a LOT of repos, like "Try-Git"
print " Skipping '%s' with %s repos" % (screen_name, len(repos))
maxdump += 1
continue
if len(repos) < minrepos:
mindump += 1
continue
repos.sort()
outfile.write("%s\n" % " ".join(map(str, repos)))
count += 1
fmt = "Converted %s records from %s to %s, dropped %s small %s large records"
print fmt % (count, inpath, outpath, mindump, maxdump)
def Apriori(binary, data, rulesets, minsup, minconf):
minsup = '-s-%s' % minsup
minconf = '-c%s' % minconf
call([binary, '-tr', minsup, minconf, data, rulesets])
def ParseRule(line):
parts = line.split(' ')
consequents = list()
antecedents = list()
in_cons = True
for part in parts:
if part == '<-':
in_cons = False
continue
elif part == '':
break
elif part[0] == '(':
break
if in_cons:
consequents.append(part)
else:
antecedents.append(part)
return (frozenset(antecedents), frozenset(consequents))
def ReadRules(path):
rules = []
with open(path, 'r') as f:
for line in f:
rules.append(ParseRule(line))
return rules
def ScoreRule(ant, con, trans, cache):
if not cache.has_key(ant):
cache[ant] = ant.issubset(trans)
if not cache.has_key(con):
cache[con] = con.issubset(trans)
if cache[ant]:
if cache[con]:
return 'TP'
else:
return 'FP'
else:
if cache[con]:
return 'FN'
else:
return 'TN'
def PrintProgress(count, length):
pct = float(100 * count) / float(length)
print >> sys.stdout, "\r Line %s of %s = %3.4f" % (count, length, pct),
sys.stdout.flush()
def ScoreFile(rules, testpath):
counts = {
'Rules': len(rules),
'TP': 0,
'TN': 0,
'FP': 0,
'FN': 0,
}
length = 0
with open(testpath) as f:
for line in f:
length += 1
count = 0
with open(testpath) as f:
for line in f:
count += 1
if count % 20 == 0:
PrintProgress(count, length)
trans = frozenset(line.split(' '))
cache = {}
seen_fn = False
seen_score = False
for ant, con in rules:
score = ScoreRule(ant, con, trans, cache)
if score == 'TP' or score == 'FP':
counts[score] += 1
seen_score = True
elif score == 'FN':
seen_fn = True
if not seen_score:
if seen_fn:
counts['FN'] += 1
else:
counts['TN'] += 1
print
return counts
def Test(ruleset, test, outfile):
rules = ReadRules(ruleset)
counts = ScoreFile(rules, test)
print "Outputting counts to %s" % outfile
with open(outfile, 'w') as f:
for key, count in counts.iteritems():
f.write("%s,%s\n" % (key, count))
print '%s: %s' % (key, count)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--binary', default='lib/apriori/apriori/src/apriori')
parser.add_argument('--tmpdir', default='/tmp')
parser.add_argumen
|
t('--minsup', default=80, type=int)
parser.add_argument('--minconf', default=50, type=int)
parser.add_argument('--minrepos', default=2, type=int)
parser.add_argument('--maxrepos', default=1000, type=int)
parser.add_argument('--train', default=None, type=str)
parser.add_argument('--ruleset', default=None, type=str)
parser.add_argument('--results', default=None, type=str)
parser.add_argument('-
|
-test', default=None, type=str)
parser.add_argument('--clear', action='store_true')
args = parser.parse_args()
if args.ruleset is None:
parser.exit(1, 'Ruleset file must be specified for both train and test')
if args.train is not None:
if os.path.isfile(args.ruleset) and args.clear == False:
print 'Skipping train %s because ruleset %s exists' % (args.train, args.ruleset)
else:
tmpfile = os.path.join(args.tmpdir, 'apriori_train_tmp')
Convert(args.train, tmpfile, args.minrepos, args.maxrepos)
Apriori(args.binary, tmpfile, args.ruleset, args.minsup, args.minconf)
if args.test is not None:
if args.results is None:
parser.exit(1, 'Results output file must be specified')
if os.path.isfile(args.results) and args.clear == False:
print 'Skipping test %s because output %s exists' % (args.test, args.results)
else:
if not os.path.isfile(args.ruleset):
parser.exit(1, 'Ruleset file must exist')
tmpfile = os.path.join(args.tmpdir, 'apriori_test_tmp')
Convert(args.test, tmpfile, args.minrepos, args.maxrepos)
Test(args.ruleset, tmpfile, args.results)
|
sustainableis/python-sis
|
pysis/workertools/baseWorker.py
|
Python
|
isc
| 2,783
| 0.00539
|
from pysis import SIS
import os
import json
import pdb
class APITokenException(Exception):
pass
class BaseWorker(object):
def __init__(self, workerID, environment):
self.env = environment
self.uuid = workerID
base_url = o
|
s.getenv('BASE_URL', None)
base_domain = os.getenv('BASE_DOMAIN', None)
if base_url and base_domain:
self.api = SIS(base_url=base_url, api_domain=base_domain)
else:
self.api = SIS(base_url='http://api.ndustrial.io/v1/', api_domain='api.ndustrial.io')
self.configuration_id = None
# load configuration
self.confi
|
g = self.loadConfiguration()
def loadConfiguration(self):
self.worker = self.api.workers.get(uuid=self.uuid)
print (self.worker.label)
configValues = self.worker.getConfigurationValues(environment=self.env)
config = {}
for value in configValues:
configValue = {}
# store type
configValue['type'] = value.type
# store value
if value.type == "integer":
configValue['value'] = int(value.value)
elif value.type == "json":
configValue['value'] = json.loads(value.value)
else:
configValue['value'] = str(value.value)
# store id
configValue['id'] = value.id
# store config dict
config[value.key] = configValue
# save configuration_id
# should be the same each time
# dumb, but whatever
self.configuration_id = value.configuration_id
return config
def updateConfigurationValue(self, key, value):
# update local value
configValue = self.config[key]
configValue['value'] = value
# send along type so update completes properly
value_type = configValue['type']
value_id = configValue['id']
self.worker.updateConfigurationValue(self.configuration_id, value_id, value, value_type)
def getConfigurationValue(self, key):
try:
return self.config[key]['value']
except KeyError:
return None
def createConfigurationValue(self, key, value, value_type):
res = self.worker.createConfigurationValue(self.configuration_id, key, value, value_type)
# load local values
configValue = {'value': value, 'id': res.id, 'type': value_type}
self.config[key] = configValue
def deleteConfigurationValue(self, key, value):
self.worker.deleteConfigurationValue(self.configuration_id, value['id'])
# TODO: API provides no way of checking if deletion was successful
del self.config[key]
|
aldryn/aldryn-search
|
aldryn_search/helpers.py
|
Python
|
bsd-3-clause
| 3,579
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AnonymousUser
from django.template import Engine, RequestContext
from django.test import RequestFactory
from django.utils.text import smart_split
from cms.toolbar.toolbar import CMSToolbar
from .conf import settings
from .utils import (
_get_alias_from_language_func, _get_language_from_alias_func,
get_field_value, strip_tags,
)
try:
from django.utils.encoding import force_unicode
except ImportError:
from django.utils.encoding import force_text as force_unicode
EXCLUDED_PLUGINS = getattr(settings, 'ALDRYN_SEARCH_EXCLUDED_PLUGINS', [])
get_alias_from_language = _get_alias_from_language_func()
get_language_from_alias = _get_language_from_alias_func()
def _render_plugin(plugin, context, renderer=None):
if renderer:
content = renderer.render_plugin(
instance=plugin,
context=context,
editable=False,
)
else:
content = plugin.render_plugin(context)
return content
def get_cleaned_bits(data):
decoded = force_unicode(data)
stripped = strip_tags(decoded)
return smart_split(stripped)
def get_plugin_index_data(base_plugin, request):
text_bits = []
i
|
nstance, plugin_type = base_plugin.get_plugin_instance()
if instance is None or instance.plugin_type in EXCLUDED_PLUGINS:
# this is an empty plugin or excluded from search
return text_bits
search_fields = getattr(instance, 'search_fields', [])
if hasattr(instance, 'search_fulltext'):
# check if the
|
plugin instance has search enabled
search_contents = instance.search_fulltext
elif hasattr(base_plugin, 'search_fulltext'):
# now check in the base plugin instance (CMSPlugin)
search_contents = base_plugin.search_fulltext
elif hasattr(plugin_type, 'search_fulltext'):
# last check in the plugin class (CMSPluginBase)
search_contents = plugin_type.search_fulltext
else:
# disabled if there's search fields defined,
# otherwise it's enabled.
search_contents = not bool(search_fields)
if search_contents:
context = RequestContext(request)
updates = {}
engine = Engine.get_default()
for processor in engine.template_context_processors:
updates.update(processor(context.request))
context.dicts[context._processors_index] = updates
try:
# django-cms>=3.5
renderer = request.toolbar.content_renderer
except AttributeError:
# django-cms>=3.4
renderer = context.get('cms_content_renderer')
plugin_contents = _render_plugin(instance, context, renderer)
if plugin_contents:
text_bits = get_cleaned_bits(plugin_contents)
else:
values = (get_field_value(instance, field) for field in search_fields)
for value in values:
cleaned_bits = get_cleaned_bits(value or '')
text_bits.extend(cleaned_bits)
return text_bits
def get_request(language=None):
"""
Returns a Request instance populated with cms specific attributes.
"""
request_factory = RequestFactory(HTTP_HOST=settings.ALLOWED_HOSTS[0])
request = request_factory.get("/")
request.session = {}
request.LANGUAGE_CODE = language or settings.LANGUAGE_CODE
# Needed for plugin rendering.
request.current_page = None
request.user = AnonymousUser()
request.toolbar = CMSToolbar(request)
return request
|
dsparrow27/zoocore
|
zoo/libs/utils/modules.py
|
Python
|
gpl-3.0
| 4,491
| 0.002004
|
"""This module deals with module paths, importing and the like.
"""
import inspect
import logging
import sys
import os
import imp
import importlib
logger = logging.getLogger(__name__)
def importModule(modulePath, name=None):
"""Import's the modulePath, if ModulePath is a dottedPath then the function will use importlib otherwise it's
expected that modulePath is the absolute path to the source file. If the name arg is not provided then the basename
without the extension will be used.
:param modulePath: The module path either a dottedPath eg. zoo.libs.utils.zoomath or a absolute path.
:type modulePath: str
:param name: The name for the imported module which will be used if the modulepath is a absolute path.
:type name: str
:return: The imported module object
:rtype: ModuleObject
"""
if isDottedPath(modulePath) and not os.path.exists(modulePath):
try:
return importlib.import_module(modulePath)
except ImportError:
logger.error("Failed to load module->{}".format(modulePath), exc_info=True)
try:
if os.path.exists(modulePath):
if not name:
name = os.path.splitext(os.path.basename(modulePath))[0]
if name in sys.modules:
return sys.modules[name]
if os.path.isdir(modulePath):
modulePath = os.path.join(modulePath, "__init__.py")
if not os.path.exists(modulePath):
raise ValueError("Cannot find modulepath: {}".format(modulePath))
return imp.load_source(name, os.path.realpath(modulePath))
except ImportError:
logger.error("Failed to load module {}".format(modulePath))
raise
def iterModules(path, exclude=None):
"""Iterate of the modules of a given folder path
:param path: str, The folder path to iterate
:param exclude: list, a list of files to exclude
:return: iterator
"""
if not exclude:
exclude = []
_exclude = ["__init__.py", "__init__.pyc"]
for root, dirs, files in os.walk(path):
if "__init__.py" not in files:
continue
for f in files:
basename = os.path.basename(f)[0]
if f not in _exc
|
lude and basename not in exclude:
modulePath = os.path.join(root, f)
if f.endswith(".py") or f
|
.endswith(".pyc"):
yield modulePath
def iterMembers(module, predicate=None):
"""Iterates the members of the module, use predicte to restrict to a type
:param module:Object, the module object to iterate
:param predicate: inspect.class
:return:iterator
"""
for mod in inspect.getmembers(module, predicate=predicate):
yield mod
def isDottedPath(path):
"""Determines if the path is a dotted path. Bit of a hack
:param path: str
:return: bool
"""
return len(path.split(".")) > 2
def iterSubclassesFromModule(module, classType):
"""Iterates all classes within a module object returning subclasses of type `classType`.
:param module: the module object to iterate on
:type module: module object
:param classType: The class object
:type classType: object
:return: genertor function returning class objects
:rtype: generator(object)
"""
for member in iterMembers(module, predicate=inspect.isclass):
if issubclass(member, classType):
yield member
def asDottedPath(path):
""" Returns a dotted path relative to the python path.
.. code-block:: python
import sys
currentPath = os.path.expanduser("somerandomPath")
sys.path.append(currentPath)
asDottedPath("someRandomPath/subfolder/subsubfolder.py")
#result: subfolder.subsubfolder
:param path: the absolute path to convert to a dotted path
:type path: str
:return: The dotted path relative to the python
:rtype: str
"""
d, f = os.path.split(path)
f = os.path.splitext(f)[0]
packagePath = [f] # __package__ will be a reversed list of package name parts
syspath = sys.path
driveLetter = os.path.splitdrive(path)[0] + "\\"
while d not in syspath: # go up until we run out of __init__.py files
d, name = os.path.split(d) # pull of a lowest level directory name
if d == driveLetter or name == "":
return ""
packagePath.append(name) # add it to the package parts list
return ".".join(reversed(packagePath))
|
mozata/menpo
|
menpo/visualize/textutils.py
|
Python
|
bsd-3-clause
| 7,250
| 0.000276
|
from __future__ import division, print_function
from collections import deque
from datetime import datetime
import sys
from time import time
def progress_bar_str(percentage, bar_length=20, bar_marker='=', show_bar=True):
r"""
Returns an `str` of the specified progress percentage. The percentage is
represented either in the form of a progress bar or in the form of a
percentage number. It can be combined with the :func:`print_dynamic`
function.
Parameters
----------
percentage : `float`
The progress percentage to be printed. It must be in the range
``[0, 1]``.
bar_length : `int`, optional
Defines the length of the bar in characters.
bar_marker : `str`, optional
Defines the marker character that will be used to fill the bar.
show_bar : `bool`, optional
If ``True``, the `str` includes the bar followed by the percentage,
e.g. ``'[===== ] 50%'``
If ``False``, the `str` includes only the percentage,
e.g. ``'50%'``
Returns
-------
progress_str : `str`
The progress percentage string that can be printed.
Raises
------
ValueError
``percentage`` is not in the range ``[0, 1]``
ValueError
``bar_length`` must be an integer >= ``1``
ValueError
``bar_marker`` must be a string of length 1
Examples
--------
This for loop: ::
n_iters = 2000
for k in range(n_iters):
print_dynamic(progress_bar_str(float(k) / (n_iters-1)))
prints a progress bar of the form: ::
[============= ] 68%
"""
if percentage < 0:
raise ValueError("percentage is not in the range [0, 1]")
elif percentage > 1:
percentage = 1
if not isinstance(bar_length, int) or bar_length < 1:
raise ValueError("bar_length must be an integer >= 1")
if not isinstance(bar_marker, str) or len(bar_marker) != 1:
raise ValueError("bar_marker must be a string of length 1")
# generate output string
if show_bar:
str_param = "[%-" + str(bar_length) + "s] %d%%"
bar_percentage = int(percentage * bar_length)
return str_param % (bar_marker * bar_percentage, percentage * 100)
else:
return "%d%%" % (percentage * 100)
def print_dynamic(str_to_print):
    r"""
    Print *str_to_print* so that the next call overwrites it in place.

    The string is left-justified to 80 characters (clearing any leftovers
    from a longer previous report) and terminated with a carriage return
    instead of a newline, so successive calls re-draw the same line.

    Parameters
    ----------
    str_to_print : `str`
        The string to print.
    """
    padded = str_to_print.ljust(80)
    print(padded, end='\r')
    # Flushing makes terminal updates smooth, but in a notebook it seems to
    # prevent anything from being displayed at all - only flush for a tty.
    if sys.stdout.isatty():
        sys.stdout.flush()
def bytes_str(num):
    r"""
    Convert a size in bytes to a human readable string. For example: ::

        bytes_str(12345)      returns '12.06 KB'
        bytes_str(123456789)  returns '117.74 MB'

    Parameters
    ----------
    num : `int`
        The size in bytes.

    Returns
    -------
    bytes_str : `str`
        The size rendered with two decimals and the largest unit for which
        the value stays below 1024 (``bytes``/``KB``/``MB``/``GB``/``TB``).

    Raises
    ------
    ValueError
        num must be int >= 0
    """
    if not isinstance(num, int) or num < 0:
        raise ValueError("num must be int >= 0")
    value = num
    for unit in ('bytes', 'KB', 'MB', 'GB'):
        if value < 1024.0:
            return "{0:3.2f} {1:s}".format(value, unit)
        value /= 1024.0
    # Anything still >= 1024 GB is reported in terabytes.
    return "{0:3.2f} {1:s}".format(value, 'TB')
def print_progress(iterable, prefix='', n_items=None, offset=0,
                   show_bar=True, show_count=True, show_eta=True,
                   end_with_newline=True):
    r"""
    Print the remaining time needed to compute over an iterable.

    To use, wrap an existing iterable with this function before processing in
    a for loop (see example).

    The estimate of the remaining time is based on a moving average of the
    last 100 items completed in the loop.

    Parameters
    ----------
    iterable : `iterable`
        An iterable that will be processed. The iterable is passed through by
        this function, with the time taken for each complete iteration logged.
    prefix : `str`, optional
        If provided a string that will be prepended to the progress report at
        each level.
    n_items : `int`, optional
        Allows for ``iterable`` to be a generator whose length will be assumed
        to be ``n_items``. If not provided, then ``iterable`` needs to be
        `Sizable`.
    offset : `int`, optional
        Useful in combination with ``n_items`` - report back the progress as
        if ``offset`` items have already been handled. ``n_items`` will be
        left unchanged.
    show_bar : `bool`, optional
        If ``False``, the progress bar (e.g. ``[=========   ]``) will be
        hidden.
    show_count : `bool`, optional
        If ``False``, the item count (e.g. ``(4/25)``) will be hidden.
    show_eta : `bool`, optional
        If ``False``, the estimated time to finish (e.g.
        ``- 00:00:03 remaining``) will be hidden.
    end_with_newline : `bool`, optional
        If ``False``, there will be no new line added at the end of the
        dynamic printing. This means the next print statement will overwrite
        the dynamic report presented here. Useful if you want to follow up a
        print_progress with a second print_progress, where the second
        overwrites the first on the same line.

    Raises
    ------
    ValueError
        ``offset`` provided without ``n_items``

    Examples
    --------
    This for loop: ::

        from time import sleep
        for i in print_progress(range(10)):
            sleep(1)

    prints a progress report of the form: ::

        [=============       ] 70% (7/10) - 00:00:03 remaining
    """
    if n_items is None and offset != 0:
        raise ValueError('offset can only be set when n_items has been'
                         ' manually provided.')
    if prefix != '':
        # A prefix takes horizontal space, so shorten the bar to compensate.
        prefix = prefix + ': '
        bar_length = 10
    else:
        bar_length = 20
    n = n_items if n_items is not None else len(iterable)
    # Moving average window over (at most) the last 100 iteration durations.
    timings = deque([], 100)
    time1 = time()
    for i, x in enumerate(iterable, 1 + offset):
        yield x
        time2 = time()
        timings.append(time2 - time1)
        time1 = time2
        remaining = n - i
        # ETA = mean iteration time * remaining items, rendered as HH:MM:SS
        # by piggy-backing on datetime formatting.
        duration = datetime.utcfromtimestamp(sum(timings) / len(timings) *
                                             remaining)
        bar_str = progress_bar_str(i / n, bar_length=bar_length, show_bar=show_bar)
        count_str = ' ({}/{})'.format(i, n) if show_count else ''
        eta_str = " - {} remaining".format(duration.strftime('%H:%M:%S')) if show_eta else ''
        print_dynamic('{}{}{}{}'.format(prefix, bar_str, count_str, eta_str))
    # the iterable has now finished - to make it clear redraw the progress with
    # a done message. We also hide the eta at this stage.
    count_str = ' ({}/{})'.format(n, n) if show_count else ''
    bar_str = progress_bar_str(1, bar_length=bar_length, show_bar=show_bar)
    print_dynamic('{}{}{} - done.'.format(prefix, bar_str, count_str))
    if end_with_newline:
        print('')
|
paninetworks/neutron
|
neutron/tests/functional/agent/linux/test_interface.py
|
Python
|
apache-2.0
| 2,526
| 0
|
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import uuidutils
import testtools
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.common import exceptions
from neutron.common import utils
from neutron.tests import base as tests_base
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux import base
class OVSInterfaceDriverTestCase(base.BaseOVSLinuxTestCase):
    """Functional tests for the OVS interface driver ``plug()`` call."""

    def setUp(self):
        super(OVSInterfaceDriverTestCase, self).setUp()
        conf = cfg.ConfigOpts()
        conf.register_opts(interface.OPTS)
        self.interface = interface.OVSInterfaceDriver(conf)

    def test_plug_checks_if_bridge_exists(self):
        # Plugging a port into a non-existent bridge must be rejected.
        # NOTE(review): this call was garbled in the source (the
        # ``network_id`` keyword was split mid-token); restored here.
        with testtools.ExpectedException(exceptions.BridgeDoesNotExist):
            self.interface.plug(network_id=42,
                                port_id=71,
                                device_name='not_a_device',
                                mac_address='',
                                bridge='not_a_bridge',
                                namespace='not_a_namespace')

    def test_plug_succeeds(self):
        device_name = tests_base.get_rand_name()
        mac_address = utils.get_random_mac('fa:16:3e:00:00:00'.split(':'))
        namespace = self.useFixture(net_helpers.NamespaceFixture()).name
        bridge = self.useFixture(net_helpers.OVSBridgeFixture()).bridge

        # A freshly created bridge starts with no ports at all.
        self.assertFalse(bridge.get_port_name_list())
        self.interface.plug(network_id=uuidutils.generate_uuid(),
                            port_id=uuidutils.generate_uuid(),
                            device_name=device_name,
                            mac_address=mac_address,
                            bridge=bridge.br_name,
                            namespace=namespace)
        # Plugging must both add the OVS port and create the device inside
        # the target namespace.
        self.assertIn(device_name, bridge.get_port_name_list())
        self.assertTrue(ip_lib.device_exists(device_name, namespace))
|
erigones/Ludolph
|
ludolph/main.py
|
Python
|
bsd-3-clause
| 8,916
| 0.000673
|
"""
Ludolph: Monitoring Jabber Bot
Copyright (C) 2012-2017 Erigones, s. r. o.
This file is part of Ludolph.
See the LICENSE file for copying permission.
"""
import os
import re
import sys
import signal
import logging
from collections import namedtuple
try:
# noinspection PyCompatibility,PyUnresolvedReferences
from configparser import RawConfigParser
except ImportError:
# noinspection PyCompatibility,PyUnresolvedReferences
from ConfigParser import RawConfigParser
try:
# noinspection PyCompatibility
from importlib import reload
except ImportError:
# noinspection PyUnresolvedReferences
from imp import reload
from ludolph.utils import parse_loglevel
from ludolph.bot import LudolphBot
from ludolph.plugins.plugin import LudolphPlugin
from ludolph import __version__
LOGFORMAT = '%(asctime)s %(levelname)-8s %(name)s: %(message)s'
logger = logging.getLogger('ludolph.main')
Plugin = namedtuple('Plugin', ('name', 'module', 'cls'))
def daemonize():
    """
    Detach from the controlling terminal and continue running in the
    background as a daemon, using the classic UNIX double-fork recipe:

    http://code.activestate.com/recipes/278731-creating-a-daemon-the-python-way/
    http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/

    Both intermediate parent processes call ``sys.exit(0)``; only the final
    daemon process returns from this function.

    :return: 0 in the daemonized process
    :rtype: int
    """
    try:
        pid = os.fork()  # Fork #1
        if pid > 0:
            sys.exit(0)  # Exit first parent
    except OSError as e:
        sys.stderr.write('Fork #1 failed: %d (%s)\n' % (e.errno, e.strerror))
        sys.exit(1)
    # The first child. Decouple from parent environment
    # Become session leader of this new session.
    # Also be guaranteed not to have a controlling terminal
    os.chdir('/')
    # noinspection PyArgumentList
    os.setsid()
    os.umask(0o022)
    try:
        pid = os.fork()  # Fork #2
        if pid > 0:
            sys.exit(0)  # Exit from second parent
    except OSError as e:
        sys.stderr.write('Fork #2 failed: %d (%s)\n' % (e.errno, e.strerror))
        sys.exit(1)
    # Close all open file descriptors
    import resource  # Resource usage information
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        # The fd limit can be unlimited; fall back to a sane default.
        maxfd = 1024
    # Iterate through and close all file descriptors
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError:  # ERROR, fd wasn't open (ignored)
            pass
    # Redirect standard file descriptors to /dev/null
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(os.devnull, 'r')
    so = open(os.devnull, 'a+')
    se = open(os.devnull, 'a+')
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
    return 0
def start():
"""
Start the daemon.
"""
ret = 0
cfg = 'ludolph.cfg'
cfg_fp = None
cfg_lo = ((os.path.expanduser('~'), '.' + cfg), (sys.prefix, 'etc', cfg), ('/etc', cfg))
config_base_sections = ('global', 'xmpp', 'webserver', 'cron', 'ludolph.bot')
# Try to read config file from ~/.ludolph.cfg or /etc/ludolph.cfg
for i in cfg_lo:
try:
cfg_fp = open(os.path.join(*i))
except IOError:
continue
else:
break
if not cfg_fp:
sys.stderr.write("""\nLudolph can't start!\n
You need to create a config file in one these locations: \n%s\n
You can rename ludolph.cfg.example and update the required options.
The example file is located in: %s\n\n""" % (
'\n'.join([os.path.join(*i) for i in cfg_lo]),
os.path.dirname(os.path.abspath(__file__))))
sys.exit(1)
# Read and parse configuration
# noinspection PyShadowingNames
def load_config(fp, reopen=False):
config = RawConfigParser()
if reopen:
fp = open(fp.name)
try: # config.readfp() is Deprecated since python 3.2
# noinspection PyDeprecation
read_file = config.readfp
except AttributeError:
read_file = config.read_file
|
read_file(fp)
fp.close()
return config
config = load_config(cfg_fp)
# Prepare logging configuration
logconfig = {
'level':
|
parse_loglevel(config.get('global', 'loglevel')),
'format': LOGFORMAT,
}
if config.has_option('global', 'logfile'):
logfile = config.get('global', 'logfile').strip()
if logfile:
logconfig['filename'] = logfile
# Daemonize
if config.has_option('global', 'daemon'):
if config.getboolean('global', 'daemon'):
ret = daemonize()
# Save pid file
if config.has_option('global', 'pidfile'):
try:
with open(config.get('global', 'pidfile'), 'w') as fp:
fp.write('%s' % os.getpid())
except Exception as ex:
# Setup logging just to show this error
logging.basicConfig(**logconfig)
logger.critical('Could not write to pidfile (%s)\n', ex)
sys.exit(1)
# Setup logging
logging.basicConfig(**logconfig)
# All exceptions will be logged without exit
def log_except_hook(*exc_info):
logger.critical('Unhandled exception!', exc_info=exc_info)
sys.excepthook = log_except_hook
# Default configuration
use_tls = True
use_ssl = False
address = []
# Starting
logger.info('Starting Ludolph %s (%s %s)', __version__, sys.executable, sys.version.split()[0])
logger.info('Loaded configuration from %s', cfg_fp.name)
# Load plugins
# noinspection PyShadowingNames
def load_plugins(config, reinit=False):
plugins = []
for config_section in config.sections():
config_section = config_section.strip()
if config_section in config_base_sections:
continue
# Parse other possible imports
parsed_plugin = config_section.split('.')
if len(parsed_plugin) == 1:
modname = 'ludolph.plugins.' + config_section
plugin = config_section
else:
modname = config_section
plugin = parsed_plugin[-1]
logger.info('Loading plugin: %s', modname)
try:
# Translate super_ludolph_plugin into SuperLudolphPlugin
clsname = plugin[0].upper() + re.sub(r'_+([a-zA-Z0-9])', lambda m: m.group(1).upper(), plugin[1:])
module = __import__(modname, fromlist=[clsname])
if reinit and getattr(module, '_loaded_', False):
reload(module)
module._loaded_ = True
imported_class = getattr(module, clsname)
if not issubclass(imported_class, LudolphPlugin):
raise TypeError('Plugin: %s is not LudolphPlugin instance' % modname)
plugins.append(Plugin(config_section, modname, imported_class))
except Exception as ex:
logger.exception(ex)
logger.critical('Could not load plugin: %s', modname)
return plugins
plugins = load_plugins(config)
# XMPP connection settings
if config.has_option('xmpp', 'host'):
address = [config.get('xmpp', 'host'), '5222']
if config.has_option('xmpp', 'port'):
address[1] = config.get('xmpp', 'port')
logger.info('Connecting to jabber server %s', ':'.join(address))
else:
logger.info('Using DNS SRV lookup to find jabber server')
if config.has_option('xmpp', 'tls'):
use_tls = config.getboolean('xmpp', 'tls')
if config.has_option('xmpp', 'ssl'):
use_ssl = config.getboolean('xmpp', 'ssl')
# Here we go
xmpp = LudolphBot(config, plugins=plugins)
signal.signal(signal.SIGINT, xmpp.shutdown)
signal.signal(signal.SIGTERM, xmpp.shutdown)
if hasattr(signal, 'SIGHUP'): # Windows does not support SIGHUP - bug #41
# noinspection PyUnusedLocal,PyShadowingNames
def sighup(signalnum, handler):
if xmpp.reloading:
logger.warning('Reload already in progress')
else:
xmpp.reloading = True
try:
config = load_config(cfg_fp,
|
the01/python-paps
|
paps/crowd/controller.py
|
Python
|
mit
| 5,482
| 0.000912
|
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = "d01"
__email__ = "jungflor@gmail.com"
__copyright__ = "Copyright (C) 2015-16, Florian JUNG"
__license__ = "MIT"
__version__ = "0.1.0"
__date__ = "2016-03-29"
# Created: 2015-06-10 23:54
import threading
from .pluginInterface import Plugin, PluginStartException
from paps.person import Person
class CrowdController(Plugin):
    """
    Manages the audience state and the plugins.

    Keeps a lock-protected mapping of ``person.id -> Person`` and fans
    every audience change out to all registered plugins.
    """

    def __init__(self, settings=None):
        """
        Initialize object

        :param settings: Settings to be passed to init (default: None)
        :type settings: dict | None
        :rtype: None
        :raises ValueError: No plugins given
        """
        if settings is None:
            settings = {}
        super(CrowdController, self).__init__(settings)
        self.plugins = settings.get('plugins')
        """ :type plugins: list[paps.crowd.pluginInterface.Plugin] """
        if not self.plugins:
            raise ValueError("No plugins registered")
        self._people = {}
        """ Current state of audience - person.id: Person()
            :type _people: dict[str, paps.person.Person] """
        self._people_lock = threading.Lock()
        """ Lock to control access to ._people """

    def on_person_new(self, people):
        """
        New people joined the audience

        :param people: People that just joined the audience
        :type people: list[paps.person.Person]
        :rtype: None
        """
        self.debug("()")
        changed = []
        with self._people_lock:
            for p in people:
                person = Person.from_person(p)
                if person.id in self._people:
                    self.warning(
                        u"{} already in audience".format(person.id)
                    )
                self._people[person.id] = person
                changed.append(person)
        for plugin in self.plugins:
            try:
                plugin.on_person_new(changed)
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # still propagate; one failing plugin must not stop the others
            # from being notified.
            except Exception:
                self.exception(
                    u"Failed to send new people to {}".format(plugin.name)
                )

    def on_person_leave(self, people):
        """
        People left the audience

        :param people: People that left
        :type people: list[paps.person.Person]
        :rtype: None
        """
        self.debug("()")
        changed = []
        with self._people_lock:
            for p in people:
                person = Person.from_person(p)
                if person.id not in self._people:
                    self.warning(u"{} not in audience".format(person.id))
                else:
                    del self._people[person.id]
                    changed.append(person)
        for plugin in self.plugins:
            try:
                plugin.on_person_leave(changed)
            except Exception:
                self.exception(
                    u"Failed to send leaving people to {}".format(plugin.name)
                )

    def on_person_update(self, people):
        """
        People have changed (e.g. a sensor value)

        :param people: People whose state changed (may include unchanged)
        :type people: list[paps.person.Person]
        :rtype: None
        """
        self.debug("()")
        changed = []
        with self._people_lock:
            for p in people:
                person = Person.from_person(p)
                if person.id not in self._people:
                    self.warning(u"{} not in audience".format(person.id))
                self._people[person.id] = person
                # Check if really changed? - trust source for now
                changed.append(person)
        for plugin in self.plugins:
            try:
                plugin.on_person_update(changed)
            except Exception:
                self.exception(
                    u"Failed to send updated people to {}".format(plugin.name)
                )

    @property
    def people(self):
        """
        Get people of current audience

        :return: Current people
        :rtype: list[paps.person.Person]
        """
        with self._people_lock:
            # Return a list copy: on Python 3 dict.values() is a live view,
            # which would escape the lock and could be mutated concurrently.
            return list(self._people.values())

    def start(self, blocking=False):
        """
        Start the interface

        :param blocking: Should the call block until stop() is called
            (default: False)
        :type blocking: bool
        :rtype: None
        :raises PluginStartException: Starting one or more plugins failed
        """
        self.debug("()")
        # Start the plugins
        for plugin in self.plugins:
            try:
                # Inject self into plugin
                plugin.controller = self
                plugin.start(blocking=False)
            except Exception:
                self.exception(
                    u"Failed to start plugin {}".format(plugin.name)
                )
                raise PluginStartException(
                    "Starting one or more plugins failed"
                )
        super(CrowdController, self).start(blocking=blocking)

    def stop(self):
        """
        Stop the interface

        :rtype: None
        """
        self.debug("()")
        # Stop the plugins (best effort - log and continue on failure)
        for plugin in self.plugins:
            try:
                plugin.stop()
            except Exception:
                self.exception(u"Failed to stop plugin {}".format(plugin.name))
        super(CrowdController, self).stop()
|
astra-toolbox/astra-toolbox
|
python/astra/plugins/cgls.py
|
Python
|
gpl-3.0
| 2,821
| 0.001418
|
# -----------------------------------------------------------------------
# Copyright: 2010-2022, imec Vision Lab, University of Antwerp
# 2013-2022, CWI, Amsterdam
#
# Contact: astra@astra-toolbox.com
# Website: http://www.astra-toolbox.com/
#
# This file is part of the ASTRA Toolbox.
#
#
# The ASTRA Toolbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public Li
|
cense as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The ASTRA Toolbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY
|
; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the ASTRA Toolbox. If not, see <http://www.gnu.org/licenses/>.
#
# -----------------------------------------------------------------------
import astra
import numpy as np
import six
class CGLSPlugin(astra.plugin.base):
    """CGLS (Conjugate Gradient Least Squares) reconstruction plugin.

    Iteratively reduces ``||W*v - s||`` where ``W`` is the projector
    (forward/back projection pair), ``s`` the projection data and ``v``
    the reconstruction volume, updating ``v`` in place in the ASTRA data
    store.
    """

    astra_name = "CGLS-PLUGIN"

    def initialize(self,cfg):
        # Projector wrapped as an operator exposing FP (forward projection)
        # and BP (back projection).
        self.W = astra.OpTomo(cfg['ProjectorId'])
        self.vid = cfg['ReconstructionDataId']
        self.sid = cfg['ProjectionDataId']
        # Probe whether the data objects are 2D; fall back to the 3D data
        # module if the 2D lookup fails.
        try:
            v = astra.data2d.get_shared(self.vid)
            s = astra.data2d.get_shared(self.sid)
            self.data_mod = astra.data2d
        except Exception:
            v = astra.data3d.get_shared(self.vid)
            s = astra.data3d.get_shared(self.sid)
            self.data_mod = astra.data3d

    def run(self, its):
        # its: number of CGLS iterations to perform; v is updated in place.
        v = self.data_mod.get_shared(self.vid)
        s = self.data_mod.get_shared(self.sid)
        # z, p live in volume space; r, w live in projection space.
        z = np.zeros(v.shape, dtype=np.float32)
        p = np.zeros(v.shape, dtype=np.float32)
        r = np.zeros(s.shape, dtype=np.float32)
        w = np.zeros(s.shape, dtype=np.float32)
        W = self.W

        # r = s - W*v          (initial residual)
        W.FP(v, out=w)
        r[:] = s
        r -= w

        # p = W'*r             (initial search direction)
        W.BP(r, out=p)

        # gamma = <p,p>
        gamma = np.dot(p.ravel(), p.ravel())

        for i in range(its):
            # w = W * p
            W.FP(p, out=w)

            # alpha = gamma / <w,w>
            alpha = gamma / np.dot(w.ravel(), w.ravel())

            # v += alpha * p   (in-place via scratch buffer z)
            z[:] = p
            z *= alpha
            v += z

            # r -= alpha * w
            w *= -alpha;
            r += w

            # z = W' * r
            W.BP(r, out=z)

            # beta = <z,z> / gamma
            newgamma = np.dot(z.ravel(), z.ravel())
            beta = newgamma / gamma

            # gamma = <z,z>
            gamma = newgamma

            # p = z + beta * p
            p *= beta
            p += z
|
frePPLe/frePPLe
|
freppledb/common/notifications.py
|
Python
|
agpl-3.0
| 1,606
| 0.001868
|
#
# Copyright (C) 2020 by frePPLe bv
#
# This library is free software; you can redistribute i
|
t and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for m
|
ore details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from .models import NotificationFactory, User, Bucket, BucketDetail, Parameter
@NotificationFactory.register(User, [User])
def UserNotification(flw, msg):
    """A follower of a user matches only messages about that same user."""
    same_type = flw.content_type == msg.content_type
    return same_type and flw.object_pk == msg.object_pk
@NotificationFactory.register(Bucket, [Bucket, BucketDetail])
def BucketNotification(flw, msg):
    """Match messages about a followed bucket, directly or via its details."""
    # Direct match: the message is about the bucket itself.
    if flw.content_type == msg.content_type:
        return flw.object_pk == msg.object_pk
    # Indirect match: a BucketDetail belonging to the followed bucket.
    # (Implicitly returns None - falsy - for any other message type.)
    if msg.content_type.model_class() == BucketDetail:
        return flw.object_pk == msg.content_object.bucket.name
@NotificationFactory.register(BucketDetail, [BucketDetail])
def BucketDetailNotification(flw, msg):
    """A bucket-detail follower matches only messages about that detail."""
    same_type = flw.content_type == msg.content_type
    return same_type and flw.object_pk == msg.object_pk
@NotificationFactory.register(Parameter, [Parameter])
def ParameterNotification(flw, msg):
    """A parameter follower matches only messages about that parameter."""
    same_type = flw.content_type == msg.content_type
    return same_type and flw.object_pk == msg.object_pk
|
algobook/Algo_Ds_Notes
|
Topological_Sort/Topological_Sort.py
|
Python
|
gpl-3.0
| 1,538
| 0.006502
|
class Graph:
"""
* Creates a adjaceny list for a graph
* Implements a function for topological sorting
"""
def __init__(self, no_vertices):
"""
Initialises an empty adjaceny list (list of lists)
"""
self.vertices = no_vertices
self.adjlist = [
|
[] for i in xrange(0, no_vertices)]
def add_edge(self, vert1, vert2):
"""
Creates an edge between two vertices
"""
self.adjlist[vert1].append(vert2)
def topological_sort_util
|
(self, i, stack, visited):
"""
Utility function for topological sort
"""
visited[i] = True
for node in self.adjlist[i]:
if not visited[node]:
self.topological_sort_util(node, stack, visited)
stack.append(i)
def topological_sort(self):
"""
Implements topological sort (DFS based approach)
"""
stack = []
visited = [False for i in xrange(0, self.vertices)]
for i in xrange(0, self.vertices):
if not visited[i]:
self.topological_sort_util(i, stack, visited)
print "Topological Sort : ",
while len(stack) > 0:
print stack.pop(),
def main():
    # Build the sample DAG from the reference example and print its
    # topological order.
    graph = Graph(6)
    for src, dest in ((5, 2), (5, 0), (4, 0), (4, 1), (2, 3), (3, 1)):
        graph.add_edge(src, dest)
    graph.topological_sort()


if __name__ == "__main__":
    main()

# OUTPUT
# Topological Sort :  5 4 2 3 1 0
|
apanda/modeling
|
mcnet/components/counter.py
|
Python
|
bsd-3-clause
| 3,921
| 0.023718
|
from . import NetworkObject
import z3
class NetworkCounter (NetworkObject):
    """OK cannot count: this is sad.

    Z3 model of a forwarding element: every packet it sends must first have
    been received from a *different* node, is sent to at most one node, is
    sent strictly after it was received, and at most one packet is sent per
    time step.  The per-address counting function is only sketched in the
    commented-out constraints below.
    """
    def _init (self, node, net, ctx):
        super(NetworkCounter, self).init_fail(node)
        self.node = node.z3Node
        self.net = net
        self.ctx = ctx
        self.constraints = list()
        self._constraints ()
        self.net.SaneSend(self)

    @property
    def z3Node (self):
        return self.node

    def _addConstraints (self, solver):
        # Register every constraint built by _constraints() with the solver.
        solver.add(self.constraints)

    def _constraints (self):
        #self.count_func = z3.Function('count_%s'%(self.node), self.ctx.address, self.ctx.address, \
                #z3.IntSort(), z3.IntSort())
        # Bound variables for the quantified constraints below.  t1/a0/a1
        # are only referenced by the commented-out counting sketch.
        # NOTE(review): the declarations of p0 and t1 were garbled
        # (split mid-expression) in the source; restored here.
        p0 = z3.Const('_counter_p0_%s'%(self.node), self.ctx.packet)
        p1 = z3.Const('_counter_p1_%s'%(self.node), self.ctx.packet)
        n0 = z3.Const('_counter_n0_%s'%(self.node), self.ctx.node)
        n1 = z3.Const('_counter_n1_%s'%(self.node), self.ctx.node)
        n2 = z3.Const('_counter_n2_%s'%(self.node), self.ctx.node)
        t0 = z3.Int('_counter_t0_%s'%(self.node))
        t1 = z3.Int('_counter_t1_%s'%(self.node))
        a0 = z3.Const('_counter_a0_%s'%(self.node), self.ctx.address)
        a1 = z3.Const('_counter_a1_%s'%(self.node), self.ctx.address)
        # Make sure all packets sent were first recved
        self.constraints.append(z3.ForAll([n0, p0], \
                z3.Implies(self.ctx.send(self.node, n0, p0), \
                  z3.And( \
                    z3.Exists([n1], \
                      z3.And (self.ctx.recv(n1, self.node, p0), \
                        n0 != n1)), \
                    z3.Not(z3.Exists([n2], \
                        z3.And(self.ctx.send(self.node, n2, p0), \
                            n2 != n0))), \
                    self.ctx.etime(self.node, p0, self.ctx.send_event) > \
                        self.ctx.etime(self.node, p0, self.ctx.recv_event)))))
        # Make sure packets go one at a time
        self.constraints.append(z3.ForAll([p0, t0], \
                z3.Implies(z3.And(self.ctx.etime(self.node, p0, self.ctx.send_event) == t0, \
                                  t0 != 0), \
                   z3.ForAll([p1], \
                     z3.Or(p0 == p1, \
                       self.ctx.etime(self.node, p1, \
                                      self.ctx.send_event) != \
                       t0)))))
        # TODO: Figure out if this needs to be implemented.
        #self.constraints.append(z3.ForAll([a0, a1],
                        #self.count_func(a0, a1, 0) == 0))
        #self.constraints.append(z3.ForAll([p0, t0], \
                #z3.Implies(z3.And(self.ctx.etime(self.node, p0, self.ctx.send_event) == t0, \
                                   #t0 > 0), \
                  #self.count_func(self.ctx.packet.src(p0), self.ctx.packet.dest(p0), t0) ==
                    #self.count_func(self.ctx.packet.src(p0), self.ctx.packet.dest(p0), t0 - 1) + 1)))
        #self.constraints.append(z3.ForAll([a0, a1, t0], \
                #z3.Implies( \
                  #z3.Not(z3.Exists([p0], \
                    #z3.And(self.ctx.etime(self.node, p0, self.ctx.send_event) == t0, \
                       #self.ctx.packet.src(p0) == a0, \
                       #self.ctx.packet.dest(p0) == a1))), \
                  #self.count_func(a0, a1, t0) == self.count_func(a0, a1, t0 - 1))))
|
hb9kns/PyBitmessage
|
src/bitmessageqt/settings.py
|
Python
|
mit
| 40,025
| 0.005347
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'settings.ui'
#
# Created: Thu Dec 25 23:21:20 2014
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
from languagebox import LanguageBox
from sys import platform
# ``QString.fromUtf8`` is only available on some PyQt4 configurations; when
# the lookup raises AttributeError, fall back to passing strings through
# unchanged.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # Identity fallback: the value is already a usable string.
        return s

# Some PyQt4 builds expose ``QApplication.UnicodeUTF8`` and expect it as an
# extra encoding argument to ``translate``; when it is absent, use the
# 3-argument form instead.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_settingsDialog(object):
def setupUi(self, settingsDialog):
settingsDialog.setObjectName(_fromUtf8("settingsDialog"))
settingsDialog.resize(521, 413)
self.gridLayout = QtGui.QGridLayout(settingsDialog)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.buttonBox = QtGui.QDialogButtonBox(settingsDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
self.tabWidgetSettings = QtGui.QTabWidget(settingsDialog)
self.tabWidgetSettings.setObjectName(_fromUtf8("tabWidgetSettings"))
self.tabUserInterface = QtGui.QWidget()
self.tabUserInterface.setEnabled(True)
self.tabUserInterface.setObjectName(_fromUtf8("tabUserInterface"))
self.formLayout = QtGui.QFormLayout(self.tabUserInterface)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.checkBoxStartOnLogon = QtGui.QCheckBox(self.tabUserInterface)
self.checkBoxStartOnLogon.setObjectName(_fromUtf8("checkBoxStartOnLogon"))
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.checkBoxStartOnLogon)
self.groupBoxTray = QtGui.QGroupBox(self.tabUserInterface)
self.groupBoxTray.setObjectName(_fromUtf8("groupBoxTray"))
self.formLayoutTray = QtGui.QFormLayout(self.groupBoxTray)
self.formLayoutTray.setObjectName(_fromUtf8("formLayoutTray"))
self.checkBoxStartInTray = QtGui.QCheckBox(self.groupBoxTray)
self.checkBoxStartInTray.setObjectName(_fromUtf8("checkBoxStartInTray"))
self.formLayoutTray.setWidget(0, QtGui.QFormLayout.SpanningRole, self.checkBoxStartInTray)
self.checkBoxMinimizeToTray = QtGui.QCheckBox(self.groupBoxTray)
self.checkBoxMinimizeToTray.setChecked(True)
self.checkBoxMinimizeToTray.setObjectName(_fromUtf8("checkBoxMinimizeToTray"))
self.formLayoutTray.setWidget(1, QtGui.QFormLayout.LabelRole, self.checkBoxMinimizeToTray)
self.checkBoxTrayOnClose = QtGui.QCheckBox(self.groupBoxTray)
self.checkBoxTrayOnClose.setChecked(True)
self.checkBoxTrayOnClose.setObjectName(_fromUtf8("checkBoxTrayOnClose"))
self.formLayoutTray.setWidget(2, QtGui.QFormLayout.LabelRole, self.checkBoxTrayOnClose)
self.formLayout.setWidget(1, QtGui.QFormLayout.SpanningRole, self.groupBoxTray)
self.checkBoxHideTrayConnectionNotifications = QtGui.QCheckBox(self.tabUserInterface)
self.checkBoxHideTrayConnectionNotifications.setChecked(False)
self.checkBoxHideTrayConnectionNotifications.setObjectName(_fromUtf8("checkBoxHideTrayConnectionNotifications"))
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.checkBoxHideTrayConnectionNotifications)
self.checkBoxShowTrayNotifications = QtGui.QCheckBox(self.tabUserInterface)
self.checkBoxShowTrayNotifications.setObjectName(_fromUtf8("checkBoxShowTrayNotifications"))
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.checkBoxShowTrayNotifications)
self.checkBoxPortableMode = QtGui.QCheckBox(self.tabUserInterface)
self.checkBoxPortableMode.setObjectName(_fromUtf8("checkBoxPortableMode"))
self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.checkBoxPortableMode)
self.PortableModeDescription = QtGui.QLabel(self.tabUserInterface)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.PortableModeDescription.sizePolicy().hasHeightForWidth())
self.PortableModeDescription.setSizePolicy(sizePolicy)
self.PortableModeDescription.setWordWrap(True)
self.PortableModeDescription.setObjectName(_fromUtf8("PortableModeDescription"))
self.formLayout.setWidget(5, QtGui.QFormLayout.SpanningRole, self.PortableModeDescription)
self.checkBoxWillinglySendToMobile = QtGui.QCheckBox(self.tabUserInterface)
self.checkBoxWillinglySendToMobile.setObjectName(_fromUtf8("checkBoxWillinglySendToMobile"))
self.formLayout.setWidget(6, QtGui.QFormLayout.SpanningRole, self.checkBoxWillinglySendToMobile)
self.checkBoxUseIdenticons = QtGui.QCheckBox(self.tabUserInterface)
self.checkBoxUseIdenticons.setObjectName(_fromUtf8("checkBoxUseIdenticons"))
self.formLayout.setWidget(7, QtGui.QFormLayout.LabelRole, self.checkBoxUseIdenticons)
self.checkBoxReplyBelow = QtGui.QCheckBox(self.tabUserInterface)
self.checkBoxReplyBelow.setObjectName(_fromUtf8("checkBoxReplyBelow"))
self.formLayout.setWidget(8, QtGui.QFormLayout.LabelRole, self.checkBoxReplyBelow)
self.groupBox = QtGui.QGroupBox(self.tabUserInterface)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.formLayout_2 = QtGui.QFormLayout(self.groupBox)
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.languageComboBox = LanguageBox(self.groupBox)
self.languageComboBox.setMinimumSize(QtCore.QSize(100, 0))
self.languageComboBox.setObjectName(_fromUtf8("languageComboBox"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.languageComboBox)
self.formLayout.setWidget(9, QtGui.QFormLayout.FieldRole, self.groupBox)
self.tabWidgetSettings.addTab(self.tabUserInterface, _fromUtf8(""))
self.tabNetworkSettings = QtGui.QWidget()
self.tabNetworkSettings.setObjectName(_fromUtf8("tabNetworkSettings"))
self.gridLayout_4 = QtGui.QGridLayout(self.tabNetworkSettings)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.groupBox1 = QtGui.QGroupBox(self.tabNetworkSettings)
self.groupBox1.setObjectName(_fromUtf8("groupBox1"))
self.gridLayo
|
ut_3 = QtGui.QGridLayout(self.groupBox1)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
#spacerItem = QtGui.QSpacerItem(125, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
#self.gridLayout_3.addItem(spacerItem, 0, 0, 1, 1)
self.label = QtGui.QLabel(self.groupBox1)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout_3.addWidget(self.label, 0, 0, 1, 1, QtCore.Qt.AlignRight)
self.lineEd
|
itTCPPort = QtGui.QLineEdit(self.groupBox1)
self.lineEditTCPPort.setMaximumSize(QtCore.QSize(70, 16777215))
self.lineEditTCPPort.setObjectName(_fromUtf8("lineEditTCPPort"))
self.gridLayout_3.addWidget(self.lineEditTCPPort, 0, 1, 1, 1, QtCore.Qt.AlignLeft)
self.labelUPnP = QtGui.QLabel(self.groupBox1)
self.labelUPnP.setObjectName(_fromUtf8("labelUPnP"))
self.gridLayout_3.addWidget(self.labelUPnP, 0, 2, 1, 1, QtCore.Qt.AlignRight)
self.checkBoxUPnP = QtGui.QCheckBox(self.groupBox1)
self.checkBoxUPnP.setObjectName(_fromUtf8("checkBoxUPnP"))
self.gridLayout_3.addWidget(self.checkBoxUPnP, 0, 3, 1, 1, QtCore.Qt.AlignLeft)
self.gridLayout_4.addWidget(self.groupBox1, 0, 0, 1, 1)
self.groupBox_3 = QtGui.QGroupBox(self.tabNetworkSettings)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.
|
bugfree-software/the-internet-solution-python
|
tests/test_dropdown.py
|
Python
|
mit
| 614
| 0.032573
|
from . import TheInternetTestCase
from helium.api im
|
port ComboBox, select
class DropdownTest(TheInternetTestCase):
    """Checks the dropdown page of the-internet demo application."""

    def get_page(self):
        # Page under test for every case in this suite.
        return "http://the-internet.herokuapp.com/dropdown"

    def test_dropdown_exists(self):
        # The dropdown must be present before any interaction is possible.
        self.assertTrue(ComboBox("Dropdown List").exists())

    def test_select_value(self):
        dropdown = ComboBox("Dropdown List")
        # The placeholder entry is selected initially.
        self.assertEqual(dropdown.value, u'Please select an option')
        select("Dropdown List", "Option 1")
        self.assertEqual(dropdown.value, u'Option 1')
        select("Dropdown List", "Option 2")
        self.assertEqual(dropdown.value, u'Option 2')
|
maniero/SOpt
|
Python/Operator/OrCondition.py
|
Python
|
mit
| 224
| 0.008969
|
# Ask the user's gender ("F"/"M", case-insensitive) and print the result.
letra = input("Qual seu gênero:")
if letra in ("F", "f"):
    print("Feminino")
elif letra in ("M", "m"):
    print("Masculino")
else:
    # Bug fix: the original built this string but never printed it
    # (the expression ``("sexo invalido")`` was a no-op).
    print("sexo invalido")
# https://pt.stackoverflow.com/q/405745/101
|
Vaidyanath/tempest
|
tempest/api/messaging/test_claims.py
|
Python
|
apache-2.0
| 4,171
| 0
|
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urlparse
from tempest_lib import decorators
from tempest.api.messaging import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
LOG = logging.getLogger(__name__)
CONF = config.CONF
class TestClaims(base.BaseMessagingTest):
    """Claim lifecycle tests (post/query/update/release) for the messaging
    API, run against a single queue created for the whole class."""

    @classmethod
    def resource_setup(cls):
        super(TestClaims, cls).resource_setup()
        cls.queue_name = data_utils.rand_name('Queues-Test')
        # Create the queue shared by every test in this class.
        cls.create_queue(cls.queue_name)

    def _post_and_claim_messages(self, queue_name, repeat=1):
        """Post ``repeat`` messages and claim them.

        Returns the ``(resp, body)`` pair of the claim request.
        NOTE(review): ``queue_name`` is accepted but ``self.queue_name`` is
        used throughout; kept as-is so the signature stays compatible.
        """
        # Post messages.
        message_body = self.generate_message_body(repeat=repeat)
        self.client.post_messages(queue_name=self.queue_name,
                                  rbody=message_body)
        # Post a claim with random ttl/grace inside the configured limits.
        claim_ttl = data_utils.rand_int_id(start=60,
                                           end=CONF.messaging.max_claim_ttl)
        claim_grace = data_utils.\
            rand_int_id(start=60, end=CONF.messaging.max_claim_grace)
        claim_body = {"ttl": claim_ttl, "grace": claim_grace}
        resp, body = self.client.post_claims(queue_name=self.queue_name,
                                             rbody=claim_body)
        return resp, body

    @test.attr(type='smoke')
    def test_post_claim(self):
        _, body = self._post_and_claim_messages(queue_name=self.queue_name)
        claimed_message_uri = body[0]['href']
        # Skipping this step till bug-1331517 is fixed
        # Get posted claim
        # self.client.query_claim(claimed_message_uri)
        # Delete the claimed message.
        self.client.delete_messages(claimed_message_uri)

    @decorators.skip_because(bug="1331517")
    @test.attr(type='smoke')
    def test_query_claim(self):
        # Post a claim.
        resp, body = self._post_and_claim_messages(queue_name=self.queue_name)
        # Query the claim.
        claim_uri = resp['location']
        self.client.query_claim(claim_uri)
        # Delete the claimed message.
        claimed_message_uri = body[0]['href']
        # Bug fix: every other test uses ``self.client.delete_messages``;
        # the original called ``self.delete_messages``, which is not defined
        # on this test class.
        self.client.delete_messages(claimed_message_uri)

    @decorators.skip_because(bug="1328111")
    @test.attr(type='smoke')
    def test_update_claim(self):
        # Post a claim.
        resp, body = self._post_and_claim_messages(queue_name=self.queue_name)
        claim_uri = resp['location']
        claimed_message_uri = body[0]['href']
        # Update the claim with a fresh random ttl.
        claim_ttl = data_utils.rand_int_id(start=60,
                                           end=CONF.messaging.max_claim_ttl)
        update_rbody = {"ttl": claim_ttl}
        self.client.update_claim(claim_uri, rbody=update_rbody)
        # Verify claim ttl >= updated ttl value.
        _, body = self.client.query_claim(claim_uri)
        updated_claim_ttl = body["ttl"]
        self.assertTrue(updated_claim_ttl >= claim_ttl)
        # Delete the claimed message.
        self.client.delete_messages(claimed_message_uri)

    @test.attr(type='smoke')
    def test_release_claim(self):
        # Post a claim.
        resp, body = self._post_and_claim_messages(queue_name=self.queue_name)
        claim_uri = resp['location']
        # Release the claim.
        self.client.release_claim(claim_uri)
        # Delete the claimed message.
        # This will implicitly verify that the claim is deleted.
        message_uri = urlparse.urlparse(claim_uri).path
        self.client.delete_messages(message_uri)

    @classmethod
    def resource_cleanup(cls):
        cls.delete_queue(cls.queue_name)
        super(TestClaims, cls).resource_cleanup()
|
Senseg/robotframework
|
src/robot/reporting/logreportwriters.py
|
Python
|
apache-2.0
| 2,447
| 0.001226
|
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License
|
");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY K
|
IND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from os.path import basename, splitext
import codecs
from robot.htmldata import HtmlFileWriter, ModelWriter, LOG, REPORT
from robot.utils import utf8open
from .jswriter import JsResultWriter, SplitLogWriter
class _LogReportWriter(object):
    """Shared machinery for writing log/report HTML files from a JS model."""

    def __init__(self, js_model):
        self._js_model = js_model

    def _write_file(self, path, config, template):
        # `path` may already be an open file: unit tests pass one in directly.
        if isinstance(path, basestring):
            output = codecs.open(path, 'wb', encoding='UTF-8')
        else:
            output = path
        with output:
            model_writer = RobotModelWriter(output, self._js_model, config)
            HtmlFileWriter(output, model_writer).write(template)
class RobotModelWriter(ModelWriter):
    """ModelWriter that embeds the JS result model into the HTML output."""

    def __init__(self, output, model, config):
        self._output, self._model, self._config = output, model, config

    def write(self, line):
        # `line` is required by the ModelWriter interface but not used here.
        JsResultWriter(self._output).write(self._model, self._config)
class LogWriter(_LogReportWriter):
    """Writes log.html and, when results are split, the split-log JS files."""

    def write(self, path, config):
        self._write_file(path, config, LOG)
        if self._js_model.split_results:
            self._write_split_logs(splitext(path)[0])

    def _write_split_logs(self, base):
        index = 0
        for keywords, strings in self._js_model.split_results:
            # Manual counter: `enumerate` takes a start index only in Py 2.6+.
            index += 1
            self._write_split_log(index, keywords, strings,
                                  '%s-%d.js' % (base, index))

    def _write_split_log(self, index, keywords, strings, path):
        with utf8open(path, 'wb') as outfile:
            SplitLogWriter(outfile).write(keywords, strings, index,
                                          basename(path))
class ReportWriter(_LogReportWriter):
    """Writes the report.html file via the shared _write_file pipeline."""
    def write(self, path, config):
        # Same pipeline as LogWriter.write, but with the REPORT template
        # and no split files.
        self._write_file(path, config, REPORT)
|
Comunitea/CMNT_00098_2017_JIM_addons
|
custom_documents/models/stock_picking.py
|
Python
|
agpl-3.0
| 11,479
| 0.000872
|
# -*- coding: utf-8 -*-
# © 2017 Comunitea
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api
from datetime import timedelta
from pytz import timezone
from odoo.addons import decimal_precision as dp
class StockPicking(models.Model):
    """Extends stock.picking with sale/purchase related amounts and
    delivery-date fields used by the custom report documents."""
    _inherit = 'stock.picking'
    # Mirrors the "neutral document" flag of the originating sale order.
    neutral_document = fields.Boolean('Neutral Document',
                                      related='sale_id.neutral_document')
    operator = fields.Char('Operator')
    same_day_delivery = fields.Boolean(compute='_compute_same_day_delivery')
    delivery_date = fields.Char(compute='_compute_delivery_date')
    delivery_amount = fields.Monetary(compute='_compute_delivery_amount')
    global_discount_amount = fields.Monetary(
        compute='_compute_global_discount_amount')
    min_date_date = fields.Date(compute='_compute_min_date_date')
    date_done_date = fields.Date(compute='_compute_min_date_date')
    # Service lines of the sale order, excluding delivery-cost products.
    sale_services = fields.Many2many(
        'sale.order.line', 'stock_picking_sale_order_line_services_rel',
        'picking_id', 'sale_id', compute='_compute_sale_services')
    purchase_currency_id = fields.Many2one(
        related='purchase_id.currency_id',
        string='Currency')
    @api.depends('min_date')
    def _compute_min_date_date(self):
        """Date-only versions of min_date / date_done (time part dropped)."""
        for pick in self:
            pick.min_date_date = pick.min_date and \
                pick.min_date.split(' ')[0] or False
            if pick.date_done:
                pick.date_done_date = pick.date_done.split(' ')[0]
            else:
                # Fall back to the scheduled date when not done yet.
                pick.date_done_date = pick.min_date_date
    # If the picking was finished before 17:30 on a weekday, it is shipped
    # the same day. (Translated from the original Spanish comment.)
    def _compute_same_day_delivery(self):
        for pick in self:
            if pick.date_done:
                same_day_delivery = True
                # Convert the stored UTC timestamp to the user's timezone.
                date_done = fields.Datetime.from_string(pick.date_done)\
                    .replace(tzinfo=timezone('Etc/UTC'))\
                    .astimezone(timezone(pick._context.get('tz', 'Etc/UTC')))
                # After 17:30, or on Saturday/Sunday (isoweekday 6/7),
                # it cannot go out the same day.
                if date_done.hour > 17 or \
                        (date_done.hour == 17 and date_done.minute > 30) or \
                        date_done.isoweekday() in (6, 7):
                    same_day_delivery = False
                pick.same_day_delivery = same_day_delivery
    def _compute_delivery_date(self):
        # If it is not shipped the same day, make sure the shipping day is
        # neither Saturday nor Sunday. (Translated from the original
        # Spanish comment.)
        for pick in self:
            if pick.date_done:
                if pick.same_day_delivery:
                    pick.delivery_date = pick.date_done
                else:
                    date_done = fields.Datetime.from_string(pick.date_done)
                    next_date = date_done + timedelta(days=1)
                    delivery_date = next_date
                    # Skip the weekend: Saturday -> +2 days, Sunday -> +1 day.
                    if next_date.isoweekday() == 6:
                        delivery_date = next_date + timedelta(days=2)
                    elif next_date.isoweekday() == 7:
                        delivery_date = next_date + timedelta(days=1)
                    pick.delivery_date = delivery_date
    @api.multi
    def _compute_delivery_amount(self):
        """Subtotal of the sale order's delivery-cost line (0.0 if none)."""
        for picking in self:
            delivery_line = picking.sale_id.order_line.filtered(
                lambda x: x.product_id.delivery_cost)
            if delivery_line:
                picking.delivery_amount = delivery_line[0].price_subtotal
            else:
                picking.delivery_amount = 0.0
    @api.multi
    def _compute_global_discount_amount(self):
        """Total of promotion lines plus the early-payment discount."""
        for picking in self:
            global_discount_lines = picking.sale_id.order_line.filtered(
                lambda x: x.promotion_line)
            ep_disc = picking.sale_id.total_early_discount
            if global_discount_lines or ep_disc:
                picking.global_discount_amount = sum(
                    global_discount_lines.mapped('price_subtotal')) + ep_disc
            else:
                picking.global_discount_amount = 0.0
    @api.multi
    def _compute_amount_all(self):
        """Override: include delivery cost, global discounts and service
        lines in the picking totals (sale flow), or use the purchase
        operation prices (purchase flow)."""
        res = super(StockPicking, self)._compute_amount_all()
        for pick in self:
            if pick.sale_id:
                delivery_line = pick.sale_id.order_line.filtered(
                    lambda x: x.product_id.delivery_cost)
                global_discount_lines = pick.sale_id.order_line.filtered(
                    lambda x: x.promotion_line)
                if delivery_line:
                    # With a delivery line: add its subtotal/tax on top of
                    # the operations, discounts and services.
                    amount_untaxed = sum(pick.pack_operation_ids.mapped(
                        'sale_price_subtotal')) + \
                        delivery_line[0].price_subtotal + \
                        sum(global_discount_lines.mapped('price_subtotal')) + \
                        sum(pick.sale_services.mapped('price_subtotal'))
                    amount_tax = sum(pick.pack_operation_ids.mapped(
                        'sale_price_tax')) + delivery_line[0].price_tax + \
                        sum(global_discount_lines.mapped('price_tax')) + \
                        sum(pick.sale_services.mapped('price_tax'))
                    pick.update({
                        'amount_untaxed': amount_untaxed,
                        'amount_tax': amount_tax,
                        'amount_total': amount_untaxed + amount_tax,
                    })
                else:
                    # Same computation without a delivery line.
                    amount_untaxed = sum(pick.pack_operation_ids.mapped(
                        'sale_price_subtotal')) + \
                        sum(global_discount_lines.mapped('price_subtotal')) + \
                        sum(pick.sale_services.mapped('price_subtotal'))
                    amount_tax = sum(pick.pack_operation_ids.mapped(
                        'sale_price_tax')) + \
                        sum(global_discount_lines.mapped('price_tax')) + \
                        sum(pick.sale_services.mapped('price_tax'))
                    pick.update({
                        'amount_untaxed': amount_untaxed,
                        'amount_tax': amount_tax,
                        'amount_total': amount_untaxed + amount_tax,
                    })
            elif pick.purchase_id:
                # Purchase flow: totals come from the valued pack operations.
                amount_tax = sum(pick.pack_operation_ids.mapped(
                    'purchase_price_tax'))
                amount_total = sum(pick.pack_operation_ids.mapped(
                    'purchase_price_total'))
                val = {
                    'amount_untaxed': amount_total - amount_tax,
                    'amount_tax': amount_tax,
                    'amount_total': amount_total,
                }
                pick.update(val)
        return res
    @api.depends('sale_id')
    def _compute_sale_services(self):
        """Service lines of the sale order, excluding delivery-cost products."""
        for picking in self:
            picking.sale_services = picking.sale_id.order_line.filtered(
                lambda x: x.product_id.type == 'service' and not
                x.product_id.delivery_cost)
    @api.multi
    def action_open_purchases_valued_ops(self):
        """Open the valued pack-operations tree view for this picking."""
        action = self.env.ref(
            'custom_documents.action_open_view_valued_stock_pack_op_tree').read()[0]
        action['domain'] = [('id', 'in', self.pack_operation_ids.ids)]
        action['context'] = {
            'default_picking_id': self.id,
        }
        return action
class StockMove(models.Model):
    """Adds a report-friendly description field to stock moves."""
    _inherit = 'stock.move'

    name_report = fields.Char(compute='_compute_name_report')

    @api.multi
    def _compute_name_report(self):
        # Description without the "[default_code]" prefix, for report layouts.
        for line in self:
            code_tag = '[%s]' % line.product_id.default_code
            if code_tag in line.name:
                line.name_report = line.name.replace(code_tag, '')
            else:
                line.name_report = line.name

    @api.onchange('product_id')
    def onchange_product_id(self):
        """Override the onchange to set the name in the customer's language.

        (Translated from the original Spanish docstring.)
        """
        res = super(StockMove, self).onchange_product_id()
        product = self.product_id.with_context(
            lang=self.partner_id.lang or self.env.user.lang)
        if product:
            self.name = product.name_get()[0][1]
        return res
class StockPackOperation(models.M
|
changtailiang/xbaydns
|
xbaydns/tools/dbset.py
|
Python
|
bsd-2-clause
| 760
| 0.007895
|
#!/usr/bin/env python
# encoding: utf-8
"""
dbset.py
Created by Razor <bg1tpt AT gmail.com> on 2008-03-31
Copyright (c) 2008 xBayDNS Team. All rights reserved.
"""
import bsddb, pickle, os
class Set():
    """A crude persistent set backed by a Berkeley DB btree file.

    Elements are pickled and used as DB keys; membership is looked up via
    ``__getitem__``. (Python 2 era code: relies on ``bsddb``/``os.tmpnam``.)
    """

    def __init__(self):
        # NOTE(review): os.tmpnam() is insecure and removed in Python 3;
        # kept because this module targets Python 2 / bsddb.
        self._dbname = os.tmpnam()
        # Bug fix: the original wrapped btopen in a bare ``except: pass``,
        # which left ``self._dbobj`` unset on failure and only surfaced
        # later as a confusing AttributeError. Let the error propagate.
        self._dbobj = bsddb.btopen(self._dbname)

    def add(self, element):
        """Add *element* to the set. Returns False for None, else True."""
        if element is None:
            return False
        # Bug fix: removed the leftover ``print type(...)`` debug statement.
        self._dbobj[pickle.dumps(element)] = '1'
        return True

    def __getitem__(self, element):
        """Return the stored marker for *element*; False for None.

        Raises KeyError when the element is not present (unchanged behavior).
        """
        if element is None:
            return False
        return self._dbobj[pickle.dumps(element)]
|
zenoss/ZenPacks.zenoss.Puppet
|
ZenPacks/zenoss/Puppet/interfaces.py
|
Python
|
gpl-2.0
| 1,215
| 0.002469
|
###########################################################################
#
# Copyright (C) 2012 Zenoss Inc.
#
###########################################################################
from Products.Zuul.form import schema
from Products.Zuul.utils import ZuulMessageFactory as _t
from Products.Zuul.infos.component import IComponentInfo
from Products.Zuul.interfaces import IFacade
class IPuppetClientInfo(IComponentInfo):
    """Component-info interface describing a Puppet client."""

    signed = schema.Bool(title=_t(u'Is the client SSL signed?'),
                         group='Details')
    managedDevice = schema.TextLine(title=_t(u'Zenoss Device'),
                                    group='Details')
class IPuppetFacade(IFacade):
    """Facade interface for exporting/importing Puppet-managed devices."""

    def exportDevices(deviceClass):
        """Export devices in zenbatchload format.

        @parameter deviceClass: location to start exporting devices (default /)
        @type deviceClass: string
        @return: zenbatchload format file
        @rtype: string
        """

    def importDevices(data):
        """Import devices from a zenbatchload format string.

        @parameter data: zenbatchload format file
        @type data: string
        @return: key/value pairs of import statistics
        @rtype: dictionary of category and statistic
        """
|
ekasitk/sahara
|
sahara/plugins/cdh/v5_3_0/validation.py
|
Python
|
apache-2.0
| 11,076
| 0
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.i18n import _
from sahara.plugins.cdh.v5_3_0 import plugin_utils as pu
from sahara.plugins import exceptions as ex
from sahara.plugins import utils as u
from sahara.utils import general as gu
PU = pu.PluginUtilsV530()
def validate_cluster_creating(cluster):
mng_count = _get_inst_count(cluster, 'CLOUDERA_MANAGER')
if mng_count != 1:
raise ex.InvalidComponentCountException('CLOUDERA_MANAGER',
1, mng_count)
nn_count = _get_inst_count(cluster, 'HDFS_NAMENODE')
if nn_count != 1:
raise ex.InvalidComponentCountException('HDFS_NAMENODE', 1, nn_count)
snn_count = _get_inst_count(cluster, 'HDFS_SECONDARYNAMENODE')
if snn_count != 1:
raise ex.InvalidComponentCountException('HDFS_SECONDARYNAMENODE', 1,
snn_count)
dn_count = _get_inst_count(cluster, 'HDFS_DATANODE')
replicas = PU.get_config_value('HDFS', 'dfs_replication', cluster)
if dn_count < replicas:
raise ex.InvalidComponentCountException(
'HDFS_DATANODE', replicas, dn_count,
_('Number of datanodes must be not less than dfs_replication.'))
rm_count = _get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
if rm_count > 1:
raise ex.InvalidComponentCountException('YARN_RESOURCEMANAGER',
_('0 or 1'), rm_count)
hs_count = _get_inst_count(cluster, 'YARN_JOBHISTORY')
if hs_count > 1:
raise ex.InvalidComponentCountException('YARN_JOBHISTORY',
_('0 or 1'), hs_count)
if rm_count > 0 and hs_count < 1:
raise ex.RequiredServiceMissingException(
'YARN_JOBHISTORY', required_by='YARN_RESOURCEMANAGER')
nm_count = _get_inst_count(cluster, 'YARN_NODEMANAGER')
if rm_count == 0:
if nm_count > 0:
raise ex.RequiredServiceMissingException(
'YARN_RESOURCEMANAGER', required_by='YARN_NODEMANAGER')
oo_count = _get_inst_count(cluster, 'OOZIE_SERVER')
if oo_count > 1:
raise ex.InvalidComponentCountException('OOZIE_SERVER', _('0 or 1'),
oo_count)
if oo_count == 1:
if dn_count < 1:
raise ex.RequiredServiceMissingException(
'HDFS_DATANODE', required_by='OOZIE_SERVER')
if nm_count < 1:
raise ex.RequiredServiceMissingException(
'YARN_NODEMANAGER', required_by='OOZIE_SERVER')
if hs_count != 1:
raise ex.RequiredServiceMissingException(
'YARN_JOBHISTORY', required_by='OOZIE_SERVER')
hms_count = _get_inst_count(cluster, 'HIVE_METASTORE')
hvs_count = _get_inst_count(cluster, 'HIVE_SERVER2')
whc_count = _get_inst_count(cluster, 'HIVE_WEBHCAT')
if hms_count and rm_count < 1:
raise ex.RequiredServiceMissingException(
'YARN_RESOURCEMANAGER', required_by='HIVE_METASTORE')
if hms_count and not hvs_count:
raise ex.RequiredServiceMissingException(
'HIVE_SERVER2', required_by='HIVE_METASTORE')
if hvs_count and not hms_count:
raise ex.RequiredServiceMissingException(
'HIVE_METASTORE', required_by='HIVE_SERVER2')
if whc_count and not hms_count:
raise ex.RequiredServiceMissingException(
'HIVE_METASTORE', required_by='HIVE_WEBHCAT')
hue_count = _get_inst_count(cluster, 'HUE_SERVER')
if hue_count > 1:
raise ex.InvalidComponentCountException('HUE_SERVER', _('0 or 1'),
hue_count)
shs_count = _get_inst_count(cluster, 'SPARK_YARN_HISTORY_SERVER')
if shs_count > 1:
raise ex.InvalidComponentCountException('SPARK_YARN_HISTORY_SERVER',
_('0 or 1'), shs_count)
if shs_count and not rm_count:
raise ex.RequiredServiceMissingException(
'YARN_RESOURCEMANAGER', required_by='SPARK_YARN_HISTORY_SERVER')
if oo_count < 1 and hue_count:
raise ex.RequiredServiceMissingException(
'OOZIE_SERVER', required_by='HUE_SERVER')
if hms_count < 1 and hue_count:
raise ex.RequiredServiceMissingException(
'HIVE_METASTORE', required_by='HUE_SERVER')
hbm_count = _get_inst_count(cluster, 'HBASE_MASTER')
hbr_count = _get_inst_count(cluster, 'HBASE_REGIONSERVER')
zk_count = _get_inst_count(cluster, 'ZOOKEEPER_SERVER')
if hbm_count >= 1:
if zk_count < 1:
raise ex.RequiredServiceMissingException('ZOOKEEPER',
required_by='HBASE')
if hbr_count < 1:
raise ex.InvalidComponentCountException(
'HBASE_REGIONSERVER', _('at least 1'), hbr_count)
elif hbr_count >= 1:
raise ex.InvalidComponentCountException('HBASE_MASTER',
_('at least 1'), hbm_count)
a_count = _get_inst_count(cluster, 'FLUME_AGENT')
if a_count >= 1:
if dn_count < 1:
raise ex.RequiredServiceMissingException(
'HDFS_DATANODE', required_by='FLUME_AGENT')
snt_count = _get_inst_count(cluster, 'SENTRY_SERVER')
if snt_count > 1:
raise ex.InvalidComponentCountException('SENTRY_SERVER', _('0 or 1'),
snt_count)
if snt_count == 1:
if dn_count < 1:
raise ex.RequiredServiceMissingException(
'HDFS_DATANODE', required_by='SENTRY_SERVER')
if zk_count < 1:
raise ex.RequiredServiceMissingException(
'ZOOKEEPER', required_by='SENTRY_SERVER')
slr_count = _get_inst_count(cluster, 'SOLR_SERVER')
if slr_count >= 1:
if dn_count < 1:
raise ex.RequiredServiceMissingException(
'HDFS_DATANODE', required_by='SOLR_SERVER')
if zk_count < 1:
raise ex.RequiredServiceMissingException(
'ZOOKEEPER', required_by='SOLR_SERVER')
s2s_count = _get_i
|
nst_count(cluster, 'SQOOP_SERVER')
if s2s_count > 1:
raise ex.InvalidComponentCountException('SQOOP_SERVER', _('0 or 1'),
s2s_count)
if s2s_count == 1:
if dn_count < 1:
raise ex.RequiredServiceMissingException(
'HDFS_DATANODE', required_by='SQOOP_SERVER')
if nm_count < 1:
raise ex.RequiredServiceMissingException(
|
'YARN_NODEMANAGER', required_by='SQOOP_SERVER')
if hs_count != 1:
raise ex.RequiredServiceMissingException(
'YARN_JOBHISTORY', required_by='SQOOP_SERVER')
lhbi_count = _get_inst_count(cluster, 'HBASE_INDEXER')
if lhbi_count >= 1:
if dn_count < 1:
raise ex.RequiredServiceMissingException(
'HDFS_DATANODE', required_by='HBASE_INDEXER')
if zk_count < 1:
raise ex.RequiredServiceMissingException(
'ZOOKEEPER', required_by='HBASE_INDEXER')
if slr_count < 1:
raise ex.RequiredServiceMissingException(
'SOLR_SERVER', required_by='HBASE_INDEXER')
if hbm_count < 1:
raise ex.RequiredServiceMissingException(
'HBASE_MASTER', required_by='HBASE_INDEXER')
ics_count = _get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
iss_count = _get_inst_count(cluster, 'IMPALA_STATESTORE')
id_count = _get_inst_co
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/unittest/__init__.py
|
Python
|
gpl-3.0
| 3,117
| 0.003208
|
"""
Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
Smalltalk testing framework.
This module contains the core framework classes that form the basis of
specific test cases and suites (TestCase, TestSuite etc.), and also a
text-based utility class for running the tests and reporting the results
(TextTestRunner).
Simple usage:
import unittest
class IntegerArithmeticTestCase(unittest.TestCase):
def testAdd(self): ## test method names begin 'test*'
self.assertEqual((1 + 2), 3)
self.assertEqual(0 + 1, 1)
def testMultiply(self):
self.assertEqual((0 * 10), 0)
self.assertEqual((5 * 8), 40)
if __name__ == '__main__':
unittest.main()
Further information is available in the bundled documentation, and from
http://docs.python.org/library/unittest.html
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROV
|
IDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
__all__ = ['TestResult', 'TestCase', 'TestSuite',
'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
'expectedFailur
|
e', 'TextTestResult', 'installHandler',
'registerResult', 'removeResult', 'removeHandler']
# Expose obsolete functions for backwards compatibility
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
__unittest = True
from .result import TestResult
from .case import (TestCase, FunctionTestCase, SkipTest, skip, skipIf,
skipUnless, expectedFailure)
from .suite import BaseTestSuite, TestSuite
from .loader import (TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,
findTestCases)
from .main import TestProgram, main
from .runner import TextTestRunner, TextTestResult
from .signals import installHandler, registerResult, removeResult, removeHandler
# deprecated
_TextTestResult = TextTestResult
# There are no tests here, so don't try to run anything discovered from
# introspecting the symbols (e.g. FunctionTestCase). Instead, all our
# tests come from within unittest.test.
def load_tests(loader, tests, pattern):
    """Discover unittest's own bundled tests instead of introspecting the
    symbols of this package (see the module-level comment above __all__)."""
    import os.path
    # The package directory is cached on the loader instance by discover().
    package_dir = os.path.dirname(__file__)
    return loader.discover(start_dir=package_dir, pattern=pattern)
|
nOkuda/ankura
|
ankura/tokenize.py
|
Python
|
gpl-3.0
| 1,360
| 0
|
"""A collect
|
ion of tokenizers for use with ankura import pipelines"""
import re
import bs4
# Note: Each tokenizer takes in a string, and returns a list of tokens
def split(data):
    """Tokenizer that only whitespace-splits, with no other processing."""
    return data.split()
def simple(data, splitter=split):
    """Split *data* with *splitter*, lowercase each token, strip
    non-alphabetic characters, and drop tokens that end up empty.
    """
    cleaned = []
    for raw in splitter(data):
        token = re.sub(r'[^a-z]', '', raw.lower())
        if token:
            cleaned.append(token)
    return cleaned
def news(data, tokenizer=simple):
    """Tokenize a 20-newsgroups style document, skipping its header.

    The header is everything up to the first blank line; when no blank
    line exists, the entire document is tokenized.
    """
    header_end = re.search(r'\n\s*\n', data, re.MULTILINE)
    body = data[header_end.end():] if header_end else data
    return tokenizer(body)
def html(data, tokenizer=simple):
    """Tokenize the visible text content of an HTML document."""
    soup = bs4.BeautifulSoup(data, 'html.parser')
    return tokenizer(soup.get_text())
def noop(data):
    """Identity tokenizer: returns *data* untouched."""
    return data
|
jstacoder/flask-manage
|
flask_mrbob/templates/project/+project.name+/basemodels.py
|
Python
|
bsd-3-clause
| 1,359
| 0.001472
|
# -*- coding: utf-8 -*-
"""
basemodels.py
~~~~~~~~~~~
"""
from flask.ext.login import UserMixin
from sqlalchemy.ext.declarative import declared_attr
from werkzeug.security import generate_password_hash, check_password_
|
hash
from ext import db
class BaseMixin(object):
    """Mixin adding an integer primary key and CRUD helpers to models."""

    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer, db.Sequence('user_id_seq'), primary_key=True)

    @classmethod
    def get_by_id(cls, id):
        """Return the record with primary key *id*, or None when *id* is
        not a usable numeric value."""
        if (isinstance(id, basestring) and id.isdigit()) or \
                isinstance(id, (int, float)):
            return cls.query.get(int(id))
        return None

    @classmethod
    def create(cls, **kwargs):
        """Instantiate, persist and return a new record."""
        instance = cls(**kwargs)
        return instance.save()

    def update(self, commit=True, **kwargs):
        """Set the given attributes; commit unless *commit* is False."""
        for attr, value in kwargs.iteritems():
            setattr(self, attr, value)
        # Clearer equivalent of the original ``commit and self.save() or self``
        # (``save`` returns ``self``, which is truthy).
        return self.save() if commit else self

    def save(self, commit=True):
        """Add to the session and (optionally) commit; returns self."""
        db.session.add(self)
        if commit:
            db.session.commit()
        return self

    def delete(self, commit=True):
        """Delete from the session; commit unless *commit* is False."""
        db.session.delete(self)
        return commit and db.session.commit()

    @property
    def absolute_url(self):
        return self._get_absolute_url()

    def _get_absolute_url(self):
        # Bug fix: the original raised ``NotImplemented`` — a sentinel
        # value, not an exception class — which is always wrong (and a
        # TypeError in Python 3). NotImplementedError is the correct
        # exception for an abstract hook.
        raise NotImplementedError('need to define _get_absolute_url')
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/DBXRef.py
|
Python
|
apache-2.0
| 9,448
| 0.006245
|
class DBXRef:
    """A cross-reference into an external biological database.

    Rendered as ``dbname/dbid``, optionally as ``dbname/reftype=dbid``,
    and wrapped in ``not(...)`` when negated.
    """

    def __init__(self, dbname, dbid, reftype=None, negate=0):
        self.dbname = dbname
        self.dbid = dbid
        self.reftype = reftype
        self.negate = negate

    def __str__(self):
        prefix = "" if self.reftype is None else self.reftype + "="
        text = "%s/%s%s" % (self.dbname, prefix, self.dbid)
        return "not(%s)" % text if self.negate else text

    __repr__ = __str__
class BioformatDBName:
    """Identity mapping: bioformat database names are already canonical."""

    def __getitem__(self, name):
        return name
class UnknownDBName:
    """Map an unrecognised database name to an ``x-unknown-`` prefixed form."""

    def __getitem__(self, name):
        return "x-unknown-%s" % name.lower()
dbname_conversions = {
"bioformat": BioformatDBName(),
"unknown": UnknownDBName(),
"sp": {"AARHUS/GHENT-2DPAGE": "x-aarhus-ghent-2dpage",
"CARBBANK": "x-carbbank",
"DICTYDB": "x-dictydb",
"ECO2DBASE": "x-eco2dbase",
"ECOGENE": "x-ecogene",
"EMBL": "embl", # EMBL (in GO)
"FLYBASE": "fb", # Flybase (in GO)
"GCRDB": "x-gcrdb",
"HIV": "x-hiv",
"HSC-2DPAGE": "x-hsc",
"HSSP": "x-hssp",
"MAIZE-2DPAGE": "x-maize",
"MAIZEDB": "x-maizedb",
"MENDEL": "x-mendel",
"MGD": "mgd", # (in GO)
"MIM": "x-mim",
"PDB": "x-pdb", # Protein Data Bank
"PFAM": "x-pfam",
"PIR": "pir", # GO
"PROSITE": "x-prosite",
"REBASE": "x-rebase",
"SGD": "sgd", # GO
"STYGENE": "x-stygene",
"SUBTILIST": "x-subtilist",
"SWISS-2DPAGE": "x-swiss",
"TIGR": "tigr", # GO
"TRANSFAC": "x-transfac",
"WORMPEP": "x-wormpep",
"YEPD": "x-yepd",
"ZFIN": "x-zfin",
},
"go": {"CGEN": "cgen", # Compugen, Inc.
"DDB": "ddb", # DictyBase (Dictyostelium discoideum)
"DDBJ": "ddbj", # DNA Database of Japan
"EC": "ec", # Enzyme Commission
"EMBL": "embl", # EMBL Nucleotide Sequence Data Library
"ENSEMBL": "ensembl", # ENSEMBL
"ENZYME": "enzyme", # ENZYME
"FB": "fb", # FlyBase
"GB": "gb", # GenBank
"GO": "go", # Gene Ontology
"GXD": "gxd", # Gene Expression Database (mouse)
"IPR": "ipr", # InterPro
"ISBN": "isbn", # International Standard Book Number
"IUBMB": "iubmb", # International Union of Biochemistry
# and Molecular Biology
"IUPAC": "iupac", # International Union of Pure and Applied
# Chemistry
"MEDLINE": "medline", # MEDLINE
"MGD": "mgd", # Mouse Genome Database
"MGI": "mgi", # Mouse Genome Informatics
"NC-IUBMB": "NC-IUBMB",
# Nomenclature Committee of the International
# Union of Biochemistry and Molecular Biology
"PIR": "pir", # PIR
"PMID": "pmid", # PubMed
"Pombase": "pombase", # Schizosaccharomyces pombe
"Pompep": "pompep", # Schizosaccharomyces pombe Protein
# Sequence Database
"RESID": "resid", # RESID (protein post-translational modifications)
"SGD": "sgd", # Saccharomyces Genome Database
"SP": "sp", # SWISS-PROT
"SWALL": "swall", # SWISS-PROT + TrEMBL + TrEMBLnew
"TAIR": "tair", # The Arabidopsis Information Resource
"taxonID": "taxonid", # Taxonomy ID
"TC": "tc", # Transport Commission
"TIGR": "tigr", # The Institute of Genome Research
"TR": "tr", # TrEMBL
"WB": "wb", # WormBase (Caenorhabditis elegans)
},
# http://www.ncbi.nlm.nih.gov/collab/db_xref.html
"genbank": {
"ATCC": "x-atcc", # American Type Culture Collection database
# /db_xref="ATCC:123456"
"ATCC(in host)": "x-atcc-host", # See above
"ATCC(dna)": "x-atcc-dna", # See above
"BDGP_EST": "x-bdgp-est", # Berkeley Drosophila Genome Project
# EST database
# /db_xref="BDGP_EST:123456"
"BDGP_INS": "x-bdgp-ins", # Berkeley Drosophila Genome Project
# database -- Insertion
# /db_xref="BDGP_INS:123456"
"dbEST": "x-dbest", # EST database maintained at the NCBI.
# /db_xref="dbEST:123456"
"dbSNP": "x-dbsnp", # Variation database maintained at the NCBI.
# /db_xref="dbSNP:4647"
"dbSTS": "x-dbsts", # STS database maintained at the NCBI.
# /db_xref="dbSTS:456789"
"ENSEMBL": "ensembl", # Database of automatically annotated genomic data
# /db_xref="ENSEMBL:HUMAN-Clone-AC005612"
# /db_xref="ENSEMBL:HUMAN-Gene-ENSG00000007102"
"ESTLIB": "x-estlib", # EBI's EST library identifier #'
# /db_xref="ESTLIB:1200"
"FANTOM_DB": "x-fantom-db", # Database of Functional Annotation of Mouse
# /db_xref="FANTOM_DB:0610005A07"
"FLYBASE": "fb", # Database of Genetic and molecular data of Drosophila.
# /db_xref="FLYBASE:FBgn0000024"
"GDB": "x-gdb", # Human Genome Database accession numbers.
# /db_xref="GDB:G00-128-600"
"GI": "x-gi", # GenInfo identifier, used as a unique
|
sequence
# identifier for nucleotide and proteins.
# /db_xref="GI:1234567890"
"GO": "go", # Gene Ontology Database identifier
# /db_xref="GO:123"
"IMGT/LIGM": "x-imgt-ligm", # Immunogenetics database, immunoglobulins
# and T-cell receptors
# /db_xref="IM
|
GT/LIGM:U03895"
"IMGT/HLA": "x-imgt-hla", # Immunogenetics database, human MHC
# /db_xref="IMGT/HLA:HLA00031"
"LocusID": "x-locus-id", # NCBI LocusLink ID.
# /db_xref="LocusID:51199"
"MaizeDB": "x-maizedb", # Maize Genome Database unique identifiers.
# /db_xref="MaizeDB:Probe/79847"
"MGD": "mgd", # Mouse Genome Database accession numbers.
# /db_xref="MGD:123456"
"MGI": "mgi", # Medicago Genome Initiative
# /db_xref="MGI:S:20819"
"MIM": "x-mim", # Mendelian Inheritance in Man numbers.
# /db_xref="MIM:123456"
"niaEST": "x-niaEST", # NIA Mouse cDNA Project
# /db_xref="niaEST:L0304H12-3"
"PIR": "pir", # Protein Information Resource accession numbers.
# /db_xref="PIR:S12345"
"PSEUDO": "x-pseudo-embl", # EMBL pseudo protein identifier
# /db_xref="PSEUDO:CAC44644.1"
"RATMAP": "x-ratmap", # Rat Genome Database
# /db_xref="RATMAP:5"
"RiceGenes": "x-ricegenes", # Rice database accession numbers.
# /db_xref="RiceGenes:AA231856"
"REMTREMBL": "x-remtrembl",
# Computer-annotated protein sequence database containing
# the translations of those codings sequences (CDS) present
# in the EMBL Nucleotide Sequence Database that won't be '
# included in SWISS-PROT. These include: immunoglobulins and
# T-cell receptors, synthetic sequences, patent application
# sequences, small fragments, CDS not coding for real
# proteins and truncated proteins.
# example: /db_xref="REMTREMBL:CAC01666"
"RZPD": "x-rzpd", # Resource Centre Primary Dat
|
mlperf/training_results_v0.7
|
Google/benchmarks/dlrm/implementations/dlrm-research-TF-tpu-v4-16/dlrm_main.py
|
Python
|
apache-2.0
| 8,831
| 0.006115
|
"""Training script for DLRM model."""
import functools
import REDACTED
from absl import app as absl_app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from REDACTED.tensorflow.python.tpu import tpu_embedding
from REDACTED.tensorflow_models.mlperf.models.rough.dlrm import dataloader
from REDACTED.tensorflow_models.mlperf.models.rough.dlrm import dlrm
from REDACTED.tensorflow_models.mlperf.models.rough.dlrm import dlrm_embedding_runner
from REDACTED.tensorflow_models.mlperf.models.rough.dlrm import feature_config as fc
from REDACTED.tensorflow_models.mlperf.models.rough.dlrm import utils
from REDACTED.tensorflow_models.mlperf.models.rough.dlrm_tf2 import common
from REDACTED.tensorflow_models.mlperf.models.rough.mlp_log import mlp_log
from REDACTED.tensorflow_models.mlperf.models.rough.util.roc_metrics.python import roc_metrics
FLAGS = flags.FLAGS
_NUM_TRAIN_EXAMPLES = 4195197692
_NUM_EVAL_EXAMPLES = 89137318
_ACCURACY_THRESH = 0.8025
def get_input_fns(params, feature_config):
  """Build the train and eval input functions for the DLRM model.

  Args:
    params: dict of hyperparameters (batch sizes, shard count, data flags).
    feature_config: fc.FeatureConfig describing the categorical features.

  Returns:
    A (train_input_fn, eval_input_fn) tuple of dataloader reader objects.
  """

  def _csv_record_path(mode):
    # Glob pattern over the shuffled "golden" TSV shards for the given mode.
    pattern = "{data_dir}/terabyte_processed_golden_shuffled/{mode}/{mode}*"
    return pattern.format(data_dir=FLAGS.data_dir, mode=mode)

  def _batched_tfrecord_path(mode):
    """Pre-generated data: 16 files per task for batch_size 64K."""
    replica_batch_size = params["batch_size"] // params["num_shards"]
    if replica_batch_size > 1024:
      # Minimum number of files.
      shard_count = 64
    elif replica_batch_size == 432:
      # Special case for batch 54K hparams.
      shard_count = 256
    else:
      shard_count = (128 * 1024) // replica_batch_size
    pattern = ("{data_dir}/terabyte_tfrecords_batched{bs}/"
               "{mode}{file_cnt}shards/{mode}*")
    return pattern.format(
        data_dir=FLAGS.data_dir,
        bs=replica_batch_size,
        mode=mode,
        file_cnt=shard_count)

  if FLAGS.use_batched_tfrecords:
    def _make_reader(mode, training):
      # Pre-batched TFRecords: the reader also needs the full params dict.
      return dataloader.CriteoTFRecordReader(
          file_path=_batched_tfrecord_path(mode),
          feature_config=feature_config,
          is_training=training,
          use_cached_data=params["use_cached_data"],
          use_synthetic_data=params["use_synthetic_data"],
          params=params)
  else:
    def _make_reader(mode, training):
      # Raw TSV path: parsing is parallelized across 16 reader threads.
      return dataloader.CriteoTsvReader(
          file_path=_csv_record_path(mode),
          feature_config=feature_config,
          is_training=training,
          parallelism=16,
          use_cached_data=params["use_cached_data"],
          use_synthetic_data=params["use_synthetic_data"])

  return _make_reader("train", True), _make_reader("eval", False)
def run_model(params,
eval_init_fn=None,
eval_finish_fn=None,
run_finish_fn=None):
"""Run the DLRM model, using a pre-defined configuration.
Args:
params: HPTuner object that provides new params for the trial.
eval_init_fn: Lambda to run at start of eval. None means use the default.
eval_finish_fn: Lambda for end of eval. None means use the default.
run_finish_fn: Lambda for end of execution. None means use the default.
Returns:
A list of tuples, each entry describing the eval metric for one eval. Each
tuple entry is (global_step, metric_value).
"""
mlp_log.mlperf_print(key="cache_clear", value=True)
mlp_log.mlperf_print(key="init_start", value=None)
mlp_log.mlperf_print("global_batch_size", params["batch_size"])
mlp_log.mlperf_print("train_samples", _NUM_TRAIN_EXAMPLES)
mlp_log.mlperf_print("eval_samples", _NUM_EVAL_EXAMPLES)
adjusted_lr = params["learning_rate"] * (params["batch_size"] / 2048.0)
mlp_log.mlperf_print("opt_base_learning_rate", adjusted_lr)
mlp_log.mlperf_print("sgd_opt_base_learning_rate", adjusted_lr)
mlp_log.mlperf_print("sgd_opt_learning_rate_decay_poly_power", 2)
mlp_log.mlperf_print("sgd_opt_learning_rate_decay_steps",
params["decay_steps"])
mlp_log.mlperf_print("lr_decay_start_steps", params["decay_start_step"])
mlp_log.mlperf_print("opt_learning_rate_warmup_steps",
params["lr_warmup_steps"])
# Used for vizier. List of tuples. Each entry is (global_step, auc_metric).
eval_metrics = [(0, 0.0)]
feature_config = fc.FeatureConfig(params)
(feature_to_config_dict,
table_to_config_dict) = feature_config.get_feature_tbl_config()
opt_params = {
"sgd":
tpu_embedding.StochasticGradientDescentParameters(
learning_rate=params["learning_rate"]),
"adagrad":
tpu_embedding.AdagradParameters(
learning_rate=params["learning_rate"],
initial_accumulator=params["adagrad_init_accum"])
}
embedding = tpu_embedding.TPUEmbedding(
table_to_config_dict,
feature_to_config_dict,
params["batch_size"],
mode=tpu_embedding.TRAINING,
optimization_parameters=opt_params[params["optimizer"]],
partition_strategy="mod",
pipeline_execution_with_tensor_core=FLAGS.pipeline_execution,
master=FLAGS.master)
runner = dlrm_embedding_runner.DLRMEmbeddingRunner(
iterations_per_loop=FLAGS.steps_between_evals,
train_steps=FLAGS.train_steps,
eval_steps=FLAGS.eval_steps,
num_replicas=FLAGS.num_tpu_shards,
sparse_features_key="cat-features",
embedding=embedding)
train_input_fn, eval_input_fn = get_input_fns(params, feature_config)
runner.initialize(
train_input_fn,
eval_input_fn,
functools.partial(dlrm.dlrm_llr_model_fn, params, feature_config),
params["batch_size"],
params["eval_batch_size"],
train_has_labels=False,
eval_has_labels=False)
mlp_log.mlperf_print("init_stop", None)
mlp_log.mlperf_print("run_start", None)
def _default_eval_init_fn(cur_step):
"""Logging statements executed before every eval."""
eval_num = cur_step // FLAGS.steps_between_evals
tf.logging.info("== Block {}. Step {} of {}".format(eval_num + 1, cur_step,
FLAGS.train_steps))
mlp_log.mlperf_print(
"block_start",
None,
metadata={
"first_epoch_num": eval_num + 1,
"epoch_count": 1
})
mlp_log.mlperf_print(
"eval_start", None, metadata={"epoch_num": eval_num + 1})
def _default_eval_finish_fn(cur_step, eval_output, summary_writer=None):
eval_num = cur_step // FLAGS.steps_between_evals
mlp_log.mlperf_print(
"eval_stop", None, metadata={"epoch_num": eval_num + 1})
mlp_log.mlperf_print(
"block_stop", None, metadata={"first_epoch_num": eval_num + 1})
tf.logging.info(
"== Eval finished (step {}). Computing metric..".format(cur_step))
results_np = np.array(eval_output["results"])
results_np = np.reshape(results_np, (-1, 2))
predictions_np = results_np[:, 0].astype(np.float32)
targets_np = results_np[:, 1].astype(np.int32)
roc_obj = roc_metrics.RocMetrics(predictions_np, targets_np)
roc_auc = roc_obj.ComputeRocAuc()
tf.logging.info("== Eval shape: {}. AUC = {:.4f}".format(
predictions_np.shape, roc_auc))
success = roc_auc
|
>= _ACCURACY_THRESH
mlp_log.mlperf_print(
"eval_accuracy", roc_auc, metadata={"epoch_num": eval_num + 1})
if success:
mlp_log.mlperf_print("run_stop", None, metadata={"status": "success"})
if summary_writer:
summary_writer.add_summary(
|
utils.create_scalar_summary("auc", roc_auc),
global_step=cur_step + FLAGS.steps_between_evals)
eval_metrics.append((cur_step + FLAGS.steps_between_evals, roc_auc))
return success
def _default_run_finish_fn(success_status):
if not success_s
|
tysonholub/twilio-python
|
tests/integration/preview/hosted_numbers/authorization_document/test_dependent_hosted_number_order.py
|
Python
|
mit
| 5,011
| 0.002594
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class DependentHostedNumberOrderTestCase(IntegrationTestCase):
    """Integration tests for listing DependentHostedNumberOrders.

    All HTTP traffic is mocked through the holodeck fixture; no request
    reaches Twilio.
    """

    def test_list_request(self):
        """list() must issue a GET to the expected preview-API URL."""
        # Mock a 500 so the client raises; we only care about the request shape.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.preview.hosted_numbers.authorization_documents(sid="PXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                              .dependent_hosted_number_orders.list()

        self.holodeck.assert_has_request(Request(
            'get',
            'https://preview.twilio.com/HostedNumbers/AuthorizationDocuments/PXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/DependentHostedNumberOrders',
        ))

    def test_read_empty_response(self):
        """An empty "items" page must deserialize to a non-None (empty) list."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "meta": {
                    "first_page_url": "https://preview.twilio.com/HostedNumbers/AuthorizationDocuments/PXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/DependentHostedNumberOrders?Status=completed&FriendlyName=example&PhoneNumber=%2B19193608000&UniqueName=something123&IncomingPhoneNumberSid=PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&PageSize=50&Page=0",
                    "key": "items",
                    "next_page_url": null,
                    "page": 0,
                    "page_size": 50,
                    "previous_page_url": null,
                    "url": "https://preview.twilio.com/HostedNumbers/AuthorizationDocuments/PXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/DependentHostedNumberOrders?Status=completed&FriendlyName=example&PhoneNumber=%2B19193608000&UniqueName=something123&IncomingPhoneNumberSid=PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&PageSize=50&Page=0"
                },
                "items": []
            }
            '''
        ))

        actual = self.client.preview.hosted_numbers.authorization_documents(sid="PXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                                   .dependent_hosted_number_orders.list()

        self.assertIsNotNone(actual)

    def test_read_full_response(self):
        """A fully-populated page must deserialize without errors."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "meta": {
                    "first_page_url": "https://preview.twilio.com/HostedNumbers/AuthorizationDocuments/PXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/DependentHostedNumberOrders?PageSize=50&Page=0",
                    "key": "items",
                    "next_page_url": null,
                    "page": 0,
                    "page_size": 50,
                    "previous_page_url": null,
                    "url": "https://preview.twilio.com/HostedNumbers/AuthorizationDocuments/PXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/DependentHostedNumberOrders?PageSize=50&Page=0"
                },
                "items": [
                    {
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "address_sid": "AD11111111111111111111111111111111",
                        "call_delay": 15,
                        "capabilities": {
                            "sms": true,
                            "voice": false
                        },
                        "cc_emails": [
                            "aaa@twilio.com",
                            "bbb@twilio.com"
                        ],
                        "date_created": "2017-03-28T20:06:39Z",
                        "date_updated": "2017-03-28T20:06:39Z",
                        "email": "test@twilio.com",
                        "extension": "1234",
                        "friendly_name": "friendly_name",
                        "incoming_phone_number_sid": "PN11111111111111111111111111111111",
                        "phone_number": "+14153608311",
                        "sid": "HRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "signing_document_sid": "PX11111111111111111111111111111111",
                        "status": "received",
                        "failure_reason": "",
                        "unique_name": "foobar",
                        "verification_attempts": 0,
                        "verification_call_sids": [
                            "CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                            "CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"
                        ],
                        "verification_code": "8794",
                        "verification_document_sid": null,
                        "verification_type": "phone-call"
                    }
                ]
            }
            '''
        ))

        actual = self.client.preview.hosted_numbers.authorization_documents(sid="PXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                                   .dependent_hosted_number_orders.list()

        self.assertIsNotNone(actual)
|
cjaymes/pyscap
|
src/scap/model/cpe_naming_2_3/__init__.py
|
Python
|
gpl-3.0
| 821
| 0
|
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without
|
even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www
|
.gnu.org/licenses/>.
# Maps qualified XML element type names (in the CPE 2.0 naming namespace)
# to the names of the local model classes that implement them.
TAG_MAP = {
    '{http://cpe.mitre.org/naming/2.0}cpe22Type': 'Cpe22Type',
    '{http://cpe.mitre.org/naming/2.0}cpe23Type': 'Cpe23Type',
}
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_async.py
|
Python
|
apache-2.0
| 1,632
| 0.001838
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for QueryArtifactLineageSubgraph
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryArtifactLineageSubgraph_async]
from google.cloud import aiplatform_v1beta1
async def sample_query_artifact_lineage_subgraph():
    """Query the lineage subgraph of a Metadata Store artifact (async sample).

    Sends a QueryArtifactLineageSubgraph request for a placeholder artifact
    resource name and prints the raw response.
    """
    # Create a client
    client = aiplatform_v1beta1.MetadataServiceAsyncClient()

    # Initialize request argument(s); "artifact_value" is a placeholder for a
    # full artifact resource name.
    request = aiplatform_v1beta1.QueryArtifactLineageSubgraphRequest(
        artifact="artifact_value",
    )

    # Make the request
    response = await client.query_artifact_lineage_subgraph(request=request)

    # Handle the response
    print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryArtifactLi
|
neageSubgraph_async]
|
liqd/adhocracy4
|
adhocracy4/reports/admin.py
|
Python
|
agpl-3.0
| 350
| 0
|
from django.contrib import admin
from .models import Report
@admin.register(Report)
class ReportAdmin(admin.ModelAdmin):
    # Reports are user submissions: the reported object and its submitter are
    # shown read-only in the change form, leaving only the description
    # editable by moderators.
    fields = ('content_type', 'content_object', 'description', 'creator')
    readonly_fields = ('creator', 'content_type', 'content_object')
    list_display = ('__str__', 'creator', 'created')
    date_hierarchy = 'created'
|
vmonteco/YAPT
|
test_files/uu_cases_regular.py
|
Python
|
gpl-3.0
| 907
| 0
|
# -*- coding: utf-8 -*-
from tools.factories import generator_factory
import ctypes
# Conversion test cases for the non-standard '%U' directive: each case pairs a
# format string with the single ctypes argument to pass.
basic_cases = [
    [b'%U\n', ctypes.c_long(0)],
    [b'% U\n', ctypes.c_long(0)],
    [b'%+U\n', ctypes.c_long(0)],
    [b'%-U\n', ctypes.c_long(0)],
    [b'%0U\n', ctypes.c_long(0)],
    [b'%#U\n', ctypes.c_long(0)],
    [b'%10U\n', ctypes.c_long(0)],
    [b'%.6U\n', ctypes.c_long(0)],
    [b'%hhU\n', ctypes.c_long(0)],
    [b'%llU\n', ctypes.c_long(0)],
    [b'%hU\n', ctypes.c_long(0)],
    [b'%lU\n', ctypes.c_long(0)],
    [b'%jU\n', ctypes.c_long(0)],
    [b'%zU\n', ctypes.c_long(0)],
]

# Cases that combine several flags / width / precision / length modifiers.
mixed_cases = [
    [b'%-02U\n', ctypes.c_short(0)],
    [b'% 0+-#10.5llU\n', ctypes.c_long(42)],
]

test_sets = [
    {
        'name': 'U tests - basics.',
        'cases': basic_cases
    },
    {
        # Fixed: this set holds mixed_cases but was labelled
        # 'U tests - basics.' (copy-paste duplicate of the first set's name).
        'name': 'U tests - mixed.',
        'cases': mixed_cases
    }
]
# Generator over every test set, consumed by the test harness.
cases_generator = generator_factory(test_sets)
|
andrebellafronte/stoq
|
stoqlib/gui/dialogs/paymentflowhistorydialog.py
|
Python
|
gpl-2.0
| 8,070
| 0.00285
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2010 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program;
|
if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
#
|
#
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
"""Payment Flow History Report Dialog"""
from storm.expr import And, Eq, Or
from stoqlib.database.expr import Date
from stoqlib.gui.dialogs.daterangedialog import DateRangeDialog
from stoqlib.gui.utils.printing import print_report
from stoqlib.lib.message import info
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.reporting.payment import PaymentFlowHistoryReport
_ = stoqlib_gettext
# A few comments for the payment_flow_query:
# - The first table in the FROM clause is the list of all possible dates
# (due_date and paid_date) in the results. This is done so that the subsequent
# subselect can be joined properly
# - In that same subselect, we use IS NOT NULL to avoid an empty row for
# payments that were not received yet.
# - We filter out statuses (0, 5) to not include PREVIEW and CANCELED payments
# - payment_type = 1 are OUT_PAYMENTS and 0 are IN_PAYMENTS
payment_flow_query = """
SELECT all_payment_dates.date,
COALESCE(payments_to_pay.count, 0) as to_pay_payments,
COALESCE(payments_to_pay.to_pay, 0) as to_pay,
COALESCE(payments_paid.count, 0) as paid_payments,
COALESCE(payments_paid.paid, 0) as paid,
COALESCE(payments_to_receive.count, 0) as to_receive_payments,
COALESCE(payments_to_receive.to_receive, 0) as to_receive,
COALESCE(payments_received.count, 0) as received_payments,
COALESCE(payments_received.received, 0) as received
FROM (SELECT date(due_date) as date FROM payment
UNION SELECT date(paid_date) as date FROM payment WHERE
paid_date IS NOT NULL) as all_payment_dates
-- To pay (out payments)
LEFT JOIN (SELECT DATE(due_date) as date, count(1) as count, sum(value) as to_pay
FROM payment WHERE payment_type = 'out' AND status not in ('preview', 'cancelled')
GROUP BY DATE(due_date))
AS payments_to_pay ON (all_payment_dates.date = payments_to_pay.date)
-- Paid (out payments)
LEFT JOIN (SELECT DATE(paid_date) as date, count(1) as count, sum(value) as paid
FROM payment WHERE payment_type = 'out'
AND payment.status not in ('preview', 'cancelled')
GROUP BY DATE(paid_date))
AS payments_paid ON (all_payment_dates.date = payments_paid.date)
-- To receive (in payments)
LEFT JOIN (SELECT DATE(due_date) as date, count(1) as count, sum(value) as to_receive
FROM payment WHERE payment_type = 'in'
AND payment.status not in ('preview', 'cancelled')
GROUP BY DATE(due_date))
AS payments_to_receive ON (all_payment_dates.date = payments_to_receive.date)
-- Received (in payments)
LEFT JOIN (SELECT DATE(paid_date) as date, count(1) as count, sum(value) as received
FROM payment WHERE payment_type = 'in'
AND payment.status not in ('preview', 'cancelled')
GROUP BY DATE(paid_date))
AS payments_received ON (all_payment_dates.date = payments_received.date)
ORDER BY all_payment_dates.date;
"""
class PaymentFlowDay(object):
    """Aggregated in/out payment figures for a single date.

    Instances are built from rows of ``payment_flow_query`` and chained
    through ``previous_day`` so running balances can be carried forward.
    """

    def __init__(self, store, row, previous_day=None):
        """Payment Flow History for a given date

        :param store: the database store used for later Payment lookups
        :param row: A list of values from the payment_flow_query above
        :param previous_day: The `previous_day <PaymentFlowDay>`. This is used
        to calculate the expected and real balances for each day (based on the
        previous dates).
        """
        (date, to_pay_count, to_pay, paid_count, paid, to_receive_count,
         to_receive, received_count, received) = row
        self.history_date = date
        # values
        self.to_pay = to_pay
        self.to_receive = to_receive
        self.paid = paid
        self.received = received
        # counts
        self.to_pay_payments = to_pay_count
        self.to_receive_payments = to_receive_count
        self.paid_payments = paid_count
        self.received_payments = received_count

        # Balances chain day to day: each day opens with the previous day's
        # *real* (not expected) closing balance.
        if previous_day:
            self.previous_balance = previous_day.balance_real
        else:
            self.previous_balance = 0

        # Today's balance is the previous day balance, plus the payments we
        # received, minus what we paid. expected if for the payments we should
        # have paid/received
        self.balance_expected = self.previous_balance + to_receive - to_pay
        self.balance_real = self.previous_balance + received - paid

        self.store = store

    def get_divergent_payments(self):
        """Returns a :class:`Payment` sequence that meet the following requirements:

        * The payment due date, paid date or cancel date is the current
          PaymentFlowHistory date.
        * The payment was paid/received with different values (eg with
          discount or surcharge).
        * The payment was scheduled to be paid/received on the current,
          but it was not.
        * The payment was not expected to be paid/received on the current date.
        """
        # Imported here to avoid a circular import at module load time.
        from stoqlib.domain.payment.payment import Payment
        date = self.history_date
        # First Or: the payment touches this date at all; second Or: it was
        # never settled, settled with a different value, or settled on a day
        # other than its due date.
        query = And(Or(Date(Payment.due_date) == date,
                       Date(Payment.paid_date) == date,
                       Date(Payment.cancel_date) == date),
                    Or(Eq(Payment.paid_value, None),
                       Payment.value != Payment.paid_value,
                       Eq(Payment.paid_date, None),
                       Date(Payment.due_date) != Date(Payment.paid_date)))
        return self.store.find(Payment, query)

    @classmethod
    def get_flow_history(cls, store, start, end):
        """Get the payment flow history for a given date interval

        This will return a list of PaymentFlowDay, one for each date that has
        payments registered and are in the interval specified.
        """
        history = []
        previous_entry = None

        # The query yields every date in ascending order; we must walk from
        # the very beginning (not just from `start`) so the chained running
        # balances are correct when the requested window opens.
        for row in store.execute(payment_flow_query).get_all():
            entry = cls(store, row, previous_entry)
            if entry.history_date > end:
                break
            # We only store entries for dates higher than the user requested, but
            # we still need to create the entries from the beginning, so we
            # have the real balances
            if entry.history_date >= start:
                history.append(entry)
            previous_entry = entry

        return history
class PaymentFlowHistoryDialog(DateRangeDialog):
    """Dialog that asks for a date range and prints the payment flow report."""

    title = _(u'Payment Flow History Dialog')
    desc = _("Select a date or a range to be visualised in the report:")
    size = (-1, -1)

    def __init__(self, store):
        """
        :param store: a store
        """
        self.store = store
        DateRangeDialog.__init__(self, title=self.title, header_text=self.desc)

    #
    # BasicDialog
    #

    def confirm(self):
        DateRangeDialog.confirm(self)
        date_range = self.retval

        histories = PaymentFlowDay.get_flow_history(
            self.store, date_range.start, date_range.end)
        if not histories:
            info(_('No payment history found.'))
            return False

        print_report(PaymentFlowHistoryReport, payment_histories=histories)
        return True
|
mxyue66/ISPRS
|
extract_all_bus_stop_coordinates.py
|
Python
|
mit
| 2,342
| 0.032451
|
import numpy as np
from sklearn.cluster import DBSCAN
import csv
def load_data(fin_path):
    """Load AFC tap records from a CSV file.

    Reads ``fin_path`` (the header row is skipped) and keeps, for every
    record, the route id and the GPS coordinates of the tap.

    :param fin_path: path to the input CSV; each data row must have the 13
        columns ``id, route_id1, route_id2, bus_id1, bus_id2, day1, day2,
        time1, time2, lon, lat, card_id, guid``.
    :return: an ``(n, 3)`` numpy float array of ``[route_id1, lon, lat]``.
    """
    rows = []
    # ``with`` guarantees the handle is closed; the original used the
    # Python-2-only ``file()`` builtin and leaked the file handle.
    with open(fin_path, 'r') as files:
        reader = csv.reader(files)
        next(reader)  # skip the header row (was the Py2-only reader.next())
        for (_id, route_id1, _route_id2, _bus_id1, _bus_id2, _day1, _day2,
             _time1, _time2, lon, lat, _card_id, _guid) in reader:
            rows.append([int(route_id1), float(lon), float(lat)])
    return np.array(rows)
def my_fun(fin_path, fout_path, route_id):
    # Cluster the tap coordinates of each route with DBSCAN and write the
    # cluster centroids to `fout_path` as estimated bus-stop locations
    # (CSV columns: stop_id,route_id,sequence,lon,lat).
    X = load_data(fin_path)
    # print X
    # Compute DBSCAN
    fout = fout_path
    f_new = open(fout,'w')
    f_new.write('stop_id,route_id,sequence,lon,lat\n')
    k = 1
    for ii in route_id:
        # All rows belonging to route `ii` (column 0 is route_id1).
        X_0 = X[np.where(X[:,0] == ii), :][0,:]
        # Fewer records -> looser clustering (bigger eps, smaller min_samples).
        if len(X_0)<=300:
            e = 20
            m = 2
        elif 300 < len(X_0)<= 1000:
            e = 15
            m = 3
        else:
            e = 10
            m = 4
        # eps is e/1e5 in coordinate units (columns 1,2 are lon/lat degrees).
        # NOTE(review): DBSCAN is called positionally as DBSCAN(eps,
        # min_samples); recent scikit-learn requires min_samples as a keyword
        # — confirm the pinned sklearn version.
        db_0 = DBSCAN(float(e)/100000, int(m)).fit(X_0[:,[1,2]])
        core_samples_mask = np.zeros_like(db_0.labels_, dtype=bool)
        core_samples_mask[db_0.core_sample_indices_] = True
        labels_0 = db_0.labels_

        # Number of clusters in labels, ignoring noise if present.
        n_clusters_0 = len(set(labels_0)) - (1 if -1 in labels_0 else 0)
        central_points_0 = []
        # NOTE(review): range(1, n_clusters_0) skips cluster label 0 entirely
        # — unclear whether intentional; verify against the expected number of
        # stops per route.
        for i in range(1, n_clusters_0):
            idx = np.where(labels_0 == i)
            # Centroid of cluster i; mean of column 0 recovers the route id.
            temp_route_id_0 = np.mean(X_0[idx,0])
            temp_central_lon_0 = np.mean(X_0[idx,1])
            temp_central_lat_0 = np.mean(X_0[idx,2])
            temp = []
            temp.append('')
            temp.append(temp_route_id_0)
            temp.append(i)
            temp.append(temp_central_lon_0)
            temp.append(temp_central_lat_0)
            central_points_0.append(temp)
        # stop_id in each row is an empty placeholder; the global counter `k`
        # is what actually gets written as the stop id.  NOTE(review): the
        # loop variable `route_id` shadows the function parameter here.
        for stop_id,route_id,sequence,lon,lat in central_points_0:
            f_new.write("%s,%s,%s,%s,%s\n" %(k,route_id,sequence,float(lon),float(lat)))
            k = k+1
    f_new.close()
if __name__ == "__main__":
fin = "/home/user/data/BUS_STOP_ALL_estimate.csv"
fout = "/home/user/data/BUS_STOPS.csv"
route_id = range(1,271)
my_fun(fin, fout, route_id)
#
|
pcapriotti/pledger
|
pledger/template.py
|
Python
|
mit
| 4,577
| 0.001092
|
from datetime import datetime
from .tags import has_tag
# ANSI terminal escape sequences used to colorize report output;
# "nocolor" resets the terminal back to its default attributes.
COLORS = {
    "bold_white": "\033[1;37m",
    "red": "\033[0;31m",
    "yellow": "\033[0;33m",
    "green": "\033[0;32m",
    "nocolor": "\033[00m",
    "blue": "\033[0;34m"}
class Template(object):
    """Base class for report renderers producing colored, column-aligned text.

    Subclasses implement ``generate(ledgers, report)`` yielding output lines.
    """

    ACCOUNT_COLOR = "blue"

    def __call__(self, ledgers, report, output):
        # Stream every generated line into the caller-supplied sink.
        for line in self.generate(ledgers, report):
            output(line)

    def pad(self, item, size, color=None):
        """Right-align ``item`` (truncated) into a field of ``size`` chars."""
        text = str(item)[:size]
        fill = max(size - len(text), 0)
        return " " * fill + self.colored(color, text)

    def lpad(self, item, size, color=None):
        """Left-align ``item`` (truncated) into a field of ``size`` chars."""
        text = str(item)[:size]
        fill = max(size - len(text), 0)
        return self.colored(color, text) + " " * fill

    def print_value(self, value):
        # Falsy values render as nothing; negative amounts are shown in red.
        if not value:
            return ""
        color = "red" if value.negative() else None
        return self.pad(str(value), 20, color)

    def print_account(self, account, size=39):
        # size=None means "no column": emit the full colored account name.
        if size is None:
            return self.colored(self.ACCOUNT_COLOR, account.name)
        return self.lpad(account.shortened_name(size), size, self.ACCOUNT_COLOR)

    def print_label(self, transaction, size):
        # Uncleared transactions are highlighted in bold white.
        color = None if has_tag(transaction, "cleared") else "bold_white"
        return self.lpad(transaction.label, size, color)

    def colored(self, color, text):
        """Wrap ``text`` in the ANSI sequence for ``color`` (or pass through)."""
        if not color:
            return text
        return COLORS[color] + text + COLORS["nocolor"]
class BalanceTemplate(Template):
    """Renders a balance report: one line per account plus a grand total."""

    def generate(self, ledgers, report):
        entries = report.generate(ledgers)
        # The report yields the grand total first, then the account entries.
        total = next(entries)
        seen_any = False
        for entry in entries:
            parts = entry.amount.components()
            # All but the last component get their own value-only line; the
            # last one also carries the indented, colored account name.
            for part in parts[:-1]:
                yield self.print_value(part)
            indent = " " * (entry.level - 1)
            account = self.colored(self.ACCOUNT_COLOR, entry.account)
            yield self.print_value(parts[-1]) + indent + account
            seen_any = True
        # Separator before the total, but only if any account was printed.
        if seen_any:
            yield "-" * 20
        for part in total.amount.components():
            yield self.print_value(part)
class RegisterTemplate(Template):
    """Renders a register report: one line per posting with a running total."""

    def generate(self, ledgers, report):
        """Yield formatted lines, grouping postings of the same transaction.

        The first posting of a transaction gets a full line (date + label);
        subsequent postings of the same transaction repeat only the account
        and value columns.
        """
        last_entry = None
        for entry in report.generate(ledgers):
            if last_entry and id(last_entry.transaction) == id(entry.transaction):
                for line in self.print_secondary_entry(entry):
                    yield line
            else:
                for line in self.print_entry(entry):
                    yield line
            last_entry = entry

    def _split_components(self, entry):
        """Return (amount components, running-total components), both aligned
        on the sorted union of their currencies so the columns line up."""
        currencies = sorted(
            set(entry.entry.amount.currencies()).union(entry.total.currencies()))
        return (entry.entry.amount.components(currencies),
                entry.total.components(currencies))

    def print_entry(self, entry):
        """Yield the full first line of a transaction, then extra currencies."""
        components, total_components = self._split_components(entry)
        yield "%s %s %s %s %s" % (
            self.lpad(entry.date.strftime("%y-%b-%d"), 9),
            self.print_label(entry.transaction, 34),
            self.print_account(entry.entry.account),
            self.print_value(components[0]),
            self.print_value(total_components[0]))
        for line in self.print_extra_components(entry, components[1:], total_components[1:]):
            yield line

    def print_secondary_entry(self, entry):
        """Yield a follow-up line (no date/label), then extra currencies."""
        components, total_components = self._split_components(entry)
        yield "%s %s %s %s" % (
            " " * 44,
            self.print_account(entry.entry.account),
            self.print_value(components[0]),
            self.print_value(total_components[0]))
        for line in self.print_extra_components(entry, components[1:], total_components[1:]):
            yield line

    def print_extra_components(self, entry, components, total_components):
        # Each additional currency gets its own line, aligned under the two
        # value columns of the main line.
        for i in range(len(components)):
            yield "%s %s %s" % (
                " " * 84,
                self.print_value(components[i]),
                self.print_value(total_components[i]))
def default_template(ledgers, report, *args):
    """Render *report* using the template class the report declares itself."""
    return report.template(ledgers, report, *args)
|
volpino/Yeps-EURAC
|
lib/galaxy/model/mapping.py
|
Python
|
mit
| 73,637
| 0.040021
|
"""
Details of how the data model objects are mapped onto the relational database
are encapsulated here.
"""
import logging
log = logging.getLogger( __name__ )
import sys
import datetime
from galaxy.model import *
from galaxy.model.orm import *
from galaxy.model.orm.ext.assignmapper import *
from galaxy.model.custom_types import *
from galaxy.util.bunch import Bunch
from galaxy.security import GalaxyRBACAgent
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.ext.associationproxy import association_proxy
metadata = MetaData()
context = Session = scoped_session( sessionmaker( autoflush=False, autocommit=True ) )
# For backward compatibility with "context.current"
context.current = Session
dialect_to_egg = {
"sqlite" : "pysqlite>=2",
"postgres" : "psycopg2",
"mysql" : "MySQL_python"
}
# NOTE REGARDING TIMESTAMPS:
# It is currently difficult to have the timestamps calculated by the
# database in a portable way, so we're doing it in the client. This
# also saves us from needing to postfetch on postgres. HOWEVER: it
# relies on the client's clock being set correctly, so if clustering
# web servers, use a time server to ensure synchronization
# Return the current time in UTC without any timezone information
now = datetime.datetime.utcnow
# Core user account table.  Timestamps are set client-side via `now` (see the
# NOTE REGARDING TIMESTAMPS above).  NOTE(review): "external" presumably marks
# externally-authenticated accounts — confirm against the auth code.
User.table = Table( "galaxy_user", metadata,
    Column( "id", Integer, primary_key=True),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "email", TrimmedString( 255 ), nullable=False ),
    Column( "username", TrimmedString( 255 ), index=True, unique=True ),
    Column( "password", TrimmedString( 40 ), nullable=False ),
    Column( "external", Boolean, default=False ),
    Column( "form_values_id", Integer, ForeignKey( "form_values.id" ), index=True ),
    Column( "deleted", Boolean, index=True, default=False ),
    Column( "purged", Boolean, index=True, default=False ) )

# Postal address attached to a user; soft-deleted via deleted/purged flags
# like the user table itself.
UserAddress.table = Table( "user_address", metadata,
    Column( "id", Integer, primary_key=True),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
    Column( "desc", TrimmedString( 255 )),
    Column( "name", TrimmedString( 255 ), nullable=False),
    Column( "institution", TrimmedString( 255 )),
    Column( "address", TrimmedString( 255 ), nullable=False),
    Column( "city", TrimmedString( 255 ), nullable=False),
    Column( "state", TrimmedString( 255 ), nullable=False),
    Column( "postal_code", TrimmedString( 255 ), nullable=False),
    Column( "country", TrimmedString( 255 ), nullable=False),
    Column( "phone", TrimmedString( 255 )),
    Column( "deleted", Boolean, index=True, default=False ),
    Column( "purged", Boolean, index=True, default=False ) )
History.table = Table( "history", metadata,
Column( "id", Integer, primary_key=True),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, index=True, default=now, onupdate=now ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
Column( "name", TrimmedString( 25
|
5 ) ),
Column( "hid_counter", Integer, default=1 ),
Column( "deleted", Boolean, index=True, default=False ),
Column( "purged", Boolean, index=True, default=False ),
Column( "genome_build", TrimmedString( 40 ) ),
Column( "importable", Boolean, default=False ) )
HistoryUserShareAssociation.table = Table( "history_user_share_association", metadat
|
a,
Column( "id", Integer, primary_key=True ),
Column( "history_id", Integer, ForeignKey( "history.id" ), index=True ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True )
)
HistoryDatasetAssociation.table = Table( "history_dataset_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "history_id", Integer, ForeignKey( "history.id" ), index=True ),
Column( "dataset_id", Integer, ForeignKey( "dataset.id" ), index=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "copied_from_history_dataset_association_id", Integer, ForeignKey( "history_dataset_association.id" ), nullable=True ),
Column( "copied_from_library_dataset_dataset_association_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), nullable=True ),
Column( "hid", Integer ),
Column( "name", TrimmedString( 255 ) ),
Column( "info", TrimmedString( 255 ) ),
Column( "blurb", TrimmedString( 255 ) ),
Column( "peek" , TEXT ),
Column( "extension", TrimmedString( 64 ) ),
Column( "metadata", MetadataType(), key="_metadata" ),
Column( "parent_id", Integer, ForeignKey( "history_dataset_association.id" ), nullable=True ),
Column( "designation", TrimmedString( 255 ) ),
Column( "deleted", Boolean, index=True, default=False ),
Column( "visible", Boolean ) )
Dataset.table = Table( "dataset", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, index=True, default=now, onupdate=now ),
Column( "state", TrimmedString( 64 ), index=True ),
Column( "deleted", Boolean, index=True, default=False ),
Column( "purged", Boolean, index=True, default=False ),
Column( "purgable", Boolean, default=True ),
Column( "external_filename" , TEXT ),
Column( "_extra_files_path", TEXT ),
Column( 'file_size', Numeric( 15, 0 ) ) )
HistoryDatasetAssociationDisplayAtAuthorization.table = Table( "history_dataset_association_display_at_authorization", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, index=True, default=now, onupdate=now ),
Column( "history_dataset_association_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
Column( "site", TrimmedString( 255 ) ) )
ImplicitlyConvertedDatasetAssociation.table = Table( "implicitly_converted_dataset_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "hda_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True, nullable=True ),
Column( "hda_parent_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True ),
Column( "deleted", Boolean, index=True, default=False ),
Column( "metadata_safe", Boolean, index=True, default=True ),
Column( "type", TrimmedString( 255 ) ) )
ValidationError.table = Table( "validation_error", metadata,
Column( "id", Integer, primary_key=True ),
Column( "dataset_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True ),
Column( "message", TrimmedString( 255 ) ),
Column( "err_type", TrimmedString( 64 ) ),
Column( "attributes", TEXT ) )
Group.table = Table( "galaxy_group", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "name", String( 255 ), index=True, unique=True ),
Column( "deleted", Boolean, index=True, default=False ) )
UserGroupAssociation.table = Table( "user_group_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
Column( "group_id", Integer, ForeignKey( "galaxy_group.id" ), index=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ) )
UserRoleAssociation.table = Table( "user_role_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
Column( "role_id", Integer, ForeignKey( "role.id" ), index=True ),
Column( "
|
cypreess/django-tos
|
tos_i18n/translation.py
|
Python
|
bsd-3-clause
| 299
| 0.006689
|
from modeltranslation.translator import translator, TranslationOptions
from tos.models import TermsOfService


# Translations for django-tos
class TermsOfServiceTranslationOptions(TranslationOptions):
    """Declare which TermsOfService fields django-modeltranslation localizes."""
    # Only the terms body text is translated
    fields = ('content', )


translator.register(TermsOfService, TermsOfServiceTranslationOptions)
|
dopplershift/MetPy
|
tests/plots/test_declarative.py
|
Python
|
bsd-3-clause
| 34,530
| 0.0011
|
# Copyright (c) 2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the simplified plotting interface."""
from datetime import datetime, timedelta
from io import BytesIO
import warnings
import matplotlib
import numpy as np
import pandas as pd
import pytest
from traitlets import TraitError
import xarray as xr
from metpy.cbook import get_test_data
from metpy.io import GiniFile
from metpy.io.metar import parse_metar_file
from metpy.plots import (BarbPlot, ContourPlot, FilledContourPlot, ImagePlot, MapPanel,
PanelContainer, PlotObs)
# Fixtures to make sure we have the right backend
from metpy.testing import needs_cartopy, set_agg_backend # noqa: F401, I202
from metpy.units import units
MPL_VERSION = matplotlib.__version__[:3]
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.005)
@needs_cartopy
def test_declarative_image():
    """Test making an image plot."""
    dataset = xr.open_dataset(GiniFile(get_test_data('NHEM-MULTICOMP_1km_IR_20151208_2100.gini')))

    # Render the IR channel with a reversed-grey colormap
    image = ImagePlot()
    image.data = dataset.metpy.parse_cf('IR')
    image.colormap = 'Greys_r'

    # Single titled panel holding the image
    map_panel = MapPanel()
    map_panel.title = 'Test'
    map_panel.plots = [image]

    container = PanelContainer()
    container.panel = map_panel
    container.draw()

    # The title set on the panel must make it onto the axes
    assert map_panel.ax.get_title() == 'Test'

    return container.figure
@pytest.mark.mpl_image_compare(remove_text=True,
                               tolerance={'3.0': 0.256}.get(MPL_VERSION, 0.022))
@needs_cartopy
def test_declarative_contour():
    """Test making a contour plot."""
    data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))

    # 700 hPa temperature contours in red
    contour = ContourPlot()
    contour.data = data
    contour.field = 'Temperature'
    contour.level = 700 * units.hPa
    contour.contours = 30
    contour.linewidth = 1
    contour.linecolor = 'red'

    # CONUS Lambert conformal panel with political boundaries
    panel = MapPanel()
    panel.area = 'us'
    panel.proj = 'lcc'
    panel.layers = ['coastline', 'borders', 'usstates']
    panel.plots = [contour]

    pc = PanelContainer()
    pc.size = (8.0, 8)
    pc.panels = [panel]
    pc.draw()

    return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True,
                               tolerance={'3.0': 0.216}.get(MPL_VERSION, 0.022))
@needs_cartopy
def test_declarative_figsize():
    """Test having an all float figsize."""
    ds = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))

    # 700 hPa temperature contours in red
    temp_contour = ContourPlot()
    temp_contour.data = ds
    temp_contour.field = 'Temperature'
    temp_contour.level = 700 * units.hPa
    temp_contour.contours = 30
    temp_contour.linewidth = 1
    temp_contour.linecolor = 'red'

    us_panel = MapPanel()
    us_panel.area = 'us'
    us_panel.proj = 'lcc'
    us_panel.layers = ['coastline', 'borders', 'usstates']
    us_panel.plots = [temp_contour]

    # Exercise a non-integer figure size
    container = PanelContainer()
    container.size = (10.5, 10.5)
    container.panels = [us_panel]
    container.draw()

    return container.figure
@pytest.mark.mpl_image_compare(remove_text=True,
                               tolerance={'3.0': 0.328}.get(MPL_VERSION, 0.022))
@needs_cartopy
def test_declarative_contour_cam():
    """Test making a contour plot with CAM data."""
    data = xr.open_dataset(get_test_data('CAM_test.nc', as_file_obj=False))

    # 1000 hPa pressure contours at a specific valid time
    contour = ContourPlot()
    contour.data = data
    contour.field = 'PN'
    contour.time = datetime.strptime('2020-11-29 00:00', '%Y-%m-%d %H:%M')
    contour.level = 1000 * units.hPa
    contour.linecolor = 'black'
    contour.contours = list(range(0, 1200, 4))

    panel = MapPanel()
    panel.layout = (1, 1, 1)
    panel.layers = ['coastline', 'borders', 'states', 'land']
    # Attach the plot once (the original redundantly assigned panel.plots twice)
    panel.plots = [contour]

    pc = PanelContainer()
    pc.panels = [panel]
    pc.draw()

    return pc.figure
@pytest.fixture
def fix_is_closed_polygon(monkeypatch):
    """Fix matplotlib.contour._is_closed_polygons for tests.

    Needed because for Matplotlib<3.3, the internal matplotlib.contour._is_closed_polygon
    uses strict floating point equality. This causes the test below to yield different
    results for macOS vs. Linux/Windows.
    """
    def _tolerant_is_closed(X):
        # Compare the first and last vertex with a tiny tolerance instead of ==
        return np.allclose(X[0], X[-1], rtol=1e-10, atol=1e-13)

    monkeypatch.setattr(matplotlib.contour, '_is_closed_polygon',
                        _tolerant_is_closed, raising=False)
@pytest.mark.mpl_image_compare(remove_text=True,
                               tolerance={'3.0': 23.964}.get(MPL_VERSION, 0.0184))
@needs_cartopy
def test_declarative_contour_options(fix_is_closed_polygon):
    """Test making a contour plot."""
    ds = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))

    # Dashed, labeled red contours of 700 hPa temperature
    styled_contour = ContourPlot()
    styled_contour.data = ds
    styled_contour.field = 'Temperature'
    styled_contour.level = 700 * units.hPa
    styled_contour.contours = 30
    styled_contour.linewidth = 1
    styled_contour.linecolor = 'red'
    styled_contour.linestyle = 'dashed'
    styled_contour.clabels = True

    us_panel = MapPanel()
    us_panel.area = 'us'
    us_panel.proj = 'lcc'
    us_panel.layers = ['coastline', 'borders', 'usstates']
    us_panel.plots = [styled_contour]

    container = PanelContainer()
    container.size = (8, 8)
    container.panels = [us_panel]
    container.draw()

    return container.figure
@pytest.mark.mpl_image_compare(remove_text=True,
                               tolerance={'3.0': 19.795}.get(MPL_VERSION, 0.0134))
@needs_cartopy
def test_declarative_contour_convert_units(fix_is_closed_polygon):
    """Test making a contour plot."""
    ds = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))

    # Same styled 700 hPa temperature contours, but plotted in Celsius
    styled_contour = ContourPlot()
    styled_contour.data = ds
    styled_contour.field = 'Temperature'
    styled_contour.level = 700 * units.hPa
    styled_contour.contours = 30
    styled_contour.linewidth = 1
    styled_contour.linecolor = 'red'
    styled_contour.linestyle = 'dashed'
    styled_contour.clabels = True
    styled_contour.plot_units = 'degC'

    us_panel = MapPanel()
    us_panel.area = 'us'
    us_panel.proj = 'lcc'
    us_panel.layers = ['coastline', 'borders', 'usstates']
    us_panel.plots = [styled_contour]

    container = PanelContainer()
    container.size = (8, 8)
    container.panels = [us_panel]
    container.draw()

    return container.figure
@pytest.mark.mpl_image_compare(remove_text=True,
                               tolerance={'3.0': 0.051}.get(MPL_VERSION, 0))
@needs_cartopy
def test_declarative_events():
    """Test that resetting traitlets properly propagates."""
    ds = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))

    # Initial state: 850 hPa temperature contours...
    ctr = ContourPlot()
    ctr.data = ds
    ctr.field = 'Temperature'
    ctr.level = 850 * units.hPa
    ctr.contours = 30
    ctr.linewidth = 1
    ctr.linecolor = 'red'

    # ...over a 700 hPa v-wind image
    image = ImagePlot()
    image.data = ds
    image.field = 'v_wind'
    image.level = 700 * units.hPa
    image.colormap = 'hot'
    image.image_range = (3000, 5000)

    us_panel = MapPanel()
    us_panel.area = 'us'
    us_panel.proj = 'lcc'
    us_panel.layers = ['coastline', 'borders', 'states']
    us_panel.plots = [ctr, image]

    container = PanelContainer()
    container.size = (8, 8.0)
    container.panels = [us_panel]
    container.draw()

    # Update some properties to make sure it regenerates the figure
    ctr.linewidth = 2
    ctr.linecolor = 'green'
    ctr.level = 700 * units.hPa
    ctr.field = 'Specific_humidity'
    image.field = 'Geopotential_height'
    image.colormap = 'plasma'
    image.colorbar = 'horizontal'

    return container.figure
def test_no_field_error():
    """Make sure we get a useful error when the field is not set."""
    ds = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))

    plot = ContourPlot()
    plot.data = ds
    plot.level = 700 * units.hPa

    # Drawing without a field configured must be rejected
    with pytest.raises(ValueError):
        plot.draw()
def test_no_field_error_barbs():
    """Make sure we get a useful error when the field is not set."""
    ds = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))

    plot = BarbPlot()
    plot.data = ds
    plot.level = 700 * units.hPa

    # Barb plots validate their (u, v) fields via traitlets
    with pytest.raises(TraitError):
        plot.draw()
def test_projection_object(ccrs, cfeature):
"""Test that we can pass a custom map projection."""
data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
contour = ContourPlot()
contour.data = data
c
|
jmons/ramlwrap
|
tests/RamlWrapTest/tests/test_raml_v1.py
|
Python
|
mit
| 1,448
| 0.004144
|
"""Tests for RamlWrap"""
import inspect
import json
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../")))
from django.test import TestCase, Client
def _get_parent_class(method):
    """Return the class for the given method."""
    # Scan the member list for the bound "__self__" attribute
    for member_name, member_value in inspect.getmembers(method):
        if member_name == "__self__":
            return member_value
def _internal_mockfunc(request, example):
    # Intentional no-op handler used when a test only needs routing to succeed.
    pass
def _internal_mock_post(request, example):
    """json loads the request and return it."""
    payload = request.validated_data
    return json.loads(payload)
class RamlWrapv1TestCase(TestCase):
    """Tests against the v1 RAML definition served via the Django test client."""

    client = None

    def setUp(self):
        # Fresh test client per test
        self.client = Client()

    def test_raml_with_multiple_examples__only_one_is_returned(self):
        """Test that a valid get request with no target returns
        the example json.
        """
        expected_data_1 = {"exampleData": "This is the first example response"}
        expected_data_2 = {"exampleData2": "This is a second example"}

        response = self.client.get("/ramlv1-api/multi-example")
        reply_data = response.content.decode("utf-8")
        actual_response = json.loads(reply_data)

        # Due to the unordered nature of dictionaries in certain Python versions, we are happy if either one of
        # the examples is returned
        self.assertTrue(actual_response == expected_data_1 or actual_response == expected_data_2)
|
tequa/ammisoft
|
ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Scripts/pilconvert.py
|
Python
|
bsd-3-clause
| 2,427
| 0.000824
|
#!C:\Users\DMoran\Downloads\WinPython-64bit-2.7.13.1Zero\python-2.7.13.amd64\python.exe
#
# The Python Imaging Library.
# $Id$
#
# convert image files
#
# History:
# 0.1 96-04-20 fl Created
# 0.2 96-10-04 fl Use draft mode when converting images
# 0.3 96-12-30 fl Optimize output (PNG, JPEG)
# 0.4 97-01-18 fl Made optimize an option (PNG, JPEG)
# 0.5 98-12-30 fl Fixed -f option (from Anthony Baxter)
#
from __future__ import print_function
import getopt
import string
import sys
from PIL import Image
def usage():
    """Print the pilconvert help text, then terminate with exit status 1."""
    help_lines = (
        "PIL Convert 0.5/1998-12-30 -- convert image files",
        "Usage: pilconvert [option] infile outfile",
        "",
        "Options:",
        "",
        " -c <format> convert to format (default is given by extension)",
        "",
        " -g convert to greyscale",
        " -p convert to palette image (using standard palette)",
        " -r convert to rgb",
        "",
        " -o optimize output (trade speed for size)",
        " -q <value> set compression quality (0-100, JPEG only)",
        "",
        " -f list supported file formats",
    )
    # One print of joined lines yields the same bytes as the original
    # sequence of individual print() calls.
    print("\n".join(help_lines))
    sys.exit(1)
# No arguments at all: show help and exit.
if len(sys.argv) == 1:
    usage()

try:
    opt, argv = getopt.getopt(sys.argv[1:], "c:dfgopq:r")
except getopt.error as v:
    print(v)
    sys.exit(1)

output_format = None   # explicit output format from -c, else inferred from extension
convert = None         # target mode for -g/-p/-r
options = {}           # extra keyword args passed to Image.save()

for o, a in opt:

    if o == "-f":
        # List every registered format; '*' marks formats PIL can write.
        Image.init()
        id = sorted(Image.ID)
        print("Supported formats (* indicates output format):")
        for i in id:
            if i in Image.SAVE:
                print(i+"*", end=' ')
            else:
                print(i, end=' ')
        sys.exit(1)

    elif o == "-c":
        output_format = a

    elif o == "-g":
        convert = "L"
    elif o == "-p":
        convert = "P"
    elif o == "-r":
        convert = "RGB"

    elif o == "-o":
        options["optimize"] = 1
    elif o == "-q":
        # string.atoi was removed in Python 3; int() is equivalent here.
        options["quality"] = int(a)

if len(argv) != 2:
    usage()

try:
    im = Image.open(argv[0])

    # Use draft mode when converting: lets the decoder do part of the work.
    if convert and im.mode != convert:
        im.draft(convert, im.size)
        im = im.convert(convert)

    if output_format:
        im.save(argv[1], output_format, **options)
    else:
        im.save(argv[1], **options)

except Exception:
    # Narrowed from a bare except so Ctrl-C / SystemExit still propagate.
    print("cannot convert image", end=' ')
    print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
|
BurtBiel/azure-cli
|
src/command_modules/azure-cli-network/azure/cli/command_modules/network/mgmt_lb/lib/operations/lb_operations.py
|
Python
|
mit
| 9,696
| 0.002063
|
#---------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
#pylint: skip-file
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class LbOperations(object):
"""LbOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An objec model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def create_or_update(
self, resource_group_name, deployment_name, load_balancer_name, content_version=None, backend_pool_name=None, dns_name_type="none", frontend_ip_name="LoadBalancerFrontEnd", location=None, private_ip_address=None, private_ip_address_allocation="dynamic", public_ip_address=None, public_ip_address_allocation="dynamic", public_ip_address_type="new", public_ip_dns_name=None, subnet=None, subnet_address_prefix="10.0.0.0/24", subnet_type="none", tags=None, virtual_network_name=None, vnet_address_prefix="10.0.0.0/16", custom_headers=None, raw=False, **operation_config):
"""
Create or update a virtual machine.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param load_balancer_name: Name for load balancer.
:type load_balancer_name: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param backend_pool_name: Name of load balancer backend pool.
:type backend_pool_name: str
:param dns_name_type: Associate VMs with a public IP address to a DNS
name. Possible values include: 'none', 'new'
:type dns_name_type: str or :class:`dnsNameType
<lbcreationclient.models.dnsNameType>`
:param frontend_ip_name: Name of the frontend IP configuration.
:type frontend_ip_name: str
:param location: Location for load balancer resource.
:type location: str
:param private_ip_address: Static private IP address to use.
:type private_ip_address: str
:param private_ip_address_allocation: Private IP address allocation
method. Possible values include: 'dynamic', 'static'
:type private_ip_address_allocation: str or
:class:`privateIpAddressAllocation
<lbcreationclient.models.privateIpAddressAllocation>`
:param public_ip_address: Name or ID of the public IP address to use.
:type public_ip_address: str
:param public_ip_address_allocation: Public IP address allocation
method. Possible values include: 'dynamic', 'static'
:type public_ip_address_allocation: str or
:class:`publicIpAddressAllocation
<lbcreationclient.models.publicIpAddressAllocation>`
:param public_ip_address_type: Type of Public IP Address to associate
with the load balancer. Possible values include: 'none', 'new',
'existingName', 'existingId'
:type public_ip_address_type: str or :class:`publicIpAddressType
<lbcreationclient.models.publicIpAddressType>`
:param public_ip_dns_name: Globally unique DNS Name for the Public IP
used to access the Virtual Machine (new public IP only).
:type public_ip_dns_name: str
:param subnet: The subnet name or ID to associate with the load
balancer. Cannot be used in conjunction with a Public IP.
:type subnet: str
:param subnet_address_prefix: The subnet address prefix in CIDR
format (new subnet only).
:type subnet_address_prefix: str
:param subnet_type: Use new, existing or no subnet. Possible values
include: 'none', 'new', 'existingName', 'existingId'
:type subnet_type: str or :class:`subnetType
<lbcreationclient.models.subnetType>`
:param tags: Tags object.
:type tags: object
:param virtual_network_name: The VNet name containing the subnet.
Cannot be used in conjunction with a Public IP.
:type virtual_network_name: str
:param vnet_address_prefix: The virtual network IP address prefix in
CIDR format (new subnet only).
:type vnet_address_prefix: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
parameters = models.DeploymentLb(content_version=content_version, backend_pool_name=backend_pool_name, dns_name_type=dns_name_type, frontend_ip_name=frontend_ip_name, load_balancer_name=load_balancer_name, location=location, private_ip_address=private_ip_address, private_ip_address_allocation=private_ip_address_allocation, public_ip_address=public_ip_address, public_ip_address_allocation=public_ip_address_allocation, public_ip_address_type=public_ip_address_type, public_ip_dns_name=public_ip_dns_name, subnet=subnet, subnet_address_prefix=subnet_address_prefix, subnet_type=subnet_type, tags=tags, virtual_network_name=virtual_network_name, vnet_address_prefix=vnet_address_prefix)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
|
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] =
|
str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'DeploymentLb')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
|
Erotemic/vtool
|
vtool_ibeis/ellipse.py
|
Python
|
apache-2.0
| 15,540
| 0.003861
|
"""
OLD MODULE, needs reimplemenetation of select features and deprication
This module should handle all things elliptical
"""
from __future__ import absolute_import, division, print_function
from six.moves import zip, range
from numpy.core.umath_tests import matrix_multiply
import scipy.signal as spsignal
import numpy as np
from vtool_ibeis import keypoint as ktool
from vtool_ibeis import image as gtool
import ubelt as ub
import utool as ut
try:
import cv2
except ImportError as ex:
print('ERROR: import cv2 is failing!')
cv2 = ut.DynStruct()
def adaptive_scale(img_fpath, kpts, nScales=4, low=-.5, high=.5, nSamples=16):
    """Refine keypoint scales by maximizing gradient response on ellipse borders.

    Evaluates each keypoint at ``nScales`` candidate scales in
    [2 ** low, 2 ** high], interpolates the best-response scale, and keeps
    only refined keypoints whose extent stays inside the image.
    """
    #imgBGR = cv2.imread(img_fpath, flags=cv2.CV_LOAD_IMAGE_COLOR)
    image_bgr = gtool.imread(img_fpath)
    num_kpts = len(kpts)
    orig_dtype = kpts.dtype
    # Do the interpolation math in float64
    kpts_f64 = np.array(kpts, dtype=np.float64)
    # Expand each keypoint into a number of different scales
    candidate_kpts = expand_scales(kpts_f64, nScales, low, high)
    # Sample gradient magnitude around the border
    response_sums = sample_ell_border_vals(image_bgr, candidate_kpts, num_kpts,
                                           nScales, nSamples)
    # Interpolate maxima of the response across scales
    refined_kpts = subscale_peaks(response_sums, kpts_f64, nScales, low, high)
    # Make sure that the new shapes are in bounds
    img_height, img_width = image_bgr.shape[0:2]
    in_bounds = check_kpts_in_bounds(refined_kpts, img_width, img_height)
    # Convert back to the caller's dtype
    return np.array(refined_kpts[in_bounds], dtype=orig_dtype)
def check_kpts_in_bounds(kpts_, width, height):
    """Return a boolean mask marking keypoints whose extent lies inside the image.

    Each keypoint's inverse-V matrix maps the unit square's corners into image
    space; the keypoint is valid when all mapped corners fall strictly within
    (0, width) x (0, height).
    """
    # Corners of the unit square in homogeneous coordinates
    unit_bbox = np.array([(-1, -1, 1),
                          (-1, 1, 1),
                          ( 1, -1, 1),
                          ( 1, 1, 1)]).T
    #invV = kpts_to_invV(kpts_)
    invV = ktool.get_invV_mats3x3(kpts_)
    bbox_pts = [v.dot(unit_bbox)[0:2] for v in invV]
    maxx = np.array([pts[0].max() for pts in bbox_pts]) < width
    minx = np.array([pts[0].min() for pts in bbox_pts]) > 0
    maxy = np.array([pts[1].max() for pts in bbox_pts]) < height
    miny = np.array([pts[1].min() for pts in bbox_pts]) > 0
    # np.bool was removed in NumPy 1.24; the builtin bool is the supported spelling
    isvalid = np.array(maxx * minx * maxy * miny, dtype=bool)
    return isvalid
def expand_scales(kpts, nScales, low, high):
    """Stack copies of *kpts* at nScales log-spaced scale factors in [2**low, 2**high]."""
    scale_factors = 2 ** np.linspace(low, high, nScales)
    per_scale_kpts = expand_kpts(kpts, scale_factors)
    #assert len(per_scale_kpts) == nScales
    return np.vstack(per_scale_kpts)
def sample_ell_border_pts(expanded_kpts, nSamples):
    """Stack nSamples uniformly sampled boundary points from every keypoint ellipse."""
    per_kpt_pts = sample_uniform(expanded_kpts, nSamples)
    # Result is (len(expanded_kpts) * nSamples, 2)
    return np.vstack(per_kpt_pts)
def sample_ell_border_vals(imgBGR, expanded_kpts, nKp, nScales, nSamples):
    """Return an (nKp, nScales) array of summed gradient magnitude along ellipse borders."""
    # Sample points uniformly across the boundary
    ell_border_pts = sample_ell_border_pts(expanded_kpts, nSamples)
    # Build gradient magnitude image from the LAB lightness channel
    imgLAB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2LAB)
    imgL = imgLAB[:, :, 0]
    imgMag = gradient_magnitude(imgL)
    # Subpixel-interpolate the magnitude at every sampled border point
    border_vals = gtool.subpixel_values(imgMag, ell_border_pts)
    #assert len(border_vals) == (nKp * nScales * nSamples)
    # Reshape flat samples to (keypoint, scale, sample, 1), then collapse the
    # sample axes so each (keypoint, scale) gets one summed response
    border_vals.shape = (nKp, nScales, nSamples, 1)
    border_vals_sum = border_vals.sum(3).sum(2)
    #assert border_vals_sum.shape == (nKp, nScales)
    return border_vals_sum
def interpolate_between(peak_list, nScales, high, low):
    """Map peak bin positions (column 0 of each peak array) to scale factors.

    Each bin index is mapped linearly from [0, nScales] onto [low, high] and
    exponentiated, yielding factors in [2 ** low, 2 ** high]. Entries with no
    peaks map to an empty list.
    """
    def bin_to_subscale(peaks):
        # peaks[:, 0] holds the (interpolated) bin position of each maximum
        return 2 ** ((peaks[:, 0] / nScales) * (high - low) + low)
    subscale_list = [bin_to_subscale(peaks) if len(peaks) > 0 else []
                     for peaks in peak_list]
    return subscale_list
def subscale_peaks(border_vals_sum, kpts, nScales, low, high):
    """Find per-keypoint response maxima across scales and expand kpts at them."""
    peaks = interpolate_maxima(border_vals_sum)
    subscales = interpolate_between(peaks, nScales, high, low)
    return expand_subscales(kpts, subscales)
def expand_kpts(kpts, scales):
    """Return one scaled copy of *kpts* per entry in *scales*.

    Columns 2-4 of each copy are multiplied by the scale factor; the input
    array is left untouched (each copy starts from kpts.copy()).
    """
    expanded_kpts_list = []
    for scale in scales:
        kpts_ = kpts.copy()
        # Scale the shape columns; position columns 0-1 stay unchanged
        kpts_.T[2] *= scale
        kpts_.T[3] *= scale
        kpts_.T[4] *= scale
        expanded_kpts_list.append(kpts_)
    return expanded_kpts_list
def expand_subscales(kpts, subscale_list):
    """Build one keypoint row per (keypoint, subscale) pair, scaling columns 2-4."""
    scaled_rows = []
    for kp, subscales in zip(kpts, subscale_list):
        for scale in subscales:
            scaled_rows.append(kp * np.array((1, 1, scale, scale, scale, 1)))
    return np.vstack(scaled_rows)
def find_maxima(y_list):
    """Return, for each 1-D array, the indices of its strict local maxima."""
    maxima_indices = []
    for y in y_list:
        maxima_indices.append(spsignal.argrelextrema(y, np.greater)[0])
    return maxima_indices
def extrema_neighbors(extrema_list, nBins):
    """Return (left, right) neighbor indices for each extremum, clipped to [0, nBins]."""
    left_list = []
    right_list = []
    for extrema in extrema_list:
        left_list.append(np.clip(extrema - 1, 0, nBins))
        right_list.append(np.clip(extrema + 1, 0, nBins))
    return left_list, right_list
def find_maxima_with_neighbors(scalar_list):
    """Gather each curve's local maxima together with their two neighbor samples.

    Returns parallel lists of x positions and y values; each entry is a
    (3, nMaxima) index/value array of (left, center, right) samples, or []
    for curves with no maxima.
    """
    y_list = [scalars for scalars in scalar_list]
    nBins = len(y_list[0])
    x = np.arange(nBins)
    maxima_list = find_maxima(y_list)
    left_list, right_list = extrema_neighbors(maxima_list, nBins)
    x_data_list = []
    y_data_list = []
    for y, exl, exm, exr in zip(y_list, left_list, maxima_list, right_list):
        data = np.vstack([exl, exm, exr])
        if data.size == 0:
            x_data_list.append([])
            y_data_list.append([])
        else:
            x_data_list.append(x[data])
            y_data_list.append(y[data])
    return x_data_list, y_data_list
def interpolate_maxima(scalar_list):
    """Fit parabolas through each local maximum and its neighbors; return vertices."""
    # scalar_list = border_vals_sum
    xs, ys = find_maxima_with_neighbors(scalar_list)
    return interpolate_peaks(xs, ys)
def interpolate_peaks2(x_data_list, y_data_list):
    """Fit a quadratic through each (x, y) triple and return the coefficients.

    Fix: the fitted coefficients were built up but never returned, so the
    function always yielded None; it now returns the list of np.polyfit
    results (highest-degree coefficient first, per polyfit's convention).
    """
    coeff_list = []
    for x_data, y_data in zip(x_data_list, y_data_list):
        for x, y in zip(x_data.T, y_data.T):
            coeff = np.polyfit(x, y, 2)
            coeff_list.append(coeff)
    return coeff_list
def interpolate_peaks(x_data_list, y_data_list):
    """Return the parabola vertex (xv, yv) for each (left, center, right) triple.

    Uses the closed-form quadratic through three points; see
    http://stackoverflow.com/questions/717762/how-to-calculate-the-vertex-of-a-parabola-given-three-point
    """
    vertices = []
    for xs, ys in zip(x_data_list, y_data_list):
        if len(ys) == 0:
            # No maxima for this curve
            vertices.append([])
            continue
        y1, y2, y3 = ys
        x1, x2, x3 = xs
        # Coefficients of A*x**2 + B*x + C through the three points
        denom = (x1 - x2) * (x1 - x3) * (x2 - x3)
        A = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / denom
        B = (x3 * x3 * (y1 - y2) + x2 * x2 * (y3 - y1) + x1 * x1 * (y2 - y3)) / denom
        C = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / denom
        # Vertex of the fitted parabola
        xv = -B / (2 * A)
        yv = C - B * B / (4 * A)
        vertices.append(np.vstack((xv.T, yv.T)).T)
    return vertices
def sample_uniform(kpts, nSamples=128):
"""
SeeAlso:
python -m pyhesaff.tests.test_ellipse --test-in_depth_ellipse --show
"""
nKp = len(kpts)
# Get keypoint matrix forms
invV_mats3x3 = ktool.get_invV_mats3x3(kpts)
V_mats3x3 = ktool.invert_invV_mats(invV_mats3x3)
#-------------------------------
# Get uniform points on a circle
circle_pts = homogenous_circle_pts(nSamples + 1)[0:-1]
assert circle_pts.shape == (nSamples, 3)
#-------------------------------
# Get uneven points sample (get_uneven_point_sample)
polygon1_list = matrix_multiply(invV_mats3x3, circle_pts.T).transpose(0, 2, 1)
assert polygon1_list.shape == (nKp, nSamples, 3)
# -------------------------------
# The transformed points are not sampled uniformly... Bummer
# We will sample points evenly across the sampled polygon
# then we will project them onto the ellipse
dists = np.array([circular_distance(arr) for arr in polygon1_list])
assert dists.shape == (nKp, nSamples)
# perimeter of the polygon
|
pacogomez/pyvcloud
|
tests/vcd_catalog_update.py
|
Python
|
apache-2.0
| 993
| 0.004028
|
import os
import unittest
import yaml
from pyvcloud.vcd.client import BasicLoginCredentials
from pyvcloud.vcd.client import Client
from pyvcloud.vcd.org import Org
from pyvcloud.vcd.test import TestCase
class UpdateCatalog(TestCase):
    """Integration tests that create a vCD catalog and then rename it."""

    def test_create_catalog(self):
        logged_in_org = self.client.get_org()
        org = Org(self.client, resource=logged_in_org)
        catalog = org.create_catalog(self.config['vcd']['catalog'], 'test catalog')
        assert self.config['vcd']['catalog'] == catalog.get('name')

    def test_update_catalog(self):
        logged_in_org = self.client.get_org()
        org = Org(self.client, resource=logged_in_org)
        catalog = org.update_catalog(self.config['vcd']['catalog'],
                                     self.config['vcd']['new_name'],
                                     self.config['vcd']['new_desc'])
        assert self.config['vcd']['new_name'] == catalog.get('name')
        assert self.config['vcd']['new_desc'] == catalog['Description']


if __name__ == '__main__':
    unittest.main()
|
infinity0/obfsproxy
|
obfsproxy/network/buffer.py
|
Python
|
bsd-3-clause
| 1,998
| 0
|
class Buffer(object):
    """
    A Buffer is a simple FIFO buffer. You write() stuff to it, and you
    read() them back. You can also peek() or drain() data.
    """

    def __init__(self, data=''):
        """
        Initialize a buffer with 'data'.
        """
        self.buffer = bytes(data)

    def read(self, n=-1):
        """
        Read and return 'n' bytes from the buffer.

        If 'n' is negative, read and return the whole buffer.
        If 'n' is larger than the size of the buffer, read and return
        the whole buffer.
        """
        if (n < 0) or (n > len(self.buffer)):
            the_whole_buffer = self.buffer
            self.buffer = bytes('')
            return the_whole_buffer

        data = self.buffer[:n]
        self.buffer = self.buffer[n:]
        return data

    def write(self, data):
        """
        Append 'data' to the buffer.
        """
        self.buffer = self.buffer + data

    def peek(self, n=-1):
        """
        Return 'n' bytes from the buffer, without draining them.

        If 'n' is negative, return the whole buffer.
        If 'n' is larger than the size of the buffer, return the whole
        buffer.
        """
        if (n < 0) or (n > len(self.buffer)):
            return self.buffer

        return self.buffer[:n]

    def drain(self, n=-1):
        """
        Drain 'n' bytes from the buffer.

        If 'n' is negative, drain the whole buffer.
        If 'n' is larger than the size of the buffer, drain the whole
        buffer.
        """
        if (n < 0) or (n > len(self.buffer)):
            self.buffer = bytes('')
            return

        self.buffer = self.buffer[n:]
        return

    def __len__(self):
        """Returns length of buffer. Used in len()."""
        return len(self.buffer)

    def __nonzero__(self):
        """
        Returns True if the buffer is non-empty.
        Used in truth-value testing.
        """
        # NOTE(review): __nonzero__ is Python 2; bytes('') also assumes
        # Python 2 — this module is py2-only as written.
        return True if len(self.buffer) else False
|
cckim47/kimlab
|
general/merge_tables2.py
|
Python
|
mit
| 2,357
| 0.036911
|
#!/usr/bin/python
#####################################################
# example.py - a program to .... #
# #
# Author: Dave Wheeler #
# #
# Purpose: merge count tables #
# #
# Usage: python merge_tables guide_file #
#####################################################
#looks for text guide_file
#that contains files to be merged with column headers (space separted)
#ie
#file1.counts untreated1
#file2.counts untreated2
#file3.counts treated1
#file4.counts treated2
#this will generated a tab separated table like this
#
#gene untreated1 untreated2 treated1 treated2
#gene1 0 0 0 0
#gene2 1 0 11 10
#.......
##############################################
import sys
try:
infile =
|
open(sys.argv[1])
|
except IndexError:
print "No guide file provided"
sys.exit()
#make dict of genes with list of counts
#list is ordered so treatments will be preserved.
#genes = {'gene1':[1,2,3,4]}
#header keeps track of treatment order, will be as read from config
col_header = []
genes = {}
#outfile = open('merged_counts.txt','w')
for line in infile:
filename,header = line.strip().split(' ')
try:
data_f = open(filename)
except IOError:
print "%s can't be found?"%filename
sys.exit()
col_header.append(header)
#read file and add gene and counts to the dict
for line in data_f:
gene,count = line.strip().split('\t')
if gene not in genes:
genes[gene] = [count]
else:
genes[gene].append(count)
#important to close file
data_f.close()
infile.close()
#outfile.write('gene\t'+'\t'.join(col_header)+'\n')
print('gene\t'+'\t'.join(col_header)+'\n')
for gene in genes:
data = genes[gene]
#make sure each treatment has a count for this gene
#this should catch most errors
try:
assert len(data) == len(col_header)
except AssertionError:
print "one of the treatment or genes is missing or extra"
print "data, found the problem here:"
print gene,data
print "while %s columns of treatments given" %len(col_header)
sys.exit()
out_data = gene+'\t'+'\t'.join(data)+'\n'
# outfile.write(out_data)
print(out_data)
#outfile.close()
#print "Merged table is 'merged_counts.txt'"
|
great-expectations/great_expectations
|
great_expectations/datasource/data_connector/__init__.py
|
Python
|
apache-2.0
| 1,441
| 0.000694
|
# isort:skip_file
from .data_connector import DataConnector
from .runtime_data_connector import RuntimeDataConnector
from .file_path_data_connector import FilePathDataConnector
from .configured_asset_file_path_data_connector import (
ConfiguredAssetFilePathDataConnector,
)
from .inferred_asset_file_path_data_connector import (
    InferredAssetFilePathDataConnector,
)
from .configured_asset_filesystem_data_connector import (
ConfiguredAssetFilesystemDataConnector,
)
from .inferred_asset_filesystem_data_connector import (
InferredAssetFilesystemDataConnector,
)
from .configured_asset_s3_data_connector import (
ConfiguredAssetS3DataConnector,
)
from .inferred_asset_s3_data_connector import InferredAssetS3DataConnector
from .configured_asset_azure_data_connector import (
ConfiguredAssetAzureDataConnector,
)
from .inferred_asset_azure_data_connector import (
InferredAssetAzureDataConnector,
)
from .configured_asset_gcs_data_connector import (
ConfiguredAssetGCSDataConnector,
)
from .inferred_asset_gcs_data_connector import (
InferredAssetGCSDataConnector,
)
from .configured_asset_sql_data_connector import (
ConfiguredAssetSqlDataConnector,
)
from .inferred_asset_sql_data_connector import (
InferredAssetSqlDataConnector,
)
from .configured_asset_dbfs_data_connector import ConfiguredAssetDBFSDataConnector
from .inferred_asset_dbfs_data_connector import InferredAssetDBFSDataConnector
|
mitsei/dlkit
|
tests/dlkit/primordium/locale/types/test_calendar.py
|
Python
|
mit
| 1,400
| 0.002857
|
import pytest
from dlkit.abstract_osid.osid import errors
from dlkit.primordium.locale.types.calendar import get_type_data
class TestCalendar(object):
    """Tests for get_type_data() lookups of calendar type identifiers."""

    def test_get_type_data_with_celestial(self):
        # NOTE(review): despite the method name, 'xhosa' resolves to the
        # plain "Calendar Types" domain, not a celestial one.
        results = get_type_data('xhosa')
        assert results['domain'] == 'Calendar Types'
        assert results['display_name'] == 'Xhosa Calendar Type'
        assert results['display_label'] == 'Xhosa'
        assert results['description'] == 'The time type for the Xhosa calendar.'

    def test_get_type_data_with_ancient_calendar(self):
        results = get_type_data('assyrian')
        assert results['domain'] == 'Ancient Calendar Types'
        assert results['display_name'] == 'Assyrian Calendar Type'
        assert results['display_label'] == 'Assyrian'
        assert results['description'] == 'The time type for the Assyrian calendar.'

    def test_get_type_data_with_alternate_calendar(self):
        results = get_type_data('middle_earth')
        assert results['domain'] == 'Alternative Calendar Types'
        assert results['display_name'] == 'MiddleEarth, Middle-earth Calendar Type'
        assert results['display_label'] == 'MiddleEarth, Middle-earth'
        assert results['description'] == 'The time type for the MiddleEarth, Middle-earth calendar.'

    def test_unknown_type(self):
        # Unrecognized identifiers must raise NotFound rather than return junk.
        with pytest.raises(errors.NotFound):
            get_type_data('foo')
|
SingularityHA/WebUI
|
infrastructure/migrations/0017_module_list_widget_setup_js.py
|
Python
|
gpl-3.0
| 420
| 0.002381
|
# encoding: utf8
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the free-form ``widget_setup_js`` field to ``module_list``."""

    dependencies = [
        ('infrastructure', '0016_auto_20140209_0826'),
    ]

    operations = [
        migrations.AddField(
            model_name='module_list',
            name='widget_setup_js',
            # Nullable and optional: existing rows get NULL, forms may leave it blank.
            field=models.TextField(null=True, blank=True),
            preserve_default=True,
        ),
    ]
|
tbattz/logsFlightGearReplay
|
timeControl.py
|
Python
|
gpl-3.0
| 4,536
| 0.02425
|
'''
Created on 11 Aug 2016
@author: bcub3d-build-ubuntu
'''
from Tkinter import *
import ttk
from threading import Thread
import readLog
import socket
import sendDataGUI
import math
import playbackFunctions
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import cutData
import plotClasses
import sys
# ============================== Setup =============================== #
# Log file to replay: first CLI argument, falling back to a sample log.
if len(sys.argv) > 1:
    filename = sys.argv[1]
else:
    filename = '45.BIN'
overwrite = False  # Overwrite csv file if it already exists
updateRate = 10    # GUI/data update rate (Hz)
# The main log headers offered for plotting, in alphabetical order.
mainHeaders = sorted(['GPS', 'IMU', 'RCIN', 'RCOU', 'BARO', 'POWR', 'CMD', 'ARSP', 'CURR', 'ATT', 'MAG', 'MODE', 'IMU2', 'AHR2', 'POS', 'MAG2', 'RATE', 'CTUN', 'STAT'])
# Flight Gear UDP Connection
UDP_IP = '127.0.0.1'
UDP_PORT = 5503
# ============================= Load Data ============================= #
# Convert the binary log to csv (cached unless overwrite=True) and load it.
csvfile = readLog.convert2CSV(filename, overwrite=overwrite)
data = readLog.readCsv(csvfile)
print('------------------------------------------------------------------')
# Create socket to flight gear
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect((UDP_IP, UDP_PORT))
# ======================== Simulation Thread ========================= #
# Background thread that streams the log data to Flight Gear.
simThread = sendDataGUI.outDataThread(data, sock, updateRate)
simThread.start()
# ========================= Tkinter Control ========================== #
master = Tk()
master.wm_title('Pixhawk Log Playback Controls')
# Time slider spanning the whole log; dragging it seeks the playback.
tickinterval = math.floor((data.timeVec[-1]/20.0)/100.0)*100
maxTime = data.timeVec[-1]
timeScale = Scale(master, from_=0, to=maxTime, tickinterval=tickinterval, orient=HORIZONTAL, length=1000, command=lambda x: simThread.updatePosition(bypass=True, bypassTime=float(timeScale.get()), mode=v))
timeScale.grid(row=0, columnspan=49)
# Mode Radio Button
v, rb = playbackFunctions.createModeRadioButton(master, row=1, column=12)
# Create Start/Pause Buttons
Button(master, text='Start Replay', command=lambda: simThread.startSim(timeScale.get())).grid(row=1, column=38)
Button(master, text='Pause Replay', command=simThread.pauseSim).grid(row=1, column=39)
# "Go To" entry box plus button to jump to an absolute time (in seconds).
e = Entry(master, width=6)
e.grid(row=1, column=1)
e.insert(0, "0")
Button(master, text='Go to:', command=lambda: playbackFunctions.goToButton(e, timeScale, simThread)).grid(row=1, column=0)
# Seconds Label
l = Label(master, text='s')
l.grid(row=1, column=2, sticky=W)
# Time marking: three colored set/jump bookmark pairs.
l2 = Label(master, text="Mark [Set,Jump]:")
l2.grid(row=1, column=42, sticky=E)
# Button Set 1
c1 = playbackFunctions.createMark(master, 'green', 10, 990)
s1 = Button(master, text='S1', bg='green', command=lambda: playbackFunctions.set1(timeScale, c1, master, maxTime, simThread)).grid(row=1, column=43)
j1 = Button(master, text="J1", bg='green', command=lambda: simThread.jump1(timeScale)).grid(row=1, column=44)
# Button Set 2
c2 = playbackFunctions.createMark(master, 'red', 10, 990)
s2 = Button(master, text='S2', bg='red', command=lambda: playbackFunctions.set2(timeScale, c2, master, maxTime, simThread)).grid(row=1, column=45)
j2 = Button(master, text="J2", bg='red', command=lambda: simThread.jump2(timeScale)).grid(row=1, column=46)
# Button Set 3
c3 = playbackFunctions.createMark(master, 'cyan', 10, 990)
s3 = Button(master, text='S3', bg='cyan', command=lambda: playbackFunctions.set3(timeScale, c3, master, maxTime, simThread)).grid(row=1, column=47)
j3 = Button(master, text="J3", bg='cyan', command=lambda: simThread.jump3(timeScale)).grid(row=1, column=48)
# Separator
ttk.Separator(master, orient=HORIZONTAL).grid(row=2, columnspan=49, sticky='ew')
# ======================== Tkinter Plotting ========================= #
plotID = 1
master.plotFrame = []
master = plotClasses.addNewFigure(plotID, master, mainHeaders, data, simThread)
# Time selector and forever-y-limits checkbox are attached to the first plot only.
master.plotFrame[0] = plotClasses.addTimeSelector(master.plotFrame[0])
master.plotFrame[0] = plotClasses.addForeverYLimits(master.plotFrame[0])
# ========================= Tkinter Loop ============================ #
# Manual event loop so the matplotlib canvases can be refreshed alongside
# normal Tk event processing.
while True:
    if simThread.running:
        timeScale.set(simThread.currTime)
    # Update Plots
    for plotFrame in master.plotFrame:
        plotFrame.updatePlot()
        plotFrame.canvas.draw()
    master.update_idletasks()
    master.update()
# Close Socket (only reached if the loop above is ever broken)
sock.close()
|
moijes12/treeherder
|
treeherder/model/management/commands/init_datasources.py
|
Python
|
mpl-2.0
| 1,226
| 0.000816
|
from optparse import make_option
from django.core.management.base import BaseCommand
from django.utils.six.moves import input
from treeherder.model.models import Datasource, Repository
class Command(BaseCommand):
    """Populate the datasource table and create the per-project databases.

    With --reset, existing datasources are irreversibly destroyed first,
    after an interactive confirmation.
    """
    # Fixed: the two fragments previously concatenated to "...table andcreate...".
    help = ("Populate the datasource table and "
            "create the connected databases")

    option_list = BaseCommand.option_list + (
        make_option('--reset',
                    action='store_true',
                    dest='reset',
                    default=False,
                    help='Reset the datasources if they already exists'),
    )

    def handle(self, *args, **options):
        if options["reset"]:
            confirm = input("""You have requested an init of the datasources.
This will IRREVERSIBLY DESTROY all data in the per-project databases.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """)
            if confirm == "yes":
                for ds in Datasource.objects.all():
                    ds.delete()
        # (Re)create a datasource for every active repository.
        projects = Repository.objects.filter(active_status='active').values_list('name', flat=True)
        for project in projects:
            Datasource.objects.get_or_create(project=project)
        Datasource.reset_cache()
|
caio2k/RIDE
|
utest/controller/ui/test_treecontroller.py
|
Python
|
apache-2.0
| 6,000
| 0.000333
|
import unittest
from robot.parsing.model import TestCase, TestCaseFile
from robot.utils.asserts import assert_equals
from robotide.controller.commands import ChangeTag
from robotide.controller.filecontrollers import TestCaseFileController
from robotide.controller.macrocontrollers import TestCaseController
from robotide.controller.tablecontrollers import TestCaseTableController
from robotide.controller.tags import Tag
from robotide.controller.ui.treecontroller import TreeController, _History, \
TestSelectionController
class ActionRegistererMock(object):
    """Stand-in for the action registerer used by TreeController tests."""

    def register_actions(self, action_collections):
        """Capture the registered collections for later inspection."""
        self.action_collections = action_collections

    def register_action(self, action):
        """Accept and discard a single-action registration."""
        pass
class TestTreeController(unittest.TestCase):
    """Tests for TreeController's action registration."""
    def test_register_tree_actions(self):
        """Registering tree actions publishes the Back/Forward navigation pair."""
        mocked_ar = ActionRegistererMock()
        TreeController(None, mocked_ar, None, None).register_tree_actions()
        self.assertEquals(
            ["Go &Back", "Go &Forward"],
            [a.name for a in mocked_ar.action_collections])
class _BaseTreeControllerTest(object):
    """Shared fixture: a TreeController wired to a minimal tree stub that
    records every SelectItem call in self._tree_mock_items."""
    def setUp(self):
        self.history = _History()
        self.controller = TreeController(
            self._tree_mock(), None, None, None, history=self.history)
        self.controller.add_to_history("Top Suite")
    def _tree_mock(self):
        # Any object that accepts attribute assignment will do; a lambda is
        # the cheapest such object. Only SelectItem is used by the controller.
        tree_mock = lambda: 0
        self._tree_mock_items = []
        tree_mock.SelectItem = lambda i: self._tree_mock_items.append(i)
        return tree_mock
    def _select_node(self, value):
        # Simulate the user selecting a node in the tree.
        self.controller.add_to_history(value)
    def _go_back_and_return_selection(self):
        self.controller.OnGoBack(None)
        return self._tree_mock_items[-1]
    def _go_forward_and_return_selection(self):
        self.controller.OnGoForward(None)
        return self._tree_mock_items[-1]
class TestNavigationHistory(_BaseTreeControllerTest, unittest.TestCase):
    """Back/Forward navigation over the tree selection history."""

    def test_go_back_one_level(self):
        self._select_node('Top Suite Fake UK 2')
        self.assertEquals('Top Suite', self._go_back_and_return_selection())

    def test_go_back_two_levels(self):
        nodes = ['Top Suite Fake UK 1', 'Sub Suite 1', 'Sub Suite 1 Fake UK 0']
        for name in nodes:
            self._select_node(name)
        nodes.reverse()
        for name in nodes[1:]:
            self.assertEquals(name, self._go_back_and_return_selection())

    def test_it_is_not_possible_to_go_back_farther_than_history(self):
        nodes = ['Top Suite Fake UK 1', 'Sub Suite 1', 'Sub Suite 1 Fake UK 0']
        for name in nodes:
            self._select_node(name)
        nodes.reverse()
        for name in nodes[1:] + ['Top Suite']:
            self._go_back_and_assert_selection(name)
        # One extra step back must stay clamped at the oldest entry.
        self._go_back_and_assert_selection('Top Suite')

    def test_go_back_with_selecting_in_between(self):
        nodes = ['Top Suite Fake UK 1', 'Sub Suite 1', 'Sub Suite 1 Fake UK 0']
        for name in nodes:
            self._select_node(name)
        self._go_back_and_assert_selection('Sub Suite 1')
        self._select_node('Sub Suite 2 Fake UK 0')
        self._go_back_and_assert_selection('Sub Suite 1')

    def test_go_forward(self):
        nodes = ['Top Suite Fake UK 1', 'Sub Suite 1', 'Sub Suite 1 Fake UK 0']
        for name in nodes:
            self._select_node(name)
        for _ in range(3):
            self.controller.OnGoBack(None)
        for name in nodes:
            self._go_forward_and_assert_selection(name)

    def test_go_back_and_forward_between_suite_and_resource(self):
        nodes = ['Top Suite Fake UK 0', 'Resource Keyword',
                 'Sub Suite 0 Fake UK 2']
        for name in nodes:
            self._select_node(name)
        self._go_back_and_assert_selection('Resource Keyword')
        self._go_back_and_assert_selection('Top Suite Fake UK 0')
        self._go_forward_and_assert_selection('Resource Keyword')
        self._go_forward_and_assert_selection('Sub Suite 0 Fake UK 2')

    def _go_back_and_assert_selection(self, expected_selection):
        assert_equals(self._go_back_and_return_selection(), expected_selection)

    def _go_forward_and_assert_selection(self, expected_selection):
        assert_equals(
            self._go_forward_and_return_selection(), expected_selection)
class TestTestSelectionController(unittest.TestCase):
    """Tests for TestSelectionController selection state and bulk tagging."""
    def setUp(self):
        self._tsc = TestSelectionController()
    def test_test_selection_is_empty_by_default(self):
        self.assertTrue(self._tsc.is_empty())
    def test_test_selection_is_not_empty_when_it_contains_a_test(self):
        self._tsc.select(self._create_test())
        self.assertFalse(self._tsc.is_empty())
    def test_test_selection_is_empty_after_removing_same_test_from_there_even_when_it_is_not_the_same_object(self):
        # Selection is toggled by equality, not identity: deselecting an
        # equal-but-distinct controller must still empty the selection.
        self._tsc.select(self._create_test())
        self._tsc.select(self._create_test(), False)
        self.assertTrue(self._tsc.is_empty())
    def test_adding_tag_to_selected_tests(self):
        tests = [self._create_test('test%d' % i) for i in range(10)]
        for t in tests:
            self._tsc.select(t)
        self._tsc.add_tag('foo')
        for t in tests:
            self.assertEqual([tag.name for tag in t.tags], ['foo'])
    def test_adding_a_tag_to_test_with_a_default_tag(self):
        test = self._create_test()
        test.datafile_controller.default_tags.execute(
            ChangeTag(Tag(None), 'default'))
        assert_equals([t.name for t in test.tags], ['default'])
        self._tsc.select(test)
        self._tsc.add_tag('custom')
        self.assertEqual([t.name for t in test.tags], ['default', 'custom'])
    def _create_test(self, name='test'):
        # Build a minimal TestCaseController backed by an in-memory suite.
        suite = TestCaseFile(source='suite')
        suite_controller = TestCaseFileController(suite)
        parent = TestCaseTableController(
            suite_controller, suite.testcase_table)
        test = TestCase(parent=lambda: 0, name=name)
        return TestCaseController(parent, test)
|
SlashNephy/PyChroner-Bot
|
plugins/SlashNephy/Swarm.py
|
Python
|
mit
| 3,264
| 0.004204
|
# coding=utf-8
import requests
import time
from pychroner import PluginMeta, PluginType
@PluginMeta(PluginType.Thread)
def do(pluginApi):
    """Poll the Swarm (Foursquare) weekly scoreboard once a minute, persist
    snapshots to MongoDB, and post rank-change notifications to Slack."""
    db = pluginApi.getMongoDB().getCollection("bot")
    slack = pluginApi.getSlack()
    while True:
        url = f"https://api.foursquare.com/v2/users/63379277/scoreboard?oauth_token={pluginApi.config.secret.SwarmAPIKey}&v=20160419"
        data = requests.get(url).json()
        # Upsert last week's final scoreboard.
        lastWeek = data["response"]["previousWeek"]
        tmp = {
            "id": lastWeek["endDate"],
            "users": lastWeek["scores"],
            "isFinished": True
        }
        if db.swarm.count({"id": lastWeek["endDate"], "isFinished": True}) == 0:
            db.swarm.insert(tmp)
        else:
            db.swarm.update_one({"id": lastWeek["endDate"], "isFinished": True}, {"$set": tmp})
        # Upsert the in-progress week, keeping the previous snapshot for diffing.
        thisWeek = data["response"]["currentWeek"]
        tmp = {
            "id": thisWeek["endDate"],
            "users": thisWeek["scores"],
            "isFinished": False
        }
        lastData = db.swarm.find_one({"id": thisWeek["endDate"], "isFinished": False})
        if db.swarm.count({"id": thisWeek["endDate"], "isFinished": False}) == 0:
            db.swarm.insert(tmp)
        else:
            db.swarm.update_one({"id": thisWeek["endDate"], "isFinished": False}, {"$set": tmp})
        if lastData is None:
            # First poll for this week: no previous snapshot to diff against,
            # so use the current one (no score delta, no notification text).
            # Fixes a crash (TypeError on lastData["users"]) at week rollover.
            lastData = tmp
        # Compare against the previous snapshot.
        myObj = [x for x in tmp["users"] if x["user"]["relationship"] == "self"][0]
        lastMyObj = [x for x in lastData["users"] if x["user"]["relationship"] == "self"][0]
        myRank = myObj["ranking"]
        myScore = myObj["score"]
        lastMyScore = lastMyObj["score"]
        text = f"現在のSwarm順位は{myRank}位です。"
        # Our score increased since the previous poll.
        if myScore > lastMyScore:
            # Not in first place: someone ("senior") is directly ahead of us.
            if myRank != 1:
                seniorObj = tmp["users"][myRank - 2]
                lastSeniorObj = lastData["users"][myRank - 2]
                if seniorObj["score"] > lastSeniorObj['score']:
                    seniorName = f"{seniorObj['user']['firstName']} {seniorObj['user']['lastName']}" if "lastName" in seniorObj["user"] else seniorObj["user"]["firstName"]
                    text += f"{myRank - 1}位の{seniorName}まであと{seniorObj['score'] - myObj['score']}コインで、"
            # Not in last place: someone ("junior") is directly behind us.
            if myRank != len(tmp["users"]):
                juniorObj = tmp["users"][myRank]
                lastJuniorObj = lastData["users"][myRank]
                if juniorObj["score"] > lastJuniorObj["score"]:
                    juniorName = f"{juniorObj['user']['firstName']} {juniorObj['user']['lastName']}" if "lastName" in juniorObj["user"] else juniorObj["user"]["firstName"]
                    text += f"{myRank + 1}位の{juniorName}にあと{myObj['score'] - juniorObj['score']}コインで追いつかれます。"
        slack.post(
            channel="#swarm",
            username="Swarm Ranking Checker",
            text=text
        )
        time.sleep(60)
|
The-Cypherfunks/The-Cypherfunks
|
share/rpcuser/rpcuser.py
|
Python
|
mit
| 1,117
| 0.005372
|
#!/usr/bin/env python2
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
    sys.stderr.write('Please include username as an argument.\n')
    # Exit non-zero: a missing argument is an error, not success.
    sys.exit(1)

username = sys.argv[1]

# This uses os.urandom() underneath
cryptogen = SystemRandom()

# Create a 16 byte hex salt.
# NOTE(review): hex() omits leading zeros, so the salt string length can vary
# slightly per byte; kept as-is to preserve the existing credential format.
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
hexseq = list(map(hex, salt_sequence))
salt = "".join([x[2:] for x in hexseq])

# Create a 32 byte urlsafe-base64 password.
password = base64.urlsafe_b64encode(os.urandom(32))

digestmod = hashlib.sha256

if sys.version_info.major >= 3:
    # On Python 3, hmac wants text decoded and accepts a digest name string.
    password = password.decode('utf-8')
    digestmod = 'SHA256'

# HMAC-SHA256(key=salt, msg=password) is the stored verifier.
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()

print("String to be appended to cypherfunk.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
|
jctanner/ansibullbot
|
tests/unit/triagers/plugins/test_rebuild_merge.py
|
Python
|
gpl-3.0
| 3,614
| 0.00249
|
#!/usr/bin/env python
import json
import logging
import tempfile
import unittest
import pytest
from tests.utils.issue_mock import IssueMock
from tests.utils.repo_mock import RepoMock
from tests.utils.helpers import get_issue
from ansibullbot.triagers.plugins.ci_rebuild import get_rebuild_merge_facts
from ansibullbot.wrappers.issuewrapper import IssueWrapper
from ansibullbot.wrappers.historywrapper import HistoryWrapper
'''
logging.level = logging.DEBUG
consoleHandler = logging.StreamHandler()
logFormatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
consoleHandler.setFormatter(logFormatter)
rootLogger = logging.getLogger()
rootLogger.addHandler(consoleHandler)
'''
def load_update_fetch(datatype):
    """Test stub: record the requested datatype and return an empty dataset."""
    logging.debug(datatype)
    return list()
class TestRebuildMergeFacts(unittest.TestCase):
    """Decisions of get_rebuild_merge_facts() for the rebuild/merge command."""

    def test0(self):
        # command issued, test ran, time to merge
        datafile = u'tests/fixtures/rebuild_merge/0_issue.yml'
        statusfile = u'tests/fixtures/rebuild_merge/0_prstatus.json'
        with get_issue(datafile, statusfile) as iw:
            meta = {
                u'is_pullrequest': True,
                u'is_needs_revision': False,
                u'is_needs_rebase': False,
                u'needs_rebuild': False,
                u'ci_run_number': 0,
            }
            rbfacts = get_rebuild_merge_facts(iw, meta, [u'superman'])
            assert rbfacts[u'needs_rebuild'] == False
            assert rbfacts[u'admin_merge'] == True

    def test1(self):
        # new test is in progress, do not rebuild and do not merge
        datafile = u'tests/fixtures/rebuild_merge/1_issue.yml'
        statusfile = u'tests/fixtures/rebuild_merge/1_prstatus.json'
        with get_issue(datafile, statusfile) as iw:
            meta = {
                u'is_pullrequest': True,
                u'is_needs_revision': False,
                u'is_needs_rebase': False,
                u'needs_rebuild': False,
                u'ci_run_number': 0,
            }
            rbfacts = get_rebuild_merge_facts(iw, meta, [u'superman'])
            assert rbfacts[u'needs_rebuild'] == False
            assert rbfacts[u'admin_merge'] == False

    def test2(self):
        # command given, time to rebuild but not merge
        datafile = u'tests/fixtures/rebuild_merge/2_issue.yml'
        statusfile = u'tests/fixtures/rebuild_merge/2_prstatus.json'
        with get_issue(datafile, statusfile) as iw:
            meta = {
                u'is_pullrequest': True,
                u'is_needs_revision': False,
                u'is_needs_rebase': False,
                u'needs_rebuild': False,
                u'ci_run_number': 0,
            }
            rbfacts = get_rebuild_merge_facts(iw, meta, [u'superman'])
            # A full rebuild sets needs_rebuild_all (not needs_rebuild).
            assert rbfacts[u'needs_rebuild_all'] == True
            assert rbfacts[u'admin_merge'] == False

    def test3(self):
        # command given, new commit created, do not rebuild or merge
        datafile = u'tests/fixtures/rebuild_merge/3_issue.yml'
        statusfile = u'tests/fixtures/rebuild_merge/3_prstatus.json'
        with get_issue(datafile, statusfile) as iw:
            meta = {
                u'is_pullrequest': True,
                u'is_needs_revision': False,
                u'is_needs_rebase': False,
                u'needs_rebuild': False,
                u'ci_run_number': 0,
            }
            rbfacts = get_rebuild_merge_facts(iw, meta, [u'superman'])
            assert rbfacts[u'needs_rebuild'] == False
            assert rbfacts[u'admin_merge'] == False
|
savi-dev/keystone
|
tests/test_migrate_nova_auth.py
|
Python
|
apache-2.0
| 5,879
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License'); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystone.common.sql import nova
from keystone.common.sql import util as sql_util
from keystone import config
from keystone.contrib.ec2.backends import sql as ec2_sql
from keystone.identity.backends import sql as identity_sql
from keystone import test
CONF = config.CONF
# Canonical nova auth export consumed by the import tests below: four users,
# three roles, role/tenant assignments, EC2 credentials and three tenants.
FIXTURE = {
    'users': [
        {'id': 'user1', 'name': 'uname1', 'password': 'acc1'},
        {'id': 'user4', 'name': 'uname4', 'password': 'acc1'},
        {'id': 'user2', 'name': 'uname2', 'password': 'acc2'},
        {'id': 'user3', 'name': 'uname3', 'password': 'acc3'},
    ],
    'roles': ['role1', 'role2', 'role3'],
    'role_user_tenant_list': [
        {'user_id': 'user1', 'role': 'role1', 'tenant_id': 'proj1'},
        {'user_id': 'user1', 'role': 'role2', 'tenant_id': 'proj1'},
        {'user_id': 'user4', 'role': 'role1', 'tenant_id': 'proj4'},
        {'user_id': 'user2', 'role': 'role1', 'tenant_id': 'proj1'},
        {'user_id': 'user2', 'role': 'role1', 'tenant_id': 'proj2'},
        {'user_id': 'user2', 'role': 'role2', 'tenant_id': 'proj2'},
        {'user_id': 'user3', 'role': 'role3', 'tenant_id': 'proj1'},
    ],
    'user_tenant_list': [
        {'tenant_id': 'proj1', 'user_id': 'user1'},
        {'tenant_id': 'proj4', 'user_id': 'user4'},
        {'tenant_id': 'proj1', 'user_id': 'user2'},
        {'tenant_id': 'proj2', 'user_id': 'user2'},
        {'tenant_id': 'proj1', 'user_id': 'user3'},
    ],
    'ec2_credentials': [
        {'access_key': 'acc1', 'secret_key': 'sec1', 'user_id': 'user1'},
        {'access_key': 'acc4', 'secret_key': 'sec4', 'user_id': 'user4'},
        {'access_key': 'acc2', 'secret_key': 'sec2', 'user_id': 'user2'},
        {'access_key': 'acc3', 'secret_key': 'sec3', 'user_id': 'user3'},
    ],
    'tenants': [
        {'description': 'desc1', 'id': 'proj1', 'name': 'pname1'},
        {'description': 'desc4', 'id': 'proj4', 'name': 'pname4'},
        {'description': 'desc2', 'id': 'proj2', 'name': 'pname2'},
    ],
}
class MigrateNovaAuth(test.TestCase):
    """End-to-end check that nova.import_auth() loads FIXTURE into the SQL
    identity and EC2 backends (users, tenants, roles, memberships, creds)."""
    def setUp(self):
        super(MigrateNovaAuth, self).setUp()
        self.config([test.etcdir('keystone.conf.sample'),
                     test.testsdir('test_overrides.conf'),
                     test.testsdir('backend_sql.conf')])
        sql_util.setup_test_database()
        self.identity_api = identity_sql.Identity()
        self.ec2_api = ec2_sql.Ec2()
    def _create_role(self, role_name):
        # Helper: create a role with a random id and the given name.
        role_id = uuid.uuid4().hex
        role_dict = {'id': role_id, 'name': role_name}
        self.identity_api.create_role(role_id, role_dict)
    def test_import(self):
        # 'role1' pre-exists to prove import_auth tolerates existing roles.
        self._create_role('role1')
        nova.import_auth(FIXTURE)
        users = {}
        for user in ['user1', 'user2', 'user3', 'user4']:
            users[user] = self.identity_api.get_user_by_name(user)
        tenants = {}
        for tenant in ['proj1', 'proj2', 'proj4']:
            tenants[tenant] = self.identity_api.get_tenant_by_name(tenant)
        # Expected tenant membership per imported user.
        membership_map = {
            'user1': ['proj1'],
            'user2': ['proj1', 'proj2'],
            'user3': ['proj1'],
            'user4': ['proj4'],
        }
        for (old_user, old_tenants) in membership_map.iteritems():
            user = users[old_user]
            membership = self.identity_api.get_tenants_for_user(user['id'])
            expected = [tenants[t]['id'] for t in old_tenants]
            self.assertEqual(set(expected), set(membership))
            # The imported password must authenticate against each tenant.
            for tenant_id in membership:
                password = None
                for _user in FIXTURE['users']:
                    if _user['id'] == old_user:
                        password = _user['password']
                self.identity_api.authenticate(user['id'], tenant_id, password)
        # EC2 credentials are created per (tenant, access_key) pair.
        for ec2_cred in FIXTURE['ec2_credentials']:
            user_id = users[ec2_cred['user_id']]['id']
            for tenant_id in self.identity_api.get_tenants_for_user(user_id):
                access = '%s:%s' % (tenant_id, ec2_cred['access_key'])
                cred = self.ec2_api.get_credential(access)
                actual = cred['secret']
                expected = ec2_cred['secret_key']
                self.assertEqual(expected, actual)
        roles = self.identity_api.list_roles()
        role_names = set([role['name'] for role in roles])
        self.assertEqual(role_names, set(['role2', 'role1', 'role3']))
        # Expected role grants per user/tenant; absent tenants mean no roles.
        assignment_map = {
            'user1': {'proj1': ['role1', 'role2']},
            'user2': {'proj1': ['role1'], 'proj2': ['role1', 'role2']},
            'user3': {'proj1': ['role3']},
            'user4': {'proj4': ['role1']},
        }
        for (old_user, old_tenant_map) in assignment_map.iteritems():
            tenant_names = ['proj1', 'proj2', 'proj4']
            for tenant_name in tenant_names:
                user = users[old_user]
                tenant = tenants[tenant_name]
                roles = self.identity_api.get_roles_for_user_and_tenant(
                    user['id'], tenant['id'])
                actual = [self.identity_api.get_role(role_id)['name']
                          for role_id in roles]
                expected = old_tenant_map.get(tenant_name, [])
                self.assertEqual(set(actual), set(expected))
|
starrify/scrapy
|
tests/test_utils_python.py
|
Python
|
bsd-3-clause
| 7,687
| 0.001041
|
import functools
import gc
import operator
import platform
import unittest
from datetime import datetime
from itertools import count
from warnings import catch_warnings
from scrapy.utils.python import (
memoizemethod_noargs, binary_is_text, equal_attributes,
WeakKeyCache, get_func_args, to_bytes, to_unicode,
without_none_values, MutableChain)
__doctests__ = ['scrapy.utils.python']
class MutableChainTest(unittest.TestCase):
    """Tests for scrapy.utils.python.MutableChain."""
    def test_mutablechain(self):
        m = MutableChain(range(2), [2, 3], (4, 5))
        m.extend(range(6, 7))
        m.extend([7, 8])
        m.extend([9, 10], (11, 12))
        self.assertEqual(next(m), 0)
        self.assertEqual(m.__next__(), 1)
        # The legacy .next() alias must still work but emit exactly one
        # deprecation warning pointing at __next__.
        with catch_warnings(record=True) as warnings:
            self.assertEqual(m.next(), 2)
            self.assertEqual(len(warnings), 1)
            self.assertIn('scrapy.utils.python.MutableChain.__next__',
                          str(warnings[0].message))
        self.assertEqual(list(m), list(range(3, 13)))
class ToUnicodeTest(unittest.TestCase):
    """Tests for scrapy.utils.python.to_unicode."""
    def test_converting_an_utf8_encoded_string_to_unicode(self):
        self.assertEqual(to_unicode(b'lel\xc3\xb1e'), 'lel\xf1e')
    def test_converting_a_latin_1_encoded_string_to_unicode(self):
        self.assertEqual(to_unicode(b'lel\xf1e', 'latin-1'), 'lel\xf1e')
    def test_converting_a_unicode_to_unicode_should_return_the_same_object(self):
        self.assertEqual(to_unicode('\xf1e\xf1e\xf1e'), '\xf1e\xf1e\xf1e')
    def test_converting_a_strange_object_should_raise_TypeError(self):
        self.assertRaises(TypeError, to_unicode, 423)
    def test_errors_argument(self):
        # Undecodable bytes become U+FFFD when errors='replace'.
        self.assertEqual(
            to_unicode(b'a\xedb', 'utf-8', errors='replace'),
            'a\ufffdb'
        )
class ToBytesTest(unittest.TestCase):
    """Tests for scrapy.utils.python.to_bytes."""

    def test_converting_a_unicode_object_to_an_utf_8_encoded_string(self):
        self.assertEqual(to_bytes('\xa3 49'), b'\xc2\xa3 49')

    def test_converting_a_unicode_object_to_a_latin_1_encoded_string(self):
        self.assertEqual(to_bytes('\xa3 49', 'latin-1'), b'\xa3 49')

    def test_converting_a_regular_bytes_to_bytes_should_return_the_same_object(self):
        self.assertEqual(to_bytes(b'lel\xf1e'), b'lel\xf1e')

    def test_converting_a_strange_object_should_raise_TypeError(self):
        self.assertRaises(TypeError, to_bytes, unittest)

    def test_errors_argument(self):
        # Unencodable characters are replaced with '?' when errors='replace'.
        self.assertEqual(
            to_bytes('a\ufffdb', 'latin-1', errors='replace'),
            b'a?b'
        )
class MemoizedMethodTest(unittest.TestCase):
    """Tests for the memoizemethod_noargs decorator."""
    def test_memoizemethod_noargs(self):
        class A:
            @memoizemethod_noargs
            def cached(self):
                return object()
            def noncached(self):
                return object()
        a = A()
        one = a.cached()
        two = a.cached()
        three = a.noncached()
        # The decorated method returns the same cached object on every call;
        # an undecorated method returns a fresh object.
        assert one is two
        assert one is not three
class BinaryIsTextTest(unittest.TestCase):
    """Tests for the binary_is_text() heuristic."""
    def test_binaryistext(self):
        assert binary_is_text(b"hello")
    def test_utf_16_strings_contain_null_bytes(self):
        # NUL bytes alone must not classify UTF-16 text as binary.
        assert binary_is_text("hello".encode('utf-16'))
    def test_one_with_encoding(self):
        assert binary_is_text(b"<div>Price \xa3</div>")
    def test_real_binary_bytes(self):
        assert not binary_is_text(b"\x02\xa3")
class UtilsPythonTestCase(unittest.TestCase):
    """Tests for equal_attributes, WeakKeyCache, get_func_args and
    without_none_values."""
    def test_equal_attributes(self):
        class Obj:
            pass
        a = Obj()
        b = Obj()
        # no attributes given return False
        self.assertFalse(equal_attributes(a, b, []))
        # not existent attributes
        self.assertFalse(equal_attributes(a, b, ['x', 'y']))
        a.x = 1
        b.x = 1
        # equal attribute
        self.assertTrue(equal_attributes(a, b, ['x']))
        b.y = 2
        # obj1 has no attribute y
        self.assertFalse(equal_attributes(a, b, ['x', 'y']))
        a.y = 2
        # equal attributes
        self.assertTrue(equal_attributes(a, b, ['x', 'y']))
        a.y = 1
        # different attributes
        self.assertFalse(equal_attributes(a, b, ['x', 'y']))
        # test callable attribute specs: a callable extracts the compared value
        a.meta = {}
        b.meta = {}
        self.assertTrue(equal_attributes(a, b, ['meta']))
        # compare obj.meta['z']
        a.meta['z'] = 1
        b.meta['z'] = 1
        get_z = operator.itemgetter('z')
        get_meta = operator.attrgetter('meta')
        def compare_z(obj):
            return get_z(get_meta(obj))
        self.assertTrue(equal_attributes(a, b, [compare_z, 'x']))
        # fail z equality
        a.meta['z'] = 2
        self.assertFalse(equal_attributes(a, b, [compare_z, 'x']))
    def test_weakkeycache(self):
        class _Weakme:
            pass
        _values = count()
        wk = WeakKeyCache(lambda k: next(_values))
        k = _Weakme()
        v = wk[k]
        self.assertEqual(v, wk[k])
        self.assertNotEqual(v, wk[_Weakme()])
        self.assertEqual(v, wk[k])
        del k
        # The weak key is gone; after GC the cache must drop its entry.
        for _ in range(100):
            if wk._weakdict:
                gc.collect()
        self.assertFalse(len(wk._weakdict))
    def test_get_func_args(self):
        def f1(a, b, c):
            pass
        def f2(a, b=None, c=None):
            pass
        def f3(a, b=None, *, c=None):
            pass
        class A:
            def __init__(self, a, b, c):
                pass
            def method(self, a, b, c):
                pass
        class Callable:
            def __call__(self, a, b, c):
                pass
        a = A(1, 2, 3)
        cal = Callable()
        partial_f1 = functools.partial(f1, None)
        partial_f2 = functools.partial(f1, b=None)
        partial_f3 = functools.partial(partial_f2, None)
        self.assertEqual(get_func_args(f1), ['a', 'b', 'c'])
        self.assertEqual(get_func_args(f2), ['a', 'b', 'c'])
        self.assertEqual(get_func_args(f3), ['a', 'b', 'c'])
        self.assertEqual(get_func_args(A), ['a', 'b', 'c'])
        self.assertEqual(get_func_args(a.method), ['a', 'b', 'c'])
        self.assertEqual(get_func_args(partial_f1), ['b', 'c'])
        self.assertEqual(get_func_args(partial_f2), ['a', 'c'])
        self.assertEqual(get_func_args(partial_f3), ['c'])
        self.assertEqual(get_func_args(cal), ['a', 'b', 'c'])
        self.assertEqual(get_func_args(object), [])
        if platform.python_implementation() == 'CPython':
            # TODO: how do we fix this to return the actual argument names?
            self.assertEqual(get_func_args(str.split), [])
            self.assertEqual(get_func_args(" ".join), [])
            self.assertEqual(get_func_args(operator.itemgetter(2)), [])
        elif platform.python_implementation() == 'PyPy':
            self.assertEqual(get_func_args(str.split, stripself=True), ['sep', 'maxsplit'])
            self.assertEqual(get_func_args(operator.itemgetter(2), stripself=True), ['obj'])
            build_date = datetime.strptime(platform.python_build()[1], '%b %d %Y')
            if build_date >= datetime(2020, 4, 7): # PyPy 3.6-v7.3.1
                self.assertEqual(get_func_args(" ".join, stripself=True), ['iterable'])
            else:
                self.assertEqual(get_func_args(" ".join, stripself=True), ['list'])
    def test_without_none_values(self):
        self.assertEqual(without_none_values([1, None, 3, 4]), [1, 3, 4])
        self.assertEqual(without_none_values((1, None, 3, 4)), (1, 3, 4))
        self.assertEqual(
            without_none_values({'one': 1, 'none': None, 'three': 3, 'four': 4}),
            {'one': 1, 'three': 3, 'four': 4})
if __name__ == "__main__":
unittest.main()
|
CroissanceCommune/autonomie
|
autonomie/views/admin/accompagnement/activities.py
|
Python
|
gpl-3.0
| 2,419
| 0
|
# -*- coding: utf-8 -*-
# * Authors:
# * TJEBBES Gaston <g.t@majerti.fr>
# * Arezki Feth <f.a@majerti.fr>;
# * Miotte Julien <j.m@majerti.fr>;
import os
from pyramid.httpexceptions import HTTPFound
from autonomie.forms.admin import (
ActivityConfigSchema,
)
from autonomie.models.activity import (
ActivityType,
ActivityMode,
ActivityAction,
)
from autonomie.views.admin.accompagnement import (
BaseAdminAccompagnement,
AccompagnementIndexView,
ACCOMPAGNEMENT_URL,
)
ACTIVITY_URL = os.path.join(ACCOMPAGNEMENT_URL, 'activity')
class AdminActivitiesView(BaseAdminAccompagnement):
    """
    Activities Admin view: edit activity types, modes, actions and the
    PDF footer used by the Rendez-vous module.
    """
    title = u"Configuration du module de Rendez-vous"
    schema = ActivityConfigSchema(title=u"")
    route_name = ACTIVITY_URL

    def before(self, form):
        """Populate the form with the current activity configuration."""
        # Only active types and top-level active actions are editable;
        # modes carry no active flag.
        query = ActivityType.query()
        types = query.filter_by(active=True)
        modes = ActivityMode.query()
        query = ActivityAction.query()
        query = query.filter_by(parent_id=None)
        actions = query.filter_by(active=True)
        activity_appstruct = {
            'footer': self.request.config.get("activity_footer", ""),
            'types': [type_.appstruct() for type_ in types],
            'modes': [mode.appstruct() for mode in modes],
            'actions': self._recursive_action_appstruct(actions)
        }
        self._add_pdf_img_to_appstruct('activity', activity_appstruct)
        form.set_appstruct(activity_appstruct)

    def submit_success(self, activity_appstruct):
        """
        Handle successful activity configuration
        """
        self.store_pdf_conf(activity_appstruct, 'activity')
        # We disable/delete the elements that are no longer in the appstruct
        self.disable_types(activity_appstruct)
        self.disable_actions(activity_appstruct, ActivityAction)
        new_modes = self.delete_modes(activity_appstruct)
        self.dbsession.flush()
        self.add_types(activity_appstruct)
        self.add_actions(activity_appstruct, "actions", ActivityAction)
        self.add_modes(new_modes)
        self.request.session.flash(self.validation_msg)
        return HTTPFound(
            self.request.route_path(self.parent_view.route_name)
        )
def includeme(config):
    """Register the activity admin route and view on the Pyramid config."""
    config.add_route(ACTIVITY_URL, ACTIVITY_URL)
    config.add_admin_view(AdminActivitiesView, parent=AccompagnementIndexView)
|
trel/irods-qgis
|
irods/connection.py
|
Python
|
gpl-2.0
| 5,101
| 0.007842
|
import socket
import logging
import struct
import hashlib
from irods.message import (iRODSMessage, StartupPack, AuthResponse, AuthChallenge,
OpenedDataObjRequest, FileSeekResponse, StringStringMap)
from irods.exception import get_exception_by_code, NetworkException
from irods import MAX_PASSWORD_LENGTH
from irods.api_number import api_number
logger = logging.getLogger(__name__)
class Connection(object):
    """A single iRODS server connection: TCP connect, challenge/response
    login, and thin wrappers around the data-object (file) API calls.

    NOTE(review): this is Python 2 style code (byte ``str`` assumed for
    passwords/digests, bare ``except``); porting to Python 3 would need
    explicit bytes handling in ``_login`` and ``send``.
    """

    def __init__(self, pool, account):
        # pool: owning connection pool (used by release()); account carries
        # host/port/credentials.  Connect and authenticate eagerly.
        self.pool = pool
        self.socket = None
        self.account = account
        self._connect()
        self._login()

    def __del__(self):
        # Best-effort cleanup: send RODS_DISCONNECT and close the socket.
        if self.socket:
            self.disconnect()

    def send(self, message):
        """Serialize and send one iRODS message; destroy the connection on
        any socket failure so the pool never reuses a broken socket."""
        str = message.pack()  # NOTE(review): shadows the ``str`` builtin
        logger.debug(str)
        try:
            self.socket.sendall(str)
        except:
            logger.error("Unable to send message. Connection to remote host may have closed. Releasing connection from pool.")
            self.release(True)
            raise NetworkException("Unable to send message")

    def recv(self):
        """Receive one message; a negative ``int_info`` is a server error
        code, mapped to and raised as the corresponding exception."""
        msg = iRODSMessage.recv(self.socket)
        if msg.int_info < 0:
            raise get_exception_by_code(msg.int_info)
        return msg

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Context-manager exit returns the connection to the pool.
        self.release()

    def release(self, destroy=False):
        # destroy=True drops the connection instead of returning it.
        self.pool.release_connection(self, destroy)

    def _connect(self):
        """Open the TCP socket and perform the RODS_CONNECT handshake."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((self.account.host, self.account.port))
        except socket.error:
            raise Exception("Could not connect to specified host and port: %s:%s" % (self.account.host, self.account.port))
        self.socket = s
        main_message = StartupPack(
            (self.account.proxy_user, self.account.proxy_zone),
            (self.account.client_user, self.account.client_zone)
        )
        msg = iRODSMessage(type='RODS_CONNECT', msg=main_message)
        self.send(msg)
        # The server replies with its version; read and currently unused.
        version_msg = self.recv()

    def disconnect(self):
        """Send RODS_DISCONNECT and close the socket."""
        disconnect_msg = iRODSMessage(type='RODS_DISCONNECT')
        self.send(disconnect_msg)
        self.socket.close()

    def _login(self):
        """Perform iRODS challenge/response password authentication."""
        # authenticate (API 703: auth request)
        auth_req = iRODSMessage(type='RODS_API_REQ', int_info=703)
        self.send(auth_req)
        # challenge
        challenge_msg = self.recv()
        logger.debug(challenge_msg.msg)
        challenge = challenge_msg.get_main_message(AuthChallenge).challenge
        # Null-pad the password to the fixed protocol length, then respond
        # with md5(challenge + padded_password).
        padded_pwd = struct.pack("%ds" % MAX_PASSWORD_LENGTH, self.account.password)
        m = hashlib.md5()
        m.update(challenge)
        m.update(padded_pwd)
        encoded_pwd = m.digest()
        # NUL bytes are mapped to 0x01 before transmission -- presumably
        # mirrored server-side as part of the wire protocol; confirm.
        encoded_pwd = encoded_pwd.replace('\x00', '\x01')
        pwd_msg = AuthResponse(response=encoded_pwd, username=self.account.proxy_user)
        pwd_request = iRODSMessage(type='RODS_API_REQ', int_info=704, msg=pwd_msg)
        self.send(pwd_request)
        auth_response = self.recv()

    def read_file(self, desc, size):
        """Read up to ``size`` bytes from the opened data object ``desc``;
        the payload comes back in the reply's byte-stream part."""
        message_body = OpenedDataObjRequest(
            l1descInx=desc,
            len=size,
            whence=0,
            oprType=0,
            offset=0,
            bytesWritten=0,
            KeyValPair_PI=StringStringMap()
        )
        message = iRODSMessage('RODS_API_REQ', msg=message_body,
                               int_info=api_number['DATA_OBJ_READ_AN'])
        logger.debug(desc)
        self.send(message)
        response = self.recv()
        return response.bs

    def write_file(self, desc, string):
        """Write ``string`` to data object ``desc``; returns the server's
        ``int_info`` (bytes written / status)."""
        message_body = OpenedDataObjRequest(
            l1descInx=desc,
            len=len(string),
            whence=0,
            oprType=0,
            offset=0,
            bytesWritten=0,
            KeyValPair_PI=StringStringMap()
        )
        message = iRODSMessage('RODS_API_REQ', msg=message_body,
                               bs=string,
                               int_info=api_number['DATA_OBJ_WRITE_AN'])
        self.send(message)
        response = self.recv()
        return response.int_info

    def seek_file(self, desc, offset, whence):
        """Seek within data object ``desc`` (whence as in ``os.lseek``);
        returns the resulting absolute offset."""
        message_body = OpenedDataObjRequest(
            l1descInx=desc,
            len=0,
            whence=whence,
            oprType=0,
            offset=offset,
            bytesWritten=0,
            KeyValPair_PI=StringStringMap()
        )
        message = iRODSMessage('RODS_API_REQ', msg=message_body,
                               int_info=api_number['DATA_OBJ_LSEEK_AN'])
        self.send(message)
        response = self.recv()
        offset = response.get_main_message(FileSeekResponse).offset
        return offset

    def close_file(self, desc):
        """Close the opened data object ``desc``."""
        message_body = OpenedDataObjRequest(
            l1descInx=desc,
            len=0,
            whence=0,
            oprType=0,
            offset=0,
            bytesWritten=0,
            KeyValPair_PI=StringStringMap()
        )
        message = iRODSMessage('RODS_API_REQ', msg=message_body,
                               int_info=api_number['DATA_OBJ_CLOSE_AN'])
        self.send(message)
        response = self.recv()
|
pirate/bookmark-archiver
|
archivebox/core/__init__.py
|
Python
|
mit
| 32
| 0
|
# NOTE(review): explicit __package__ -- presumably to make relative imports
# work when this module is loaded outside the normal package context; confirm.
__package__ = 'archivebox.core'
|
mwhooker/messier
|
messier/lib/aws/resource.py
|
Python
|
bsd-2-clause
| 1,293
| 0.000773
|
import boto.regioninfo
import datetime
from collections import MutableMapping
from json import JSONEncoder, dumps
from time import mktime
def json_encoder(obj):
    """``json.dumps(default=...)`` hook for objects boto hands back.

    datetime/date values become integer Unix timestamps (local time, via
    ``mktime``) and boto ``RegionInfo`` objects are reduced to their region
    name; anything else raises ``TypeError`` per the ``default`` contract.
    """
    # Check datetimes first: this path needs no third-party attribute
    # access.  ``datetime.date`` also covers ``datetime.datetime``.
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return int(mktime(obj.timetuple()))
    if isinstance(obj, boto.regioninfo.RegionInfo):
        return obj.name
    # Bug fix: the original called ``JSONEncoder.default(self, obj)`` from a
    # plain function where ``self`` is undefined (NameError).  Delegate to an
    # encoder instance, which raises the expected TypeError.
    return JSONEncoder().default(obj)
class Resource(MutableMapping):
    """A mutable-mapping view over an AWS resource's properties.

    ``Name``/``Type`` describe the resource (``Type`` defaults to the
    concrete class name); all remaining keyword arguments become mapping
    entries, except the boto ``connection`` object, which is dropped
    because it is not JSON-serializable.
    """

    def __init__(self, Name=None, Type=None, encoder=json_encoder, **properties):
        self.name = Name
        self.__type__ = Type
        if not self.__type__:
            # Fall back to the subclass name as the resource type.
            self.__type__ = self.__class__.__name__
        self.__encoder__ = encoder
        self.__store__ = dict()
        props = dict(**properties)
        # Bug fix: the original ``del props["connection"]`` raised KeyError
        # when no ``connection`` kwarg was supplied; drop it only if present.
        props.pop("connection", None)
        self.update(props)

    # MutableMapping protocol: delegate straight to the backing dict.
    def __getitem__(self, key):
        return self.__store__[key]

    def __setitem__(self, key, value):
        self.__store__[key] = value

    def __delitem__(self, key):
        del self.__store__[key]

    def __iter__(self):
        return iter(self.__store__)

    def __len__(self):
        return len(self.__store__)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self.name)

    def to_json(self):
        """Serialize the stored properties via the configured encoder."""
        return dumps(self.__store__, default=self.__encoder__)
|
robmcmullen/peppy
|
peppy/plugins/text_transforms.py
|
Python
|
gpl-2.0
| 25,843
| 0.003018
|
# peppy Copyright (c) 2006-2010 Rob McMullen
# Copyright (c) 2009 Christopher Barker
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""Some simple text transformation actions.
This plugin is a collection of some simple text transformation actions that
should be applicable to more than one major mode.
"""
import os, glob
import re

import wx

from peppy.yapsy.plugins import *
from peppy.actions.minibuffer import *
from peppy.lib.wordwrap import texwrap
from peppy.actions.base import *
from peppy.actions import *
from peppy.debug import *
class CommentRegionAvailableAction(TextModificationAction):
    """Mixin that limits an action to major modes able to comment lines.

    The action is offered only for mode classes exposing ``commentRegion``.
    """
    @classmethod
    def worksWithMajorMode(cls, modecls):
        return hasattr(modecls, "commentRegion")
class CommentRegion(CommentRegionAvailableAction):
    """Comment a line or region.
    This will use the current mode's comment characters to comment out
    entire blocks of lines. The comment will start in column zero, and
    if there is an end comment delimiter, it will appear as the last
    character(s) before the end of line indicatior.
    """
    name = "&Comment Region"
    default_menu = ("Transform", -600)
    key_bindings = {'emacs': 'C-c C-c',
                    'mac': 'C-3',
                    }
    def action(self, index=-1, multiplier=1):
        # multiplier == 4 corresponds to an emacs C-u prefix; in that case
        # False is passed -- presumably flipping comment/uncomment direction
        # in commentRegion(); confirm against the major-mode implementation.
        self.mode.commentRegion(multiplier != 4)
class UncommentRegion(CommentRegionAvailableAction):
    """Uncomment a line or region.
    This will use the current mode's comment characters to identify the
    lines in the region that have been commented out, and will remove
    the comment character(s) from the line.
    """
    name = "&Uncomment Region"
    default_menu = ("Transform", 601)
    key_bindings = {'emacs': 'C-u C-c C-c',
                    'mac': 'M-3',
                    }
    def action(self, index=-1, multiplier=1):
        # False presumably selects the "remove comments" direction -- the
        # mirror of CommentRegion passing True; confirm in the major mode.
        self.mode.commentRegion(False)
class Tabify(LineOrRegionMutateAction):
    """Convert leading runs of spaces into tabs (plus leftover spaces)."""
    name = "&Tabify"
    default_menu = (("Transform/Whitespace", -800), 100)

    def mutateLines(self, lines):
        tab_size = self.mode.locals.tab_size
        mutated = []
        for line in lines:
            stripped = line.lstrip(' ')
            indent = len(line) - len(stripped)
            if indent:
                # Whole tab-stops become tabs; the remainder stays spaces.
                tabs, spaces = divmod(indent, tab_size)
                mutated.append('\t' * tabs + ' ' * spaces + stripped)
            else:
                mutated.append(line)
        return mutated
class Untabify(LineOrRegionMutateAction):
    """Convert leading tabs into the equivalent run of spaces."""
    name = "&Untabify"
    default_menu = ("Transform/Whitespace", 110)

    def mutateLines(self, lines):
        tab_size = self.mode.locals.tab_size
        mutated = []
        for line in lines:
            stripped = line.lstrip('\t')
            tabs = len(line) - len(stripped)
            if tabs:
                mutated.append(' ' * (tabs * tab_size) + stripped)
            else:
                mutated.append(line)
        return mutated
class RemoveTrailingWhitespace(LineOrRegionMutateAction):
    """Strip tabs/spaces that sit just before the (optional) line ending.

    Operates on the current line, or on every line of the selected region.
    """
    name = "Remove Trailing Whitespace"
    default_menu = ("Transform/Whitespace", 200)

    def mutateLines(self, lines):
        # group 1: content, group 2: trailing tabs/spaces, group 3: EOL.
        trailing = re.compile('(.*?)([\t ]+)([\r\n]+)?$')
        mutated = []
        for line in lines:
            hit = trailing.match(line)
            if hit:
                # Keep the content plus whatever follows the whitespace
                # (i.e. the line ending, when present).
                mutated.append(hit.group(1) + line[hit.end(2):])
            else:
                mutated.append(line)
        return mutated
class RemoveBlankLines(LineOrRegionMutateAction):
    """Drop lines containing only tabs/spaces before their line ending.

    Operates on the current line, or on every line of the selected region.
    """
    name = "Remove Blank Lines"
    default_menu = ("Transform/Whitespace", 300)

    def mutateLines(self, lines):
        blank = re.compile('^[\t ]*[\r\n]+?$')
        return [line for line in lines if not blank.match(line)]
class CapitalizeWord(WordOrRegionMutateAction):
    """Title-case the current word or words in the highlighted region.
    This will also move the cursor to the start of the next word.
    """
    alias = "capitalize-region-or-word"
    name = "Capitalize"
    key_bindings = {'emacs': 'M-c',}
    default_menu = (("Transform/Case", 810), 100)
    def mutate(self, txt):
        """Change to title case -- first letter capitalized, rest
        lower case.
        """
        # str.title() capitalizes the first letter of each word and
        # lowercases the remainder.
        return txt.title()
class UpcaseWord(WordOrRegionMutateAction):
    """Upcase the current word or the highlighted region.
    This will alse move the cursor to the start of the next word.
    """
    alias = "upcase-region-or-word"
    name = "Upcase"
    key_bindings = {'emacs': 'M-u',}
    default_menu = ("Transform/Case", 101)
    icon = "icons/text_uppercase.png"
    default_toolbar = False
    def mutate(self, txt):
        """Change to all upper case.
        """
        return txt.upper()
class DowncaseWord(WordOrRegionMutateAction):
    """Downcase the current word or the highlighted region.
    This will also move the cursor to the start of the next word.
    """
    alias = "downcase-region-or-word"
    name = "Downcase"
    key_bindings = {'emacs': 'M-l',}
    default_menu = ("Transform/Case", 102)
    icon = "icons/text_lowercase.png"
    default_toolbar = False
    def mutate(self, txt):
        """Change to all lower case.
        """
        return txt.lower()
class SwapcaseWord(WordOrRegionMutateAction):
    """Swap the case of the current word or the highlighted region.
    This will also move the cursor to the start of the next word.
    """
    alias = "swapcase-region-or-word"
    name = "Swap case"
    default_menu = ("Transform/Case", 103)
    default_toolbar = False
    def mutate(self, txt):
        """Change to the opposite case (upper to lower and vice-versa).
        """
        return txt.swapcase()
class Rot13(RegionMutateAction):
    """Convert the region using the rot13 encoding."""
    alias = "rot13-region"
    name = "Rot13"
    default_menu = ("Transform", -900)
    def mutate(self, txt):
        """Apply the ROT13 substitution cipher to the text.

        NOTE(review): the original docstring said "Change to all lower
        case" (copy-paste error).  The ``'rot13'`` codec is str-to-str in
        Python 2 only; this would fail on Python 3.
        """
        return txt.encode('rot13')
class Backslashify(LineOrRegionMutateAction):
    """Escape the end of line character by adding backslashes.
    Add backslashes to the end of every line (except the last one) in the
    region so that the end-of-line character is escaped. This is useful, for
    instance, within C or C++ C{#define} blocks that contain multiple line
    macros.
    """
    name = "Backslashify"
    default_menu = ("Transform", 910)
    def isActionAvailable(self):
        """The action is only available if a region has multiple lines."""
        (pos, end) = self.mode.GetSelection()
        # NOTE(review): ``< end_line - 1`` requires the selection to span at
        # least two line breaks; confirm whether a plain two-line selection
        # was meant to qualify as well.
        return self.mode.LineFromPosition(pos) < self.mode.LineFromPosition(end) - 1
    def mutateLines(self, lines):
        """Add backslashes to the end of all lines but the last
        """
        out = []
        # " \" plus the platform line separator, appended after stripping
        # any existing trailing whitespace from each line.
        eol = " \\" + self.mode.getLinesep()
        for line in lines[:-1]:
            out.append(line.rstrip() + eol)
        out.append(lines[-1])
        return out
class UnBackslashify(LineOrRegionMutateAction):
"""Remove backslashes from end of line.
Remove backslashes from the end of every line in the region so that the
end- of-line character is not escaped anymore. This is the opposite of
L{Backslashify}.
"""
alias = "remove-backslashes"
name = "Remove Backslashes"
default_menu = ("Transform", 911)
def isActionAvailable(self):
"""The action is only available if a region has multiple lines."""
(pos, end) = self.mode.GetSelection()
return self.mode.LineFromPosition(pos) < self.mode.LineFromPosition(end) - 1
def mutateLines(s
|
PorthTechnolegauIaith/moses-smt
|
scripts/mtdk/mt_update_compress_moses_ini.py
|
Python
|
mit
| 493
| 0.03854
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Rewrite a moses.ini so it points at compact phrase/reordering tables.

Usage: mt_update_compress_moses_ini.py <input moses.ini> <output moses.ini>

``PhraseDictionaryMemory`` entries become ``PhraseDictionaryCompact``
(pointing at ``table.minphr``) and ``LexicalReordering`` entries lose the
``.gz`` suffix.  All other lines are copied through unchanged.
"""
import sys

in_path = str(sys.argv[1])
out_path = str(sys.argv[2])

# Bug fix: the output handle was opened without a context manager and was
# leaked on any error; the original also shadowed the ``file`` builtin.
with open(in_path) as src, open(out_path, 'w') as dst:
    for line in src:
        if line.startswith('PhraseDictionaryMemory'):
            line = line.replace('PhraseDictionaryMemory', 'PhraseDictionaryCompact')
            line = line.replace('table.gz', 'table.minphr')
        if line.startswith('LexicalReordering'):
            line = line.replace('bidirectional-fe.gz', 'bidirectional-fe')
        dst.write(line)
|
dsweet04/rekall
|
rekall-agent/rekall_agent/flows/__init__.py
|
Python
|
gpl-2.0
| 156
| 0
|
from rekall_agent.flows
|
import artifact_flow
from rekall_agent.flows import collect
from rekall_agent.flows import find
from rekall_agent.flows import yar
|
a
|
ericchan2012/django-blog
|
Blog/views.py
|
Python
|
apache-2.0
| 6,779
| 0.003772
|
# -*- coding: UTF-8 -*-
from django.shortcuts import render
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from Blog.models import Article, Category, Tag, BlogComment
from Blog.forms import BlogCommentForm
from markdown import markdown
from django.views.generic.edit import FormView
from django.shortcuts import render, get_object_or_404, HttpResponseRedirect
from django.db import connection
class IndexView(ListView):
    """Blog home page: all published articles plus sidebar collections."""
    template_name = "blog/index.html"
    context_object_name = "article_list"
    def get_queryset(self):
        # Only published articles (status 'p'); bodies are rendered from
        # Markdown up front so templates can emit them directly.
        article_list = Article.objects.filter(status='p')
        for article in article_list:
            article.body = markdown(article.body, extras=['fenced-code-blocks'], )
        return article_list
    def get_context_data(self, **kwargs):
        # Sidebar, navigation, hot/carousel and archive data for the index.
        kwargs['category_list'] = Category.objects.all().order_by('name')
        kwargs['navigation_list'] = Article.objects.all().order_by('name')
        kwargs['hotarticle_list'] = Article.objects.all().order_by('-views')[:8]
        kwargs['carouselarticle_list'] = Article.objects.all().order_by('-last_modified_time')[:3]
        kwargs['tag_list'] = Tag.objects.all().order_by('name')
        kwargs['date_archive'] = Article.objects.archive()
        return super(IndexView, self).get_context_data(**kwargs)
class NavigationView(ListView):
    """Published articles filtered by the navigation entry in the URL."""
    template_name = "blog/index.html"
    context_object_name = "article_list"
    def get_queryset(self):
        # nav_id comes from the URLconf; bodies pre-rendered from Markdown.
        article_list = Article.objects.filter(navigation=self.kwargs['nav_id'], status='p')
        for article in article_list:
            article.body = markdown(article.body, extras=['fenced-code-blocks'], )
        return article_list
    def get_context_data(self, **kwargs):
        kwargs['navigation_list'] = Article.objects.all().order_by('name')
        kwargs['category_list'] = Category.objects.all().order_by('name')
        kwargs['hotarticle_list'] = Article.objects.all().order_by('-views')[:8]
        kwargs['tag_list'] = Tag.objects.all().order_by('name')
        return super(NavigationView, self).get_context_data(**kwargs)
class CategoryView(ListView):
    """Published articles filtered by the category id in the URL."""
    template_name = "blog/index.html"
    context_object_name = "article_list"
    def get_queryset(self):
        # cate_id comes from the URLconf; bodies pre-rendered from Markdown.
        article_list = Article.objects.filter(category=self.kwargs['cate_id'], status='p')
        for article in article_list:
            article.body = markdown(article.body, extras=['fenced-code-blocks'], )
        return article_list
    def get_context_data(self, **kwargs):
        kwargs['category_list'] = Category.objects.all().order_by('name')
        return super(CategoryView, self).get_context_data(**kwargs)
class TagView(ListView):
    """Published articles carrying the tag given in the URL."""
    template_name = "blog/index.html"
    context_object_name = "article_list"
    def get_queryset(self):
        """
        Fetch every published article under the given tag.
        """
        article_list = Article.objects.filter(tags=self.kwargs['tag_id'], status='p')
        for article in article_list:
            article.body = markdown(article.body, extras=['fenced-code-blocks'], )
        return article_list
    def get_context_data(self, **kwargs):
        kwargs['tag_list'] = Tag.objects.all().order_by('name')
        return super(TagView, self).get_context_data(**kwargs)
class ArticleDetailView(DetailView):
    """Article page: Markdown-rendered body, comments, and sidebar data."""
    model = Article
    template_name = "blog/detail.html"
    context_object_name = "article"
    pk_url_kwarg = 'article_id'

    def get_object(self):
        # Render the stored Markdown body to HTML before handing the
        # article to the template.
        obj = super(ArticleDetailView, self).get_object()
        obj.body = markdown(obj.body, extras=['fenced-code-blocks'], )
        return obj

    # Related-article recommendation helpers (translated from the original
    # Chinese comment).
    def list_to_dic(self, resultlist):
        """Convert (id, title) rows into [{'id': ..., 'title': ...}, ...]."""
        object_list = []
        for obj in resultlist:
            dic = {}
            dic['id'] = obj[0]
            dic['title'] = obj[1]
            object_list.append(dic)
        return object_list

    def relate_article(self, article_id, tags):
        """Return up to 3 articles sharing the most tags with ``article_id``.

        NOTE(review): the SQL is assembled by string concatenation, so
        ``article_id`` and the tag ids must be trusted integers; prefer a
        parameterized query if these can come from user input.
        """
        in_where_str = "tag_id=" + str(tags[0])
        for i in range(1, len(tags)):
            in_where_str = in_where_str + " or tag_id=" + str(tags[i])
        out_where_str = "id!=" + str(article_id)
        sql = "select id,title from Blog_article as a,(select article_id,count(article_id) from Blog_article_tags where " + in_where_str + \
            " group by article_id order by count(article_id) desc limit 3) as tview where id=tview.article_id and " + out_where_str
        cursor = connection.cursor()
        cursor.execute(sql)
        resultlist = cursor.fetchall()
        # Bug fix: ``list_to_dic`` is a method; the original called it as a
        # bare name, which raises NameError at runtime.
        return self.list_to_dic(resultlist)

    def get_context_data(self, **kwargs):
        kwargs['comment_list'] = self.object.blogcomment_set.all()
        kwargs['hotarticle_list'] = Article.objects.all().order_by('-views')[:8]
        kwargs['tag_list'] = Tag.objects.all().order_by('name')
        kwargs['category_list'] = Category.objects.all().order_by('name')
        kwargs['form'] = BlogCommentForm()
        # kwargs['relative_article_list'] = self.relate_article('article_id','tag_list')
        return super(ArticleDetailView, self).get_context_data(**kwargs)
class ArchiveView(ListView):
    """Monthly archive: articles filtered by year/month from the URL."""
    template_name = "blog/index.html"
    context_object_name = "article_list"
    def get_queryset(self):
        # Year and month arrive as URL strings; coerce them to int.
        year = int(self.kwargs['year'])
        month = int(self.kwargs['month'])
        # Filter articles created in that year and month.
        article_list = Article.objects.filter(created_time__year=year, created_time__month=month)
        for article in article_list:
            article.body = markdown(article.body, extras=['fenced-code-blocks'], )
        return article_list
    def get_context_data(self, **kwargs):
        kwargs['tag_list'] = Tag.objects.all().order_by('name')
        return super(ArchiveView, self).get_context_data(**kwargs)
class CommentPostView(FormView):
    """Handle blog-comment form submissions for a given article."""
    form_class = BlogCommentForm
    template_name = 'blog/detail.html'
    def form_valid(self, form):
        """Attach the validated comment to its article and redirect back."""
        target_article = get_object_or_404(Article, pk=self.kwargs['article_id'])
        comment = form.save(commit=False)
        comment.article = target_article
        comment.save()
        self.success_url = target_article.get_absolute_url()
        return HttpResponseRedirect(self.success_url)
    def form_invalid(self, form):
        """Re-render the article page with the invalid form and its errors."""
        target_article = get_object_or_404(Article, pk=self.kwargs['article_id'])
        return render(self.request, 'blog/detail.html', {
            'form': form,
            'article': target_article,
            'comment_list': target_article.blogcomment_set.all(),
        })
|
kristofvanmoffaert/python-omniture
|
setup.py
|
Python
|
mit
| 1,204
| 0.001661
|
"""Packaging metadata for the ``omniture`` distribution."""
from setuptools import setup, find_packages

# Read __version__ out of the package without importing it (importing would
# require the runtime dependencies to be installed already).
exec(open('omniture/version.py').read())

setup(name='omniture',
      description='A wrapper for the Adobe Analytics (Omniture and SiteCatalyst) web analytics API.',
      long_description=open('README.md').read(),
      author='Stijn Debrouwere',
      author_email='stijn@stdout.be',
      url='http://stdbrouw.github.com/python-omniture/',
      download_url='http://www.github.com/stdbrouw/python-omniture/tarball/master',
      version=__version__,
      license='MIT',
      packages=find_packages(),
      keywords='data analytics api wrapper adobe omniture',
      install_requires=[
          'requests',
          'python-dateutil',
      ],
      classifiers=['Development Status :: 4 - Beta',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: MIT License',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python',
                   'Topic :: Scientific/Engineering :: Information Analysis',
                   'Programming Language :: Python :: 2.7',
                   'Programming Language :: Python :: 3'
                   ],
      )
|
evancich/apm_motor
|
modules/waf/playground/distnet/server/cgi-bin/upload.py
|
Python
|
gpl-3.0
| 1,298
| 0.023112
|
#! /usr/bin/env python
import os, sys, tempfile, shutil, hashlib, tarfile
import cgi, cgitb
cgitb.enable()
PKGDIR = os.environ.get('PKGDIR', os.path.abspath('../packages'))
# Upload a package to the package directory.
# It is meant to contain a list of tar packages:
#
# PKGDIR/pkgname/pkgver/common.tar
# PKGDIR/pkgname/pkgver/arch1.tar
# PKGDIR/pkgname/pkgver/arch2.tar
# ...
form = cgi.FieldStorage()
def getvalue(x):
    """Fetch a required CGI form field, emitting an HTTP 413 if missing.

    NOTE(review): after printing the error response this still returns the
    falsy value and the module-level code below keeps running with it
    (e.g. ``os.path.join(PKGDIR, None)`` would raise TypeError); consider
    exiting here instead.
    """
    v = form.getvalue(x)
    if not v:
        print("Status: 413\ncontent-type: text/plain\n\nmissing %s\n" % x)
    return v
# Module-level CGI flow: read the fields, then unpack the uploaded tarball
# into PKGDIR/<pkgname>/<pkgver>/.
# SECURITY(review): ``pkgname``/``pkgver`` are used in filesystem paths
# unchecked -- a value containing ``..`` escapes PKGDIR; sanitize them.
pkgname = getvalue('pkgname')
pkgver = getvalue('pkgver')
pkgdata = getvalue('pkgdata')
# pkghash = getvalue('pkghash') # TODO provide away to verify file hashes and signatures?
up = os.path.join(PKGDIR, pkgname)
dest = os.path.join(up, pkgver)
if os.path.exists(dest):
    print("Status: 409\ncontent-type: text/plain\n\nPackage %r already exists!\n" % dest)
else:
    if not os.path.isdir(up):
        os.makedirs(up)
    # Unpack into a fresh temp dir next to the destination, then rename
    # into place so readers never observe a half-written package.
    tmp = tempfile.mkdtemp(dir=up)
    try:
        tf = os.path.join(tmp, 'some_temporary_file')
        with open(tf, 'wb') as f:
            f.write(pkgdata)
        # SECURITY(review): extractall() on an uploaded tarball can write
        # outside ``tmp`` via ../ or absolute member names; sanitize the
        # members before extraction for untrusted uploads.
        with tarfile.open(tf) as f:
            f.extractall(tmp)
        os.remove(tf)
        os.rename(tmp, dest)
    finally:
        # cleanup (after a successful rename ``tmp`` is gone and rmtree
        # fails; the failure is deliberately swallowed)
        try:
            shutil.rmtree(tmp)
        except Exception:
            pass
    print('''Content-Type: text/plain\n\nok''')
|
rebelact/mailsync-app
|
mailsync/api/mailchimp.py
|
Python
|
mit
| 2,198
| 0.040491
|
import datetime
import logging
from mailsnake import MailSnake
from mailsync.models.customfield import CustomField
class MailChimp(object):
    """Thin wrapper around the MailChimp API via MailSnake (Python 2)."""

    def __init__(self, apikey):
        self.api_key = apikey
        self.provider = MailSnake(self.api_key)

    def test_connection(self):
        """Return True when the API key is valid and the service responds."""
        try:
            self.provider.ping()
        except Exception, err:
            logging.error(err)
            return False
        return True

    def get_list_custom_fields(self, listid):
        """Return CustomField objects for the merge vars of ``listid``.

        Any API failure is logged and an empty list is returned.
        """
        custom_fields = []
        try:
            list_custom_fields = self.provider.listMergeVars(apikey=self.api_key,
                id=listid)
            for custom_field in list_custom_fields:
                field = custom_field["name"]
                # Key is a slug of the display name; the tag is MailChimp's
                # merge-var tag.
                custom_fields.append(CustomField(field.replace(" ", "-").lower(), field, custom_field["tag"]))
        except Exception, err:
            logging.error(err)
            custom_fields = []
        return custom_fields

    def get_lists(self):
        """Return all lists as [{'client': '', 'lists': [{id, name}, ...]}]."""
        lists = []
        mailchimplists = self.provider.lists()
        for mailchimplist in mailchimplists["data"]:
            lists.append({
                "id": mailchimplist["id"],
                "name": mailchimplist["name"]
            })
        return [{"client": "", "lists": lists}]

    def prepare(self, users, header_row, column_keys):
        """Map each user row onto a {header: value} dict for batch subscribe.

        NOTE(review): datetime/date values are replaced with the *current*
        timestamp (``datetime.now()``) rather than the row's own value --
        looks unintended; confirm before relying on date fields.
        """
        prepared_users = []
        for user in users:
            prepared_user = {}
            for i, user_value in enumerate(user):
                if isinstance(user_value, datetime.datetime) or isinstance(user_value, datetime.date):
                    encoded_user_value = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
                else:
                    encoded_user_value = user_value
                prepared_user[header_row[i]] = encoded_user_value
            prepared_users.append(prepared_user)
        return prepared_users

    def sync_to(self, list_id, header_row, user_chunk, columns):
        """Batch-subscribe ``user_chunk`` to list ``list_id``.

        NOTE(review): returns True when the API reports errors and False on
        clean success or exception -- confirm the intended polarity with
        callers before changing anything.
        """
        try:
            logging.info("list_id: {0}, header_row: {1}, columns: {2}".format(list_id, header_row, columns))
            prepared_users = self.prepare(user_chunk, header_row, columns)
            logging.info("Users prepared: {0}".format(len(prepared_users)))
            result = self.provider.listBatchSubscribe(
                id=list_id,
                batch=prepared_users,
                update_existing=True, # !
                double_optin=False)
            logging.info(str(result))
            if result["errors"]:
                return True
            else:
                return False
        except Exception, err:
            logging.error(err)
            return False
|
kscottz/SkinnerBox
|
modules/CameraInterface.py
|
Python
|
mit
| 2,818
| 0.007807
|
import os
import io
import cv2
import cv
import picamera
import threading
import numpy as np
import time
class CameraInterface(threading.Thread):
    """Daemon thread that polls the Raspberry Pi camera, writes each frame
    to ``img_path`` and reports an inter-frame motion score to callbacks.
    """

    def __init__(self,img_path="/img/live.jpg"):
        super(CameraInterface, self).__init__()
        self.setDaemon(True)
        # set our path for ouput images
        self.img_path = img_path
        # our default size
        self.img_sz = (640,480)
        self.running = False
        # seconds to sleep between frames
        self.delay=0.1
        # images for activity calculations
        self._current_image = None
        self._last_image = None
        # our list of callbacks
        self._cvlist = []

    # allow the user to set a callback
    def set_motion_callback(self,cb):
        # cb is invoked with a scalar "amount of change" after every frame
        self._cvlist.append(cb)

    # stop the main thread
    def shutdown(self):
        self.running = False

    # do the activity calculation
    def _calculate_motion(self):
        """Score motion as the mean absolute difference between the new
        frame and an exponentially smoothed history, then run callbacks."""
        # if this our first call to this function
        if( self._last_image is None ):
            # convert the image to gray
            temp = cv2.cvtColor(self._current_image,cv2.cv.CV_BGR2GRAY)
            # set the last image to this image -- causes no motion
            # on first iteration
            self._last_image = temp
        # make the current image gray -- faster
        temp = cv2.cvtColor(self._current_image,cv2.cv.CV_BGR2GRAY)
        # get the diff of the images
        # NOTE(review): on the very first frame both operands are uint8, so
        # the subtraction wraps instead of giving signed differences; after
        # the first smoothing pass _last_image is float -- confirm intended.
        diff = self._last_image-temp
        # get the mean of absolute difference between images
        change = np.mean(np.abs(diff))
        # now filter the image, so we don't jump super quick
        self._last_image = (0.2*temp)+(self._last_image*0.8)
        # do our call backs
        for cb in self._cvlist:
            cb(change)

    def _get_image(self):
        """Capture one frame, decode it to a cv2 image, save it to disk."""
        # get a new image through a stream
        stream = io.BytesIO()
        # get the image out of the camera
        with picamera.PiCamera() as camera:
            camera.start_preview()
            # have the camera do the resizing onboard and save as png
            camera.capture(stream, format='png',resize=self.img_sz)
            # convert the png string from the camera to np
            data = np.fromstring(stream.getvalue(), dtype=np.uint8)
            # "Decode" the image from the array, preserving colour
            # convert the image to a cv2 image -- stripping compression
            self._current_image = cv2.imdecode(data, 1)
            # save the image to file
            cv2.imwrite(self.img_path,self._current_image)

    def run(self):
        """Thread main loop: capture, score motion, sleep, repeat."""
        self.running = True
        while self.running:
            # now the thread loop is easy
            # get an image
            self._get_image()
            # calculate motion
            self._calculate_motion()
            # hang out for a bit
            time.sleep(self.delay)
|
SatelliteQE/robottelo
|
tests/upgrades/test_user.py
|
Python
|
gpl-3.0
| 3,413
| 0
|
"""Test for User related Upgrade Scenario's
:Requirement: UpgradedSatellite
:CaseAutomation: NotAutomated
:CaseLevel: Acceptance
:CaseComponent: UsersRoles
:Assignee: sganar
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import pytest
class TestScenarioPositiveCreateSSHKeyInExistingUsers:
    """SSH Key can be created in existing user post upgrade
    :steps:
        1. From SuperAdmin create user with all the details preupgrade
           satellite version
        2. Upgrade Satellite to next/latest version
        3. Go to the user created in preupgrade satellite version
        4. Attempt to add SSH key in user
    :expectedresults: Satellite admin should be able to add SSH key in
        existing user post upgrade
    """

    @pytest.mark.pre_upgrade
    def test_pre_create_sshkey_in_existing_user(self):
        """Create User in preupgrade version
        :id: preupgrade-e4338daa-272a-42e3-be45-77e1caea607f
        :steps: From SuperAdmin create user with all the details preupgrade
            satellite version
        :expectedresults: The user should be created successfully
        """
        # Placeholder: scenario documented only (CaseAutomation: NotAutomated).

    @pytest.mark.post_upgrade
    def test_post_create_sshkey_in_existing_user(self):
        """SSH key can be added to existing user post upgrade
        :id: postupgrade-e4338daa-272a-42e3-be45-77e1caea607f
        :steps: Postupgrade, Add SSH key to the existing user
        :expectedresults: SSH Key should be added to the existing user
        """
        # Placeholder: scenario documented only (CaseAutomation: NotAutomated).
class TestScenarioPositiveExistingUserPasswordlessAccessToHost:
    """Existing user can password-less access to provisioned host
    :steps:
        1. In preupgrade satellite, From SuperAdmin create user with all the
           details
        2. Upgrade Satellite to next/latest satellite version
        3. Go to the user created in preupgrade satellite
        4. Add SSH key in that user
        5. Choose provisioning template you would use to provision the host
           in feature and add 'create_users' snippet in template
        6. Provision a host through the existing user
        7. Attempt to access the provisioned host through user
    :expectedresults: Existing User should be able to passwordless access to
        provisioned host
    """

    @pytest.mark.pre_upgrade
    def test_pre_existing_user_passwordless_access_to_host(self):
        """Create User in preupgrade version
        :id: preupgrade-d2d94447-5fc7-49cc-840e-06568d8a5141
        :steps: In preupgrade satellite, From SuperAdmin create user with all
            the required details
        :expectedresults: The user should be created successfully
        """
        # Placeholder: scenario documented only (CaseAutomation: NotAutomated).

    @pytest.mark.post_upgrade
    def test_post_existing_user_passwordless_access_to_host(self):
        """Existing user can passwordless access to provisioned host
        :id: postupgrade-d2d94447-5fc7-49cc-840e-06568d8a5141
        :steps:
            1. Go to the user created in preupgrade satellite
            2. Add SSH key in that user
            3. Choose provisioning template you would use to provision the host
               in feature and add 'create_users' snippet in template
            4. Provision a host through the existing user
            5. Attempt to access the provisioned host through user
        :expectedresults: Existing User should be able to passwordless access
            to provisioned host
        """
        # Placeholder: scenario documented only (CaseAutomation: NotAutomated).
|
eyeNsky/qgis-scripts
|
select-key-frames.py
|
Python
|
mit
| 2,286
| 0.013561
|
##[TBT-Tools]=group
##Input_Footprints=vector
##Image_IDs=field Input_Footprints
##Overlap_Threshold_0_to_1=number 0.6
from qgis.utils import *
from osgeo import ogr
from osgeo import osr
KEEP_TRESHOLD=Overlap_Threshold_0_to_1
IMAGE_IDS = Image_IDs
def calcIntersection(fpA, fpB):
    """Return (intersects, fraction) for two OGR geometries.

    ``fpA`` is repaired with ``Buffer(0)`` first (fixes self-intersecting
    rings).  The fraction is the intersection area divided by ``fpB``'s
    area.

    Returns:
        ``(False, 0)`` when the geometries do not intersect, otherwise
        ``(True, area(fpA ∩ fpB) / area(fpB))``.
    """
    fpA = fpA.Buffer(0)
    # Bug fix: the original evaluated Intersect() twice (an expensive
    # geometry predicate) and could fall through returning None; test once.
    if not fpA.Intersect(fpB):
        return False, 0
    overlap = fpA.Intersection(fpB).GetArea()
    return True, overlap / fpB.GetArea()
def getFPs(fpIn,IMAGE_IDS):
    '''SQLite of Footprints as input, selects key frames based on overlap threshold'''
    # NOTE(review): ``time`` and ``processing`` are not imported in this
    # script's visible header -- presumably injected by the QGIS processing
    # runtime (or via ``from qgis.utils import *``); confirm.
    fp = ogr.Open(fpIn,0)
    progress.setText(fpIn)
    fpLayer = fp.GetLayer(0) #assumes the footprints are the first layer
    newGeom = ogr.Geometry(type=ogr.wkbGeometryCollection)  # NOTE(review): unused
    numFps = fpLayer.GetFeatureCount()
    IMAGE_IDS = IMAGE_IDS.encode('utf-8') # str is imported from future, sets type to newstr. ogr does not recognize
    currFp = fpLayer.GetFeature(1) # get first geom to populate the keepers poly
    currFpGeom = currFp.geometry()
    keepGeom = ogr.Geometry(type=ogr.wkbGeometryCollection) # create a geom to hold keepers
    keepGeom.AddGeometry(currFpGeom) # add first fp
    keepList = [1,numFps] # list to hold the keepers ids, with first and last frame.
    keepers = 0
    # Walk features 2..numFps-1; keep a frame when it overlaps the running
    # keeper geometry by less than the threshold, or when the timestamp gap
    # to the next frame is large.
    for i in range(2,numFps):
        currFp = fpLayer.GetFeature(i)
        nextFp = fpLayer.GetFeature(i+1)
        thisImage = currFp.GetField(IMAGE_IDS)
        nextImage = nextFp.GetField(IMAGE_IDS)
        # The last 8 characters of the image id encode a timestamp.
        thisTime = int(thisImage[-8:])
        nextTime = int(nextImage[-8:])
        absDiff = abs(nextTime-thisTime)
        if absDiff > 30:
            # Large time gap: always keep this frame (new flight line?).
            keepList.append(i)
            continue
        a = keepGeom
        b = currFp.geometry()
        doesIntersect, percentIntersect = calcIntersection(a,b)
        if percentIntersect < KEEP_TRESHOLD:
            # Mostly-new coverage: keep it and grow the keeper geometry.
            keepGeom.AddGeometry(b)
            keepers += 1
            keepList.append(i)
    keepTxt = 'keeping %s of of %s images' %(keepers,numFps)
    progress.setInfo(keepTxt)
    time.sleep(1)
    # Select the kept features on the QGIS layer object.
    fpLayer = processing.getObject(fpIn)
    fpLayer.select(keepList)
    return keepList
# Script entry: run key-frame selection on the configured footprint layer.
fpIn = Input_Footprints
fps = getFPs(fpIn,IMAGE_IDS)
|
Perlence/wikigenre
|
wikigenre.py
|
Python
|
bsd-3-clause
| 6,223
| 0.000643
|
from __future__ import print_function
import logging
import re
import sys
from glob import iglob
from os.path import join, dirname, normpath
from gevent import monkey
from gevent import spawn, joinall
from gevent.event import AsyncResult
monkey.patch_socket()
monkey.patch_ssl()
import requests
from lxml import html
from mutagen import easyid3, flac, easymp4, oggvorbis, musepack
from wikiapi import WikiApi
logger = logging.getLogger(__name__)
# Pieces of the Wikipedia article URL assembled in get_genres().
URI_SCHEME = 'http'
ARTICLE_URI = 'wikipedia.org/wiki/'
# Memoises lookups across greenlets; waiters block on the AsyncResult.
GENRE_CACHE = {}  # {(artist, album): AsyncResult([genre1, genre2, ...])}
def titlecase(string):
    """Capitalize each whitespace-separated word, collapsing runs of whitespace."""
    words = [word.capitalize() for word in string.split()]
    return u' '.join(words)
def get_genres(query):
    """Search Wikipedia for *query* and scrape genre links from the article.

    Tries the haudio infobox first, then a generic infobox "Genre" row.
    Returns a list of genre strings (possibly empty); errors are logged
    and swallowed.
    """
    wiki = WikiApi()
    hits = wiki.find(query.encode('utf-8'))
    if not hits:
        return []
    try:
        url = '{0}://{1}.{2}{3}'.format(
            URI_SCHEME, wiki.options['locale'], ARTICLE_URI,
            hits[0].encode('utf-8'))
        page = html.fromstring(requests.get(url).content)
        haudio_genres = page.xpath('.'
                                   '//table[contains(@class, "haudio")]'
                                   '//td[@class="category"]'
                                   '/a'
                                   '/text()')
        if haudio_genres:
            return haudio_genres
        return page.xpath('.'
                          '//table[contains(@class, "infobox")]'
                          '//th'
                          '/a[text()="Genre"]'
                          '/..'
                          '/..'
                          '/td'
                          '/a'
                          '/text()')
    except Exception as e:
        logger.error('Error getting genres for %s: %s', query, repr(e))
    return []
def search_variants(artist, album):
    """Yield genre lists from increasingly generic Wikipedia searches."""
    if album:
        if artist:
            # Most specific: disambiguated album article.
            yield get_genres(u'%s (%s album)' % (album, artist))
        yield get_genres(u'%s (album)' % album)
        yield get_genres(album)
    if artist:
        # Least specific: fall back to the artist's own article.
        yield get_genres(artist)
def albumgenres(artist='', album=''):
    """Return genres for (artist, album), caching the lookup per key.

    Concurrent callers for the same key block on the shared AsyncResult,
    so each (artist, album) is only ever resolved once.
    """
    key = (artist, album)
    cached = GENRE_CACHE.get(key)
    if cached is not None:
        return cached.get()
    GENRE_CACHE[key] = pending = AsyncResult()
    found = []
    for genres in search_variants(artist, album):
        if genres:
            found = genres
            break
    pending.set(found)
    return pending.get()
def load_track(track):
    """Open *track* with the mutagen tag class matching its file extension.

    Raises ValueError for extensions with no known handler.
    """
    lowered = track.lower()
    if lowered.endswith('.mp3'):
        return easyid3.EasyID3(track)
    if lowered.endswith('.flac'):
        return flac.FLAC(track)
    if lowered.endswith(('.mp4', '.m4a')):
        return easymp4.EasyMP4(track)
    if lowered.endswith('.ogg'):
        return oggvorbis.OggVorbis(track)
    if lowered.endswith('.mpc'):
        return musepack.Musepack(track)
    raise ValueError("unhandled format '%s'" % track)
def wikigenre(track, force=False):
    """Tag *track* with genres scraped from Wikipedia.

    Skips tracks that already carry a genre unless *force* is true.
    Errors are logged and re-raised so the calling greenlet sees them.
    """
    track = normpath(track)
    try:
        audio = load_track(track)
        if audio.get('genre') is not None and not force:
            logger.info('Skipping %s', track)
            return
        artist = audio.get('artist', [None])[0]
        album = audio.get('album', [None])[0]
        # List comprehension (not map) so the truthiness test below also
        # works on Python 3, where map() returns a lazy, always-truthy object.
        genres = [titlecase(g) for g in albumgenres(artist, album)]
        if genres:
            audio['genre'] = genres
            audio.save()
            logger.info('Tagged %s', track)
        else:
            logger.warn('No genres found for %s', track)
    except Exception as e:
        logger.error('Error tagging %s: %s', track, repr(e))
        raise
def main():
    """Command-line entry point.

    Three modes: -q QUERY prints genres for "artist - album" pairs; PATH tags
    matching audio files in place; otherwise "artist - [album ...]" lines are
    read from stdin and their genres printed.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('path', metavar='PATH', nargs='?',
                        help="path to audio files, can contain wildcards")
    parser.add_argument('-q', '--query',
                        metavar='QUERY', nargs='?', default='',
                        help='fetch genres for given albums\n'
                        '[artist - ]album(; [artist - ]album)*')
    parser.add_argument('-f', '--force', action='store_true',
                        help='rewrite genres even if track already has them')
    args = parser.parse_args()
    query = args.query
    path = args.path
    force = args.force
    # Log to both stderr and a file next to this module.
    with open(join(dirname(__file__), 'wikigenre.log'), 'a') as log:
        handler = logging.StreamHandler()
        filehandler = logging.StreamHandler(log)
        formatter = logging.Formatter('%(asctime)s;%(levelname)s;%(message)s')
        handler.setFormatter(formatter)
        filehandler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.addHandler(filehandler)
        logger.setLevel('DEBUG')
        if query:
            # Query mode: "[artist - ]album" pairs separated by "; ".
            for artistalbum in query.split('; '):
                parts = artistalbum.split(' - ', 1)
                try:
                    artist, album = parts
                except ValueError:
                    artist, album = '', artistalbum
                print(artistalbum + ': ' +
                      '; '.join(map(titlecase, albumgenres(artist, album))))
        elif path is not None:
            # Tagging mode: one greenlet per file matched by the glob.
            logger.info('Starting')
            # Escape square brackets
            path = re.sub(r'([\[\]])', r'[\1]', path)
            joinall([spawn(wikigenre, track, force=force)
                     for track in iglob(path)])
            logger.info('Finished')
        else:
            # Read data from stdin
            # Sample input: "The Beatles - [Abbey Road #07] Here Comes the Sun"
            trackinfo = re.compile(
                r'(.+) - \[(.+?)(?: CD\d+)?(?: #\d+)?\]')
            lines = sys.stdin.read()
            greenlets = []
            for line in lines.splitlines():
                mo = trackinfo.match(line)
                if mo is None:
                    continue
                artist, album = mo.groups()
                greenlets.append(spawn(albumgenres, artist, album))
            joinall(greenlets)
            for greenlet in greenlets:
                print('; '.join(map(titlecase, greenlet.get())))
|
percyfal/bokeh
|
bokeh/layouts.py
|
Python
|
bsd-3-clause
| 19,180
| 0.00245
|
''' Functions for arranging bokeh Layout objects.
'''
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from .core.enums import Location, SizingMode
from .models.tools import ToolbarBox
from .models.plots import Plot
from .models.layouts import LayoutDOM, Row, Column, Spacer, WidgetBox
from .models.widgets import Widget
from .util._plot_arg_helpers import _convert_responsive
#-----------------------------------------------------------------------------
# Common helper functions
#-----------------------------------------------------------------------------
def _handle_children(*args, **kwargs):
children = kwargs.get('children')
# Set-up Children from args or kwargs
if len(args) > 0 and children is not None:
raise ValueError("'children' keyword cannot be used with positional arguments")
if not children:
if len(args) == 1 and isinstance(args[0], list):
children = args[0]
elif len(args) == 1 and isinstance(args[0], GridSpec):
children = args[0]
else:
children = list(args)
return children
def _verify_sizing_mode(sizing_mode):
    """Raise ValueError unless *sizing_mode* is a member of SizingMode."""
    if sizing_mode in SizingMode:
        return
    raise ValueError("Invalid value of sizing_mode: %s" % sizing_mode)
def row(*args, **kwargs):
    """ Arrange Bokeh LayoutDOM objects in a single horizontal row.

    Every child is forced to share one sizing_mode, which complex layouts
    require.

    Args:
        children (list of :class:`~bokeh.models.layouts.LayoutDOM` ): the
            instances to place in the row (Plot, Widget, WidgetBox, Row,
            Column, ToolbarBox, Spacer).
        sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ):
            how the items resize to fill the available space; default
            ``"fixed"``. See
            :attr:`~bokeh.models.layouts.LayoutDOM.sizing_mode`.
        responsive (``True``, ``False``): ``True`` sets ``sizing_mode`` to
            ``"width_ar"``, ``False`` to ``"fixed"``; overrides sizing_mode.

    Returns:
        Row: a row of LayoutDOM objects all with the same sizing_mode.

    Examples:

        >>> row([plot_1, plot_2])
        >>> row(children=[widget_box_1, plot_1], sizing_mode='stretch_both')
    """
    responsive = kwargs.pop('responsive', None)
    sizing_mode = kwargs.pop('sizing_mode', 'fixed')
    children = kwargs.pop('children', None)
    if responsive:
        sizing_mode = _convert_responsive(responsive)
    _verify_sizing_mode(sizing_mode)
    children = _handle_children(*args, children=children)
    validated = []
    for child in children:
        if not isinstance(child, LayoutDOM):
            raise ValueError(
                """Only LayoutDOM items can be inserted into a row.
        Tried to insert: %s of type %s""" % (child, type(child))
            )
        child.sizing_mode = sizing_mode
        validated.append(child)
    return Row(children=validated, sizing_mode=sizing_mode, **kwargs)
def column(*args, **kwargs):
    """ Arrange Bokeh LayoutDOM objects in a single vertical column.

    Every child is forced to share one sizing_mode, which complex layouts
    require.

    Args:
        children (list of :class:`~bokeh.models.layouts.LayoutDOM` ): the
            instances to place in the column (Plot, Widget, WidgetBox, Row,
            Column, ToolbarBox, Spacer).
        sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ):
            how the items resize to fill the available space; default
            ``"fixed"``. See
            :attr:`~bokeh.models.layouts.LayoutDOM.sizing_mode`.
        responsive (``True``, ``False``): ``True`` sets ``sizing_mode`` to
            ``"width_ar"``, ``False`` to ``"fixed"``; overrides sizing_mode.

    Returns:
        Column: a column of LayoutDOM objects all with the same sizing_mode.

    Examples:

        >>> column([plot_1, plot_2])
        >>> column(children=[widget_box_1, plot_1], sizing_mode='stretch_both')
    """
    responsive = kwargs.pop('responsive', None)
    sizing_mode = kwargs.pop('sizing_mode', 'fixed')
    children = kwargs.pop('children', None)
    if responsive:
        sizing_mode = _convert_responsive(responsive)
    _verify_sizing_mode(sizing_mode)
    children = _handle_children(*args, children=children)
    validated = []
    for child in children:
        if not isinstance(child, LayoutDOM):
            raise ValueError(
                """Only LayoutDOM items can be inserted into a column.
        Tried to insert: %s of type %s""" % (child, type(child))
            )
        child.sizing_mode = sizing_mode
        validated.append(child)
    return Column(children=validated, sizing_mode=sizing_mode, **kwargs)
def widgetbox(*args, **kwargs):
    """ Create a WidgetBox of Bokeh widgets.

    Every child is forced to share one sizing_mode, which complex layouts
    require.

    Args:
        children (list of :class:`~bokeh.models.widgets.widget.Widget` ): a
            list of widgets for the WidgetBox.
        sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ):
            how the items resize to fill the available space; default
            ``"fixed"``. See
            :attr:`~bokeh.models.layouts.LayoutDOM.sizing_mode`.
        responsive (``True``, ``False``): ``True`` sets ``sizing_mode`` to
            ``"width_ar"``, ``False`` to ``"fixed"``; overrides sizing_mode.

    Returns:
        WidgetBox: a WidgetBox of Widget instances all with the same sizing_mode.

    Examples:

        >>> widgetbox([button, select])
        >>> widgetbox(children=[slider], sizing_mode='scale_width')
    """
    responsive = kwargs.pop('responsive', None)
    sizing_mode = kwargs.pop('sizing_mode', 'fixed')
    children = kwargs.pop('children', None)
    if responsive:
        sizing_mode = _convert_responsive(responsive)
    _verify_sizing_mode(sizing_mode)
    children = _handle_children(*args, children=children)
    validated = []
    for child in children:
        if not isinstance(child, Widget):
            raise ValueError(
                """Only Widgets can be inserted into a WidgetBox.
        Tried to insert: %s of type %s""" % (child, type(child))
            )
        child.sizing_mode = sizing_mode
        validated.append(child)
    return WidgetBox(children=validated, sizing_mode=sizing_mode, **kwargs)
def layout(*args, **kwargs):
""" Create a grid-based arrangement of Bokeh Layout objects. Forces all objects to
have the same sizing mode, which is required for complex layouts to work. Returns a nested set
of Rows and Columns.
Args:
children (list of lists of :class:`~bokeh.models.layouts.LayoutDOM` ): A l
|
jdodds/pyrana
|
pyrana/players/pygameplayer.py
|
Python
|
bsd-3-clause
| 1,508
| 0.006631
|
import pygame.event
import pygame.mixer
import pygame.display
import threading
import time
from feather import Plugin
# pygame user-event id registered as the mixer's end-of-track event
# (see pygame.mixer.music.set_endevent in PyGamePlayer.pre_run).
ENDEVENT=42
class PyGamePlayer(Plugin):
    """Feather plugin that plays audio through pygame.mixer."""

    listeners = set(['songloaded', 'pause', 'skipsong', 'skipalbum'])
    messengers = set(['songstart', 'songpause', 'songend', 'songresume'])
    name = 'PyGamePlayer'

    def pre_run(self):
        """Initialise pygame and start the end-of-song watcher thread."""
        pygame.display.init()
        pygame.mixer.init()
        pygame.mixer.music.set_endevent(ENDEVENT)
        # Bound method already carries self; the original passed self twice
        # via args=(self,), which was redundant and confusing.
        t = threading.Thread(target=self._songend_bubble)
        t.daemon = True
        t.start()

    def _songend_bubble(self):
        """Poll for the mixer's end-of-track event and bubble it up as 'songend'."""
        while self.runnable:
            if pygame.event.get(ENDEVENT):
                self.send('songend')
            else:
                time.sleep(0.1)

    def songloaded(self, payload):
        """Load and play *payload*; announce 'songstart'."""
        try:
            pygame.mixer.music.load(payload)
        except Exception:
            # Best effort, matching the original bare except: an unloadable
            # file is ignored. TODO(review): narrow to pygame.error.
            pass
        pygame.mixer.music.play()
        self.playing = True
        self.send('songstart', payload)

    def pause(self, payload=None):
        """Toggle pause state, announcing 'songpause'/'songresume'."""
        if self.playing:
            pygame.mixer.music.pause()
            self.playing = False
            self.send('songpause')
        else:
            pygame.mixer.music.unpause()
            self.playing = True
            self.send('songresume')

    def skipsong(self, payload=None):
        """Stop playback; the watcher thread will emit 'songend'."""
        pygame.mixer.music.stop()

    def skipalbum(self, payload=None):
        """Stop playback; the watcher thread will emit 'songend'."""
        pygame.mixer.music.stop()
|
EmanueleCannizzaro/scons
|
test/scons-time/help/options.py
|
Python
|
mit
| 2,071
| 0.004346
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DE
|
ALINGS IN THE SOFTWARE.
#
__revision__ = "test/scons-time/help/options.py rel
|
_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that the 'help' subcommand and -h, -? and --help options print
the default help.
"""
import TestSCons_time

test = TestSCons_time.TestSCons_time()

# Default help text that every help-style invocation must contain.
expect = [
    'Usage: scons-time SUBCOMMAND [ARGUMENTS]\n',
    'Type "scons-time help SUBCOMMAND" for help on a specific subcommand.\n',
    'Available subcommands:\n',
    ' help Provides help\n',
]

# The 'help' subcommand and the -h/-?/--help options are all equivalent.
for invocation in ('help', '-h', '-?', '--help'):
    test.run(arguments=invocation)
    test.must_contain_all_lines(test.stdout(), expect)

test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Sorrop/py-graph-algorithms
|
traversal_tests.py
|
Python
|
mit
| 1,800
| 0
|
import graph
from depth_first_search import depth_first_search
from breadth_first_search import breadth_first_search

SEPARATOR = '=============================='


def _print_traversal(traversal):
    """Print each traversal edge as an (origin, destination) element pair."""
    for edge in traversal:
        print((edge.endPoints()[0].element(), edge.endPoints()[1].element()))


def _run_case(title, edges, start, directed=False):
    """Build a graph from *edges*, run BFS and DFS from *start*, and report."""
    if directed:
        G, _ = graph.create_graph(edges, True)
    else:
        G, _ = graph.create_graph(edges)
    start_vertex = G.get_vertex(start)
    breadth = breadth_first_search(G)
    breadth(G, start_vertex)
    depth = depth_first_search(G)
    depth(G, start_vertex)
    print(title)
    print(edges)
    print(' ')
    print(SEPARATOR)
    print('Breadth First traversal of G')
    _print_traversal(breadth.breadth_traversal)
    print(SEPARATOR)
    print('Depth First traversal of G')
    _print_traversal(depth.depth_traversal)


_run_case('Undirected Case.',
          [(0, 1), (0, 2), (0, 3), (1, 4), (1, 5), (2, 6),
           (2, 7), (3, 8), (3, 9), (4, 10), (4, 11)],
          0)
print(' ')
print(SEPARATOR)
print(SEPARATOR)
print(' ')
_run_case('Directed Case.',
          [('a', 'b'), ('c', 'a'), ('c', 'b'),
           ('d', 'c'), ('d', 'e'), ('b', 'e')],
          'a', directed=True)
|
FCP-INDI/nipype
|
nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py
|
Python
|
bsd-3-clause
| 2,377
| 0.027766
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..developer import JistBrainMgdmSegmentation
def test_JistBrainMgdmSegmentation_inputs():
    # Auto-generated check: every (trait, metadata) pair below must match the
    # interface's input_spec exactly. Regenerate with tools/checkspecs.py
    # rather than editing by hand.
    input_map = dict(args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    inAdjust=dict(argstr='--inAdjust %s',
    ),
    inAtlas=dict(argstr='--inAtlas %s',
    ),
    inCompute=dict(argstr='--inCompute %s',
    ),
    inCurvature=dict(argstr='--inCurvature %f',
    ),
    inData=dict(argstr='--inData %f',
    ),
    inFLAIR=dict(argstr='--inFLAIR %s',
    ),
    inMP2RAGE=dict(argstr='--inMP2RAGE %s',
    ),
    inMP2RAGE2=dict(argstr='--inMP2RAGE2 %s',
    ),
    inMPRAGE=dict(argstr='--inMPRAGE %s',
    ),
    inMax=dict(argstr='--inMax %d',
    ),
    inMin=dict(argstr='--inMin %f',
    ),
    inOutput=dict(argstr='--inOutput %s',
    ),
    inPV=dict(argstr='--inPV %s',
    ),
    inPosterior=dict(argstr='--inPosterior %f',
    ),
    inSteps=dict(argstr='--inSteps %d',
    ),
    inTopology=dict(argstr='--inTopology %s',
    ),
    null=dict(argstr='--null %s',
    ),
    outLevelset=dict(argstr='--outLevelset %s',
    hash_files=False,
    ),
    outPosterior2=dict(argstr='--outPosterior2 %s',
    hash_files=False,
    ),
    outPosterior3=dict(argstr='--outPosterior3 %s',
    hash_files=False,
    ),
    outSegmented=dict(argstr='--outSegmented %s',
    hash_files=False,
    ),
    terminal_output=dict(nohash=True,
    ),
    xDefaultMem=dict(argstr='-xDefaultMem %d',
    ),
    xMaxProcess=dict(argstr='-xMaxProcess %d',
    usedefault=True,
    ),
    xPrefExt=dict(argstr='--xPrefExt %s',
    ),
    )
    inputs = JistBrainMgdmSegmentation.input_spec()

    # Nose-style generator test: one assertion per trait/metadata pair.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_JistBrainMgdmSegmentation_outputs():
    # Auto-generated check of the interface's output_spec traits; the
    # interface name had been garbled across lines and is reconstructed here.
    output_map = dict(outLevelset=dict(),
    outPosterior2=dict(),
    outPosterior3=dict(),
    outSegmented=dict(),
    )
    outputs = JistBrainMgdmSegmentation.output_spec()

    # Nose-style generator test: one assertion per trait/metadata pair.
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
schinckel/django-countries
|
django_countries/conf.py
|
Python
|
mit
| 2,772
| 0
|
import django.conf
class AppSettings(object):
    """
    A holder for app-specific default settings that allows overriding via
    the project's settings.
    """

    def __getattribute__(self, attr):
        # Only ALL-UPPERCASE names are treated as settings; everything else
        # goes straight to normal attribute lookup.
        if attr != attr.upper():
            return super(AppSettings, self).__getattribute__(attr)
        try:
            # Project-level Django settings win over the defaults here.
            return getattr(django.conf.settings, attr)
        except AttributeError:
            return super(AppSettings, self).__getattribute__(attr)
class Settings(AppSettings):
    """Default django-countries settings; any UPPERCASE attribute may be
    overridden in the project's Django settings (see AppSettings)."""

    COUNTRIES_FLAG_URL = 'flags/{code}.gif'
    """
    The URL for a flag.

    It can either be relative to the static url, or an absolute url.

    The location is parsed using Python's string formatting and is passed the
    following arguments:

        * code
        * code_upper

    For example: ``COUNTRIES_FLAG_URL = 'flags/16x10/{code_upper}.png'``
    """

    COUNTRIES_COMMON_NAMES = True
    """
    Whether to use the common names for some countries, as opposed to the
    official ISO name.

    Some examples:
        "Bolivia" instead of "Bolivia, Plurinational State of"
        "South Korea" instead of "Korea (the Republic of)"
        "Taiwan" instead of "Taiwan (Province of China)"
    """

    COUNTRIES_OVERRIDE = {}
    """
    A dictionary of names to override the defaults.

    Note that you will need to handle translation of customised country names.

    Setting a country's name to ``None`` will exclude it from the country list.
    For example::

        COUNTRIES_OVERRIDE = {
            'NZ': _('Middle Earth'),
            'AU': None
        }
    """

    COUNTRIES_ONLY = {}
    """
    Similar to COUNTRIES_OVERRIDE
    A dictionary of names to include in selection.

    Note that you will need to handle translation of customised country names.

    For example::

        COUNTRIES_ONLY = {
            'NZ': _('Middle Earth'),
            'AU': _('Desert'),
        }
    """

    COUNTRIES_FIRST = []
    """
    Countries matching the country codes provided in this list will be shown
    first in the countries list (in the order specified) before all the
    alphanumerically sorted countries.
    """

    COUNTRIES_FIRST_REPEAT = False
    """
    Countries listed in :attr:`COUNTRIES_FIRST` will be repeated again in the
    alphanumerically sorted list if set to ``True``.
    """

    COUNTRIES_FIRST_BREAK = None
    """
    Countries listed in :attr:`COUNTRIES_FIRST` will be followed by a null
    choice with this title (if set) before all the alphanumerically sorted
    countries.
    """

    COUNTRIES_FIRST_SORT = False
    """
    Countries listed in :attr:`COUNTRIES_FIRST` will be alphanumerically
    sorted based on their translated name instead of relying on their
    order in :attr:`COUNTRIES_FIRST`.
    """


settings = Settings()
|
eckardm/archivematica
|
src/MCPClient/lib/clientScripts/archivematicaCreateMETSRightsDspaceMDRef.py
|
Python
|
agpl-3.0
| 3,954
| 0.002529
|
#!/usr/bin/env python2
#
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage archivematicaClientScript
# @author Joseph Perry <joseph@artefactual.com>
import os
import sys
import lxml.etree as etree
# dashboard
from main.models import File
import archivematicaXMLNamesSpace as ns
# archivematicaCommon
from sharedVariablesAcrossModules import sharedVariablesAcrossModules
def createMDRefDMDSec(LABEL, itemdirectoryPath, directoryPathSTR):
    """Return an <mdRef> element whose XPTR targets every rightsMD id found
    in the METS file at *itemdirectoryPath*.

    LABEL            -- value for the mdRef LABEL attribute.
    itemdirectoryPath -- path of the mets.xml to scan for rightsMD ids.
    directoryPathSTR -- value for the xlink:href attribute.
    """
    XPTR = "xpointer(id("
    tree = etree.parse(itemdirectoryPath)
    root = tree.getroot()
    # Collect every rightsMD ID; removed an unused leftover string literal
    # ('<amdSec ...>') that was assigned but never referenced.
    for item in root.findall("{http://www.loc.gov/METS/}amdSec/{http://www.loc.gov/METS/}rightsMD"):
        XPTR = "%s %s" % (XPTR, item.get("ID"))
    # Turn "xpointer(id( a b" into "xpointer(id('a b'))".
    XPTR = XPTR.replace(" ", "'", 1) + "'))"
    mdRef = etree.Element(ns.metsBNS + "mdRef")
    mdRef.set("LABEL", LABEL)
    mdRef.set(ns.xlinkBNS + "href", directoryPathSTR)
    mdRef.set("MDTYPE", "OTHER")
    mdRef.set("OTHERMDTYPE", "METSRIGHTS")
    mdRef.set("LOCTYPE", "OTHER")
    mdRef.set("OTHERLOCTYPE", "SYSTEM")
    mdRef.set("XPTR", XPTR)
    return mdRef
def archivematicaCreateMETSRightsDspaceMDRef(fileUUID, filePath, transferUUID, itemdirectoryPath):
ret = []
try:
print fileUUID, filePath
# Find the mets file. May find none.
path = "%SIPDirectory%{}/mets.xml".format(os.path.dirname(filePath))
try:
mets = File.objects.get(currentlocation=path, transfer_id=transferUUID)
except File.DoesNotExist:
pass
else:
metsFileUUID = mets.uuid
metsLoc = mets.currentlocation.replace("%SIPDirectory%", "", 1)
m
|
etsLocation = os.path.join(os.path.dirname(itemdirectoryPath), "mets.xml")
L
|
ABEL = "mets.xml-%s" % (metsFileUUID)
ret.append(createMDRefDMDSec(LABEL, metsLocation, metsLoc))
base = os.path.dirname(os.path.dirname(itemdirectoryPath))
base2 = os.path.dirname(os.path.dirname(filePath))
for dir in os.listdir(base):
fullDir = os.path.join(base, dir)
fullDir2 = os.path.join(base2, dir)
print fullDir
if dir.startswith("ITEM"):
print "continue"
continue
if not os.path.isdir(fullDir):
continue
path = "%SIPDirectory%{}/mets.xml".format(fullDir2)
try:
f = File.objects.get(currentlocation=path, transfer_id=transferUUID)
except File.DoesNotExist:
pass
else:
metsFileUUID = f.uuid
metsLoc = f.currentlocation.replace("%SIPDirectory%", "", 1)
metsLocation = os.path.join(fullDir, "mets.xml")
print metsLocation
LABEL = "mets.xml-" + metsFileUUID
ret.append(createMDRefDMDSec(LABEL, metsLocation, metsLoc))
except Exception as inst:
print >>sys.stderr, "Error creating mets dspace mdref", fileUUID, filePath
print >>sys.stderr, type(inst), inst.args
sharedVariablesAcrossModules.globalErrorCount +=1
return ret
|
nextgis-extra/tests
|
lib_gdal/gcore/pam.py
|
Python
|
gpl-2.0
| 18,275
| 0.007442
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id: pam.py 33793 2016-03-26 13:02:07Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test functioning of the PAM metadata support.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <warmerdam@pobox.com>
# Copyright (c) 2009-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import shutil
import stat
sys.path.append( '../pymod' )
import gdaltest
from osgeo import gdal
###############################################################################
# Check that we can read PAM metadata for existing PNM file.
def pam_1():
    # Read PAM metadata (default domain, xml:test domain) from an existing
    # PNM file, saving the previous GDAL_PAM_ENABLED setting for restoration.
    gdaltest.pam_setting = gdal.GetConfigOption( 'GDAL_PAM_ENABLED', "NULL" )
    gdal.SetConfigOption( 'GDAL_PAM_ENABLED', 'YES' )

    ds = gdal.Open( "data/byte.pnm" )

    base_md = ds.GetMetadata()
    if len(base_md) != 2 or base_md['other'] != 'red' \
       or base_md['key'] != 'value':
        gdaltest.post_reason( 'Default domain metadata missing' )
        return 'fail'

    xml_md = ds.GetMetadata( 'xml:test' )

    if len(xml_md) != 1:
        gdaltest.post_reason( 'xml:test metadata missing' )
        return 'fail'

    # isinstance instead of the old type(...) != type(['abc']) comparison.
    if not isinstance(xml_md, list):
        gdaltest.post_reason( 'xml:test metadata not returned as list.' )
        return 'fail'

    expected_xml = """<?xml version="2.0"?>
<TestXML>Value</TestXML>
"""

    if xml_md[0] != expected_xml:
        gdaltest.post_reason( 'xml does not match' )
        print(xml_md)
        return 'fail'

    return 'success'
###############################################################################
# Verify that we can write XML to a new file.
def pam_2():
    # Write metadata, xml:test content and a nodata value to a new PNM file;
    # PAM persists them in the .aux.xml sidecar (verified by pam_3).
    driver = gdal.GetDriverByName( 'PNM' )
    ds = driver.Create( 'tmp/pam.pnm', 10, 10 )
    band = ds.GetRasterBand( 1 )

    band.SetMetadata( { 'other' : 'red', 'key' : 'value' } )

    expected_xml = """<?xml version="2.0"?>
<TestXML>Value</TestXML>
"""

    band.SetMetadata( [ expected_xml ], 'xml:test' )

    band.SetNoDataValue( 100 )

    ds = None

    return 'success'
###############################################################################
# Check that we can read PAM metadata for existing PNM file.
def pam_3():
    # Re-open the file written by pam_2() and confirm that metadata, the
    # xml:test domain and the nodata value all round-trip through PAM, then
    # delete the nodata value and verify the deletion also persists.
    ds = gdal.Open( "tmp/pam.pnm" )

    band = ds.GetRasterBand(1)
    base_md = band.GetMetadata()
    if len(base_md) != 2 or base_md['other'] != 'red' \
       or base_md['key'] != 'value':
        gdaltest.post_reason( 'Default domain metadata missing' )
        return 'fail'

    xml_md = band.GetMetadata( 'xml:test' )

    if len(xml_md) != 1:
        gdaltest.post_reason( 'xml:test metadata missing' )
        return 'fail'

    if type(xml_md) != type(['abc']):
        gdaltest.post_reason( 'xml:test metadata not returned as list.' )
        return 'fail'

    expected_xml = """<?xml version="2.0"?>
<TestXML>Value</TestXML>
"""

    if xml_md[0] != expected_xml:
        gdaltest.post_reason( 'xml does not match' )
        print(xml_md)
        return 'fail'

    if band.GetNoDataValue() != 100:
        gdaltest.post_reason( 'nodata not saved via pam' )
        return 'fail'

    ds = None
    ds = gdal.Open('tmp/pam.pnm', gdal.GA_Update)
    if ds.GetRasterBand(1).DeleteNoDataValue() != 0:
        gdaltest.post_reason('fail')
        return 'fail'
    ds = None

    ds = gdal.Open('tmp/pam.pnm')
    if ds.GetRasterBand(1).GetNoDataValue() is not None:
        gdaltest.post_reason('got nodata value whereas none was expected')
        return 'fail'

    return 'success'
###############################################################################
# Check that PAM binary encoded nodata values work properly.
#
def pam_4():
    # Copy the test dataset into tmp/ so the statistics PAM writes to the
    # .aux.xml never touch the master test data.
    for name in ('mfftest.hdr.aux.xml', 'mfftest.hdr', 'mfftest.r00'):
        shutil.copyfile('data/' + name, 'tmp/' + name)

    ds = gdal.Open('tmp/mfftest.hdr')
    stats = ds.GetRasterBand(1).GetStatistics(0, 1)

    # With the binary-encoded nodata honoured, min/max must be 0 and 4.
    if stats[0] != 0 or stats[1] != 4:
        gdaltest.post_reason('Got wrong min/max, likely nodata not working?')
        print(stats)
        return 'fail'

    return 'success'
###############################################################################
# Verify that .aux files that don't match the configuration of the
# dependent file are not utilized. (#2471)
#
def pam_5():
    # A mismatched .aux file must not appear in the dataset's file list (#2471).
    ds = gdal.Open('data/sasha.tif')
    filelist = ds.GetFileList()
    ds = None

    if len(filelist) == 1:
        return 'success'

    print(filelist)
    gdaltest.post_reason('did not get expected file list.')
    return 'fail'
###############################################################################
# Verify we can read nodata values from .aux files (#2505)
#
def pam_6():
    # Nodata must be readable from a pre-existing .aux file (#2505) without
    # GDAL creating a new .aux.xml alongside it.
    ds = gdal.Open('data/f2r23.tif')
    if ds.GetRasterBand(1).GetNoDataValue() != 0:
        gdaltest.post_reason('did not get expected .aux sourced nodata.')
        return 'fail'
    ds = None

    if os.path.exists('data/f2r23.tif.aux.xml'):
        gdaltest.post_reason('did not expect .aux.xml to be created.')
        return 'fail'

    return 'success'
###############################################################################
# Verify we can create overviews on PNG with PAM disabled (#3693)
#
def pam_7():
    # Overviews must still be buildable on PNG with PAM disabled (#3693);
    # they land in a .ovr file instead of the .aux.xml.
    gdal.SetConfigOption('GDAL_PAM_ENABLED', 'NO')

    shutil.copyfile('data/stefan_full_rgba.png', 'tmp/stefan_full_rgba.png')

    ds = gdal.Open('tmp/stefan_full_rgba.png')
    ds.BuildOverviews('NEAR', [2])
    ds = None

    ds = gdal.Open('tmp/stefan_full_rgba.png')
    ovr_count = ds.GetRasterBand(1).GetOverviewCount()
    ds = None

    os.remove('tmp/stefan_full_rgba.png')
    os.remove('tmp/stefan_full_rgba.png.ovr')

    return 'success' if ovr_count == 1 else 'fail'
###############################################################################
# Test that Band.SetDescription() goes through PAM (#3780)
#
def pam_8():
    # Band.SetDescription() must persist through PAM (#3780).
    gdal.SetConfigOption('GDAL_PAM_ENABLED', 'YES')

    ds = gdal.GetDriverByName('GTiff').Create('/vsimem/pam_8.tif', 1, 1, 1)
    ds.GetRasterBand(1).SetDescription('foo')
    ds = None

    ds = gdal.Open('/vsimem/pam_8.tif')
    desc = ds.GetRasterBand(1).GetDescription()
    ds = None

    gdal.GetDriverByName('GTiff').Delete('/vsimem/pam_8.tif')

    if desc == 'foo':
        return 'success'
    print(desc)
    return 'fail'
###############################################################################
# Test that we can retrieve projection from xml:ESRI domain
#
def pam_9():
ds = gdal.GetDriverByName('GTiff').Create('/vsimem/pam_9.tif', 1, 1, 1)
ds = None
f = gdal.VSIFOpenL('/vsimem/pam_9.tif.aux.xml', 'wb')
content = """<PAMDa
|
threeaims/browserstep
|
browserstep/__init__.py
|
Python
|
mit
| 110
| 0
|
# -*- coding: utf-8 -*-
__author__ = '
|
James Gardner'
__email__ = 'james@pythonweb.org
|
'
__version__ = '0.1.0'
|
darneymartin/ChartIT
|
src/View/Server.py
|
Python
|
mit
| 2,318
| 0.002157
|
from flask import Flask, render_template, session, redirect, url_for, escape, request
from Model.Gateway.AuthenticationGateway import AuthenticationGateway
from Controller.API.ServerController import ServerController
from Controller.API.ChartController import ChartController
from Controller.API.DataController import DataController
from Controller.API.DashboardController import DashboardController
from Controller.API.UserController import UserController
from Controller.View.ChartViewController import ChartViewController
from Controller.View.ServerViewController import ServerViewController
from Controller.View.DashboardViewController import DashboardViewController
from Controller.View.UserViewController import UserViewController
class Server(object):
app = Flask(__name__)
# set the secret key. keep this really secret:
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
app.register_blueprint(ServerController)
app.register_blueprint(ChartController)
app.register_blueprint(DataController)
app.register_blueprint(DashboardController)
app.register_blueprint(UserController)
app.register_blueprint(ChartViewController)
app.register_blueprint(ServerViewController)
app.register_blueprint(DashboardViewController)
app.register_blueprint(UserViewController)
@app.route('/')
def index():
if 'username' not in session:
return redirect(url_for('login'))
return render_template('index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if re
|
quest.method == 'POST':
username = request.form['username']
password = request.form['password']
#Validate Credentials
result = AuthenticationGateway().authenticate(username,passw
|
ord)
if result is "true":
session['username'] = username
return redirect(url_for('index'))
return render_template('login.html')
@app.route('/logout')
def logout():
# remove the username from the session if it's there
session.pop('username', None)
return redirect(url_for('index'))
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
def __init__(self):
self.app.run(debug=True)
|
samedder/azure-cli
|
src/azure-cli-core/azure/cli/core/tests/test_cloud.py
|
Python
|
mit
| 10,518
| 0.002377
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import tempfile
import unittest
import mock
import multiprocessing
from azure.cli.core.cloud import (Cloud,
CloudEndpoints,
CloudSuffixes,
add_cloud,
get_cloud,
get_clouds,
get_custom_clouds,
remove_cloud,
get_active_cloud_name,
init_known_clouds,
AZURE_PUBLIC_CLOUD,
KNOWN_CLOUDS,
CloudEndpointNotSetException)
from azure.cli.core._config import get_config_parser
from azure.cli.core._profile import Profile
from azure.cli.core.util import CLIError
def _helper_get_clouds(_):
""" Helper method for multiprocessing.Pool.map func that uses throwaway arg """
get_clouds()
class TestCloud(unittest.TestCase):
@mock.patch('azure.cli.core._profile.CLOUD', Cloud('AzureCloud'))
def test_endpoint_none(self):
with self.assertRaises(CloudEndpointNotSetException):
profile = Profile()
profile.get_login_credentials()
@mock.patch('azure.cli.core.cloud.get_custom_clouds', lambda: [])
def test_add_get_delete_custom_cloud(self):
endpoint_rm = 'http://management.contoso.com'
suffix_storage = 'core.contoso.com'
endpoints = CloudEndpoints(resource_manager=endpoint_rm)
suffixes = CloudSuffixes(storage_endpoint=suffix_storage)
c = Cloud('MyOwnCloud', endpoints=endpoints, suffixes=suffixes)
with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]) as\
config_file:
with mock.patch('azure.cli.core.cloud.get_custom_clouds', lambda: []):
add_cloud(c)
config = get_config_parser()
config.read(config_file)
self.assertTrue(c.name in config.sections())
self.assertEqual(config.get(c.name, 'endpoint_resource_manager'), endpoint_rm)
self.assertEqual(config.get(c.name, 'suffix_storage_endpoint'), suffix_storage)
custom_clouds = get_custom_clouds()
self.assertEqual(len(custom_clouds), 1)
self.assertEqual(custom_clouds[0].name, c.name)
self.assertEqual(custom_clouds[0].endpoints.resource_manager,
c.endpoints.resource_manager)
self.assertEqual(custom_clouds[0].suffixes.storage_endpoint,
c.suffixes.storage_endpoint)
with mock.patch('azure.cli.core.cloud._get_cloud', lambda _: c):
remove_cloud(c.name)
custom_clouds = get_custom_clouds()
self.assertEqual(len(custom_clouds), 0)
def test_add_get_cloud_with_profile(self):
endpoint_rm = 'http://management.contoso.com'
endpoints = CloudEndpoints(resource_manager=endpoint_rm)
profile = '2017-03-09-profile'
c = Cloud('MyOwnCloud', endpoints=endpoints, profile=profile)
with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]) as\
config_file:
add_cloud(c)
config = get_config_parser()
config.read(config_file)
self.assertTrue(c.name in config.sections())
self.assertEqual(config.get(c.name, 'endpoint_resource_manager'), endpoint_rm)
self.assertEqual(config.get(c.name, 'profile'), profile)
custom_clouds = get_custom_clouds()
self.assertEqual(len(custom_clouds), 1)
self.assertEqual(custom_clouds[0].name, c.name)
self.assertEqual(custom_clouds[0].endpoints.resource_manager,
c.endpoints.resource_manager)
self.assertEqual(custom_clouds[0].profile,
c.profile)
def test_add_get_cloud_with_invalid_profile(self):
# Cloud has profile that doesn't exist so an exception should be raised
profile = 'none-existent-profile'
c = Cloud('MyOwnCloud', profile=profile)
with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]) as\
config_file:
add_cloud(c)
config = get_config_parser()
config.read(config_file)
self.assertTrue(c.name in config.sections())
self.assertEqual(config.get(c.name, 'profile'), profile)
with self.assertRaises(CLIError):
get_custom_clouds()
def test_get_default_latest_profile(self):
with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]):
clouds = get_clouds()
for c in clouds:
self.assertEqual(c.profile, 'latest')
def test_custom_cloud_management_endpoint_set(self):
# We have set management endpoint so don't override it
endpoint_rm = 'http://management.contoso.com'
endpoint_mgmt = 'http://management.core.contoso.com'
endpoints = CloudEndpoints(resource_manager=endpoint_rm, management=endpoint_mgmt)
profile = '2017-03-09-profile'
c = Cloud('MyOwnCloud', endpoints=endpoints, profile=profile)
with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]):
add_cloud(c)
custom_clouds = get_custom_clouds()
self.assertEqual(len(custom_clouds), 1)
self.assertEqual(custom_clouds[0].endpoints.resource_manager,
c.endpoints.resource_manager)
# CLI logic should keep our set management endpoint
self.assertEqual(custom_clouds[0].endpoints.management,
c.endpoints.management)
def test_custom_cloud_no_management_endpoint_set(self):
# Use ARM 'resource manager' endpoint as 'management' (old ASM) endpoint if only ARM endpoint is set
endpoint_rm = 'http://management.contoso.com'
endpoints = CloudEndpoints(resource_manager=endpoint_rm)
profile = '2017-03-09-profile'
c = Cloud('MyOwnCloud', endpoints=endpoints, profile=profile)
with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]):
add_cloud(c)
custom_clouds = get_custom_clouds()
self.assertEqual(len(custom_clouds), 1)
self.assertEqual(custom_clouds[0].endpoints.resource_manager,
c.endpoints.resource_manager)
# CLI logic should add management endpoint to equal resource_manager as we didn't set it
self.assertEqual(custom_clouds[0].endpoints.management,
c.endpoints.resource_manager)
def test_get_active_cloud_name_default(self):
expect
|
ed = AZURE_PUBLIC_CLOUD.name
actual = get_active_cloud_name()
self.assertEqual(expected, actual)
def test_known_cloud_missing_endpoint(self):
# New endpoints in cloud config should be sav
|
ed in config for the known clouds
with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]) as\
config_file:
# Save the clouds to config to get started
init_known_clouds()
cloud = get_cloud(AZURE_PUBLIC_CLOUD.name)
self.assertEqual(cloud.endpoints.batch_resource_id,
AZURE_PUBLIC_CLOUD.endpoints.batch_resource_id)
# Remove an endpoint from the cloud config (leaving other config values as is)
config = get_config_parser()
config.read(config_file)
config.remove_option(AZURE_PUBLIC_CLOUD.name, 'endpoint_batch_resourc
|
bswartz/cinder
|
cinder/tests/unit/api/contrib/test_qos_specs_manage.py
|
Python
|
apache-2.0
| 33,465
| 0
|
# Copyright 2013 eBay Inc.
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
import webob
from cinder.api.contrib import qos_specs_manage
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_notifier
def stub_qos_specs(id):
res = dict(name='qos_specs_' + str(id))
res.update(dict(consumer='back-end'))
res.update(dict(id=str(id)))
specs = {"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
res.update(dict(specs=specs))
return objects.QualityOfServiceSpecs(**res)
def stub_qos_associates(id):
return [{
'association_type': 'volume_type',
'name': 'FakeVolTypeName',
'id': fake.VOLUME_TYPE_ID}]
def return_qos_specs_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
return [
stub_qos_specs(fake.QOS_SPEC_ID),
stub_qos_specs(fake.QOS_SPEC2_ID),
stub_qos_specs(fake.QOS_SPEC3_ID),
]
def return_qos_specs_get_qos_specs(context, id):
if id == fake.WILL_NOT_BE_FOUND_ID:
raise exception.QoSSpecsNotFound(specs_id=id)
return stub_qos_specs(id)
def return_qos_specs_delete(context, id, force):
if id == fake.WILL_NOT_BE_FOUND_ID:
raise exception.QoSSpecsNotFound(specs_id=id)
elif id == fake.IN_USE_ID:
raise exception.QoSSpecsInUse(specs_id=id)
pass
def return_qos_specs_delete_keys(context, id, keys):
if id == fake.WILL_NOT_BE_FOUND_ID:
raise exception.QoSSpecsNotFound(specs_id=id)
if 'foo' in keys:
raise exception.QoSSpecsKeyNotFound(specs_id=id,
specs_key='foo')
def return_qos_specs_update(context, id, specs):
if id == fake.WILL_NOT_BE_FOUND_ID:
raise exception.QoSSpecsNotFound(specs_id=id)
elif id == fake.INVALID_ID:
raise exception.InvalidQoSSpecs(reason=id)
elif id == fake.UPDATE_FAILED_ID:
raise exception.QoSSpecsUpdateFailed(specs_id=id,
qos_specs=specs)
pass
def return_qos_specs_create(context, name, specs):
if name == 'qos_spec_%s' % fake.ALREADY_EXISTS_ID:
raise exception.QoSSpecsExists(specs_id=name)
elif name == 'qos_spec_%s' % fake.ACTION_FAILED_ID:
raise exception.QoSSpecsCreateFailed(name=id, qos_specs=specs)
elif name == 'qos_spec_%s' % fake.INVALID_ID:
raise exception.InvalidQoSSpecs(reason=name)
return objects.QualityOfServiceSpecs(name=name,
specs=specs,
consumer='back-end',
id=fake.QOS_SPEC_ID)
def return_get_qos_associations(context, id):
if id == fake.WILL_NOT_BE_FOUND_ID:
raise exception.QoSSpecsNotFound(specs_id=id)
elif id == fake.RAISE_ID:
raise exception.CinderException()
return stub_qos_associates(id)
def return_associate_qos_specs(context, id, type_id):
if id == fake.WILL_N
|
OT_BE_FOUND_ID:
raise exception.QoSSpecsNotFound(specs_id=id)
el
|
if id == fake.ACTION_FAILED_ID:
raise exception.QoSSpecsAssociateFailed(specs_id=id,
type_id=type_id)
elif id == fake.ACTION2_FAILED_ID:
raise exception.QoSSpecsDisassociateFailed(specs_id=id,
type_id=type_id)
if type_id == fake.WILL_NOT_BE_FOUND_ID:
raise exception.VolumeTypeNotFound(
volume_type_id=type_id)
pass
def return_disassociate_all(context, id):
if id == fake.WILL_NOT_BE_FOUND_ID:
raise exception.QoSSpecsNotFound(specs_id=id)
elif id == fake.ACTION2_FAILED_ID:
raise exception.QoSSpecsDisassociateFailed(specs_id=id,
type_id=None)
@ddt.ddt
class QoSSpecManageApiTest(test.TestCase):
def _create_qos_specs(self, name, values=None):
"""Create a transfer object."""
if values:
specs = dict(name=name, qos_specs=values)
else:
specs = {'name': name,
'consumer': 'back-end',
'specs': {
'key1': 'value1',
'key2': 'value2'}}
return db.qos_specs_create(self.ctxt, specs)['id']
def setUp(self):
super(QoSSpecManageApiTest, self).setUp()
self.flags(host='fake')
self.controller = qos_specs_manage.QoSSpecsController()
self.ctxt = context.RequestContext(user_id=fake.USER_ID,
project_id=fake.PROJECT_ID,
is_admin=True)
self.user_ctxt = context.RequestContext(
fake.USER_ID, fake.PROJECT_ID, auth_token=True)
self.qos_id1 = self._create_qos_specs("Qos_test_1")
self.qos_id2 = self._create_qos_specs("Qos_test_2")
self.qos_id3 = self._create_qos_specs("Qos_test_3")
self.qos_id4 = self._create_qos_specs("Qos_test_4")
@mock.patch('cinder.volume.qos_specs.get_all_specs',
side_effect=return_qos_specs_get_all)
def test_index(self, mock_get_all_specs):
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID)
res = self.controller.index(req)
self.assertEqual(3, len(res['qos_specs']))
names = set()
for item in res['qos_specs']:
self.assertEqual('value1', item['specs']['key1'])
names.add(item['name'])
expected_names = ['qos_specs_%s' % fake.QOS_SPEC_ID,
'qos_specs_%s' % fake.QOS_SPEC2_ID,
'qos_specs_%s' % fake.QOS_SPEC3_ID]
self.assertEqual(set(expected_names), names)
def test_index_with_limit(self):
url = '/v2/%s/qos-specs?limit=2' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
res = self.controller.index(req)
self.assertEqual(2, len(res['qos_specs']))
self.assertEqual(self.qos_id4, res['qos_specs'][0]['id'])
self.assertEqual(self.qos_id3, res['qos_specs'][1]['id'])
expect_next_link = ('http://localhost/v2/%s/qos-specs?limit'
'=2&marker=%s') % (
fake.PROJECT_ID, res['qos_specs'][1]['id'])
self.assertEqual(expect_next_link, res['qos_specs_links'][0]['href'])
def test_index_with_offset(self):
url = '/v2/%s/qos-specs?offset=1' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
res = self.controller.index(req)
self.assertEqual(3, len(res['qos_specs']))
def test_index_with_offset_out_of_range(self):
url = '/v2/%s/qos-specs?offset=356576877698707' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index,
req)
def test_index_with_limit_and_offset(self):
url = '/v2/%s/qos-specs?limit=2&offset=1' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
res = self.controller.index(req)
self.assertEqual(2, len(res['qos_spe
|
andrewbird/wader
|
plugins/devices/huawei_k4505.py
|
Python
|
gpl-2.0
| 3,211
| 0.001558
|
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2011 Vodafone España, S.A.
# Author: Andrew Bird
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from twisted.internet import defer, reactor
from twisted.internet.task import deferLater
from wader.common import consts
from core.hardware.base import build_band_d
|
ict
from core.hardware.huawei import (HuaweiWCDMADevicePlugin,
HuaweiWCDMACustomizer,
HuaweiWCDMAWrapper,
HUAWEI_BAND_DICT)
class HuaweiK4505Wrapper(HuaweiWCDMAWrapper):
"""
|
:class:`~core.hardware.huawei.HuaweiWCDMAWrapper` for the K4505
"""
def enable_radio(self, enable):
"""
Enables the radio according to ``enable``
It will not enable it if it's already enabled and viceversa
"""
def check_if_necessary(status):
if (status == 1 and enable) or (status == 0 and not enable):
return defer.succeed('OK')
d = super(HuaweiK4505Wrapper, self).enable_radio(enable)
d.addCallback(lambda x: deferLater(reactor, 5, lambda: x))
return d
d = self.get_radio_status()
d.addCallback(check_if_necessary)
return d
def send_ussd(self, ussd):
return self._send_ussd_ucs2_mode(ussd)
class HuaweiK4505Customizer(HuaweiWCDMACustomizer):
"""
:class:`~core.hardware.huawei.HuaweiWCDMACustomizer` for the K4505
"""
wrapper_klass = HuaweiK4505Wrapper
# GSM/GPRS/EDGE 850/900/1800/1900 MHz
# HSDPA/UMTS 2100/900 MHz
band_dict = build_band_dict(
HUAWEI_BAND_DICT,
[consts.MM_NETWORK_BAND_ANY,
consts.MM_NETWORK_BAND_G850,
consts.MM_NETWORK_BAND_EGSM,
consts.MM_NETWORK_BAND_DCS,
consts.MM_NETWORK_BAND_PCS,
# consts.MM_NETWORK_BAND_U900, # waiting for docs
consts.MM_NETWORK_BAND_U2100])
class HuaweiK4505(HuaweiWCDMADevicePlugin):
"""
:class:`~core.plugin.DevicePlugin` for Huawei's Vodafone K4505
"""
name = "Huawei K4505"
version = "0.1"
author = u"Andrew Bird"
custom = HuaweiK4505Customizer()
quirks = {
'needs_enable_before_pin_check': True,
}
__remote_name__ = "K4505"
__properties__ = {
'ID_VENDOR_ID': [0x12d1],
'ID_MODEL_ID': [0x1464],
}
conntype = consts.WADER_CONNTYPE_USB
huaweik4505 = HuaweiK4505()
|
lhfei/spark-in-action
|
spark-2.x/src/main/python/ml/count_vectorizer_example.py
|
Python
|
apache-2.0
| 1,595
| 0.000627
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You u
|
nder the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WIT
|
HOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark.sql import SparkSession
# $example on$
from pyspark.ml.feature import CountVectorizer
# $example off$
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("CountVectorizerExample")\
.getOrCreate()
# $example on$
# Input data: Each row is a bag of words with a ID.
df = spark.createDataFrame([
(0, "a b c".split(" ")),
(1, "a b b c a".split(" "))
], ["id", "words"])
# fit a CountVectorizerModel from the corpus.
cv = CountVectorizer(inputCol="words", outputCol="features", vocabSize=3, minDF=2.0)
model = cv.fit(df)
result = model.transform(df)
result.show(truncate=False)
# $example off$
spark.stop()
|
ygol/odoo
|
addons/l10n_fi/models/__init__.py
|
Python
|
agpl-3.0
| 156
| 0
|
# -*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . imp
|
ort account_journal
from . import account_move
|
|
barentsen/dave
|
blsCode/yash_bls.py
|
Python
|
mit
| 17,107
| 0.0318
|
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import clean_and_search
from ktransit import FitTransit
from multiprocessing import Pool
from scipy import ndimage
import glob, timeit, sys
import time as pythonTime
# OPTIONS
doPlot = True
plotOption = 'save'
secondary = True
resultsFilename = '/Users/Yash/Desktop/results.txt'
figureSaveLocation = '/Users/Yash/Desktop/'
# -------- PLOTTING OPTIONS -------- #
import matplotlib
def plateau(array, threshold):
"""Find plateaus in an array, i.e continuous regions that exceed threshold
Given an array of numbers, return a 2d array such that
out[:,0] marks the indices where the array crosses threshold from
below, and out[:,1] marks the next time the array crosses that
same threshold from below.
Inputs:
array (1d numpy array)
threshold (float or array) If threshold is a single number, any point
above that value is above threshold. If it's an array,
it must have the same length as the first argument, and
an array[i] > threshold[i] to be included as a plateau
Returns:
Numpy 2d array with 2 columns.
Notes:
To find the length of the plateaus, use
out[:,1] - out[:,0]
To find the length of the largest plateau, use
np.max(out[:,1] - out[:,0])
The algorithm fails if a value is exactly equal to the threshold.
To guard against this, we add a very small amount to threshold
to ensure floating point arithmetic prevents two numbers being
exactly equal."""
arr = array.astype(np.float32)
arr = arr - threshold + 1e-12
arrPlus = np.roll(arr, 1)
#Location of changes from -ve to +ve (or vice versa)
#Last point is bogus , so we calculate it by hand
sgnChange = arr*arrPlus
#Roll around can't compute sign change for zeroth elt.
sgnChange[0] = +1
if arr[0] > 0:
sgnChange[0] = -1
loc = np.where(sgnChange < 0)[0]
if np.fmod( len(loc), 2) != 0:
loc.resize( (len(loc)+1))
loc[-1] = len(arr)
return loc
def outlierRemoval(time, flux):
fluxDetrended = medianDetrend(flux, 3)
out1 = plateau(fluxDetrended, 5 * np.std(fluxDetrended))
out2 = plateau(-fluxDetrended, 5 * np.std(fluxDetrended))
if out1 == [] and out2 == []:
singleOutlierIndices = []
else:
outliers = np.append(out1, out2).reshape(-1,2)
# Only want groups of one outlier, since > 1 may be transit points
singleOutlierIndices = np.sort(outliers[(outliers[:,1] - outliers[:,0] == 1)][:,0])
# Check periodicity of outliers, with PRECISION of 0.0205 days
# 0.0205 days = 29.52 minutes = ~length of long cadence
precision = 0.0205
outlierTimes = time[singleOutlierIndices]
diffs = [outlierTimes[i+1] - outlierTimes[i] for i in range(0, len(outlierTimes)-1)]
diffs = [round(d, 5) for d in diffs]
if len(singleOutlierIndices) >= 4:
if len(set(diffs)) == len(diffs):
possibleTimes = np.array([])
else:
period = max(set(diffs), key = diffs.count) # period = most common difference
epoch = outlierTimes[diffs.index(period)]
possibleTimes = np.arange(epoch, outlierTimes[-1] + 0.5*period, period)
notOutliers = []
for i in range(len(outlierTimes)):
if np.any((abs(possibleTimes - outlierTimes[i]) < precision)):
notOutliers.append(i)
singleOutlierIndices = np.delete(singleOutlierIndices, notOutliers)
elif len(singleOutlierIndices) == 3:
if abs(diffs[0] - diffs[1]) < precision:
singleOutlierIndices = []
# Uncomment to see how the plotting algorithm worked for a lightcurve
# ----------------------------- PLOTTING ----------------------------- #
# plt.subplot(311)
# plt.scatter(time, flux, marker = '.', s = 1, color = 'k', alpha = 1)
# plt.scatter(time[singleOutlierIndices], flux[singleOutlierIndices],
# s = 30, marker = 'o', facecolors = 'none', edgecolors = 'r')
# plt.title('Original')
# plt.subplot(312)
# plt.scatter(time, fluxDetrended, marker = '.', s = 1, color = 'k', alpha = 1)
# plt.scatter(time[singleOutlierIndices], fluxDetrended[singleOutlierIndices],
# s = 30, marker = 'o', facecolors = 'none', edgecolors = 'r')
# x1, x2, y1, y2 = plt.axis()
# plt.hlines([-5*np.std(fluxDetrended), 5*np.std(fluxDetrended)], x1, x2,
# color = 'b', linestyles = 'dashed')
# plt.axis([x1, x2, y1, y2])
# plt.title('Detrended')
# plt.subplot(313)
# plt.scatter(np.delete(time, singleOutlierIndices), np.delete(flux, singleOutlierIndices),
# marker = '.', s = 1, color = 'k', alpha = 1)
# plt.title('Outliers removed: ' + str(len(singleOutlierIndices)))
# plt.show()
# -------------------------------------------------------------------- #
return np.delete(time, singleOutlierIndices), np.delete(flux, singleOutlierIndices)
def medianDetrend(flux, binWidth):
halfNumPoints = binWidth // 2
medians = []
for i in range(len(flux)):
if i < halfNumPoints:
medians.append(np.median(flux[:i+halfNumPoints+1]))
elif i > len(flux) - halfNumPoints - 1:
medians.append(np.median(flux[i-halfNumPoints:]))
else:
medians.append(np.median(flux[i-halfNumPoints : i+halfNumPoints+1]))
return flux - medians
def getPhase(time, flux, period, epoch, centerPhase = 0):
"""Get the phase of a lightcurve.
How it works using an example where epoch = 2, period = 3:
1. Subtract the epoch from all times [1, 2, 3, 4, 5, 6, 7] to get
[-1, 0, 1, 2, 3, 4, 5] then divide by the period [3] to get all time
values in phase values which gets you [-0.3, 0, 0.3, 0.6, 1, 1.3, 1.6]
2. Subtract the PHASE NUMBER (floor function) from each PHASE (date1)
which gets you [0.7, 0, 0.3, 0.6, 0, 0.3, 0.6]
3. Sort all the adjusted phases to get [0, 0, 0.3, 0.3, 0.6, 0.6, 0.7]
THERE WILL BE negative values in the beginning here, just not in this example
since no ex. time value divided by the period left a decimal less than 0.25
4. Sort the flux values in the same way the phases were sorted
Inputs:
time Time values of data. (IN DAYS)
flux Flux values of data.
period Period of transit.
epoch Epoch of transit.
centerPhase Which phase should be at the center.
Returns:
q1 Phase values. (IN HOURS)
f1 Flux values for each phase.
"""
epoch += centerPhase * period
date1 = (time - epoch) / period + 0.5
phi1 = ((date1) - np.floor(date1)) - 0.5
q1 = np.sort(phi1) * period * 24.
f1 = flux[np.argsort(phi1)]
return q1, f1
def fitModel(time, flux, guessDict, freeParPlanet, ferr = 0):
if not np.all(ferr): ferr = np.ones_like(flux)*1.E-5
freeParStar = ['rho']
# Make the fitting object according to guess dictionary
fitT = FitTransit()
fitT.add_guess_star(ld1 = 0, ld2 = 0)
fitT.add_guess_planet(period = guessDict['period'],
T0 = guessDict['T0'])
fitT.add_data(time = time, flux = flux, ferr = ferr)
fitT.free_parameters(freeParStar, freeParPlanet)
fitT.do_fit()
return fitT
def do_bls_and_fit(time, flux, min_period, max_period):
S = clean_and_search.Search(time, flux + 1, np.ones_like(flux)*1.E-5)
S.do_bls2(min_period = min_period,
max_period = max_period,
min_duration_hours = 1.5,
max_duration_hours = 6.,
freq_step = 1.E-4,
doplot = False,
norm = False)
guessDict = {'period': S.periods[0],
'T0': S.epoch}
freeParPlanet = ['period', 'T0', 'rprs']
fitT = fitModel(time, flux, guessDict, freeParPlanet)
# Readability of output data
period = fitT.fitresultplanets['pnum0']['period']
epoch = fitT.fitresultplanets['pnum0']['T0']
k = fitT.fitresultplanets['pnum0']['rprs']
rho = fitT.fitresultstellar['rho']
duration = computeTransitDurati
|
on(period, rho, k)
if not duration:
duration = S.duration * 24
# Calculating transit depth significance
## fitT.transitmodel sometimes has a NaN value
sigma = computePointSigma(time, flux, fitT.transitmodel, period, epoch, duration)
depth = k ** 2
significance = depth / sigma
phase = getPhase(time, flux, period, epoch)[0]
nTransitPoints = np.sum((-duration * 0.5 < phase) & (phase < duration * 0.5))
S
|
NR = significance * nTransitPoints**0.5
return S
|
google/timesketch
|
timesketch/migrations/versions/654121a84a33_.py
|
Python
|
apache-2.0
| 3,278
| 0.014033
|
"""Add Graph and GraphCache models
Revision ID: 654121a84a33
Revises: fc7bc5c66c63
Create Date: 2020-11-16 21:02:36.249989
"""
# revision identifiers, used by Alembic.
revision = '654121a84a33'
down_revision = 'fc7bc5c66c63'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('graph',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('sketch_id', sa.Integer(), nullable=True),
sa.Column('name', sa.UnicodeText(), nullable=True),
sa.Column('description', sa.UnicodeText(), nullable=True),
sa.Column('graph_config', sa.UnicodeText(), nullable=True),
sa.Column('graph_elements', sa.UnicodeText(), nullable=True),
sa.Column('graph_thumbnail', sa.UnicodeText(), nullable=True),
sa.Column('num_nodes', sa.Integer(), nullable=True),
sa.Column('num_edges', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['sketch_id'], ['sketch.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('graphcache',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTi
|
me(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('sketch_id', sa.Integer(), nullable=True),
sa.Column('graph_plugin', sa.UnicodeText
|
(), nullable=True),
sa.Column('graph_config', sa.UnicodeText(), nullable=True),
sa.Column('graph_elements', sa.UnicodeText(), nullable=True),
sa.Column('num_nodes', sa.Integer(), nullable=True),
sa.Column('num_edges', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['sketch_id'], ['sketch.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('graph_comment',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('comment', sa.UnicodeText(), nullable=True),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['parent_id'], ['graph.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('graph_label',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('label', sa.Unicode(length=255), nullable=True),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['parent_id'], ['graph.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('graph_label')
op.drop_table('graph_comment')
op.drop_table('graphcache')
op.drop_table('graph')
# ### end Alembic commands ###
|
arsenetar/dupeguru
|
core/pe/cache_sqlite.py
|
Python
|
gpl-3.0
| 5,161
| 0.001744
|
# Copyright 2016 Virgil Dupras
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import os
import os.path as op
import logging
import sqlite3 as sqlite
from .cach
|
e import string_to_colors, colors_to_string
class SqliteCache:
    """A class to cache picture blocks in a sqlite backend.

    Maps picture paths to serialized block strings. Integer keys are also
    accepted by ``__getitem__`` and are interpreted as sqlite rowids (see
    ``get_id``), which skips the path index lookup.
    """

    def __init__(self, db=":memory:", readonly=False):
        # readonly is not used in the sqlite version of the cache
        self.dbname = db
        self.con = None
        self._create_con()

    def __contains__(self, key):
        # key is a picture path
        sql = "select count(*) from pictures where path = ?"
        result = self.con.execute(sql, [key]).fetchall()
        return result[0][0] > 0

    def __delitem__(self, key):
        if key not in self:
            raise KeyError(key)
        sql = "delete from pictures where path = ?"
        self.con.execute(sql, [key])

    # Optimized
    def __getitem__(self, key):
        # int keys are rowids (see get_id()); anything else is a path.
        if isinstance(key, int):
            sql = "select blocks from pictures where rowid = ?"
        else:
            sql = "select blocks from pictures where path = ?"
        result = self.con.execute(sql, [key]).fetchone()
        if result:
            result = string_to_colors(result[0])
            return result
        else:
            raise KeyError(key)

    def __iter__(self):
        # Iterate over cached picture paths.
        sql = "select path from pictures"
        result = self.con.execute(sql)
        return (row[0] for row in result)

    def __len__(self):
        sql = "select count(*) from pictures"
        result = self.con.execute(sql).fetchall()
        return result[0][0]

    def __setitem__(self, path_str, blocks):
        blocks = colors_to_string(blocks)
        # Record the file's mtime so purge_outdated() can detect staleness;
        # 0 marks entries whose file was missing at insert time.
        if op.exists(path_str):
            mtime = int(os.stat(path_str).st_mtime)
        else:
            mtime = 0
        if path_str in self:
            sql = "update pictures set blocks = ?, mtime = ? where path = ?"
        else:
            sql = "insert into pictures(blocks,mtime,path) values(?,?,?)"
        try:
            self.con.execute(sql, [blocks, mtime, path_str])
        except sqlite.OperationalError:
            logging.warning("Picture cache could not set value for key %r", path_str)
        except sqlite.DatabaseError as e:
            logging.warning("DatabaseError while setting value for key %r: %s", path_str, str(e))

    def _create_con(self, second_try=False):
        """Open the sqlite connection, (re)creating the schema if needed.

        On a corrupted database the file is deleted and creation is retried
        once; a second failure is re-raised.
        """
        def create_tables():
            logging.debug("Creating picture cache tables.")
            self.con.execute("drop table if exists pictures")
            self.con.execute("drop index if exists idx_path")
            self.con.execute("create table pictures(path TEXT, mtime INTEGER, blocks TEXT)")
            self.con.execute("create index idx_path on pictures (path)")

        self.con = sqlite.connect(self.dbname, isolation_level=None)
        try:
            # Probe the expected schema without fetching any rows.
            self.con.execute("select path, mtime, blocks from pictures where 1=2")
        except sqlite.OperationalError:  # new db
            create_tables()
        except sqlite.DatabaseError as e:  # corrupted db
            if second_try:
                raise  # Something really strange is happening
            logging.warning("Could not create picture cache because of an error: %s", str(e))
            self.con.close()
            os.remove(self.dbname)
            self._create_con(second_try=True)

    def clear(self):
        """Drop every cached entry and start from a fresh database."""
        self.close()
        if self.dbname != ":memory:":
            os.remove(self.dbname)
        self._create_con()

    def close(self):
        """Close the underlying connection; safe to call more than once."""
        if self.con is not None:
            self.con.close()
        self.con = None

    def filter(self, func):
        """Delete all entries whose path does not satisfy ``func``."""
        to_delete = [key for key in self if not func(key)]
        for key in to_delete:
            del self[key]

    def get_id(self, path):
        """Return the sqlite rowid for ``path``; raises ValueError if absent."""
        sql = "select rowid from pictures where path = ?"
        result = self.con.execute(sql, [path]).fetchone()
        if result:
            return result[0]
        else:
            raise ValueError(path)

    def get_multiple(self, rowids):
        """Yield ``(rowid, blocks)`` pairs for the given rowids (lazily)."""
        # rowids are integers we generated ourselves, so interpolation is safe.
        sql = "select rowid, blocks from pictures where rowid in (%s)" % ",".join(map(str, rowids))
        cur = self.con.execute(sql)
        return ((rowid, string_to_colors(blocks)) for rowid, blocks in cur)

    def purge_outdated(self):
        """Go through the cache and purge outdated records.

        A record is outdated if the picture doesn't exist or if its mtime is greater than the one in
        the db.
        """
        todelete = []
        sql = "select rowid, path, mtime from pictures"
        cur = self.con.execute(sql)
        for rowid, path_str, mtime in cur:
            if mtime and op.exists(path_str):
                picture_mtime = os.stat(path_str).st_mtime
                if int(picture_mtime) <= mtime:
                    # not outdated
                    continue
            todelete.append(rowid)
        if todelete:
            sql = "delete from pictures where rowid in (%s)" % ",".join(map(str, todelete))
            self.con.execute(sql)
|
any1m1c/ipc20161
|
lista2/ipc_lista2.16.py
|
Python
|
apache-2.0
| 824
| 0.01699
|
#EQUIPE 2
#Nahan Trindade Passos - 1615310021
#Ana Beatriz Frota - 1615310027
#
#
#
#
#
#
import math

# Read the coefficients of the quadratic equation ax2+bx+c from the user and
# report its real roots via Bhaskara's formula, if any exist.
# (Reconstructed: the original source was corrupted mid-line.)
print("Digite os termos da equacao ax2+bx+c")
a = float(input("Digite o valor de A:\n"))
if a == 0:
    # With a == 0 the expression is linear, not quadratic.
    print("Nao e uma equacao de segundo grau")
else:
    b = float(input("Valor de B:\n"))
    c = float(input("Valor de C:\n"))
    # Discriminant decides how many real roots exist.
    delta = (math.pow(b, 2) - (4*a*c))
    if delta < 0:
        print("A equacao nao possui raizes reais")
    elif delta == 0:
        raiz = ((-1)*b + math.sqrt(delta))/(2*a)
        print("A equacao possui apenas uma raiz", raiz)
    else:
        raiz1 = ((-1)*b + math.sqrt(delta))/(2*a)
        raiz2 = ((-1)*b - math.sqrt(delta))/(2*a)
        print("A equacao possui duas raizes")
        print("Primeira raiz:", raiz1)
        print("Segunda raiz:", raiz2)
|
googleapis/python-dialogflow
|
google/cloud/dialogflow_v2/services/session_entity_types/transports/grpc.py
|
Python
|
apache-2.0
| 18,035
| 0.002384
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.dialogflow_v2.types import session_entity_type
from google.cloud.dialogflow_v2.types import (
session_entity_type as gcd_session_entity_type,
)
from google.protobuf import empty_pb2 # type: ignore
from .base import SessionEntityTypesTransport, DEFAULT_CLIENT_INFO
class SessionEntityTypesGrpcTransport(SessionEntityTypesTransport):
"""gRPC backend transport for SessionEntityTypes.
Service for managing
[SessionEntityTypes][google.cloud.dialogflow.v2.SessionEntityType].
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "dialogflow.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_fo
|
r_mtls and not ssl_
|
channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
|
jairideout/qiime2
|
qiime2/sdk/tests/test_artifact.py
|
Python
|
bsd-3-clause
| 17,709
| 0
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import os
import tempfile
import unittest
import uuid
import qiime2.core.type
from qiime2.sdk import Artifact
from qiime2.sdk.result import ResultMetadata
import qiime2.core.archive as archive
from qiime2.core.testing.type import IntSequence1, FourInts, Mapping
from qiime2.core.testing.util import get_dummy_plugin, ArchiveTestingMixin
class TestArtifact(unittest.TestCase, ArchiveTestingMixin):
    def setUp(self):
        """Per-test setup: verify the dummy plugin, make a scratch dir."""
        # Ignore the returned dummy plugin object, just run this to verify the
        # plugin exists as the tests rely on it being loaded.
        get_dummy_plugin()
        # TODO standardize temporary directories created by QIIME 2
        self.test_dir = tempfile.TemporaryDirectory(prefix='qiime2-test-temp-')
        self.provenance_capture = archive.ImportProvenanceCapture()
    def tearDown(self):
        """Remove the per-test scratch directory."""
        self.test_dir.cleanup()
    def test_private_constructor(self):
        """Calling Artifact() directly must fail, pointing at Artifact.load."""
        with self.assertRaisesRegex(
                NotImplementedError,
                'Artifact constructor.*private.*Artifact.load'):
            Artifact()
# Note on testing strategy below: many of the tests for `_from_view` and
# `load` are similar, with the exception that when `load`ing, the
# artifact's UUID is known so more specific assertions can be performed.
# While these tests appear somewhat redundant, they are important because
# they exercise the same operations on Artifact objects constructed from
# different sources, whose codepaths have very different internal behavior.
# This internal behavior could be tested explicitly but it is safer to test
# the public API behavior (e.g. as a user would interact with the object)
# in case the internals change.
    def test_from_view(self):
        """_from_view yields a typed artifact with a UUID and a stable view."""
        artifact = Artifact._from_view(FourInts, [-1, 42, 0, 43], list,
                                       self.provenance_capture)
        self.assertEqual(artifact.type, FourInts)
        # We don't know what the UUID is because it's generated within
        # Artifact._from_view.
        self.assertIsInstance(artifact.uuid, uuid.UUID)
        self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
        # Can produce same view if called again.
        self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
    def test_from_view_different_type_with_multiple_view_types(self):
        """_from_view artifacts support repeated list and Counter views."""
        artifact = Artifact._from_view(IntSequence1, [42, 42, 43, -999, 42],
                                       list, self.provenance_capture)
        self.assertEqual(artifact.type, IntSequence1)
        self.assertIsInstance(artifact.uuid, uuid.UUID)
        # Each view type can be produced, and repeatedly.
        self.assertEqual(artifact.view(list),
                         [42, 42, 43, -999, 42])
        self.assertEqual(artifact.view(list),
                         [42, 42, 43, -999, 42])
        self.assertEqual(artifact.view(collections.Counter),
                         collections.Counter({42: 3, 43: 1, -999: 1}))
        self.assertEqual(artifact.view(collections.Counter),
                         collections.Counter({42: 3, 43: 1, -999: 1}))
    def test_from_view_and_save(self):
        """Saving a _from_view artifact writes a complete .qza archive."""
        fp = os.path.join(self.test_dir.name, 'artifact.qza')
        # Using four-ints data layout because it has multiple files, some of
        # which are in a nested directory.
        artifact = Artifact._from_view(FourInts, [-1, 42, 0, 43], list,
                                       self.provenance_capture)
        artifact.save(fp)
        root_dir = str(artifact.uuid)
        expected = {
            'VERSION',
            'metadata.yaml',
            'data/file1.txt',
            'data/file2.txt',
            'data/nested/file3.txt',
            'data/nested/file4.txt',
            'provenance/metadata.yaml',
            'provenance/VERSION',
            'provenance/action/action.yaml'
        }
        self.assertArchiveMembers(fp, root_dir, expected)
    def test_load(self):
        """Artifact.load round-trips type, UUID, and view data."""
        saved_artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
        fp = os.path.join(self.test_dir.name, 'artifact.qza')
        saved_artifact.save(fp)
        artifact = Artifact.load(fp)
        self.assertEqual(artifact.type, FourInts)
        self.assertEqual(artifact.uuid, saved_artifact.uuid)
        # The view is reproducible across repeated calls.
        self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
        self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
    def test_load_different_type_with_multiple_view_types(self):
        """Loaded IntSequence1 artifacts support list and Counter views."""
        saved_artifact = Artifact.import_data(IntSequence1,
                                              [42, 42, 43, -999, 42])
        fp = os.path.join(self.test_dir.name, 'artifact.qza')
        saved_artifact.save(fp)
        artifact = Artifact.load(fp)
        self.assertEqual(artifact.type, IntSequence1)
        self.assertEqual(artifact.uuid, saved_artifact.uuid)
        # Each view type can be produced, and repeatedly.
        self.assertEqual(artifact.view(list),
                         [42, 42, 43, -999, 42])
        self.assertEqual(artifact.view(list),
                         [42, 42, 43, -999, 42])
        self.assertEqual(artifact.view(collections.Counter),
                         collections.Counter({42: 3, 43: 1, -999: 1}))
        self.assertEqual(artifact.view(collections.Counter),
                         collections.Counter({42: 3, 43: 1, -999: 1}))
def
|
test_load_and_save(self):
fp1 = os.path.join(self.test_dir.name, 'artifact1.qza')
fp2 = os.path.join(self.test_dir.na
|
me, 'artifact2.qza')
artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
artifact.save(fp1)
artifact = Artifact.load(fp1)
# Overwriting its source file works.
artifact.save(fp1)
# Saving to a new file works.
artifact.save(fp2)
root_dir = str(artifact.uuid)
expected = {
'VERSION',
'metadata.yaml',
'data/file1.txt',
'data/file2.txt',
'data/nested/file3.txt',
'data/nested/file4.txt',
'provenance/metadata.yaml',
'provenance/VERSION',
'provenance/action/action.yaml'
}
self.assertArchiveMembers(fp1, root_dir, expected)
root_dir = str(artifact.uuid)
expected = {
'VERSION',
'metadata.yaml',
'data/file1.txt',
'data/file2.txt',
'data/nested/file3.txt',
'data/nested/file4.txt',
'provenance/metadata.yaml',
'provenance/VERSION',
'provenance/action/action.yaml'
}
self.assertArchiveMembers(fp2, root_dir, expected)
    def test_roundtrip(self):
        """load -> save -> load preserves type, format, UUID, and views."""
        fp1 = os.path.join(self.test_dir.name, 'artifact1.qza')
        fp2 = os.path.join(self.test_dir.name, 'artifact2.qza')
        artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
        artifact.save(fp1)
        artifact1 = Artifact.load(fp1)
        artifact1.save(fp2)
        artifact2 = Artifact.load(fp2)
        self.assertEqual(artifact1.type, artifact2.type)
        self.assertEqual(artifact1.format, artifact2.format)
        self.assertEqual(artifact1.uuid, artifact2.uuid)
        self.assertEqual(artifact1.view(list),
                         artifact2.view(list))
        # double view to make sure multiple views can be taken
        self.assertEqual(artifact1.view(list),
                         artifact2.view(list))
def test_load_with_archive_filepath_modified(self):
# Save an artifact for use in the following test case.
fp = os.path.join(self.test_dir.name, 'artifact.qza')
Artifact.import_data(FourInts, [-1, 42, 0, 43]).save(fp)
# Load the artifact from a filepath then save a different artifact to
# the same filepath. Assert that both artifacts produce the correct
# views of their data.
#
# `load` used to be lazy, o
|
ramansbach/cluster_analysis
|
clustering/scripts/old-scripts/clustering_temp.py
|
Python
|
mit
| 73,255
| 0.011958
|
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import gsd.hoomd
import sklearn
import scipy.optimize as opt
import os
import os.path
import pdb
from sklearn.neighbors import BallTree
from sklearn.neighbors import radius_neighbors_graph
from scipy.spatial.distance import cdist,pdist
from scipy.special import erf
from scipy.sparse.csgraph import connected_components
from scipy.sparse import csr_matrix,lil_matrix,coo_matrix
#from .due import due, Doi
from .smoluchowski import massAvSize
from mpi4py import MPI
from cdistances import conOptDistanceCython,alignDistancesCython,subsquashRNG
from cdistances import squashRNGCOOCython
__all__ = ["ClusterSnapshot", "ContactClusterSnapshot",
"OpticalClusterSnapshot","AlignedClusterSnapshot",
"ContactClusterSnapshotXTC","OpticalClusterSnapshotXTC",
"SnapSystem",
"conOptDistance","conOptDistanceC","alignedDistance",
"alignedDistanceC","fixMisplacedArom","checkSymmetry",
"squashRNG","squashRNGCython","squashRNGPy","squashRNGCOO",
"squashRNGCOOCython"]
# Use duecredit (duecredit.org) to provide a citation to relevant work to
# be cited. This does nothing, unless the user has duecredit installed,
# And calls this with duecredit (as in `python -m duecredit script.py`):
'''
due.cite(Doi("10.1167/13.9.30"),
description="Simple data analysis for clustering application",
tags=["data-analysis","clustering"],
path='clustering')
'''
def checkSymmetry(csr):
    """
    Checks whether a matrix in CSR sparse format is symmetric.

    Parameters
    ----------
    csr: matrix in CSR format

    Returns
    -------
    symyes: bool
        True if symmetric, False if not
    """
    # A matrix is symmetric exactly when it differs from its transpose
    # in zero entries.
    mismatches = csr != csr.transpose()
    return mismatches.nnz == 0
def squashRNG(rng, apermol):
    """
    Reduces radius neighbors graph to a new graph based on molecules instead of
    atoms.

    Parameters
    ----------
    rng: a graph in CSR format as produced by a BallTree
    apermol: int
        the number of atoms in a molecule

    Returns
    -------
    molrng: a new graph in CSR format

    Raises
    ------
    RuntimeError: if the original rng is not symmetric
    """
    if not checkSymmetry(rng):
        raise RuntimeError("Graph is non-symmetrical")
    nmolrows = int(rng.shape[0] / apermol)
    nmolcols = int(rng.shape[1] / apermol)
    atoms = rng.toarray()
    molrng = np.zeros((nmolrows, nmolcols))
    # Two molecules are connected when any atom pair between them is.
    for moli in range(nmolrows):
        for molj in range(moli + 1, nmolcols):
            block = atoms[apermol*moli:apermol*(moli+1),
                          apermol*molj:apermol*(molj+1)]
            if block.max():
                molrng[moli, molj] = 1.0
                molrng[molj, moli] = 1.0
    return csr_matrix(molrng)
def squashRNGCOO(rng, apermol):
    """
    Reduces radius neighbors graph to a new graph based on molecules instead of
    atoms.

    Uses COO format

    Parameters
    ----------
    rng: a graph in CSR format as produced by a BallTree
    apermol: int
        the number of atoms in a molecule

    Returns
    -------
    molrng: a new graph in CSR format

    Raises
    ------
    RuntimeError: if the original rng is not symmetric
    """
    if not checkSymmetry(rng):
        raise RuntimeError("Graph is non-symmetrical")
    nmolrows = int(rng.shape[0] / apermol)
    nmolcols = int(rng.shape[1] / apermol)
    molrng = lil_matrix((nmolrows, nmolcols))
    coo = coo_matrix(rng)
    # Map each atom-level edge onto its molecule pair, encoded as a single
    # integer so duplicates collapse via np.unique.
    encoded = (coo.row // apermol) * nmolcols + (coo.col // apermol)
    for code in np.unique(encoded):
        row, col = divmod(code, nmolcols)
        # Only the upper triangle is written, matching the C/loop variants.
        if col > row:
            molrng[row, col] = 1
    return csr_matrix(molrng)
def squashRNGCython(rng, apermol):
    """
    Reduces radius neighbors graph to a new graph based on molecules instead of
    atoms, but uses Cython code to improve speed.

    Parameters
    ----------
    rng: a graph in CSR format as produced by a BallTree
    apermol: int
        the number of atoms in a molecule

    Returns
    -------
    molrng: a new graph in CSR format

    Raises
    ------
    RuntimeError: if the original rng is not symmetric
    """
    if not checkSymmetry(rng):
        raise RuntimeError("Graph is non-symmetrical")
    nmolrows = int(rng.shape[0] / apermol)
    nmolcols = int(rng.shape[1] / apermol)
    # Delegate the block scan to the compiled helper.
    squashed = subsquashRNG(rng.toarray(), np.zeros((nmolrows, nmolcols)),
                            apermol)
    return csr_matrix(squashed)
def squashRNGPy(rng, apermol):
    """
    Reduces radius neighbors graph to a new graph based on molecules instead of
    atoms. Dummy python debug test of Cython algorithm.

    Parameters
    ----------
    rng: a graph in CSR format as produced by a BallTree
    apermol: int
        the number of atoms in a molecule

    Returns
    -------
    molrng: a new graph in CSR format

    Raises
    ------
    RuntimeError: if the original rng is not symmetric
    """
    if not checkSymmetry(rng):
        raise RuntimeError("Graph is non-symmetrical")
    nmolrows = int(rng.shape[0] / apermol)
    nmolcols = int(rng.shape[1] / apermol)
    # Same contract as squashRNGCython, but via the pure-Python helper.
    squashed = subsquashRNGPy(rng.toarray(), np.zeros((nmolrows, nmolcols)),
                              apermol)
    return csr_matrix(squashed)
def subsquashRNGPy(rng, molrng, apermol):
    """
    Python version of c algorithm that sets the block to 0 when all are 0
    and 1 if at least 1 is 1.

    (Reconstructed: the original source was corrupted mid-line.)

    Parameters
    ----------
    rng: a numpy array as produced by a BallTree, shape
        (nmols*apermol, nmols*apermol)
    molrng: square numpy array of zeros, shape (nmols, nmols); filled in place
    apermol: int
        the number of atoms in a molecule

    Returns
    -------
    molrng: a new graph as a (nmols, nmols) numpy array
    """
    dim = np.shape(molrng)[0]
    sz = np.shape(rng)
    # Flatten both arrays so the index arithmetic mirrors the C code.
    rng = rng.reshape((1, sz[0]*sz[1]))[0]
    molrng = molrng.reshape((1, dim*dim))[0]
    for i in range(dim):
        for j in range(i+1, dim):
            istart = apermol*i
            iend = apermol*(i+1)
            jstart = apermol*j
            jend = apermol*(j+1)
            curr = 0
            # Scan the (i, j) atom block; any nonzero entry links the pair.
            for k in range(istart, iend):
                for m in range(jstart, jend):
                    if (rng[k*dim*apermol+m] != 0.):
                        curr = 1
            if (curr == 1):
                molrng[dim*i+j] = 1.0
                molrng[dim*j+i] = 1.0
    molrng = molrng.reshape((dim, dim))
    return molrng
def fixMisplacedArom(gsdfile,gsdout,idMiss,idPartner,idNotMiss,idNotPartner
,molno,ats,ts):
"""
opens a gsd file, gets the trajectory, then writes out in place with
the incorrectly placed aromatic placed correctly
Parameters
----------
gsdfile: string
filename of the file to be rewritten
gsdout: string
where to write new stuff
idMiss: the id of the misplaced aromatic within the molecule
idPartner: the id of the partner to the misplaced aromatic within the mol
idNotMiss: the complementary correctly placed aromatic
idNotPartner: idNotMiss's partner
ts: which timesteps of the trajectory to rewrite
Notes
-----
pos(idMiss) = pos(idPartner) + (pos(idNotMiss) - pos(idNotPartner))
"""
traj = gsd.hoomd.open(gsdfile)
trajnew = gsd.hoomd.open(gsdout,'wb')
offset = molno
idMisses = offset+idMiss + np.arange(0,molno*(ats-1),ats-1)
idPartners = offset + idPartner + np.arange(0,molno*(ats-1),ats-1)
idNotMisses = offset + idNotMiss + np.arange(0,molno*(ats-1),ats-1)
idNotPartners = offset + idNotPartner + np.arange(0,molno*(ats-1),ats-1)
for t in ts:
snapshot = traj[t]
box = snapshot.configuration.box[0:3]
pos = snapshot.particles.position
pds = pos[idNotMisses] - pos[
|
alanjds/drf-nested-routers
|
tests/urls.py
|
Python
|
apache-2.0
| 105
| 0
|
from tests.serializers.urls import urlpatterns as serializers_urls

# Aggregate URLconf for the test suite: currently just the serializer tests.
# (Reconstructed: the original lines were corrupted mid-token.)
urlpatterns = [
] + serializers_urls
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.