| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6–947k) | score (float64 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
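Each row below is one sample: repository metadata plus a source file split into `prefix`, `middle`, and `suffix` cells. As an illustration only (the dataset itself does not prescribe any particular consumption format), a row's three text cells can be concatenated back into the original file or arranged into a fill-in-the-middle style prompt; the helper names and sentinel tokens in this sketch are hypothetical, not part of the dataset.

```python
# Minimal sketch: recombining one row's text cells.
# Field names match the column headers above; the FIM sentinel strings are
# hypothetical placeholders, not defined by this dataset.
def reassemble_file(row: dict) -> str:
    """Concatenate the three cells back into the original source text."""
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row: dict,
                  pre_tok: str = "<fim_prefix>",
                  suf_tok: str = "<fim_suffix>",
                  mid_tok: str = "<fim_middle>") -> str:
    """Arrange prefix/suffix around sentinels; the target completion is row['middle']."""
    return f"{pre_tok}{row['prefix']}{suf_tok}{row['suffix']}{mid_tok}"

# Toy row shaped like the table rows below:
row = {"prefix": "def add(a, b):\n    return ", "middle": "a + b", "suffix": "\n"}
assert reassemble_file(row) == "def add(a, b):\n    return a + b\n"
```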
| mali/kdevelop | languages/qmljs/nodejsmodules/http.py | Python | gpl-2.0 | 3,792 | 0.001582 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# This file is part of qmljs, the QML/JS language support plugin for KDevelop
# Copyright (c) 2014 Denis Steckelmacher <steckdenis@yahoo.fr>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License or (at your option) version 3 or any later version
# accepted by the membership of KDE e.V. (or its successor approved
# by the membership of KDE e.V.), which shall act as a proxy
# defined in Section 14 of version 3 of the license.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from jsgenerator import *
from common import *
# Print the license of the generated file (the same as the one of this file)
license()
basicTypes(globals())
require('event')
require('net')
_function = 'function(){}'
_object = 'new Object()'
_date = 'new Date()'
Module().members(
Var('{}', 'STATUS_CODES'),
F('new Server()', 'createServer', ('requestListener', _function)),
F('new Client()', 'createClient', ('port', _int), ('host', _string)),
Class('Server').prototype('event.EventEmitter').members(
F(_void, 'listen', ('port', _int), ('
|
hostname', _string), ('backlog', _int), ('callback', _function)),
F(_void, 'close', ('callback', _function)),
Var(_int, 'maxHeadersCount'),
F(_void, 'setTimeout', ('msecs', _int), ('callback', _function)),
Var(_int, 'timeout'),
),
Class('ServerResponse').prototype('event.EventEmitter').members(
F(_void, 'writeContinue'),
F(_void, 'writeHead', ('statusCode', _int), ('reasonPhrase', _string), ('headers', _object)),
F(_
|
void, 'setTimeout', ('msecs', _int), ('callback', _function)),
Var(_int, 'statusCode'),
F(_void, 'setHeader', ('name', _string), ('value', _string)),
Var(_bool, 'headersSent'),
Var(_bool, 'sendDate'),
F(_string, 'getHeader', ('name', _string)),
F(_void, 'removeHeader', ('name', _string)),
F(_bool, 'write', ('chunk', _string), ('encoding', _string)),
F(_void, 'addTrailers', ('headers', _object)),
F(_void, 'end', ('data', _string), ('encoding', _string))
),
F('new ClientRequest()', 'request', ('options', _object), ('callback', _function)),
F('new ClientRequest()', 'get', ('options', _object), ('callback', _function)),
Class('Agent').members(
Var(_int, 'maxSockets'),
Var(_array, 'sockets'),
Var(_array, 'requests')
),
Var('new Agent()', 'globalAgent'),
Class('ClientRequest').prototype('event.EventEmitter').members(
F(_void, 'write', ('chunk', _string), ('encoding', _string)),
F(_void, 'end', ('data', _string), ('encoding', _string)),
F(_void, 'abort'),
F(_void, 'setTimeout', ('msecs', _int), ('callback', _function)),
F(_void, 'setNoDelay', ('noDelay', _bool)),
F(_void, 'setSocketKeepAlive', ('enable', _bool), ('initialDelay', _int)),
),
Class('IncomingMessage').prototype('event.EventEmitter').members(
Var(_string, 'httpVersion'),
Var(_object, 'headers'),
Var(_object, 'trailers'),
F(_void, 'setTimeout', ('msecs', _int), ('callback', _function)),
Var(_string, 'method'),
Var(_string, 'url'),
Var(_int, 'statusCode'),
Var('new net.Socket()', 'socket')
)
).print()
|
| sanyaade-mobiledev/clusto | src/clusto/commands/attr.py | Python | bsd-3-clause | 5,122 | 0.005076 |
#!/usr/bin/env python
# -*- mode: python; sh-basic-offset: 4; indent-tabs-mode: nil; coding: utf-8 -*-
# vim: tabstop=4 softtabstop=4 expandtab shiftwidth=4 fileencoding=utf-8
import argparse
import sys
import clusto
from clusto import drivers
from clusto import script_helper
from pprint import pprint
import sys
import traceback
JSON=False
YAML=False
try:
import yaml
YAML=True
except ImportError:
pass
try:
import simplejson as json
JSON=True
except ImportError:
try:
import json
JSON=True
except:
pass
class Attr(script_helper.Script):
'''
Operate upon an object's attributes, you should be able to
add, remove, list or set attributes of any kind
'''
obj = None
format = 'list'
def __init__(self):
script_helper.Script.__init__(self)
def run_show_yaml(self, attrs):
self.debug('Printing in format: YAML')
print yaml.safe_dump(attrs, encoding='utf-8',
explicit_start=True, default_flow_style=False)
return 0
def run_show_json(self, attrs):
self.debug('Printing in format: JSON')
print json.dumps(attrs, sort_keys=True, indent=2)
return 0
def run_show_csv(self, attrs):
self.debug('Printing in format: CSV')
print 'key;subkey;number;"value"'
for attr in attrs:
print '%s;%s;%s;"%s"' % (
str(attr['key'] or ''),
str(attr['subkey'] or ''),
str(int(attr['number'] or 0)),
str(attr['value']))
return 0
def run_show_list(self, attrs):
self.debug('Printing in format: List')
maxkey = 3 + max([len(str(_['key'])) for _ in attrs] + [0])
maxsubkey = 6 + max([len(str(_['subkey'])) for _ in attrs] + [0])
maxnumber = 3 + max([len(str(_['number'])) for _ in attrs] + [0])
if maxkey < 5: maxkey = 5
if maxsubkey < 8: maxsubkey = 8
print ''.join(['KEY'.ljust(maxkey, ' '), 'SUBKEY'.ljust(maxsubkey, ' '), 'VALUE'])
for attr in attrs:
print ''.join([str(_).ljust(maxsize, ' ') for _, maxsize in [
(attr['key'], maxkey),
(attr['subkey'], maxsubkey),
(attr['value'], 0),
]])
return 0
def run_set(self, kwargs):
kwargs.pop('merge_container_attrs')
return self.obj.set_attr(**kwargs)
def run_add(self, kwargs):
kwargs.pop('merge_container_attrs')
return self.obj.add_attr(**kwargs)
def run_delete(self, kwargs):
kwargs.pop('merge_container_attrs')
return self.obj.del_attrs(**kwargs)
def run_show(self, kwargs):
attrs = self.obj.attrs(**kwargs)
attrs.sort(key=lambda _: (_.key, _.number, _.subkey, _.value))
result = []
for attr in attrs:
row = {
'key': attr.key,
'subkey': attr.subkey,
'number': attr.number,
'type': attr.datatype,
'value': unicode(attr.value)
}
result.append(row)
return (getattr(self, 'run_show_%s' % self.format)(result))
def run(self, args):
obj = clusto.get(args.obj[0])
if not obj:
self.error('Object %s does not exist' % args.obj[0])
return -1
self.obj = obj[0]
opts = {}
kwargs = dict(args.__dict__.items())
self.format = args.format
for k in ['key', 'subkey', 'value', 'merge_container_attrs']:
if kwargs[k
|
] != None:
opts[k] = kwargs[k]
return (getattr(self, 'run_%s' % args.action[0])(opts))
def _add_arguments(self, parser):
actions = ['add', 'show', 'set', 'delete']
choices = ['list', 'csv']
if JSON:
choices.append('json')
if YAML:
choices.append('yaml')
parser.add_argument(
|
'action', nargs=1, metavar='action', choices=actions,
help='Action to execute (add, delete, set, show)')
parser.add_argument('--format', choices=choices, default='list',
help='What format to use to display the info, defaults to "list"')
parser.add_argument('-k', '--key', help='Attribute key to filter on',
default=None)
parser.add_argument('-s', '--subkey', help='Attribute subkey to filter on',
default=None)
parser.add_argument('-v', '--value', help='Attribute value to filter on',
default=None)
parser.add_argument('-m', '--merge', default=False, action='store_true',
dest='merge_container_attrs',
help='Merge container attributes recursively (defaults to False)')
parser.add_argument('obj', nargs=1, metavar='object',
help='Object to modify/query attributes from')
def add_subparser(self, subparsers):
parser = self._setup_subparser(subparsers)
self._add_arguments(parser)
def main():
attr, args = script_helper.init_arguments(Attr)
return(attr.run(args))
if __name__ == '__main__':
sys.exit(main())
|
| h4ck3rm1k3/states-2 | sabnzbd/scripts/movie-mover.py | Python | bsd-3-clause | 5,436 | 0.001472 |
#!/usr/bin/env python
from __future__ import print_function
import errno
import json
import os
import re
import shutil
import string
import sys
import requests
CONFIG_FILE = 'scripts.conf'
EXTENSIONS = ['avi', 'm4v', 'mkv', 'mp4']
SUB_EXTENSIONS = ['idx', 'sub', 'srt']
PATTERN = re.compile('^(.*)(\d{4})\.(.*)', re.I)
REPLACEMENTS = {
':': '',
}
ENDPOINT = 'https://api.themoviedb.org/3/search/movie'
def get_config(cfgfile):
if not cfgfile.startswith('/'):
cfgfile = os.path.join(os.path.dirname(sys.argv[0]), '..', cfgfile)
config = {}
with open(cfgfile) as fh:
for line in fh:
k, v = re.split('\s+=\s+', line, 1)
config[k] = v.strip()
return config
CONFIG = get_config(CONFIG_FILE)
def get_matching_files(dirname):
# Loop through the files in ``dirname``, trying to find one which
# matches PATTERN. Also build a list of files and their sizes.
#
# If no files match PATTERN but we do find one or more files with
# a suitable extension we assume the largest file is the one we
# want. We take this file and use the ``job_dir`` as the base for
# the new file name.
files = {
'match': (),
'by_size': [],
'subs': [],
}
for root, _, fns in os.walk(dirname):
for fn in fns:
ext = fn.rsplit('.', 1)[-1].lower()
if ext in SUB_EXTENSIONS:
files['subs'].append(os.path.join(root, fn))
if ext not in EXTENSIONS:
continue
abs = os.path.abspath(os.path.join(root, fn))
files['by_size'].append((fn, os.path.getsize(abs)))
match = PATTERN.match(fn)
if match:
files['match'] = (abs, match)
break
files['by_size'] = sorted(files['by_size'], key=lambda x: x[1])
return files
def get_canonical_name(name, year):
"""
Translates a dotted filename into the real name with spaces while
preserving acronyms.
"""
if not name.endswith('.'):
name += '.'
regex = '((?:[A-Z]\.)+)'
acronyms = re.findall(regex, name)
parts = re.split(regex, name)
if not parts:
return name
canonical_name = []
for part in parts:
if part in acronyms:
part = re.sub('\.$', '', part)
else:
part = part.replace('.', ' ')
canonical_name.append(part.strip())
canonical_name = ' '.join(canonical_name)
# look up TMDb for the show name
canonical_name = get_tmdb_name(canonical_name, year)
if not canonical_name:
return None
# replace trailing dots
canonical_name = re.sub('\.+$', '', canonical_name)
# replace unsuitable characters
for k, v in REPLACEMENTS.items():
canonical_name = canonical_name.replace(k, v)
return canonical_name
def get_tmdb_name(name, year):
headers = {'Accept': 'application/json'}
params = {
'api_key': CONFIG['TMDB_API_KEY'],
'query': name,
}
response = requests.get(ENDPOINT, params=params, headers=headers)
results = json.loads(response.content.decode('utf-8'))['results']
if not results:
return None
for r in results:
if r['release_date'].split('-')[0] == year:
return r['original_title']
def mkdirp(path, mode=0755):
try:
os.makedirs(path, mode=mode)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def main(job_dir, category):
files = get_matching_files(job_dir)
if files['match']:
fn, match = files['match']
else:
# no files at all? just exit
if not files['by_size']:
print("No suitable files found")
sys.exit()
# found a potential match
fn = files['by_size'][0][0]
fn = os.path.join(job_dir, fn)
match = PATTERN.match(os.path.basename(job_dir))
if not match:
print("No suitable files found")
sys.exit()
dotted_name, year = match.groups()[:2]
canonical_name = get_canonical_name(dotted_name, year)
# bail out if TMBd doesn't find anything
if not canonical_name:
print("Could not find files in TMDb for %s %s" % (dotted_name, year))
sys.exit()
dotted_name = canonical_name.replace(' ', '.')
base_name = "%s.%s" % (dotted_name, year)
for fn in [fn] + files['subs']:
ext = fn.rsplit('.', 1)[-1]
final_name = os.path.join(CONFIG['movies_directory'], base_name + '.' + ext)
if os.path.isfile(final_name):
print("File already exists: %s" % final_name)
sys.exit(1)
# Move the files into place
shutil.move(os.path.join(job_dir, fn), final_name)
print("New file: %s (%s)" % (final_name, os.path.basename(fn)))
# Remove the old directory
shutil.r
|
mtree(job_dir)
# try to remove the empty category directories
parent_dirname = os.path.dirname(job_dir)
parent_basename = os.path.basename(parent_dirname)
if parent_basename.lower() == category.lower():
try
|
:
os.rmdir(parent_dirname)
print("Removed empty directory: %s" % parent_dirname)
except OSError:
print("Skipped non-empty directory: %s" % parent_dirname)
pass
if __name__ == '__main__':
main(sys.argv[1], sys.argv[5])
|
| dvoets/fibClock | fSequence.py | Python | gpl-2.0 | 980 | 0.003061 |
import collections
class fSe
|
q:
def __init__(self, fNumbers):
self.fNumbers = fNumbers
self.seq = self.fSequence()
self.fDecom = self.fDecomposition()
def fSequence(self):
if self.fNumbers == 1:
fSeq = [1]
else:
fSeq = [1, 1]
if self.fNumbers > 2:
for i in range(2, self.fNumbers):
fSeq.append(fSeq[i-2]+fSeq[i-1])
return fSeq
def fDecomposition(self):
d = {}
for i in range(2**s
|
elf.fNumbers):
fmt = '{0:0' + str(self.fNumbers) + 'b}'
binToSubset = map(int, list(fmt.format(i)))
property_asel = [val for is_good, val in zip(binToSubset, self.seq) if is_good]
d.setdefault(sum(property_asel), []).append(binToSubset)
return d
def toon(self):
print self.seq
if __name__ == '__main__':
test = fSeq(5)
for i in range(12):
print 'i', test.fDecom[i]
|
| jonathanslenders/libpymux | setup.py | Python | bsd-2-clause | 617 | 0.003241 |
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.
|
core import setup
requirements = [ 'pyte', 'docopt' ]
try:
import asyncio
except ImportError:
requirements.append('asyncio')
setup(
name='libpymux',
author='Jonathan
|
Slenders',
version='0.1',
license='LICENSE.txt',
url='https://github.com/jonathanslenders/libpymux',
description='Python terminal multiplexer (Pure Python tmux clone)',
long_description=open("README.rst").read(),
packages=['libpymux'],
install_requires=requirements,
)
|
| nebril/fuel-web | nailgun/nailgun/test/unit/test_attributes_plugin.py | Python | apache-2.0 | 10,481 | 0 |
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless re
|
quired by applicable law or agreed to in writing, software
# distributed
|
under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import os
import mock
import six
import yaml
from nailgun.db import db
from nailgun.objects import Plugin
from nailgun.plugins import attr_plugin
from nailgun.settings import settings
from nailgun.test import base
@six.add_metaclass(abc.ABCMeta)
class TestPluginBase(base.BaseTestCase):
# Prevent running tests in base class
__test__ = False
# Should be overridden in child
package_version = None
def setUp(self):
super(TestPluginBase, self).setUp()
self.plugin_metadata = self.env.get_default_plugin_metadata(
package_version=self.package_version)
self.plugin = Plugin.create(self.plugin_metadata)
self.env.create(
cluster_kwargs={'mode': 'multinode'},
release_kwargs={
'version': '2014.2-6.0',
'operating_system': 'Ubuntu'})
self.cluster = self.env.clusters[0]
self.attr_plugin = attr_plugin.wrap_plugin(self.plugin)
self.env_config = self.env.get_default_plugin_env_config()
self.get_config = lambda *args: mock.mock_open(
read_data=yaml.dump(self.env_config))()
db().flush()
@mock.patch('nailgun.plugins.attr_plugin.open', create=True)
@mock.patch('nailgun.plugins.attr_plugin.os.access')
@mock.patch('nailgun.plugins.attr_plugin.os.path.exists')
def test_get_plugin_attributes(self, mexists, maccess, mopen):
"""Should load attributes from environment_config.
Attributes should contain provided attributes by plugin and
also generated metadata
"""
maccess.return_value = True
mexists.return_value = True
mopen.side_effect = self.get_config
attributes = self.attr_plugin.get_plugin_attributes(self.cluster)
self.assertEqual(
attributes['testing_plugin']['plugin_name_text'],
self.env_config['attributes']['plugin_name_text'])
self.assertEqual(
attributes['testing_plugin']['metadata'],
self.attr_plugin.default_metadata)
def test_plugin_release_versions(self):
"""Helper should return set of all release versions this plugin
is applicable to.
"""
self.assertEqual(
self.attr_plugin.plugin_release_versions, set(['2014.2-6.0']))
def test_full_name(self):
"""Plugin full name should be made from name and version."""
self.assertEqual(
self.attr_plugin.full_name,
'{0}-{1}'.format(self.plugin.name, self.plugin.version))
def test_get_release_info(self):
"""Should return 1st plugin release info which matches
provided release.
"""
self.cluster.release.version = '2014.2.2-6.0.1'
release = self.attr_plugin.get_release_info(self.cluster.release)
self.assertEqual(release, self.plugin_metadata['releases'][0])
def test_slaves_scripts_path(self):
expected = settings.PLUGINS_SLAVES_SCRIPTS_PATH.format(
plugin_name=self.attr_plugin.path_name)
self.assertEqual(expected, self.attr_plugin.slaves_scripts_path)
@mock.patch('nailgun.plugins.attr_plugin.glob')
def test_repo_files(self, glob_mock):
self.attr_plugin.repo_files(self.cluster)
expected_call = os.path.join(
settings.PLUGINS_PATH,
self.attr_plugin.path_name,
'repositories/ubuntu',
'*')
glob_mock.glob.assert_called_once_with(expected_call)
@mock.patch('nailgun.plugins.attr_plugin.urljoin')
def test_repo_url(self, murljoin):
self.attr_plugin.repo_url(self.cluster)
repo_base = settings.PLUGINS_REPO_URL.format(
master_ip=settings.MASTER_IP,
plugin_name=self.attr_plugin.path_name)
murljoin.assert_called_once_with(repo_base, 'repositories/ubuntu')
def test_master_scripts_path(self):
base_url = settings.PLUGINS_SLAVES_RSYNC.format(
master_ip=settings.MASTER_IP,
plugin_name=self.attr_plugin.path_name)
expected = '{0}{1}'.format(base_url, 'deployment_scripts/')
self.assertEqual(
expected, self.attr_plugin.master_scripts_path(self.cluster))
def test_sync_metadata_to_db(self):
plugin_metadata = self.env.get_default_plugin_metadata()
with mock.patch.object(self.attr_plugin, '_load_config') as load_conf:
load_conf.return_value = plugin_metadata
self.attr_plugin.sync_metadata_to_db()
for key, val in six.iteritems(plugin_metadata):
self.assertEqual(
getattr(self.plugin, key), val)
class TestPluginV1(TestPluginBase):
__test__ = True
package_version = '1.0.0'
def test_primary_added_for_version(self):
stub = 'stub'
with mock.patch.object(self.attr_plugin, '_load_config') as load_conf:
load_conf.return_value = [{'role': ['controller']}]
tasks = self.attr_plugin._load_tasks(stub)
self.assertItemsEqual(
tasks[0]['role'], ['primary-controller', 'controller'])
def test_path_name(self):
self.assertEqual(
self.attr_plugin.path_name,
self.attr_plugin.full_name)
class TestPluginV2(TestPluginBase):
__test__ = True
package_version = '2.0.0'
def test_role_not_changed_for_version(self):
stub = 'stub'
with mock.patch.object(self.attr_plugin, '_load_config') as load_conf:
load_conf.return_value = [{'role': ['controller']}]
tasks = self.attr_plugin._load_tasks(stub)
self.assertItemsEqual(
tasks[0]['role'], ['controller'])
def test_path_name(self):
self.assertEqual(
self.attr_plugin.path_name,
'{0}-{1}'.format(self.plugin.name, '0.1'))
class TestPluginV3(TestPluginBase):
__test__ = True
package_version = '3.0.0'
def test_sync_metadata_to_db(self):
plugin_metadata = self.env.get_default_plugin_metadata()
attributes_metadata = self.env.get_default_plugin_env_config()
roles_metadata = self.env.get_default_plugin_node_roles_config()
volumes_metadata = self.env.get_default_plugin_volumes_config()
deployment_tasks = self.env.get_default_plugin_deployment_tasks()
tasks = self.env.get_default_plugin_tasks()
mocked_metadata = {
self._find_path('metadata'): plugin_metadata,
self._find_path('environment_config'): attributes_metadata,
self._find_path('node_roles'): roles_metadata,
self._find_path('volumes'): volumes_metadata,
self._find_path('deployment_tasks'): deployment_tasks,
self._find_path('tasks'): tasks,
}
with mock.patch.object(self.attr_plugin, '_load_config') as load_conf:
load_conf.side_effect = lambda key: mocked_metadata[key]
self.attr_plugin.sync_metadata_to_db()
for key, val in six.iteritems(plugin_metadata):
self.assertEqual(
getattr(self.plugin, key), val)
self.assertEqual(
self.plugin.attributes_metadata, attributes_metadata)
self.assertEqual(
self.plugin.roles_metadata, roles_metadata)
self.assertEqual(
self.plugin.volumes_metadata, volumes_metadata)
self.assertEqual(
self.plugin.deployment_tasks, deployment_tasks)
self.assertEqual(
se
|
| ktaneishi/deepchem | deepchem/molnet/load_function/lipo_datasets.py | Python | mit | 2,563 | 0.012485 |
"""
Lipophilicity dataset loader.
"""
from __future__ import division
from __future__ import unicode_literals
import os
import logging
import deepchem
logger = logging.getLogger(__name__)
|
def load_lipo(featurizer='ECFP', split='index', reload=True, move_mean=True):
"""Load Lipophilicity datasets."""
# Featurize Lipophilicity dataset
logger.info("About to featurize Lipophilicity dataset.")
logger.info("About to load Lipophilicity dataset.")
data_dir = deepchem.utils.get_data_dir()
if reload:
if move_mean:
dir_name = "lipo/" + featurizer + "/" + str(split)
else:
|
dir_name = "lipo/" + featurizer + "_mean_unmoved/" + str(split)
save_dir = os.path.join(data_dir, dir_name)
dataset_file = os.path.join(data_dir, "Lipophilicity.csv")
if not os.path.exists(dataset_file):
deepchem.utils.download_url(
'http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/Lipophilicity.csv'
)
Lipo_tasks = ['exp']
if reload:
loaded, all_dataset, transformers = deepchem.utils.save.load_dataset_from_disk(
save_dir)
if loaded:
return Lipo_tasks, all_dataset, transformers
if featurizer == 'ECFP':
featurizer = deepchem.feat.CircularFingerprint(size=1024)
elif featurizer == 'GraphConv':
featurizer = deepchem.feat.ConvMolFeaturizer()
elif featurizer == 'Weave':
featurizer = deepchem.feat.WeaveFeaturizer()
elif featurizer == 'Raw':
featurizer = deepchem.feat.RawFeaturizer()
loader = deepchem.data.CSVLoader(
tasks=Lipo_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file, shard_size=8192)
# Initialize transformers
transformers = [
deepchem.trans.NormalizationTransformer(
transform_y=True, dataset=dataset, move_mean=move_mean)
]
logger.info("About to transform data")
for transformer in transformers:
dataset = transformer.transform(dataset)
if split == None:
return Lipo_tasks, (dataset, None, None), transformers
splitters = {
'index': deepchem.splits.IndexSplitter(),
'random': deepchem.splits.RandomSplitter(),
'scaffold': deepchem.splits.ScaffoldSplitter()
}
splitter = splitters[split]
train, valid, test = splitter.train_valid_test_split(dataset)
if reload:
deepchem.utils.save.save_dataset_to_disk(save_dir, train, valid, test,
transformers)
return Lipo_tasks, (train, valid, test), transformers
|
| FriedrichK/volunteer_planner | shiftmailer/management/commands/mailer.py | Python | agpl-3.0 | 1,636 | 0.001834 |
# coding: utf-8
import datetime
from django.core.management.base import BaseCommand
# from django.template.loader import render_to_string
from django.db.models import Count
from scheduler.models import Need
from shiftmailer.models import Mailer
from shiftmailer.excelexport import GenerateExcelSheet
DATE_FORMAT = '%d.%m.%Y'
class Command(BaseCommand):
help = 'sends emails taken from addresses (.models.mailer) with a list of shifts for this day' \
'run my cronjob'
def add_arguments(self, parser):
parser.add_argument('--date', dest='print_date', default=datetime.date.to
|
day().strftime(DATE_FORMAT),
help='The date to generate scheduler for')
def handle(self, *args, **options):
mailer = Mailer.objects.all()
t = datetime.datetime.strptime(options['print_date'], DATE_FORMAT)
for mail in mailer:
needs = Need.objects.filter(locatio
|
n=mail.location).filter(
ending_time__year=t.strftime("%Y"),
ending_time__month=t.strftime("%m"),
ending_time__day=t.strftime("%d")) \
.order_by('topic', 'ending_time') \
.annotate(volunteer_count=Count('registrationprofile')) \
.select_related('topic', 'location') \
.prefetch_related('registrationprofile_set', 'registrationprofile_set__user')
# if it's not used anyway, we maybe shouldn't even render it? #
# message = render_to_string('shifts_today.html', locals())
iua = GenerateExcelSheet(shifts=needs, mailer=mail)
iua.send_file()
|
| marcoesposito1988/easy_handeye | easy_handeye/scripts/robot.py | Python | lgpl-3.0 | 414 | 0.002415 |
#!/usr/bi
|
n/env python
import rospy
from easy_handeye.handeye_server_robot import HandeyeServerRobot
def main():
rospy.init_node('easy_handeye_calibration_server_robot')
while rospy.get_time() == 0.0:
pass
calibration_namespace=rospy.get_param('~calibration_namespace')
cw = HandeyeServerRobot(namespace=calibration_namespace)
rospy.spin()
if __name__ == '__main__':
|
main()
|
| steve-ord/daliuge | daliuge-engine/dlg/manager/composite_manager.py | Python | lgpl-2.1 | 19,989 | 0.002651 |
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2015
# Copyright by UWA (in the framework of the ICRAR)
# All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
import abc
import collections
import functools
import logging
import multiprocessing.pool
import threading
from . import constants
from .client import NodeManagerClient
from .constants import ISLAND_DEFAULT_REST_PORT, NODE_DEFAULT_REST_PORT
from .drop_manager import DROPManager
from .. import remote, graph_loader
from ..ddap_protocol import DROPRel
from ..exceptions import InvalidGraphException, DaliugeException, \
SubManagerException
from ..utils import portIsOpen
logger = logging.getLogger(__name__)
def uid_for_drop(dropSpec):
if 'uid' in dropSpec:
return dropSpec['uid']
return dropSpec['oid']
def sanitize_relations(interDMRelations, graph):
# TODO: Big change required to remove this hack here
#
# Values in the interDMRelations array use OIDs to identify drops.
# This is because so far we have told users to that OIDs are required
# in the physical graph description, while UIDs are optional
# (and copied over from the OID if not given).
# On the other hand, once drops are actually created in deploySession()
# we access the values in interDMRelations as if they had UIDs inside,
# which causes problems everywhere because everything else is indexed
# on UIDs.
# In order to not break the current physical graph constrains and keep
# things simple we'll simply replace the values of the interDMRelations
# array here to use the corresponding UID for the given OIDs.
# Because UIDs are globally unique across drop instances it makes sense
# to always index things by UID and not by OID. Thus, in the future we
# should probably change the requirement on the physical graphs sent by
# users to always require an UID, and optionally an OID, and then change
# all this code to immediately use those UIDs instead.
#
# NOTE: It seems that the comment above is the result of a misunderstasnding
# of the concept of OIDs and UIDs. OIDs are objectIDs provided by the
# user or rather the translation system, they can't be UIDs, since those
# have to be created by the underlying system implementing the actual drops.
# The reason for that is that the UIDs are required to be unique within
# the system runtime, the OIDs only have to be unique for a certain object.
# In fact there could be multiple drops using to the same OID, but having
# different UIDs. The idea would be that system generates the UIDs during
# generation of the drops. In fact the user does not need to and should not
# know about the UIDs at all and in general the system does not need to
# know about the OIDs.
newDMRelations = []
for rel in interDMRelations:
lhs = uid_for_drop(graph[rel.lhs])
rhs = uid_for_drop(graph[rel.rhs])
new_rel = DROPRel(lhs, rel.rel, rhs)
newDMRelations.append(new_rel)
interDMRelations[:] = newDMRelations
def group_by_node(uids, graph):
uids_by_node = collections.defaultdict(list)
for uid in uids:
uids_by_node[graph[uid]['node']].append(uid)
return uids_by_node
class CompositeManager(DROPManager):
"""
A DROPManager that in turn manages DROPManagers (sigh...).
DROP Managers form a hierarchy where those at the bottom actually hold
DROPs while those in the levels above rely commands and aggregate results,
making the system more manageable and scalable. The CompositeManager class
implements the upper part of this hierarchy in a generic way by holding
references to a number of sub-DROPManagers and communicating with them to
complete each operation. The only assumption about sub-DROPManagers is that
they obey the DROPManager interface, and therefore this CompositeManager
class allows for multiple levels of hierarchy seamlessly.
Having different levels of Data Management hierarchy implies that the
physical graph that is fed into the hierarchy needs to be partitioned at
each level (except at the bottom of the hierarchy) in order to place each
DROP in its correct place. The attribute used by a particular
CompositeManager to partition the graph (from its graphSpec) is given at
construction time.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, dmPort, partitionAttr, subDmId, dmHosts=[], pkeyPath=None, dmCheckTimeout=10):
"""
Creates a new CompositeManager. The sub-DMs it manages are to be located
at `dmHosts`, and should be listening on po
|
rt `dmPort`.
:param: dmPort The port at which the sub-DMs expose themselves
:param: partitionAtt
|
r The attribute on each dropSpec that specifies the
partitioning of the graph at this CompositeManager level.
:param: subDmId The sub-DM ID.
:param: dmHosts The list of hosts under which the sub-DMs should be found.
:param: pkeyPath The path to the SSH private key to be used when connecting
to the remote hosts to start the sub-DMs if necessary. A value
of `None` means that the default path should be used
:param: dmCheckTimeout The timeout used before giving up and declaring
a sub-DM as not-yet-present in a given host
"""
self._dmPort = dmPort
self._partitionAttr = partitionAttr
self._subDmId = subDmId
self._dmHosts = dmHosts
self._graph = {}
self._drop_rels = {}
self._sessionIds = [] # TODO: it's still unclear how sessions are managed at the composite-manager level
self._pkeyPath = pkeyPath
self._dmCheckTimeout = dmCheckTimeout
n_threads = max(1,min(len(dmHosts),20))
self._tp = multiprocessing.pool.ThreadPool(n_threads)
# The list of bottom-level nodes that are covered by this manager
# This list is different from the dmHosts, which are the machines that
# are directly managed by this manager (which in turn could manage more
# machines)
self._nodes = []
self.startDMChecker()
def startDMChecker(self):
self._dmCheckerEvt = threading.Event()
self._dmCheckerThread = threading.Thread(name='DMChecker Thread', target=self._checkDM)
self._dmCheckerThread.start()
def stopDMChecker(self):
if not self._dmCheckerEvt.isSet():
self._dmCheckerEvt.set()
self._dmCheckerThread.join()
# Explicit shutdown
def shutdown(self):
self.stopDMChecker()
self._tp.close()
self._tp.join()
def _checkDM(self):
while True:
for host in self._dmHosts:
if self._dmCheckerEvt.is_set():
break
if not self.check_dm(host, timeout=self._dmCheckTimeout):
logger.error("Couldn't contact manager for host %s, will try again later", host)
if self._dmCheckerEvt.wait(60):
break
@property
def dmHosts(self):
return self._dmHosts[:]
def addDmHost(self, host):
self._dmHosts.append(host)
@property
def nodes(self):
return self._nodes[:]
def add_node(self, node):
self._nodes.append(node)
def remove_node(sel
|
| FutureSharks/invokust | tests/test_loadtest.py | Python | mit | 866 | 0 |
import os
from unittest import TestCase
from invokust.settings import create_settings
from invokust import LocustLoadTest
from locus
|
t import HttpUser, between, task
class WebsiteUser(HttpUser):
wait_time = between(1, 3)
@task()
def get_home_page(self):
"""
Gets /
"""
self.client.get("/")
class TestLocustLoadTest(TestCase):
def test_basic_load_test(self):
settings = create_settings(
classes=[WebsiteUser],
host="https://github.com",
num_users=1,
spawn_rate=1,
run_time="1m",
|
)
loadtest = LocustLoadTest(settings)
loadtest.run()
stats = loadtest.stats()
assert stats["num_requests"] > 10
assert stats["end_time"] > stats["start_time"]
assert stats["requests"]["GET_/"]["total_rpm"] > 0
|
| jackcht/pythonPractice | master/Homework3/hw3q3.py | Python | apache-2.0 | 573 | 0.019197 |
import collection
|
s
import re
import urllib2
url = 'http://shakespeare.mit.edu/hamlet/full.html'
#url = 'https://courseworks.columbia.edu/access/content/group/COMSW3101_002_2015_3/week3/hamlet.html'
req = urllib2.Request(url)
response = urllib2.urlopen(req)
page = response.read()
#lines = page.split('\n')
speech = re.findall(r'<b>(.+)</b>',page)
coun
|
t = collections.defaultdict(int) #int will set all the values to 0
for name in speech:
count[name] += 1
#my_dict = {name: speech.count(name) for name in speech}
print [len(page.split('\n')), len(speech),count]
|
| call-me-jimi/taskmanager | taskmanager/lib/hLog.py | Python | gpl-2.0 | 1,397 | 0.026485 |
import ConfigParser
import os
class hLog( object ):
"""! @brief raw implementation for configuring output of logger
"""
def __init__( self, logger ):
self.logger = logger
self.logCategories = {}
# load config file
|
self.load()
def load( self ):
"""! load config file about indication wether message of a particular category is passed to logger
"""
# get path to taskmanager. it is assumed that this fi
|
le is in the lib directory of
# the taskmanager package.
tmpath = os.path.normpath( os.path.join( os.path.dirname( os.path.realpath(__file__) ) + '/..') )
configFileName = '{tmpath}/etc/logger.cfg'.format(tmpath=tmpath)
parser = ConfigParser.SafeConfigParser()
if os.path.exists( configFileName ):
# read config file
parser.read( configFileName )
# remove all entries
self.logCategories = {}
# iterate over all categories
for category in parser.items( 'CATEGORIES' ):
try:
self.logCategories[ category[0] ] = True if category[1]=="True" else False
except:
pass
def write( self, msg, logCategory="default" ):
if self.logCategories.get( logCategory, False ):
self.logger.info( msg )
|
| CIRCL/AIL-framework | var/www/modules/showpaste/Flask_showpaste.py | Python | agpl-3.0 | 19,911 | 0.006278 |
#!/usr/bin/env python3
# -*-coding:UTF-8 -*
'''
Flask functions and routes for the trending modules page
'''
import redis
import json
import os
import sys
import flask
from flask import Flask, render_tem
|
plate, jsonify, request, Blueprint, make_response, Respon
|
se, send_from_directory, redirect, url_for, abort
from Role_Manager import login_admin, login_analyst, login_read_only, no_cache
from flask_login import login_required
import difflib
import ssdeep
import Paste
import requests
sys.path.append(os.path.join(os.environ['AIL_BIN'], 'packages/'))
import Tag
import Item
sys.path.append(os.path.join(os.environ['AIL_BIN'], 'lib/'))
import Domain
# ============ VARIABLES ============
import Flask_config
app = Flask_config.app
baseUrl = Flask_config.baseUrl
r_serv_metadata = Flask_config.r_serv_metadata
r_serv_tags = Flask_config.r_serv_tags
r_serv_statistics = Flask_config.r_serv_statistics
max_preview_char = Flask_config.max_preview_char
max_preview_modal = Flask_config.max_preview_modal
DiffMaxLineLength = Flask_config.DiffMaxLineLength
bootstrap_label = Flask_config.bootstrap_label
misp_event_url = Flask_config.misp_event_url
hive_case_url = Flask_config.hive_case_url
vt_enabled = Flask_config.vt_enabled
PASTES_FOLDER = Flask_config.PASTES_FOLDER
SCREENSHOT_FOLDER = Flask_config.SCREENSHOT_FOLDER
showsavedpastes = Blueprint('showsavedpastes', __name__, template_folder='templates')
# ============ FUNCTIONS ============
def get_item_screenshot_path(item):
screenshot = r_serv_metadata.hget('paste_metadata:{}'.format(item), 'screenshot')
if screenshot:
screenshot = os.path.join(screenshot[0:2], screenshot[2:4], screenshot[4:6], screenshot[6:8], screenshot[8:10], screenshot[10:12], screenshot[12:])
else:
screenshot = ''
return screenshot
def showpaste(content_range, requested_path):
if PASTES_FOLDER not in requested_path:
# remove full path
requested_path_full = os.path.join(requested_path, PASTES_FOLDER)
else:
requested_path_full = requested_path
requested_path = requested_path.replace(PASTES_FOLDER, '', 1)
# escape directory transversal
if os.path.commonprefix((requested_path_full,PASTES_FOLDER)) != PASTES_FOLDER:
return 'path transversal detected'
vt_enabled = Flask_config.vt_enabled
try:
paste = Paste.Paste(requested_path)
except FileNotFoundError:
abort(404)
p_date = str(paste._get_p_date())
p_date = p_date[6:]+'/'+p_date[4:6]+'/'+p_date[0:4]
p_source = paste.p_source
p_encoding = paste._get_p_encoding()
p_language = 'None'
p_size = paste.p_size
p_mime = paste.p_mime
p_lineinfo = paste.get_lines_info()
p_content = paste.get_p_content()
p_duplicate_str_full_list = paste._get_p_duplicate()
p_duplicate_full_list = []
p_duplicate_list = []
p_simil_list = []
p_date_list = []
p_hashtype_list = []
for dup_list in p_duplicate_str_full_list:
dup_list = dup_list[1:-1].replace('\'', '').replace(' ', '').split(',')
if dup_list[0] == "tlsh":
dup_list[2] = 100 - int(dup_list[2])
else:
dup_list[2] = int(dup_list[2])
p_duplicate_full_list.append(dup_list)
#p_duplicate_full_list.sort(lambda x,y: cmp(x[2], y[2]), reverse=True)
# Combine multiple duplicate paste name and format for display
new_dup_list = []
dup_list_removed = []
for dup_list_index in range(0, len(p_duplicate_full_list)):
if dup_list_index in dup_list_removed:
continue
indices = [i for i, x in enumerate(p_duplicate_full_list) if x[1] == p_duplicate_full_list[dup_list_index][1]]
hash_types = []
comp_vals = []
for i in indices:
hash_types.append(p_duplicate_full_list[i][0])
comp_vals.append(p_duplicate_full_list[i][2])
dup_list_removed.append(i)
#hash_types = str(hash_types).replace("[","").replace("]","") if len(hash_types)==1 else str(hash_types)
#comp_vals = str(comp_vals).replace("[","").replace("]","") if len(comp_vals)==1 else str(comp_vals)
if len(p_duplicate_full_list[dup_list_index]) > 3:
try:
date_paste = str(int(p_duplicate_full_list[dup_list_index][3]))
date_paste = date_paste[0:4]+"-"+date_paste[4:6]+"-"+date_paste[6:8]
except ValueError:
date_paste = str(p_duplicate_full_list[dup_list_index][3])
else:
date_paste = "No date available"
new_dup_list.append([hash_types, p_duplicate_full_list[dup_list_index][1], comp_vals, date_paste])
# Create the list to pass to the webpage
for dup_list in new_dup_list:
hash_type, path, simil_percent, date_paste = dup_list
p_duplicate_list.append(path)
p_simil_list.append(simil_percent)
p_hashtype_list.append(hash_type)
p_date_list.append(date_paste)
if content_range != 0:
p_content = p_content[0:content_range]
#active taxonomies
active_taxonomies = r_serv_tags.smembers('active_taxonomies')
l_tags = r_serv_metadata.smembers('tag:'+requested_path)
tags_safe = Tag.is_tags_safe(l_tags)
#active galaxies
active_galaxies = r_serv_tags.smembers('active_galaxies')
list_tags = []
for tag in l_tags:
if(tag[9:28] == 'automatic-detection'):
automatic = True
else:
automatic = False
if r_serv_statistics.sismember('tp:'+tag, requested_path):
tag_status_tp = True
else:
tag_status_tp = False
if r_serv_statistics.sismember('fp:'+tag, requested_path):
tag_status_fp = True
else:
tag_status_fp = False
list_tags.append( (tag, automatic, tag_status_tp, tag_status_fp) )
l_64 = []
# load hash files
if r_serv_metadata.scard('hash_paste:'+requested_path) > 0:
set_b64 = r_serv_metadata.smembers('hash_paste:'+requested_path)
for hash in set_b64:
nb_in_file = r_serv_metadata.zscore('nb_seen_hash:'+hash, requested_path)
# item list not updated
if nb_in_file is None:
l_pastes = r_serv_metadata.zrange('nb_seen_hash:'+hash, 0, -1)
for paste_name in l_pastes:
# dynamic update
if PASTES_FOLDER in paste_name:
score = r_serv_metadata.zscore('nb_seen_hash:{}'.format(hash), paste_name)
r_serv_metadata.zrem('nb_seen_hash:{}'.format(hash), paste_name)
paste_name = paste_name.replace(PASTES_FOLDER, '', 1)
r_serv_metadata.zadd('nb_seen_hash:{}'.format(hash), score, paste_name)
nb_in_file = r_serv_metadata.zscore('nb_seen_hash:'+hash, requested_path)
nb_in_file = int(nb_in_file)
estimated_type = r_serv_metadata.hget('metadata_hash:'+hash, 'estimated_type')
file_type = estimated_type.split('/')[0]
# set file icon
if file_type == 'application':
file_icon = 'fa-file-o '
elif file_type == 'audio':
file_icon = 'fa-file-video-o '
elif file_type == 'image':
file_icon = 'fa-file-image-o'
elif file_type == 'text':
file_icon = 'fa-file-text-o'
else:
file_icon = 'fa-file'
saved_path = r_serv_metadata.hget('metadata_hash:'+hash, 'saved_path')
if r_serv_metadata.hexists('metadata_hash:'+hash, 'vt_link'):
b64_vt = True
b64_vt_link = r_serv_metadata.hget('metadata_hash:'+hash, 'vt_link')
b64_vt_report = r_serv_metadata.hget('metadata_hash:'+hash, 'vt_report')
else:
b64_vt = False
b64_vt_link = ''
b64_vt_report = r_serv_metadata.hget('metadata_hash:'+hash, 'vt_report')
# hash never refreshed
if b64_vt_report is None:
b64_vt_report = ''
l_64.append( (file_icon, estim
|
| globocom/database-as-a-service | dbaas/api/recreate_slave.py | Python | bsd-3-clause | 770 | 0 |
# -*- coding: utf-8 -*-
from __
|
future__ import absolute_import, unicode_literals
from rest_framework import serializers
from maintenance.models import RecreateSlave
from api.maintenance_base import Mai
|
ntennanceBaseApi
class RecreateSlaveSerializer(serializers.ModelSerializer):
class Meta:
model = RecreateSlave
fields = (
'id',
'current_step',
'status',
'can_do_retry',
'task',
'created_at',
'host',
)
class RecreateSlaveAPI(MaintennanceBaseApi):
"""
Task API
"""
model = RecreateSlave
serializer_class = RecreateSlaveSerializer
filter_fields = (
'status',
'can_do_retry',
'task',
'host',
)
|
| mgree/tmpl | www/backend/infer.py | Python | mit | 3,314 | 0.016898 |
import sys, os
import pickle
import nltk
import paths
from utils import *
def words_to_dict(words):
return dict(zip(words, range(0, len(words))))
nltk.data.path.append(paths.nltk_data_path)
use_wordnet = True
if use_wordnet:
stemmer = nltk.stem.wordnet.WordNetLemmatizer()
stem = stemmer.lemmatize
else:
stemmer = nltk.stem.porter.PorterStemmer()
stem = stemmer.stem
def tokens(text):
replacements = [("---"," "),
("--"," "),
("-", "")] # trying to capture multi-word keywords
for (src,tgt) in replacements:
text = text.replace(src,tgt)
return preprocess(text)
def make_bow(doc,d):
bow = {}
for word in doc:
if word in d:
wordid = d[word]
bow[wordid] = bow.get(wordid,0) + 1
# XXX we should notify something about non-stopwords that we couldn't parse
return bow
modes = ["fulltext","abstracts"]
ks = ["20","50","100","200"]
dist = ["kl","euclidean"]
if __name__ == '__main__':
args = sys.argv[1:]
mode = modes[0]
k = ks[0]
dfun = dist[0]
num = 20
while len(args) > 1:
if args[0] == "-k":
if args[1] in ks:
k = args[1]
args = args[2:]
if args[0] in ["-m","--mode"]:
if args[1] in modes:
mode = args[1]
args = args[2:]
if args[0] in ["-n","--num"]:
if int(args[1]) in range(1,50):
num = int(args[1])
args = args[2:]
if args[0] in ["-d","--distance"]:
if args[1] in dist:
dfun = args[1]
args = args[2:]
model = os.path.join(mode,"lda" + k,"final")
words = os.path.join(mode,"vocab.dat")
docs = os.path.join(mode,"docs.dat")
pdf_file = args[0]
(base,_) = os.path.splitext(pdf_file)
text = os.popen("/usr/bin/pdftotext \"%s\" -" % pdf_file).read() # XXX safe filenames!
vocab = words_to_dict(open(words).read().split())
bow = make_bow(map(stem,tokens(text)),vocab)
dat_file = base + ".dat"
out = open(
|
dat_file,"w")
out.write(str(len(bow)))
out.write(' ')
for term in bow:
out.write(str(term))
out.write(':')
out.write(str(bow[term]))
out.write(' ')
out.write('\n')
out.close()
log = base + ".log"
os.system(paths.lda + " inf settings.txt %s %s %s >%s 2>&1" % (model,dat_file,base,log))
# XXX capture output, handle errors
inf = read(bas
|
e + "-gamma.dat")
gammas = read(model + ".gamma")
papers = zip(read(docs), map(lambda s: map(float,s.split()), gammas))
tgt = ["INPUT PDF"] + map(lambda s: map(float,s.split()), inf)
# XXX these are the topic values, if we want to visualize them
# XXX be careful to not leak our filenames
if dfun == "euclidean":
metric = distance
fmt = '%d'
elif dfun == "kl":
metric = kl_divergence
fmt = '%f'
else:
metric = kl_divergence
fmt = '%f'
papers = map(lambda s: (metric(s[1],tgt[1]),s), papers)
papers.sort(lambda x,y: cmp(x[0],y[0]))
print "\nRelated papers:\n"
for (d,(doc,gs)) in papers[0:num]:
print (' %s (' + fmt + ')') % (doc,d)
|
| jorgb/airs | gui/images/anim/make_images.py | Python | gpl-2.0 | 1,181 | 0.011854 |
#-------------------------------------------------------------------------------
# $RCSfile: make_images.py $
# $Source: repos/minimal_app/src/images/make_images.py $
# $Revision: 1.3 $
# $Date: 18-sep-2007 16:35:29 $
#-------------------------------------------------------------------------------
# Author: Jorgen Bodde
# Copyright: (c) Jorgen Bodde
# License: see LICENSE for details
#-------------------------------------------------------------------------------
import os, os.path
import wxversion
wxversion.select('2.8')
import wx
import os.path, glob
import wx.tools.img2py as i2p
image_exts = ['*.png', '*.gif', '*.bmp']
images = []
for ext in image_exts:
images.extend(glob.glob(ext))
for name in images:
root, ext = os.path.splitext(name)
src_f = os.stat(name).st_mtime
make_dst = True
dst_name = root + '.py'
if os.path.isfile(dst_name):
dst_f = os.stat(dst_name).st_mtime
make_d
|
st = src_f > dst_f # make when image is newer then python file
if make_dst:
print 'Converting', name, ' to ', root + '.py'
i2p.img2py(n
|
ame, root + '.py')
|
| CodeNameGhost/shiva | thirdparty/scapy/contrib/mqtt.py | Python | mit | 8,943 | 0 |
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Santiago Hernandez Ramos <shramos@protonmail.com>
# This program is published under GPLv2 license
from scapy.packet import Packet, bind_layers
from scapy.fields import FieldLenField, BitEnumField, StrLenField, \
ShortField, ConditionalField, ByteEnumField, ByteField, StrNullField
from scapy.layers.inet import TCP
from scapy.error import Scapy_Exception
# CUSTOM FIELDS
# source: http://stackoverflow.com/a/43717630
class VariableFieldLenField(FieldLenField):
def addfield(self, pkt, s, val):
val = self.i2m(pkt, val)
data = []
while val:
if val > 127:
data.append(val & 127)
val /= 127
else:
data.append(val)
lastoffset = len(data) - 1
data = "".join(chr(val | (0 if i == lastoffset else 128))
for i, val in enumerate(data))
return s + data
if len(data) > 3:
raise Scapy_Exception("%s: malformed length field" %
self.__class__.__name__)
def getfield(self, pkt, s):
value = 0
for offset, curbyte in enumerate(s):
curbyte = ord(curbyte)
value += (curbyte & 127) * (128 ** offset)
if curbyte & 128 == 0:
return s[offset + 1:], value
if offset > 2:
raise Scapy_Exception("%s: malformed length field" %
self.__class__.__name__)
# LAYERS
CONTROL_PACKET_TYPE = {1: 'CONNECT',
2: 'CONNACK',
3: 'PUBLISH',
4: 'PUBACK',
5: 'PUBREC',
6: 'PUBREL',
7: 'PUBCOMP',
8: 'SUBSCRIBE',
9: 'SUBACK',
10: 'UNSUBSCRIBE',
11: 'UNSUBACK',
12: 'PINGREQ',
13: 'PINGRESP',
14: 'DISCONNECT'}
QOS_LEVEL = {0: 'At most once delivery',
1: 'At least once delivery',
2: 'Exactly once delivery'}
# source: http://stackoverflow.com/a/43722441
class MQTT(Packet):
name = "MQTT fixed header"
fields_desc = [
BitEnumField("type", 1, 4, CONTROL_PACKET_TYPE),
BitEnumField("DUP", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("QOS", 0, 2, QOS_LEVEL),
BitEnumField("RETAIN", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
# Since the siz
|
e of the len field depends on the next layer, we need
# to "cheat" with the length_of parameter and use adjust parameter to
# calculate the value.
|
VariableFieldLenField("len", None, length_of="len",
adjust=lambda pkt, x: len(pkt.payload),),
]
class MQTTConnect(Packet):
name = "MQTT connect"
fields_desc = [
FieldLenField("length", None, length_of="protoname"),
StrLenField("protoname", "",
length_from=lambda pkt: pkt.length),
ByteField("protolevel", 0),
BitEnumField("usernameflag", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("passwordflag", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("willretainflag", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("willQOSflag", 0, 2, QOS_LEVEL),
BitEnumField("willflag", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("cleansess", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("reserved", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
ShortField("klive", 0),
FieldLenField("clientIdlen", None, length_of="clientId"),
StrLenField("clientId", "",
length_from=lambda pkt: pkt.clientIdlen),
# Payload with optional fields depending on the flags
ConditionalField(FieldLenField("wtoplen", None, length_of="willtopic"),
lambda pkt: pkt.willflag == 1),
ConditionalField(StrLenField("willtopic", "",
length_from=lambda pkt: pkt.wtoplen),
lambda pkt: pkt.willflag == 1),
ConditionalField(FieldLenField("wmsglen", None, length_of="willmsg"),
lambda pkt: pkt.willflag == 1),
ConditionalField(StrLenField("willmsg", "",
length_from=lambda pkt: pkt.wmsglen),
lambda pkt: pkt.willflag == 1),
ConditionalField(FieldLenField("userlen", None, length_of="username"),
lambda pkt: pkt.usernameflag == 1),
ConditionalField(StrLenField("username", "",
length_from=lambda pkt: pkt.userlen),
lambda pkt: pkt.usernameflag == 1),
ConditionalField(FieldLenField("passlen", None, length_of="password"),
lambda pkt: pkt.passwordflag == 1),
ConditionalField(StrLenField("password", "",
length_from=lambda pkt: pkt.passlen),
lambda pkt: pkt.passwordflag == 1),
]
RETURN_CODE = {0: 'Connection Accepted',
1: 'Unacceptable protocol version',
2: 'Identifier rejected',
3: 'Server unavailable',
4: 'Bad username/password',
5: 'Not authorized'}
class MQTTConnack(Packet):
name = "MQTT connack"
fields_desc = [
ByteField("sessPresentFlag", 0),
ByteEnumField("retcode", 0, RETURN_CODE),
# this package has not payload
]
class MQTTPublish(Packet):
name = "MQTT publish"
fields_desc = [
FieldLenField("length", None, length_of="topic"),
StrLenField("topic", "",
length_from=lambda pkt: pkt.length),
ConditionalField(ShortField("msgid", None),
lambda pkt: (pkt.underlayer.QOS == 1
or pkt.underlayer.QOS == 2)),
StrLenField("value", "",
length_from=lambda pkt: (pkt.underlayer.len -
pkt.length - 2)),
]
class MQTTPuback(Packet):
name = "MQTT puback"
fields_desc = [
ShortField("msgid", None),
]
class MQTTPubrec(Packet):
name = "MQTT pubrec"
fields_desc = [
ShortField("msgid", None),
]
class MQTTPubrel(Packet):
name = "MQTT pubrel"
fields_desc = [
ShortField("msgid", None),
]
class MQTTPubcomp(Packet):
name = "MQTT pubcomp"
fields_desc = [
ShortField("msgid", None),
]
class MQTTSubscribe(Packet):
name = "MQTT subscribe"
fields_desc = [
ShortField("msgid", None),
FieldLenField("length", None, length_of="topic"),
StrLenField("topic", "",
length_from=lambda pkt: pkt.length),
ByteEnumField("QOS", 0, QOS_LEVEL),
]
ALLOWED_RETURN_CODE = {0: 'Success',
1: 'Success',
2: 'Success',
128: 'Failure'}
class MQTTSuback(Packet):
name = "MQTT suback"
fields_desc = [
ShortField("msgid", None),
ByteEnumField("retcode", None, ALLOWED_RETURN_CODE)
]
class MQTTUnsubscribe(Packet):
name = "MQTT unsubscribe"
fields_desc = [
ShortField("msgid", None),
StrNullField("payload", "")
]
class MQTTUnsuback(Packet):
name = "MQTT unsuback"
fields_desc = [
ShortField("msgid", None)
]
# LAYERS BINDINGS
bind_layers(TCP, MQTT, sport=1883)
bind_layers(TCP, MQTT, dport=1883)
bind_layers(MQT
|
| scottkirkwood/wxoptparse | tests/rsync.py | Python | gpl-2.0 | 4,057 | 0.00986 |
import optparse
if __name__ == "__main__":
parser = optparse.OptionParser(add_help_option=False)
parser.add_option('-v', '--verbose', action='store_true',
help='increase verbosity')
parser.add_option('-q', '--quiet', action='store_true',
|
help='decrease verbosity')
parser.add_option('-c', '--checksum',action='store_true',
help='always checksum')
parser.add_option('-a', '--archive', action='store_true',
help='archive mode, equivalent to -rlptgoD')
parser.add_option('-r', '--recursive', action='store_true',
help='recurse into directories')
parser.add_option('-R', '-
|
-relative', action='store_true',
help='use relative path names')
parser.add_option('--no-relative', action='store_true',
help='turn off --relative')
parser.add_option('--no-implied-dirs', action='store_true',
help="don't send implied dirs with -R")
parser.add_option('-b', '--backup', action='store_true',
help='make backups (see --suffix & --backup-dir)')
parser.add_option('--backup-dir', metavar="DIR",
help='make backups into this directory')
parser.add_option('--suffix', metavar="SUFFIX",
help='backup suffix (default ~ w/o --backup-dir)')
parser.add_option('-u', '--update', action='store_true',
help="update only (don't overwrite newer files)")
parser.add_option('--inplace', action='store_true',
help='update the destination files inplace')
parser.add_option('-K', '--keep-dirlinks', action='store_true',
help='treat symlinked dir on receiver as dir')
parser.add_option('-l', '--links', action='store_true',
help="copy symlinks as symlinks")
parser.add_option('-L', '--copy-links', action='store_true',
help="copy the referent of all symlinks")
parser.add_option('--copy-unsafe-links', action='store_true',
help='copy the referent of "unsafe" symlinks')
parser.add_option('--safe-links', action='store_true',
help='ignore "unsafe" symlinks')
parser.add_option('-H', '--hard-links', action='store_true',
help='preserve hard links')
parser.add_option('-p', '--perms', action='store_true',
help='preserve permissions')
parser.add_option('-o', '--owner', action='store_true',
help='preserve owner (root only)')
parser.add_option('-g', '--group', action='store_true',
help='preserve group')
parser.add_option('-D', '--devices', action='store_true',
help='preserve devices (root only)')
parser.add_option('-t', '--times', action='store_true',
help='preserve times')
parser.add_option('-S', '--sparse', action='store_true',
help='handle sparse files efficiently')
parser.add_option('-n', '--dry-run', action='store_true',
help='show what would have been transferred')
parser.add_option('-W', '--whole-file', action='store_true',
help='copy whole files, no incremental checks')
parser.add_option('--no-whole-file', action='store_true',
help='turn off --whole-file')
parser.add_option('-x', '--one-file-system', action='store_true',
help="don't cross filesystem boundaries")
parser.add_option('-B', '--block-size', metavar='SIZE',
help='force a fixed checksum block-size')
parser.add_option('-e', '--rsh', metavar='COMMAND',
help='specify the remote shell')
parser.add_option('--rsync-path', metavar='PATH',
help='specify path to rsync on the remote machine')
parser.add_option('--existing', action='store_true',
help='only update files that already exist')
parser.add_option('--ignore-existing', action='store_true',
help='ignore files that already exist on receiver')
parser.add_option('--delete', action='store_true',
help="delete files that don't exist on sender")
if '_wxOptParseCallback' in globals():
parser._wxOptParseCallback = _wxOptParseCallback
(options, args) = parser.parse_args()
|
| ceb8/astroquery | astroquery/cadc/tests/test_cadctap.py | Python | bsd-3-clause | 16,419 | 0.000731 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
=============
CadcClass TAP plus
=============
"""
from io import BytesIO
from urllib.parse import urlsplit, parse_qs
import os
import sys
from astropy.table import Table as AstroTable
from astropy.io.fits.hdu.hdulist import HDUList
from astropy.io.votable.tree import VOTableFile, Resource, Table, Field
from astropy.io.votable import parse
from astroquery.utils.commons import parse_coordinates, FileContainer
from astropy import units as u
from astropy.utils.exceptions import AstropyDeprecationWarning
import pytest
import tempfile
import requests
try:
pyvo_OK = True
from pyvo.auth import authsession
from astroquery.cadc import Cadc, conf
import astroquery.cadc.core as cadc_core
except ImportError:
pyvo_OK = False
pytest.skip("Install pyvo for the cadc module.", allow_module_level=True)
except AstropyDeprecationWarning as ex:
if str(ex) == \
'The astropy.vo.samp module has now been moved to astropy.samp':
print('AstropyDeprecationWarning: {}'.format(str(ex)))
else:
raise ex
try:
from unittest.mock import Mock, patch, PropertyMock
except ImportError:
pytest.skip("Install mock for the cadc tests.", allow_module_level=True)
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
@patch('astroquery.cadc.core.get_access_url',
Mock(side_effect=lambda x: 'https://some.url'))
@pytest.mark.skipif(not pyvo_OK, reason='not pyvo_OK')
def test_get_tables():
# default parameters
table_set = PropertyMock()
table_set.keys.return_value = ['table1', 'table2']
table_set.values.return_value = ['tab1val', 'tab2val', 'tab3val']
with patch('astroquery.cadc.core.pyvo.dal.TAPService', autospec=True) as tapservice_mock:
tapservice_mock.return_value.tables = table_set
cadc = Cadc()
assert len(cadc.get_tables(only_names=True)) == 2
assert len(cadc.get_tables()) == 3
@patch('astroquery.cadc.core.get_access_url',
Mock(side_effect=lambda x: 'https://some.url'))
@pytest.mark.skipif(not pyvo_OK, reason='not pyvo_OK')
def test_get_table():
table_set = PropertyMock()
tables_result = [Mock() for _ in range(3)]
tables_result[0].name = 'tab1'
tables_result[1].name = 'tab2'
tables_result[2].name = 'tab3'
table_set.values.return_value = tables_result
with patch('astroquery.cadc.core.pyvo.dal.TAPService', autospec=True) as tapservice_mock:
tapservice_mock.return_value.tables = table_set
cadc = Cadc()
assert cadc.get_table('tab2').name == 'tab2'
assert cadc.get_table('foo') is None
@patch('astroquery.cadc.core.get_access_url',
Mock(side_effect=lambda x: 'https://some.url'))
@pytest.mark.skipif(not pyvo_OK, reason='not pyvo_OK')
def test_get_collections():
cadc = Cadc()
def mock_run_query(query, output_format=None, maxrec=None,
output_file=None):
assert query == \
'select distinct collection, energy_emBand from caom2.EnumField'
assert output_format is None
assert maxrec is None
assert output_file is None
table = AstroTable(rows=[('CFHT', 'Optical'), ('CFHT', 'Infrared'),
('JCMT', 'Millimeter'), ('DAO', 'Optical'),
('DAO', 'Infrared')],
names=('collection', 'energy_emBand'))
return table
cadc.exec_sync = mock_run_query
result = cadc.get_collections()
assert len(result) == 3
assert 'CFHT' in result
assert 'JCMT' in result
assert 'DAO' in result
@patch('astroquery.cadc.core.get_access_url',
Mock(side_effect=lambda x: 'https://some.url'))
@pytest.mark.skipif(not pyvo_OK, reason='not pyvo_OK')
def test_load_async_job():
with patch('astroquery.cadc.core.pyvo.dal.TAPService', autospec=True) as tapservice_mock:
with patch('astroquery.cadc.core.pyvo.dal.AsyncTAPJob',
autospec=True) as tapjob_mock:
tapservice_mock.return_value.baseurl.return_value = 'https://www.example.com/tap'
mock_job = Mock()
mock_job.job_id = '123'
tapjob_mock.return_value = mock_job
cadc = Cadc()
jobid = '123'
job = cadc.load_async_job(jobid)
assert job.job_id == '123'
@pytest.mark.skip('Disabled until job listing available in pyvo')
@patch('astroquery.cadc.core.get_access_url',
Mock(side_effect=lambda x: 'https://some.url'))
@pytest.mark.skipif(not pyvo_OK, reason='not pyvo_OK')
def test_list_async_jobs():
with patch('astroquery.cadc.core.pyvo.dal.TAPService', autospec=True) as tapservice_mock:
tapservice_mock.return_value.baseurl.return_value = 'https://www.example.com/tap'
cadc = Cadc()
cadc.list_async_jobs()
@patch('astroquery.cadc.core.get_access_url',
Mock(side_effect=lambda x, y=None: 'https://some.url'))
@pytest.mark.skipif(not pyvo_OK, reason='not pyvo_OK')
def test_auth():
    # the Cadc() will cause a remote data call to TAP service capabilities
# To avoid this, use an anonymous session and replace it with an
# auth session later
cadc = Cadc(auth_session=requests.Session())
cadc.cadctap._session = authsession.AuthSession()
user = 'user'
password = 'password'
cert = 'cert'
with pytest.raises(AttributeError):
cadc.login(None, None, None)
with pytest.raises(AttributeError):
cadc.login(user=user)
with pytest.raises(AttributeError):
cadc.login(password=password)
cadc.login(certificate_file=cert)
assert cadc.cadctap._session.credentials.get(
'ivo://ivoa.net/sso#tls-with-certificate').cert == cert
# reset and try with user password/cookies
cadc.cadctap._session = authsession.AuthSession()
post_mock = Mock()
cookie = 'ABC'
mock_resp = Mock()
mock_resp.text = cookie
post_mock.return_value.cookies = requests.cookies.RequestsCookieJar()
post_mock.return_value = mock_resp
cadc._request = post_mock
cadc.login(user=user, password=password)
assert cadc.cadctap._session.credentials.get(
'ivo://ivoa.net/sso#cookie').cookies[cadc_core.CADC_COOKIE_PREFIX] == \
'"{}"'.format(cookie)
# make sure that caps is reset at the end of the test
@patch('astroquery.cadc.core.get_access_url.caps', {})
@pytest.mark.skipif(not pyvo_OK, reason='not pyvo_OK')
def test_get_access_url():
# testing implementation of requests.get method:
def get(url, **kwargs):
class ServiceResponse:
def __init__(self):
self.text = 'ivo://cadc.nrc.ca/mytap = http://my.org/mytap'
def raise_for_status(self):
pass
class CapabilitiesResponse:
def __init__(self):
caps_file = data_path('tap_caps.xml')
self.text = open(caps_file, 'r').read()
def raise_for_status(self):
pass
if url == conf.CADC_REGISTRY_URL:
return ServiceResponse()
else:
return CapabilitiesResponse()
# now use it in testing
with patch.object(cadc_core.requests, 'get', get):
cadc_core.get_access_url.caps = {}
assert 'http://my.org/mytap' == cadc_core.get_access_url('mytap')
assert 'https://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/argus/tables' == \
cadc_core.get_access_url('mytap',
'ivo://ivoa.net/std/VOSI#tables-1.1')
@patch('astroquery.cadc.core.get_access_url',
Mock(side_effect=lambda x, y=None: 'https://some.url'))
@pytest.mark.skipif(not pyvo_OK, reason='not pyvo_OK')
def test_get_data_urls():
def get(*args, **kwargs):
class CapsResponse:
def __init__(self):
self.status_code = 200
self.content = b''
def raise_for_status(self):
pass
return CapsResponse()
class Result:
pass
file1 = Mock()
file1.semantics = '#this'
file1.acc
|
OptoFidelity/cerbero
|
cerbero/commands/bootstrap.py
|
Python
|
lgpl-2.1
| 1,583
| 0.001263
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from cerbero.commands import Command, register_command
from cerbero.utils import N_, _, ArgparseArgument
from cerbero.bootstrap.bootstrapper import Bootstrapper
class Bootstrap(Command):
doc = N_('Bootstrap the build system installing all the dependencies')
name = 'bootstrap'
def __init__(self):
args = [
ArgparseArgument('--build-tools-only', action='store_true',
default=False, help=_('only bootstrap the build tools'))]
Command.__init__(self, args)
def run(self, config, args):
bootstrappers = Bootstrapper(config, args.build_tools_only)
for bootstrapper in bootstrappers:
bootstrapper.start()
register_command(Bootstrap)
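# Illustrative note (assumption, not part of the original file): once
# registered, this command is invoked from the cerbero CLI roughly as
# `cerbero bootstrap [--build-tools-only]`; run() then builds a Bootstrapper
# for the active config and starts each returned bootstrapper in turn.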
|
naruhodo-ryuichi/python-xPLpy
|
xPLpy/xPLConfig.py
|
Python
|
agpl-3.0
| 242
| 0.004132
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
__author__ = "naruhodo-ryuichi"
###############GLOBAL CONFIGURATION
# name of xpl host
localVendor = "naruhodo-ryuichi"
# size of receiving buffer in bytes
bufferSize = 1500
|
lngauthier/LEAA.6
|
python/user_input.py
|
Python
|
gpl-2.0
| 707
| 0.015559
|
#
# title : Interface
#
import os
import time
os.system('clear')
print('#### ULTRA SECRET BOOT CAMP - NIS | ABC | NDSFLT | XEX | ####')
time.sleep(1)
print('Welcome to our organization, Agent 24')
time.sleep(1)
print('For our records, please answer truthfully to the following questions')
time.sleep(1)
person = raw_input('Enter your name: ')
weapon = raw_input('What is your favourite weapon: ')
age = raw_input('How old are you: ')
user = { "name":person, "weapon of choice":weapon, "age":age }
os.system('clear')
print("Do you confirm all of these informations?")
print("Please note that if you fail to comply, you could be prosecuted")
for key, value in user.items():
print(key + " : " + value)
|
RedHatInsights/insights-core
|
insights/tests/datasources/test_cloud_init.py
|
Python
|
apache-2.0
| 3,823
| 0.001046
|
import json
import pytest
from mock.mock import Mock
from insights.core import filters
from insights.core.dr import SkipComponent
from insights.core.spec_factory import DatasourceProvider
from insights.specs import Specs
from insights.specs.datasources.cloud_init import cloud_cfg, LocalSpecs
CLOUD_CFG = """
users:
- name: demo
ssh-authorized-keys:
- key_one
- key_two
passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
ssh_deletekeys: {value}
network:
version: 1
config:
- type: physical
name: eth0
subnets:
- type: dhcp
- type: dhcp6
""".strip()
CLOUD_CFG_BAD_INDENT = """
#cloud-config
users:
- name: demo
ssh-authorized-keys:
- key_one
- key_two
passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
ssh_deletekeys: 1
network:
config: disabled
system_info:
default_user:
name: user2
plain_text_passwd: 'someP@assword'
home: /home/user2
""".strip()
CLOUD_CFG_BAD = """
users
name -demo
ssh-authorized-keys
- key_one
- key_two
""".strip()
CLOUD_CFG_JSON = {
"network": {
"version": 1,
"config": [
{
"type": "physical",
"name": "eth0",
"subnets": [
{
"type": "dhcp"
},
{
"type": "dhcp6"
}
]
}
]
},
"ssh_deletekeys": 1,
}
RELATIVE_PATH = '/etc/cloud/cloud.cfg'
def setup_function(func):
if Specs.cloud_cfg in filters._CACHE:
del filters._CACHE[Specs.cloud_cfg]
if Specs.cloud_cfg in filters.FILTERS:
del filters.FILTERS[Specs.cloud_cfg]
if func is test_cloud_cfg:
filters.add_filter(Specs.cloud_cfg, ['ssh_deletekeys', 'network', 'debug'])
if func is test_cloud_cfg_no_filter:
filters.add_filter(Specs.cloud_cfg, [])
elif func is test_cloud_cfg_bad:
filters.add_filter(Specs.cloud_cfg, ['not_found'])
def teardown_function(func):
if func is test_cloud_cfg_bad or func is test_cloud_cfg:
del filters.FILTERS[Specs.cloud_cfg]
@pytest.mark.parametrize("ssh_deletekeys", [0, 1])
def test_cloud_cfg(ssh_deletekeys):
cloud_cfg_string = CLOUD_CFG.format(value=ssh_deletekeys)
cloud_cfg_dict = CLOUD_CFG_JSON.copy()
cloud_cfg_dict["ssh_deletekeys"] = ssh_deletekeys
cloud_init_file = Mock()
cloud_init_file.content = cloud_cfg_string.splitlines()
broker = {LocalSpecs.cloud_cfg_input: cloud_init_file}
result = cloud_cfg(broker)
assert result is not None
assert isinstance(result, DatasourceProvider)
expected = DatasourceProvider(content=json.dumps(cloud_cfg_dict), relative_path=RELATIVE_PATH)
assert result.content == expected.content
assert result.relative_path == expected.relative_path
def test_cloud_cfg_no_filter():
cloud_init_file = Mock()
cloud_init_file.content = CLOUD_CFG.format(value=1).splitlines()
broker = {LocalSpecs.cloud_cfg_input: cloud_init_file}
    with pytest.raises(SkipComponent) as e:
cloud_cfg(broker)
assert 'SkipComponent' in str(e)
def test_cloud_cfg_bad():
cloud_init_file = Mock()
cloud_init_file.content = CLOUD_CFG_BAD.splitlines()
broker = {LocalSpecs.cloud_cfg_input: cloud_init_file}
with pytest.raises(SkipComponent) as e:
cloud_cfg(broker)
assert 'Invalid YAML format' in str(e)
cloud_init_file.content = CLOUD_CFG_BAD_INDENT.splitlines()
broker = {LocalSpecs.cloud_cfg_input: cloud_init_file}
with pytest.raises(SkipComponent) as e:
cloud_cfg(broker)
assert 'Unexpected exception' in str(e)
|
npyoung/python-neo
|
neo/core/analogsignalarray.py
|
Python
|
bsd-3-clause
| 11,882
| 0.000168
|
# -*- coding: utf-8 -*-
'''
This module implements :class:`AnalogSignalArray`, an array of analog signals.
:class:`AnalogSignalArray` derives from :class:`BaseAnalogSignal`, from
:module:`neo.core.analogsignal`.
:class:`BaseAnalogSignal` inherits from :class:`quantites.Quantity`, which
inherits from :class:`numpy.array`.
Inheritance from :class:`numpy.array` is explained here:
http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
In brief:
* Initialization of a new object from constructor happens in :meth:`__new__`.
This is where user-specified attributes are set.
* :meth:`__array_finalize__` is called for all new objects, including those
created by slicing. This is where attributes are copied over from
the old object.
'''
# needed for python 3 compatibility
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
import quantities as pq
from neo.core.analogsignal import (BaseAnalogSignal, AnalogSignal,
_get_sampling_rate)
from neo.core.baseneo import BaseNeo, merge_annotations
logger = logging.getLogger("Neo")
class AnalogSignalArray(BaseAnalogSignal):
'''
Several continuous analog signals
A representation of several continuous, analog signals that
have the same duration, sampling rate and start time.
Basically, it is a 2D array like AnalogSignal: dim 0 is time, dim 1 is
channel index
Inherits from :class:`quantities.Quantity`, which in turn inherits from
:class:`numpy.ndarray`.
*Usage*::
>>> from neo.core import AnalogSignalArray
>>> import quantities as pq
>>>
>>> sigarr = AnalogSignalArray([[1, 2, 3], [4, 5, 6]], units='V',
... sampling_rate=1*pq.Hz)
>>>
>>> sigarr
<AnalogSignalArray(array([[1, 2, 3],
[4, 5, 6]]) * mV, [0.0 s, 2.0 s], sampling rate: 1.0 Hz)>
>>> sigarr[:,1]
<AnalogSignal(array([2, 5]) * V, [0.0 s, 2.0 s],
sampling rate: 1.0 Hz)>
>>> sigarr[1, 1]
array(5) * V
*Required attributes/properties*:
        :signal: (quantity array 2D, numpy array 2D, or list (data, channel))
The data itself.
:units: (quantity units) Required if the signal is a list or NumPy
array, not if it is a :class:`Quantity`
:t_start: (quantity scalar) Time when signal begins
:sampling_rate: *or* :sampling_period: (quantity scalar) Number of
samples per unit time or
interval between two samples.
If both are specified, they are
checked for consistency.
*Recommended attributes/properties*:
:name: (str) A label for the dataset.
:description: (str) Text description.
:file_origin: (str) Filesystem path or URL of the original data file.
:channel_index: (numpy array 1D dtype='i') You can use this to order
the columns of the signal in any way you want. It should have the
same number of elements as the signal has columns.
:class:`AnalogSignal` and :class:`Unit` objects can be given
indexes as well so related objects can be linked together.
*Optional attributes/properties*:
:dtype: (numpy dtype or str) Override the dtype of the signal array.
:copy: (bool) True by default.
Note: Any other additional arguments are assumed to be user-specific
metadata and stored in :attr:`annotations`.
*Properties available on this object*:
:sampling_rate: (quantity scalar) Number of samples per unit time.
(1/:attr:`sampling_period`)
:sampling_period: (quantity scalar) Interval between two samples.
            (1/:attr:`sampling_rate`)
:duration: (Quantity) Signal duration, read-only.
(size * :attr:`sampling_period`)
:t_stop: (quantity scalar) Time when signal ends, read-only.
(:attr:`t_start` + :attr:`duration`)
:times: (quantity 1D) The time points of each sample of the signal,
read-only.
(:attr:`t_start` + arange(:attr:`shape`[0])/:attr:`sampling_rate`)
:channel_indexes: (numpy array 1D dtype='i') The same as
:attr:`channel_index`, read-only.
*Slicing*:
:class:`AnalogSignalArray` objects can be sliced. When taking a single
row (dimension 1, e.g. [:, 0]), a :class:`AnalogSignal` is returned.
When taking a single element, a :class:`~quantities.Quantity` is
returned. Otherwise a :class:`AnalogSignalArray` (actually a view) is
returned, with the same metadata, except that :attr:`t_start`
is changed if the start index along dimension 1 is greater than 1.
Getting a single item returns a :class:`~quantity.Quantity` scalar.
*Operations available on this object*:
== != + * /
'''
_single_parent_objects = ('Segment', 'RecordingChannelGroup')
_quantity_attr = 'signal'
_necessary_attrs = (('signal', pq.Quantity, 2),
('sampling_rate', pq.Quantity, 0),
('t_start', pq.Quantity, 0))
_recommended_attrs = ((('channel_index', np.ndarray, 1, np.dtype('i')),) +
BaseNeo._recommended_attrs)
def __new__(cls, signal, units=None, dtype=None, copy=True,
t_start=0 * pq.s, sampling_rate=None, sampling_period=None,
name=None, file_origin=None, description=None,
channel_index=None, **annotations):
'''
Constructs new :class:`AnalogSignalArray` from data.
This is called whenever a new class:`AnalogSignalArray` is created from
the constructor, but not when slicing.
'''
if (isinstance(signal, pq.Quantity)
and units is not None
and units != signal.units):
signal = signal.rescale(units)
if not units and hasattr(signal, "units"):
units = signal.units
obj = pq.Quantity.__new__(cls, signal, units=units, dtype=dtype,
copy=copy)
obj.t_start = t_start
obj.sampling_rate = _get_sampling_rate(sampling_rate, sampling_period)
obj.channel_index = channel_index
obj.segment = None
obj.recordingchannelgroup = None
return obj
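    # Illustrative behaviour (assumption, not part of the original class): if
    # `signal` is already a Quantity in mV and units='V' is requested, the data
    # is rescaled on construction, e.g.
    #     AnalogSignalArray(np.array([[1000., 2000.]]) * pq.mV, units='V',
    #                       sampling_rate=1 * pq.Hz)
    # stores the values [[1., 2.]] in volts.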
def __init__(self, signal, units=None, dtype=None, copy=True,
t_start=0 * pq.s, sampling_rate=None, sampling_period=None,
name=None, file_origin=None, description=None,
channel_index=None, **annotations):
'''
Initializes a newly constructed :class:`AnalogSignalArray` instance.
'''
BaseNeo.__init__(self, name=name, file_origin=file_origin,
description=description, **annotations)
@property
def channel_indexes(self):
'''
The same as :attr:`channel_index`.
'''
return self.channel_index
def __getslice__(self, i, j):
'''
Get a slice from :attr:`i` to :attr:`j`.
Doesn't get called in Python 3, :meth:`__getitem__` is called instead
'''
return self.__getitem__(slice(i, j))
def __getitem__(self, i):
'''
Get the item or slice :attr:`i`.
'''
obj = super(BaseAnalogSignal, self).__getitem__(i)
if isinstance(i, int):
return obj
elif isinstance(i, tuple):
j, k = i
if isinstance(k, int):
                if isinstance(j, slice):  # extract an AnalogSignal
obj = AnalogSignal(obj, sampling_rate=self.sampling_rate)
if j.start:
obj.t_start = (self.t_start +
j.start * self.sampling_period)
# return a Quantity (for some reason quantities does not
# return a Quantity in this case)
elif isinstance(j, int):
|
foursquare/pants
|
tests/python/pants_test/build_graph/test_build_file_aliases.py
|
Python
|
apache-2.0
| 5,509
| 0.00599
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
from pants.build_graph.address import Address
from pants.build_graph.build_file_aliases import BuildFileAliases, TargetMacro
from pants.build_graph.mutable_build_graph import MutableBuildGraph
from pants.build_graph.target import Target
class BuildFileAliasesTest(unittest.TestCase):
class RedTarget(Target):
pass
class BlueTarget(Target):
pass
def setUp(self):
self.target_macro_factory = TargetMacro.Factory.wrap(
lambda ctx: ctx.create_object(self.BlueTarget,
type_alias='jill',
name=os.path.basename(ctx.rel_path)),
self.BlueTarget, self.RedTarget)
def test_create(self):
self.assertEqual(BuildFileAliases(targets={},
objects={},
context_aware_object_factories={}),
BuildFileAliases())
targets = {'jake': Target, 'jill': self.target_macro_factory}
self.assertEqual(BuildFileAliases(targets=targets,
objects={},
context_aware_object_factories={}),
BuildFileAliases(targets=targets))
objects = {'jane': 42}
self.assertEqual(BuildFileAliases(targets={},
objects=objects,
context_aware_object_factories={}),
BuildFileAliases(objects=objects))
factories = {'jim': lambda ctx: 'bob'}
self.assertEqual(BuildFileAliases(targets={},
objects={},
context_aware_object_factories=factories),
BuildFileAliases(context_aware_object_factories=factories))
self.assertEqual(BuildFileAliases(targets=targets,
objects=objects,
context_aware_object_factories={}),
BuildFileAliases(targets=targets, objects=objects))
    self.assertEqual(BuildFileAliases(targets=targets,
objects={},
context_aware_object_factories=factories),
BuildFileAliases(targets=targets,
context_aware_object_factories=factories))
self.assertEqual(BuildFileAliases(targets={},
objects=objects,
context_aware_object_factories=factories),
BuildFileAliases(objects=objects,
context_aware_object_factories=factories))
self.assertEqual(BuildFileAliases(targets=targets,
objects=objects,
context_aware_object_factories=factories),
BuildFileAliases(targets=targets,
objects=objects,
context_aware_object_factories=factories))
def test_create_bad_targets(self):
with self.assertRaises(TypeError):
BuildFileAliases(targets={'fred': object()})
target = Target('fred', Address.parse('a:b'), MutableBuildGraph(address_mapper=None))
with self.assertRaises(TypeError):
BuildFileAliases(targets={'fred': target})
def test_create_bad_objects(self):
with self.assertRaises(TypeError):
BuildFileAliases(objects={'jane': Target})
with self.assertRaises(TypeError):
BuildFileAliases(objects={'jane': self.target_macro_factory})
def test_bad_context_aware_object_factories(self):
with self.assertRaises(TypeError):
BuildFileAliases(context_aware_object_factories={'george': 1})
def test_merge(self):
e_factory = lambda ctx: 'e'
f_factory = lambda ctx: 'f'
first = BuildFileAliases(targets={'a': Target},
objects={'d': 2},
context_aware_object_factories={'e': e_factory})
second = BuildFileAliases(targets={'b': self.target_macro_factory},
objects={'c': 1, 'd': 42},
context_aware_object_factories={'f': f_factory})
expected = BuildFileAliases(
# nothing to merge
targets={'a': Target, 'b': self.target_macro_factory},
# second overrides first
objects={'c': 1, 'd': 42},
# combine
context_aware_object_factories={'e': e_factory, 'f': f_factory})
self.assertEqual(expected, first.merge(second))
def test_target_types(self):
aliases = BuildFileAliases(targets={'jake': Target, 'jill': self.target_macro_factory})
self.assertEqual({'jake': Target}, aliases.target_types)
def test_target_macro_factories(self):
aliases = BuildFileAliases(targets={'jake': Target, 'jill': self.target_macro_factory})
self.assertEqual({'jill': self.target_macro_factory}, aliases.target_macro_factories)
def test_target_types_by_alias(self):
aliases = BuildFileAliases(targets={'jake': Target, 'jill': self.target_macro_factory})
self.assertEqual({'jake': {Target}, 'jill': {self.BlueTarget, self.RedTarget}},
aliases.target_types_by_alias)
|
brianb/mdbtools
|
api_docx/pre_build.py
|
Python
|
gpl-2.0
| 969
| 0.004128
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
ROOT_PATH = os.path.abspath( os.path.join(os.path.dirname( __file__ ), ".."))
files = os.listdir(os.path.join(ROOT_PATH, "doc"))
index = []
for fname in sorted(files):
if not fname.endswith(".txt"):
continue
cmd_name = fname[:-4]
with open(os.path.join(ROOT_PATH, "doc", fname), "r") as f:
contents = f.read()
f.close()
    out_file = os.path.join(ROOT_PATH, "temp-man-pages", "%s.md" % cmd_name)
with open(out_file, "w") as f:
s = "# %s {#%s}\n\n```\n%s\n```\n" % (cmd_name, cmd_name, contents)
f.write(s)
f.close()
index.append(cmd_name)
print(" wrote %s" % out_file)
out_file = os.path.join(ROOT_PATH, "temp-man-pages", "index.md")
s = "Man Pages {#man-pages}\n"
s += "=========================\n\n"
for page in index:
s += "- @subpage %s\n" % page
s += "\n"
with open(out_file, "w") as f:
f.write(s)
f.close()
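# Illustrative output (assumption, not part of the original script): for a
# hypothetical doc/mdb-ver.txt, the loop above writes temp-man-pages/mdb-ver.md
# beginning with the line "# mdb-ver {#mdb-ver}" followed by the original text
# in a fenced block, and index.md gains a "- @subpage mdb-ver" entry.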
|
iglpdc/nipype
|
nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py
|
Python
|
bsd-3-clause
| 863
| 0.005794
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ...testing import assert_equal
from ..misc import CalculateNormalizedMoments
def test_CalculateNormalizedMoments_inputs():
input_map = dict(moment=dict(mandatory=True,
),
timeseries_file=dict(mandatory=True,
),
)
inputs = CalculateNormalizedMoments.input_spec()
for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_CalculateNormalizedMoments_outputs():
output_map = dict(moments=dict(),
)
outputs = CalculateNormalizedMoments.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
Vagab0nd/SiCKRAGE
|
lib3/jwt/compat.py
|
Python
|
gpl-3.0
| 1,624
| 0
|
"""
The `compat` module provides support for backwards compatibility with older
versions of python, and compatibility wrappers around optional packages.
"""
# flake8: noqa
import hmac
import struct
import sys
PY3 = sys.version_info[0] == 3
if PY3:
text_type = str
binary_type = bytes
else:
text_type = unicode
binary_type = str
string_types = (text_type, binary_type)
try:
# Importing ABCs from collections will be removed in PY3.8
from collections.abc import Iterable, Mapping
except ImportError:
from collections import Iterable, Mapping
try:
constant_time_compare = hmac.compare_digest
except AttributeError:
# Fallback for Python < 2.7
def constant_time_compare(val1, val2):
"""
Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match.
"""
if len(val1) != len(val2):
return False
result = 0
for x, y in zip(val1, val2):
result |= ord(x) ^ ord(y)
return result == 0
# Use int.to_bytes if it exists (Python 3)
if getattr(int, 'to_bytes', None):
def bytes_from_int(val):
remaining = val
byte_length = 0
while remaining != 0:
            remaining = remaining >> 8
byte_length += 1
return val.to_bytes(byte_length, 'big', signed=False)
else:
def bytes_from_int(val):
buf = []
while val:
val, remainder = divmod(val, 256)
buf.append(remainder)
buf.reverse()
        return struct.pack('%sB' % len(buf), *buf)
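# Illustrative usage (assumption, not part of the original module): both
# implementations of bytes_from_int produce the same big-endian byte string,
# and constant_time_compare only returns True for equal inputs, e.g.
#     bytes_from_int(65537)                  -> b'\x01\x00\x01'
#     constant_time_compare('abc', 'abc')    -> True
#     constant_time_compare('abc', 'abd')    -> False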
|
nagyistoce/eutester
|
testcases/cloud_admin/4-2/euca9959.py
|
Python
|
bsd-2-clause
| 3,192
| 0.004386
|
'''
Created on : 10/28/13
Author : mmunn
Unit test : EUCA-9959 "MalformedPolicyDocument: Policy document should not specify a principal." should be returned
setUp : Install Credentials,
test : create role with MalformedPolicyDocument and make sure an error message is returned instead of the policy text
tearDown : Removes Credentials, terminates instance
cloud.conf:( place in same directory as this test)
IP ADDRESS CENTOS 6.3 64 BZR [CC00 CLC SC00 WS]
IP ADDRESS CENTOS 6.3 64 BZR [NC00]
'''
import unittest
import shutil
import os
from eucaops import Eucaops
class Euca(unittest.TestCase):
def setUp(self):
self.conf = "../cloud.conf"
self.tester = Eucaops(config_file=self.conf, password="foobar")
self.doAuth()
self.STARTC = '\033[1m\033[1m\033[42m'
self.ENDC = '\033[0m'
self.account = "9959-account"
self.groupname = "9959-group"
self.username ="9959-user"
def tearDown(self):
self.tester.delete_account(self.account, recursive=True)
self.tester.sys('rm -rf role-describe-instances-principle.json')
self.tester.sys('rm -rf role-trust.json')
self.tester.cleanup_artifacts()
self.tester.delete_keypair(self.keypair)
self.tester.local("rm " + self.keypair.name + ".pem")
shutil.rmtree(self.tester.credpath)
def runSysCmd(self, cmd):
self.source = "source " + self.tester.credpath + "/eucarc && "
self.out = self.tester.sys(self.source + cmd)
def doAuth(self):
self.keypair = self.tester.add_keypair()
self.group = self.tester.add_group()
self.tester.authorize_group(self.group)
def test(self):
# create account, group and user
self.tester.create_account(account_name=self.account)
self.tester.create_group(self.groupname, "/", self.account)
self.tester.create_user(self.username, "/", self.account)
self.tester.add_user_to_group(self.groupname,self.username,self.account)
# copy json to clc
self.clc_ip = str(self.tester.clc.hostname)
os.system('scp role-describe-instances-principle.json root@' + self.clc_ip + ':role-describe-instances-principle.json')
        os.system('scp role-trust.json root@' + self.clc_ip + ':role-trust.json')
# create user role
        self.runSysCmd("euare-rolecreate -r describe-instances -f role-trust.json --region " + self.account + "-" + self.username)
self.runSysCmd("euare-roleuploadpolicy -r describe-instances -p describe-instances-policy -f role-describe-instances-principle.json --region " + self.account + "-" + self.username)
print self.STARTC + "Success " + str(self.out) + " ENABLED " + self.ENDC
# Check to see that the error message was thrown and not the text from the json file.
count = str(self.out).count("Policy document should not specify a principal.")
if count > 0 :
self.tester.debug("SUCCESS")
pass
else:
self.fail("FAILED : correct error message not thrown")
if __name__ == "__main__":
unittest.main()
|
Onager/plaso
|
plaso/engine/extractors.py
|
Python
|
apache-2.0
| 20,405
| 0.006273
|
# -*- coding: utf-8 -*-
"""The extractor class definitions.
An extractor is a class used to extract information from "raw" data.
"""
import copy
import pysigscan
from dfvfs.helpers import file_system_searcher
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.lib import errors as dfvfs_errors
from dfvfs.resolver import resolver as path_spec_resolver
from plaso.engine import logger
from plaso.lib import errors
from plaso.parsers import interface as parsers_interface
from plaso.parsers import manager as parsers_manager
class EventExtractor(object):
"""Event extractor.
An event extractor extracts events from event sources.
"""
_PARSE_RESULT_FAILURE = 1
_PARSE_RESULT_SUCCESS = 2
_PARSE_RESULT_UNSUPPORTED = 3
def __init__(self, parser_filter_expression=None):
"""Initializes an event extractor.
Args:
parser_filter_expression (Optional[str]): parser filter expression,
where None represents all parsers and plugins.
A parser filter expression is a comma separated value string that
denotes which parsers and plugins should be used. See
filters/parser_filter.py for details of the expression syntax.
"""
super(EventExtractor, self).__init__()
self._file_scanner = None
self._filestat_parser = None
self._formats_with_signatures = None
self._mft_parser = None
self._non_sigscan_parser_names = None
self._parsers = None
self._parsers_profiler = None
self._usnjrnl_parser = None
self._InitializeParserObjects(
parser_filter_expression=parser_filter_expression)
def _CheckParserCanProcessFileEntry(self, parser, file_entry):
"""Determines if a parser can process a file entry.
Args:
file_entry (dfvfs.FileEntry): file entry.
parser (BaseParser): parser.
Returns:
bool: True if the file entry can be processed by the parser object.
"""
for filter_object in parser.FILTERS:
if filter_object.Match(file_entry):
return True
return False
  def _GetSignatureMatchParserNames(self, file_object):
    """Determines if a file-like object matches one of the known signatures.
Args:
file_object (file): file-like object whose contents will be checked
for known signatures.
Returns:
list[str]: parser names for which the contents of the file-like object
matches their known signatures.
"""
parser_names = []
scan_state = pysigscan.scan_state()
self._file_scanner.scan_file_object(scan_state, file_object)
for scan_result in iter(scan_state.scan_results):
format_specification = (
self._formats_with_signatures.GetSpecificationBySignature(
scan_result.identifier))
if format_specification.identifier not in parser_names:
parser_names.append(format_specification.identifier)
return parser_names
def _InitializeParserObjects(self, parser_filter_expression=None):
"""Initializes the parser objects.
Args:
parser_filter_expression (Optional[str]): parser filter expression,
where None represents all parsers and plugins.
A parser filter expression is a comma separated value string that
denotes which parsers and plugins should be used. See
filters/parser_filter.py for details of the expression syntax.
"""
self._formats_with_signatures, non_sigscan_parser_names = (
parsers_manager.ParsersManager.GetFormatsWithSignatures(
parser_filter_expression=parser_filter_expression))
self._non_sigscan_parser_names = []
for parser_name in non_sigscan_parser_names:
if parser_name not in ('filestat', 'usnjrnl'):
self._non_sigscan_parser_names.append(parser_name)
self._file_scanner = parsers_manager.ParsersManager.CreateSignatureScanner(
self._formats_with_signatures)
self._parsers = parsers_manager.ParsersManager.GetParserObjects(
parser_filter_expression=parser_filter_expression)
active_parser_names = ', '.join(sorted(self._parsers.keys()))
logger.debug('Active parsers: {0:s}'.format(active_parser_names))
self._filestat_parser = self._parsers.get('filestat', None)
if 'filestat' in self._parsers:
del self._parsers['filestat']
self._mft_parser = self._parsers.get('mft', None)
self._usnjrnl_parser = self._parsers.get('usnjrnl', None)
if 'usnjrnl' in self._parsers:
del self._parsers['usnjrnl']
def _ParseDataStreamWithParser(
self, parser_mediator, parser, file_entry, data_stream_name):
"""Parses a data stream of a file entry with a specific parser.
Args:
parser_mediator (ParserMediator): parser mediator.
parser (BaseParser): parser.
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): data stream name.
Raises:
RuntimeError: if the file-like object is missing.
"""
file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
if not file_object:
raise RuntimeError('Unable to retrieve file-like object from file entry.')
try:
self._ParseFileEntryWithParser(
parser_mediator, parser, file_entry, file_object=file_object)
finally:
file_object.close()
def _ParseFileEntryWithParser(
self, parser_mediator, parser, file_entry, file_object=None):
"""Parses a file entry with a specific parser.
Args:
parser_mediator (ParserMediator): parser mediator.
parser (BaseParser): parser.
file_entry (dfvfs.FileEntry): file entry.
file_object (Optional[file]): file-like object to parse.
If not set the parser will use the parser mediator to open
the file entry's default data stream as a file-like object.
Returns:
int: parse result which is _PARSE_RESULT_FAILURE if the file entry
could not be parsed, _PARSE_RESULT_SUCCESS if the file entry
successfully was parsed or _PARSE_RESULT_UNSUPPORTED when
UnableToParseFile was raised.
Raises:
TypeError: if parser object is not a supported parser type.
"""
if not isinstance(parser, (
parsers_interface.FileEntryParser, parsers_interface.FileObjectParser)):
raise TypeError('Unsupported parser object type.')
parser_mediator.ClearParserChain()
reference_count = (
parser_mediator.resolver_context.GetFileObjectReferenceCount(
file_entry.path_spec))
parser_mediator.SampleStartTiming(parser.NAME)
try:
if isinstance(parser, parsers_interface.FileEntryParser):
parser.Parse(parser_mediator)
elif isinstance(parser, parsers_interface.FileObjectParser):
parser.Parse(parser_mediator, file_object)
result = self._PARSE_RESULT_SUCCESS
# We catch IOError so we can determine the parser that generated the error.
except (IOError, dfvfs_errors.BackEndError) as exception:
display_name = parser_mediator.GetDisplayName(file_entry)
logger.warning(
'{0:s} unable to parse file: {1:s} with error: {2!s}'.format(
parser.NAME, display_name, exception))
result = self._PARSE_RESULT_FAILURE
except errors.UnableToParseFile as exception:
display_name = parser_mediator.GetDisplayName(file_entry)
logger.debug(
'{0:s} unable to parse file: {1:s} with error: {2!s}'.format(
parser.NAME, display_name, exception))
result = self._PARSE_RESULT_UNSUPPORTED
finally:
parser_mediator.SampleStopTiming(parser.NAME)
parser_mediator.SampleMemoryUsage(parser.NAME)
new_reference_count = (
parser_mediator.resolver_context.GetFileObjectReferenceCount(
file_entry.path_spec))
if reference_count != new_reference_count:
display_name = parser_mediator.GetDisplayName(file_entry)
logger.warning((
'[{0:s}] did not explicitly close file-object for file: '
'{1:s}.').format(parser.NAME, display_name))
return result
def _ParseFileEntryWithParsers(
self, parser_mediator, parser_names, file_entry, file_object=None):
"""Parses a file entry
|
lorin/umdinst
|
test/testidentifysourcefiles.py
|
Python
|
bsd-3-clause
| 1,396
| 0.012178
|
import unittest
import sys
import os
import errno
import commands
from xml.dom import minidom
sys.path.append('bin')
from umdinst import wrap
from testsuccessfulcompiledata import getfield, timezonecheck, xmlifystring
from testcapturecompile import programcheck
def createemptyfile(fname):
    """Creates an empty file. Throws an exception if the file already exists"""
if os.access(fname,os.R_OK):
raise ValueError,"File already exists"
f = open(fname,'w')
f.close()
class TestIdentifySourcefiles(unittest.TestCase):
def setUp(self):
# Create some source files
createemptyfile("foo.c")
createemptyfile("bar.cpp")
createemptyfile("baz.upc")
createemptyfile("quux.f77")
self.args = "foo.c bar.cpp baz.upc quux.f77 others x.o y.exe -Dgoomba".split()
self.argsdasho = "foo.c -o baz.upc bar.cpp".split()
def testBasic(self):
files = wrap.identify_sourcefiles(self.args)
self.assertEquals(files,
['foo.c','bar.cpp','baz.upc','quux.f77'])
def testWithDashO(self):
files = wrap.identify_sourcefiles(self.argsdasho)
self.assertEquals(files,['foo.c','bar.cpp'])
def tearDown(self):
os.remove("foo.c")
os.remove("bar.cpp")
os.remove("baz.upc")
os.remove("quux.f77")
if __name__ == '__main__':
unittest.main()
|
davidovitch/f90wrap
|
examples/example-arrays/tests.py
|
Python
|
gpl-2.0
| 1,524
| 0.002625
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 28 15:19:03 2015
@author: David Verelst
"""
from __future__ import print_function
import unittest
import numpy as np
import ExampleArray as lib
class TestExample(unittest.TestCase):
def setUp(self):
pass
def do_array_stuff(self, ndata):
x = np.arange(ndata)
y = np.arange(ndata)
br = np.zeros((ndata,), order='F')
co = np.zeros((4, ndata), order='F')
lib.library.do_array_stuff(n=ndata, x=x, y=y, br=br, co=co)
for k in range(4):
            np.testing.assert_allclose(x*y + x, co[k, :])
np.testing.assert_allclose(x/(y+1.0), br)
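    # Illustrative expectation (assumption, not part of the original test): for
    # ndata=3, x = y = [0, 1, 2], so every row of co should equal
    # x*y + x = [0, 2, 6] and br should equal x/(y+1) = [0.0, 0.5, 0.666...].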
def test_basic(self):
self.do_array_stuff(1000)
def test_verybig_array(self):
self.do_array_stuff(1000000)
def test_square(self):
n = 100000
x = np.arange(n, dtype=float)
y = np.arange(n, dtype=float)
br = np.zeros((n,), order='F')
co = np.zeros((4, n), order='F')
lib.library.do_array_stuff(n=n, x=x, y=y, br=br, co=co)
lib.library.only_manipulate(n=n, array=co)
for k in range(4):
np.testing.assert_allclose((x*y + x)**2, co[k,:])
def test_return_array(self):
m, n = 10, 4
arr = np.ndarray((m,n), order='F', dtype=np.int32)
lib.library.return_array(m, n, arr)
ii, jj = np.mgrid[0:m,0:n]
ii += 1
jj += 1
np.testing.assert_equal(ii*jj + jj, arr)
if __name__ == '__main__':
unittest.main()
|
Debian/dak
|
dak/check_overrides.py
|
Python
|
gpl-2.0
| 19,666
| 0.004271
|
#! /usr/bin/env python3
""" Cruft checker and hole filler for overrides
@contact: Debian FTPMaster <ftpmaster@debian.org>
@copyright: 2000, 2001, 2002, 2004, 2006 James Troup <james@nocrew.org>
@copyright: 2005 Jeroen van Wolffelaar <jeroen@wolffelaar.nl>
@copyright: 2011 Joerg Jaspert <joerg@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
######################################################################
# NB: dak check-overrides is not a good idea with New Incoming as it #
# doesn't take into account accepted. You can minimize the impact #
# of this by running it immediately after dak process-accepted but #
# that's still racy because 'dak process-new' doesn't lock with 'dak #
# process-accepted'. A better long term fix is the evil plan for #
# accepted to be in the DB. #
######################################################################
# dak check-overrides should now work fine being done during
# cron.daily, for example just before 'dak make-overrides' (after 'dak
# process-accepted' and 'dak make-suite-file-list'). At that point,
# queue/accepted should be empty and installed, so... dak
# check-overrides does now take into account suites sharing overrides
# TODO:
# * Only update out-of-sync overrides when corresponding versions are equal to
# some degree
# * consistency checks like:
#  - section=debian-installer only for udeb and dsc
# - priority=optional if dsc
# - (suite, package, 'dsc') is unique,
# - just as (suite, package, (u)deb) (yes, across components!)
# - sections match their component (each component has an own set of sections,
# could probably be reduced...)
################################################################################
import sys
import apt_pkg
from daklib.config import Config
from daklib.dbconn import *
from daklib import daklog
from daklib import utils
################################################################################
Options = None #: Commandline arguments parsed into this
Logger = None #: Our logging object
sections = {}
priorities = {}
blacklist = {}
################################################################################
def usage(exit_code=0):
print("""Usage: dak check-overrides
Check for cruft in overrides.
-n, --no-action don't do anything
-h, --help show this help and exit""")
sys.exit(exit_code)
################################################################################
def process(osuite, affected_suites, originosuite, component, otype, session):
global Logger, Options, sections, priorities
o = get_suite(osuite, session)
if o is None:
utils.fubar("Suite '%s' not recognised." % (osuite))
osuite_id = o.suite_id
originosuite_id = None
if originosuite:
oo = get_suite(originosuite, session)
if oo is None:
utils.fubar("Suite '%s' not recognised." % (originosuite))
originosuite_id = oo.suite_id
c = get_component(component, session)
if c is None:
utils.fubar("Component '%s' not recognised." % (component))
component_id = c.component_id
ot = get_override_type(otype, session)
if ot is None:
utils.fubar("Type '%s' not recognised. (Valid types are deb, udeb and dsc)" % (otype))
type_id = ot.overridetype_id
dsc_type_id = get_override_type("dsc", session).overridetype_id
source_priority_id = get_priority("optional", session).priority_id
if otype == "deb" or otype == "udeb":
packages = {}
# TODO: Fix to use placeholders (check how to with arrays)
q = session.execute("""
SELECT b.package
FROM binaries b
JOIN bin_associations ba ON b.id = ba.bin
JOIN suite ON ba.suite = suite.id
JOIN files_archive_map af ON b.file = af.file_id AND suite.archive_id = af.archive_id
WHERE b.type = :otype AND ba.suite IN (%s) AND af.component_id = :component_id
""" % (",".join([str(i) for i in affected_suites])), {'otype': otype, 'component_id': component_id})
for i in q.fetchall():
packages[i[0]] = 0
src_packages = {}
q = session.execute("""
SELECT s.source FROM source s
JOIN src_associations sa ON s.id = sa.source
JOIN suite ON sa.suite = suite.id
JOIN files_archive_map af ON s.file = af.file_id AND suite.archive_id = af.archive_id
WHERE sa.suite IN (%s) AND af.component_id = :component_id
""" % (",".join([str(i) for i in affected_suites])), {'component_id': component_id})
for i in q.fetchall():
src_packages[i[0]] = 0
# -----------
# Drop unused overrides
q = session.execute("""SELECT package, priority, section, maintainer
FROM override WHERE suite = :suite_id
AND component = :component_id AND type = :type_id""",
{'suite_id': osuite_id, 'component_id': component_id,
'type_id': type_id})
# We're already within a transaction
if otype == "dsc":
for i in q.fetchall():
package = i[0]
if package in src_packages:
src_packages[package] = 1
else:
if package in blacklist:
utils.warn("%s in incoming, not touching" % package)
continue
Logger.log(["removing unused override", osuite, component,
otype, package, priorities[i[1]], sections[i[2]], i[3]])
if not Options["No-Action"]:
session.execute("""DELETE FROM override WHERE package = :package
AND suite = :suite_id AND component = :component_id
AND type = :type_id
AND created < now() - interval '14 days'""",
                                    {'package': package, 'suite_id': osuite_id,
                                     'component_id': component_id, 'type_id': type_id})
# create source overrides based on binary overrides, as source
# overrides not always get created
q = session.execute("""SELECT package, priority, section, maintainer
FROM override WHERE suite = :suite_id AND component = :component_id""",
{'suite_id': osuite_id, 'component_id': component_id})
for i in q.fetchall():
package = i[0]
if package not in src_packages or src_packages[package]:
continue
src_packages[package] = 1
Logger.log(["add missing override", osuite, component,
otype, package, "source", sections[i[2]], i[3]])
if not Options["No-Action"]:
session.execute("""INSERT INTO override (package, suite, component,
priority, section, type, maintainer)
VALUES (:package, :suite_id, :component_id,
:priority_id, :section_id, :type_id, :maintainer)""",
{'package': package, 'suite_id': osuite_id,
'component_id': component_id, 'priority_id': source_priority_id,
|
antlarr/picard
|
picard/ui/searchdialog/__init__.py
|
Python
|
gpl-2.0
| 16,428
| 0.001278
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2016 Rahul Raturi
# Copyright (C) 2018 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections import (
OrderedDict,
namedtuple,
)
from PyQt5 import (
QtCore,
QtGui,
QtNetwork,
QtWidgets,
)
from PyQt5.QtCore import pyqtSignal
from picard import (
config,
log,
)
from picard.util import (
icontheme,
restore_method,
throttle,
)
from picard.ui import PicardDialog
from picard.ui.util import (
StandardButton,
)
class ResultTable(QtWidgets.QTableWidget):
def __init__(self, parent, column_titles):
super().__init__(0, len(column_titles), parent)
self.setHorizontalHeaderLabels(column_titles)
self.setSelectionMode(
QtWidgets.QAbstractItemView.SingleSelection)
self.setSelectionBehavior(
QtWidgets.QAbstractItemView.SelectRows)
self.setEditTriggers(
QtWidgets.QAbstractItemView.NoEditTriggers)
self.horizontalHeader().setStretchLastSection(True)
self.horizontalHeader().setSectionResizeMode(
QtWidgets.QHeaderView.Stretch)
self.horizontalHeader().setSectionResizeMode(
QtWidgets.QHeaderView.Interactive)
# only emit scrolled signal once per second
@throttle(1000)
def emit_scrolled(x):
parent.scrolled.emit()
self.horizontalScrollBar().valueChanged.connect(emit_scrolled)
self.verticalScrollBar().valueChanged.connect(emit_scrolled)
self.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
class SearchBox(QtWidgets.QWidget):
def __init__(self, parent):
super().__init__(parent)
self.search_action = QtWidgets.QAction(icontheme.lookup('system-search'),
_("Search"), self)
self.search_action.setEnabled(False)
self.search_action.triggered.connect(self.search)
self.setupUi()
def focus_in_event(self, event):
        # When focus is on the search edit box, the dialog's accept button
        # needs to be disabled so that hitting enter does not close the
        # dialog.
parent = self.parent()
if parent.table:
parent.table.clearSelection()
            parent.accept_button.setEnabled(False)
def setupUi(self):
self.layout = QtWidgets.QVBoxLayout(self)
self.search_row_widget = QtWidgets.QWidget(self)
self.search_row_layout = QtWidgets.QHBoxLayout(self.search_row_widget)
self.search_row_layout.setContentsMargins(1, 1, 1, 1)
self.search_row_layout.setSpacing(1)
        self.search_edit = QtWidgets.QLineEdit(self.search_row_widget)
self.search_edit.setClearButtonEnabled(True)
self.search_edit.returnPressed.connect(self.trigger_search_action)
self.search_edit.textChanged.connect(self.enable_search)
self.search_edit.setFocusPolicy(QtCore.Qt.StrongFocus)
self.search_edit.focusInEvent = self.focus_in_event
self.search_row_layout.addWidget(self.search_edit)
self.search_button = QtWidgets.QToolButton(self.search_row_widget)
self.search_button.setAutoRaise(True)
self.search_button.setDefaultAction(self.search_action)
self.search_button.setIconSize(QtCore.QSize(22, 22))
self.search_row_layout.addWidget(self.search_button)
self.search_row_widget.setLayout(self.search_row_layout)
self.layout.addWidget(self.search_row_widget)
self.adv_opt_row_widget = QtWidgets.QWidget(self)
self.adv_opt_row_layout = QtWidgets.QHBoxLayout(self.adv_opt_row_widget)
self.adv_opt_row_layout.setAlignment(QtCore.Qt.AlignLeft)
self.adv_opt_row_layout.setContentsMargins(1, 1, 1, 1)
self.adv_opt_row_layout.setSpacing(1)
self.use_adv_search_syntax = QtWidgets.QCheckBox(self.adv_opt_row_widget)
self.use_adv_search_syntax.setText(_("Use advanced query syntax"))
self.use_adv_search_syntax.stateChanged.connect(self.update_advanced_syntax_setting)
self.adv_opt_row_layout.addWidget(self.use_adv_search_syntax)
self.adv_syntax_help = QtWidgets.QLabel(self.adv_opt_row_widget)
self.adv_syntax_help.setOpenExternalLinks(True)
self.adv_syntax_help.setText(_(
" (<a href='https://musicbrainz.org/doc/Indexed_Search_Syntax'>"
"Syntax Help</a>)"))
self.adv_opt_row_layout.addWidget(self.adv_syntax_help)
self.adv_opt_row_widget.setLayout(self.adv_opt_row_layout)
self.layout.addWidget(self.adv_opt_row_widget)
self.layout.setContentsMargins(1, 1, 1, 1)
self.layout.setSpacing(1)
self.setMaximumHeight(60)
def search(self):
self.parent().search(self.query)
def restore_checkbox_state(self):
self.use_adv_search_syntax.setChecked(config.setting["use_adv_search_syntax"])
def update_advanced_syntax_setting(self):
config.setting["use_adv_search_syntax"] = self.use_adv_search_syntax.isChecked()
def enable_search(self):
if self.query:
self.search_action.setEnabled(True)
else:
self.search_action.setEnabled(False)
def trigger_search_action(self):
if self.search_action.isEnabled():
self.search_action.trigger()
def get_query(self):
return self.search_edit.text()
def set_query(self, query):
return self.search_edit.setText(query)
query = property(get_query, set_query)
Retry = namedtuple("Retry", ["function", "query"])
BY_NUMBER, BY_DURATION = range(2)
class SortableTableWidgetItem(QtWidgets.QTableWidgetItem):
def __init__(self, sort_key):
super().__init__()
self.sort_key = sort_key
def __lt__(self, other):
return self.sort_key < other.sort_key
def to_seconds(timestr):
if not timestr:
return 0
seconds = 0
for part in timestr.split(':'):
seconds = seconds * 60 + int(part)
return seconds
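# Illustrative values (assumption, not part of the original module):
#     to_seconds('')        -> 0
#     to_seconds('2:30')    -> 150
#     to_seconds('1:02:03') -> 3723
# A duration cell can then display "3:05" while sorting on
# SortableTableWidgetItem(to_seconds("3:05")).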
class SearchDialog(PicardDialog):
defaultsize = QtCore.QSize(720, 360)
autorestore = False
scrolled = pyqtSignal()
def __init__(self, parent, accept_button_title, show_search=True, search_type=None):
super().__init__(parent)
self.search_results = []
self.table = None
self.show_search = show_search
self.search_type = search_type
self.search_box = None
self.setupUi(accept_button_title)
self.restore_state()
# self.columns has to be an ordered dict, with column name as keys, and
# matching label as values
self.columns = None
self.sorting_enabled = True
self.finished.connect(self.save_state)
@property
def columns(self):
return self.__columns
@columns.setter
def columns(self, list_of_tuples):
if not list_of_tuples:
list_of_tuples = []
self.__columns = OrderedDict(list_of_tuples)
self.__colkeys = list(self.columns.keys())
@property
def table_headers(self):
return list(self.columns.values())
def colpos(self, colname):
return self.__colkeys.index(colname)
def set_table_item(self, row, colname, obj, key, default="", sort=None):
# QVariant remembers the original type of the data
# matching comparison operator will be used when sorting
# get() will return a string, force conv
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/unittest/suite.py
|
Python
|
gpl-3.0
| 10,478
| 0.000477
|
"""TestSuite"""
import sys
from . import case
from . import util
__unittest = True
def _call_if_exists(parent, attr):
func = getattr(parent, attr, lambda: None)
func()
class BaseTestSuite(object):
"""A simple test suite that doesn't provide class or module shared fixtures.
"""
_cleanup = True
def __init__(self, tests=()):
self._tests = []
self._removed_tests = 0
self.addTests(tests)
def __repr__(self):
return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return list(self) == list(other)
def __iter__(self):
return iter(self._tests)
def countTestCases(self):
cases = self._removed_tests
for test in self:
if test:
cases += test.countTestCases()
return cases
def addTest(self, test):
# sanity checks
if not callable(test):
raise TypeError("{} is not callable".format(repr(test)))
if isinstance(test, type) and issubclass(test,
(case.TestCase, TestSuite)):
raise TypeError("TestCases and TestSuites must be instantiated "
"before passing them to addTest()")
self._tests.append(test)
def addTests(self, tests):
if isinstance(tests, str):
raise TypeError("tests must be an iterable of tests, not a string")
for test in tests:
self.addTest(test)
def run(self, result):
for index, test in enumerate(self):
if result.shouldStop:
break
            test(result)
if self._cleanup:
self._removeTestAtIndex(index)
return result
def _removeTestAtIndex(self, index):
"""Stop holding a reference to the TestCase at index."""
try:
test = self._tests[index]
except TypeError:
            # support for suite implementations that have overridden self._tests
pass
else:
# Some unittest tests add non TestCase/TestSuite objects to
# the suite.
if hasattr(test, 'countTestCases'):
self._removed_tests += test.countTestCases()
self._tests[index] = None
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
for test in self:
test.debug()
class TestSuite(BaseTestSuite):
"""A test suite is a composite test consisting of a number of TestCases.
For use, create an instance of TestSuite, then add test case instances.
When all tests have been added, the suite can be passed to a test
runner, such as TextTestRunner. It will run the individual test cases
in the order in which they were added, aggregating the results. When
subclassing, do not forget to call the base class constructor.
"""
def run(self, result, debug=False):
topLevel = False
if getattr(result, '_testRunEntered', False) is False:
result._testRunEntered = topLevel = True
for index, test in enumerate(self):
if result.shouldStop:
break
if _isnotsuite(test):
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if (getattr(test.__class__, '_classSetupFailed', False) or
getattr(result, '_moduleSetUpFailed', False)):
continue
if not debug:
test(result)
else:
test.debug()
if self._cleanup:
self._removeTestAtIndex(index)
if topLevel:
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
result._testRunEntered = False
return result
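    # Illustrative usage (assumption, mirroring the class docstring above):
    #     suite = TestSuite()
    #     suite.addTest(SomeTestCase('test_one'))
    #     runner.run(suite)   # e.g. a TextTestRunner instance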
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
debug = _DebugResult()
self.run(debug, True)
################################
def _handleClassSetUp(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if result._moduleSetUpFailed:
return
if getattr(currentClass, "__unittest_skip__", False):
return
try:
currentClass._classSetupFailed = False
except TypeError:
# test may actually be a function
# so its class will be a builtin-type
pass
setUpClass = getattr(currentClass, 'setUpClass', None)
if setUpClass is not None:
_call_if_exists(result, '_setupStdout')
try:
setUpClass()
except Exception as e:
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
errorName = 'setUpClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
def _get_previous_module(self, result):
previousModule = None
previousClass = getattr(result, '_previousTestClass', None)
if previousClass is not None:
previousModule = previousClass.__module__
return previousModule
def _handleModuleFixture(self, test, result):
previousModule = self._get_previous_module(result)
currentModule = test.__class__.__module__
if currentModule == previousModule:
return
self._handleModuleTearDown(result)
result._moduleSetUpFailed = False
try:
module = sys.modules[currentModule]
except KeyError:
return
setUpModule = getattr(module, 'setUpModule', None)
if setUpModule is not None:
_call_if_exists(result, '_setupStdout')
try:
setUpModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
result._moduleSetUpFailed = True
errorName = 'setUpModule (%s)' % currentModule
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
def _addClassOrModuleLevelException(self, result, exception, errorName):
error = _ErrorHolder(errorName)
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None and isinstance(exception, case.SkipTest):
addSkip(error, str(exception))
else:
result.addError(error, sys.exc_info())
def _handleModuleTearDown(self, result):
previousModule = self._get_previous_module(result)
if previousModule is None:
return
if result._moduleSetUpFailed:
return
try:
module = sys.modules[previousModule]
except KeyError:
return
tearDownModule = getattr(module, 'tearDownModule', None)
if tearDownModule is not None:
_call_if_exists(result, '_setupStdout')
try:
tearDownModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
errorName = 'tearDownModule (%s)' % previousModule
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
def _tearDownPreviousClass(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
|
praekeltfoundation/mc2-freebasics
|
freebasics/migrations/0007_freebasicscontroller_postgres_db_url.py
|
Python
|
bsd-2-clause
| 442
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('freebasics', '0006_change_site_url_field_type'),
]
operations = [
migrations.AddField(
model_name='freebasicscontroller',
name='postgres_db_url',
field=models.TextField(null=True, blank=True),
),
]
|
tchellomello/home-assistant
|
tests/components/nightscout/test_config_flow.py
|
Python
|
apache-2.0
| 3,995
| 0.000751
|
"""Test the Nightscout config flow."""
from aiohttp import ClientConnectionError
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.nightscout.const import DOMAIN
from homeassistant.components.nightscout.utils import hash_from_url
from homeassistant.const import CONF_URL
from tests.async_mock import patch
from tests.common import MockConfigEntry
from tests.components.nightscout import GLUCOSE_READINGS, SERVER_STATUS
CONFIG = {CONF_URL: "https://some.url:1234"}
async def test_form(hass):
"""Test we get the user initiated form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
with _patch_glucose_readings(), _patch_server_status(), _patch_async_setup() as mock_setup, _patch_async_setup_entry() as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == SERVER_STATUS.name # pylint: disable=maybe-no-member
assert result2["data"] == CONFIG
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_user_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nightscout.NightscoutAPI.get_server_status",
side_effect=ClientConnectionError(),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_URL: "https://some.url:1234"},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_user_form_unexpected_exception(hass):
"""Test we handle unexpected exception."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nightscout.NightscoutAPI.get_server_status",
side_effect=Exception(),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_URL: "https
|
://some.url:1234"},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
async def test_user_form_duplicate(hass):
"""Test duplicate entries."""
with _patch_glucose_readings(), _patch_server_status():
unique_id = hash_from_url(CONFIG[CONF_URL])
entry = MockConfigEntry(domain=DOMAIN, unique_id=unique_id)
await hass.config_entries.async_add(entry)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USER},
data=CONFIG,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
def _patch_async_setup():
return patch("homeassistant.components.nightscout.async_setup", return_value=True)
def _patch_async_setup_entry():
return patch(
"homeassistant.components.nightscout.async_setup_entry",
return_value=True,
)
def _patch_glucose_readings():
return patch(
"homeassistant.components.nightscout.NightscoutAPI.get_sgvs",
return_value=GLUCOSE_READINGS,
)
def _patch_server_status():
return patch(
"homeassistant.components.nightscout.NightscoutAPI.get_server_status",
return_value=SERVER_STATUS,
)
|
mobarski/smash
|
test/test-parse2.py
|
Python
|
mit
| 600
| 0.06
|
in1 = """
[aaa]
a = 123
456
789
b =
321
654
987
c =
135
246
999
"""
in2 = """
[cmd]= ok
[cmd] << 40+2
x << 123*2
[cmd]
<<<
x = 42
y = 123
print(x,y)
... print ok
[data] =
1 2 3
4 5 6
7 8 9
[tsv]
head = no
cols = a b c
out >> tab
[insert] << tab
table = mydata
"""
in2 = """
[aaa] <<<
jest
test
x = 42
= x
123123
123123
123123554
[bbb] << x
x = 42
[ccc] = to jest test
x = 42
"""
import sys
sys.path.append('..')
from parse2 import *
if __name__=="__main__":
if 1:
for s in sections(in1):
print('section name:',name(s))
print('args:',args(s))
print()
|
bernard357/shellbot
|
examples/todos.py
|
Python
|
apache-2.0
| 3,482
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Manage todos
In this example we create following commands with some lines of code:
- command: todo <something to do>
- show the # of the new item in response
- command: todo #<n> <something to do>
- show the updated item in response
- command: todos
- list all thing to do
- command: next
- show next item to do
- command: done
- signal that one item has been completed
- command: history
- lists completed items
- command: drop
- command: drop #<n>
- to delete one item
Here we showcase how a bot manages information over time. A simple
todo list is added to the engine, and any participant is entitled to act on it,
from any channel.
Multiple questions are addressed in this example:
- How to share information across multiple channels? You can attach any
attribute to the engine, and this will be made available to every bot
instance. Here we add ``factory`` to the engine, and it is accessed from
within commands as ``bot.engine.factory``.
To run this script you have to provide a custom configuration, or set
environment variables instead::
- ``CHANNEL_DEFAULT_PARTICIPANTS`` - Mention at least your e-mail address
- ``CISCO_SPARK_BOT_TOKEN`` - Received from Cisco Spark on bot registration
- ``SERVER_URL`` - Public link used by Cisco Spark to reach your server
The token is specific to your run-time, please visit Cisco Spark for
Developers to get more details:
https://developer.ciscospark.com/
For example, if you run this script under Linux or macOS with support from
ngrok for exposing services to the Internet::
export CHANNEL_DEFAULT_PARTICIPANTS="alice@acme.com"
export CHAT_TOKEN="<token id from Cisco Spark for Developers>"
export SERVER_URL="http://1a107f21.ngrok.io"
python todos.py
"""
import os
from shellbot import Engine, Context
from todos import TodoFactory
if __name__ == '__main__':
Context.set_logger()
factory = TodoFactory([
'write down the driving question',
'gather facts and related information',
'identify information gaps and document assumptions',
'formulate scenarios',
'select the most appropriate scenario',
])
engine = Engine( # use Cisco Spark and load shell commands
type='spark',
commands=TodoFactory.commands())
engine.factory = factory
os.environ['BOT_ON_ENTER'] = 'What do you want to do today?'
os.environ['CHAT_ROOM_TITLE'] = 'Manage todos'
engine.configure() # ensure that all components are ready
engine.bond(reset=True) # create a group channel for this example
engine.run() # until Ctl-C
engine.dispose() # delete the initial group channel
|
IsCoolEntertainment/pynba
|
src/iscool_e/pynba/globals.py
|
Python
|
mit
| 221
| 0
|
# -*- coding: utf-8 -*-
"""
IsCool-e Pynba
~~~~~~~~~~~~~~
:copyright: (c) 2015 by IsCool Entertainment.
:license: MIT, see LICENSE for more details.
"""
from pynba.wsgi import pynba
__all__ = ['pynba']
|
jianajavier/pnc-cli
|
pnc_cli/swagger_client/models/user.py
|
Python
|
apache-2.0
| 6,614
| 0.000605
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from datetime import datetime
from pprint import pformat
from six import iteritems
class User(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
User - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'email': 'str',
'first_name': 'str',
'last_name': 'str',
'login_token': 'str',
'username': 'str',
'build_records': 'list[BuildRecord]',
'field_handler': 'FieldHandler'
}
self.attribute_map = {
'id': 'id',
'email': 'email',
'first_name': 'firstName',
'last_name': 'lastName',
'login_token': 'loginToken',
'username': 'username',
'build_records': 'buildRecords',
'field_handler': 'fieldHandler'
}
self._id = None
self._email = None
self._first_name = None
self._last_name = None
self._login_token = None
self._username = None
self._build_records = None
self._field_handler = None
@property
def id(self):
"""
Gets the id of this User.
:return: The id of this User.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this User.
:param id: The id of this User.
:type: int
"""
self._id = id
@property
def email(self):
"""
Gets the email of this User.
:return: The email of this User.
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""
Sets the email of this User.
:param email: The email of this User.
:type: str
"""
self._email = email
@property
def first_name(self):
"""
Gets the first_name of this User.
:return: The first_name of this User.
:rtype: str
"""
return self._first_name
@first_name.setter
def first_name(self, first_name):
"""
Sets the first_name of this User.
:param first_name: The first_name of this User.
:type: str
"""
self._first_name = first_name
@property
def last_name(self):
"""
Gets the last_name of this User.
:return: The last_name of this User.
:rtype: str
"""
return self._last_name
@last_name.setter
def last_name(self, last_name):
"""
Sets the last_name of this User.
:param last_name: The last_name of this User.
:type: str
"""
self._last_name = last_name
@property
def login_token(self):
"""
Gets the login_token of this User.
:return: The login_token of this User.
:rtype: str
"""
return self._login_token
@login_token.setter
def login_token(self, login_token):
"""
Sets the login_token of this User.
:param login_token: The login_token of this User.
:type: str
"""
self._login_token = login_token
@property
def username(self):
"""
Gets the username of this User.
:return: The username of this User.
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""
Sets the username of this User.
:param username: The username of this User.
:type: str
"""
self._username = username
@property
def build_records(self):
"""
Gets the build_records of this User.
:return: The build_records of this User.
:rtype: list[BuildRecord]
"""
return self._build_records
@build_records.setter
def build_records(self, build_records):
"""
Sets the build_records of this User.
:param build_records: The build_records of this User.
:type: list[BuildRecord]
"""
self._build_records = build_records
@property
def field_handler(self):
"""
Gets the field_handler of this User.
:return: The field_handler of this User.
:rtype: FieldHandler
"""
return self._field_handler
@field_handler.setter
def field_handler(self, field_handler):
"""
Sets the field_handler of this User.
:param field_handler: The field_handler of this User.
:type: FieldHandler
"""
self._field_handler = field_handler
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, datetime):
result[attr] = str(value.date())
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
|
gunan/tensorflow
|
tensorflow/python/ops/nccl_ops.py
|
Python
|
apache-2.0
| 8,087
| 0.006059
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for GPU collective operations implemented using NVIDIA nccl."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import device
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_nccl_ops
_module_lock = threading.Lock()
_shared_name_counter = 0
def all_sum(tensors):
"""Returns a list of tensors with the all-reduce sum across `tensors`.
The computation is done with an all-reduce operation, so if only some of the
returned tensors are evaluated then the computation will hang.
Args:
tensors: The input tensors across which to sum; must be assigned
to GPU devices.
Returns:
List of tensors, each with the sum of the input tensors, where tensor i has
the same device as `tensors[i]`.
"""
return _apply_all_reduce('sum', tensors)
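# A minimal usage sketch for all_sum (illustrative; assumes a TensorFlow build
# with NCCL and at least two GPUs; tensor values and device strings below are
# hypothetical):
#
#     import tensorflow as tf
#     from tensorflow.python.ops import nccl_ops
#
#     towers = []
#     for i in range(2):
#         with tf.device('/gpu:%d' % i):
#             towers.append(tf.constant([1.0, 2.0]))
#     summed = nccl_ops.all_sum(towers)   # one summed tensor per input device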
@ops.RegisterGradient('NcclAllReduce')
def _all_sum_grad(op, grad):
"""The gradients for `all_sum`.
Args:
op: The `all_sum` `Operation` that we are differentiating.
grad: Gradient with respect to the output of the `all_sum` op.
Returns:
The gradient with respect to the output of `all_sum`.
Raises:
LookupError: If `reduction` is not `sum`.
"""
if op.get_attr('reduction') != b'sum':
raise LookupError('No gradient defined for NcclAllReduce except sum.')
_check_device(grad, expected=op.device)
num_devices = op.get_attr('num_devices')
shared_name = op.get_attr('shared_name') + b'_grad'
with ops.device(op.device):
return gen_nccl_ops.nccl_all_reduce(
input=grad,
reduction='sum',
num_devices=num_devices,
shared_name=shared_name)
def all_prod(tensors):
"""Returns a list of tensors with the all-reduce product across `tensors`.
The computation is done with an all-reduce operation, so if only some of the
returned tensors are evaluated then the computation will hang.
Args:
tensors: The input tensors across which to multiply; must be assigned
to GPU devices.
Returns:
List of tensors, each with the product of the input tensors, where tensor i
has the same device as `tensors[i]`.
"""
return _apply_all_reduce('prod', tensors)
def all_min(tensors):
"""Returns a list of tensors with the all-reduce min across `tensors`.
The computation is done with an all-reduce operation, so if only some of the
returned tensors are evaluated then the computation will hang.
Args:
tensors: The input tensors across which to reduce; must be assigned
to GPU devices.
Returns:
List of tensors, each with the minimum of the input tensors, where tensor i
has the same device as `tensors[i]`.
"""
return _apply_all_reduce('min', tensors)
def all_max(tensors):
"""Returns a list of tensors with the all-reduce max across `tensors`.
The computation is done with an all-reduce operation, so if only some of the
returned tensors are evaluated then the computation will hang.
Args:
tensors: The input tensors across which to reduce; must be assigned
to GPU devices.
Returns:
List of tensors, each with the maximum of the input tensors, where tensor i
has the same device as `tensors[i]`.
"""
return _apply_all_reduce('max', tensors)
def reduce_sum(tensors):
"""Returns a tensor with the reduce sum across `tensors`.
The computation is done with a reduce operation, so only one tensor is
returned.
Args:
tensors: The input tensors across which to sum; must be assigned
to GPU devices.
Returns:
A tensor containing the sum of the input tensors.
Raises:
LookupError: If context is not currently using a GPU device.
"""
return _apply_reduce('sum', tensors)
@ops.RegisterGradient('NcclReduce')
def _reduce_sum_grad(op, grad):
"""The gradients for input `Operation` of `reduce_sum`.
Args:
op: The `sum send` `Operation` that we are differentiating.
grad: Gradient with respect to the output of the `reduce_sum` op.
Returns:
The gradient with respect to the input of `reduce_sum` op.
Raises:
LookupError: If the reduction attribute of op is not `sum`.
"""
if op.get_attr('reduction') != b'sum':
raise LookupError('No gradient defined for NcclReduce except sum.')
_check_device(grad, expected=op.device)
with ops.device(op.device):
result = gen_nccl_ops.nccl_broadcast(input=grad, shape=grad.shape)
return [result] * len(op.inputs)
def broadcast(tensor):
"""Returns a tensor that can be efficiently transferred to other devices.
Args:
tensor: The tensor to send; must be assigned to a GPU device.
Returns:
A tensor with the value of `src_tensor`, which can be used as input to
ops on other GPU devices.
"""
_check_device(tensor)
with ops.device(tensor.device):
return gen_nccl_ops.nccl_broadcast(input=tensor, shape=tensor.shape)
@ops.RegisterGradient('NcclBroadcast')
def _broadcast_grad(op, accumulated_grad):
"""The gradients for input `Operation` of `broadcast`.
Args:
op: The `broadcast send` `Operation` that we are differentiating.
accumulated_grad: Accumulated gradients with respect to the output of the
`broadcast` op.
Returns:
Gradients with respect to the input of `broadcast`.
"""
# Grab inputs of accumulated_grad and replace accumulation with reduce_sum.
grads = [t for t in accumulated_grad.op.inputs]
for t in grads:
_check_device(t)
with ops.device(op.device):
return gen_nccl_ops.nccl_reduce(input=grads, reduction='sum')
def _apply_all_reduce(reduction, tensors):
"""Helper function for all_* functions."""
if not tensors:
raise ValueError('Must pass >0 tensors to all reduce operations')
shared_name = _get_shared_name()
def _all_reduce():
"""Call nccl allreduce."""
res = []
for t in tensors:
_check_device(t)
with ops.device(t.device):
res.append(
gen_nccl_ops.nccl_all_reduce(
input=t,
reduction=reduction,
num_devices=len(tensors),
shared_name=shared_name))
return res
if context.executing_eagerly():
# Nccl ops will block unless they are executed concurrently such as in a
# graph or a defun.
return def_function.function(_all_reduce)()
else:
return _all_reduce()
def _apply_reduce(reduction, tensors):
"""Helper function for reduce_* functions."""
if not tensors:
raise ValueError('Must pass >0 tensors to reduce operations')
for t in tensors:
_check_device(t)
result = gen_nccl_ops.nccl_reduce(input=tensors, reduction=reduction)
try:
next(t for t in tensors if t.device == result.device)
except StopIteration:
raise ValueError('One input tensor must be assigned to current device')
return result
def _get_shared_name():
global _shared_name_counter
with _module_lock:
val = _shared_name_counter
_shared_name_counter += 1
return 'c%s' % val
def _check_device(tensor, expected=None):
if not device.canonical_name(tensor.device):
raise ValueError('Device assignment required for nccl collective ops')
if expected and expected != tensor.device:
raise ValueError('Expected device %s, got %s' % (expected, tensor.device))
|
ampotty/fas
|
scripts/export-bugzilla.py
|
Python
|
gpl-2.0
| 5,752
| 0.00452
|
#!/usr/bin/python -t
__requires__ = 'TurboGears'
import pkg_resources
pkg_resources.require('CherryPy >= 2.0, < 3.0alpha')
import logging
logging.basicConfig()
import os
import sys
import getopt
import xmlrpclib
import smtplib
from email.Message import Message
import warnings
# Ignore DeprecationWarnings. This allows us to stop getting email
# from the cron job. We'll see the same warnings from the server starting up
warnings.simplefilter('ignore', DeprecationWarning)
import turbogears
import bugzilla
from turbogears import config
cfgfile = '/etc/export-bugzilla.cfg'
if os.access('./export-bugzilla.cfg', os.R_OK):
cfgfile = './export-bugzilla.cfg'
turbogears.update_config(configfile=cfgfile)
from turbogears.database import session
from fas.model import BugzillaQueue
BZSERVER = config.get('bugzilla.url', 'https://bugdev.devel.redhat.com/bugzilla-cvs/xmlrpc.cgi')
BZUSER = config.get('bugzilla.username')
BZPASS = config.get('bugzilla.password')
MAILSERVER = config.get('mail.server', 'localhost')
ADMINEMAIL = config.get('mail.admin_email', 'admin@fedoraproject.org')
NOTIFYEMAIL = config.get('mail.notify_email', ['admin@fedoraproject.org'])
if __name__ == '__main__':
opts, args = getopt.getopt(sys.argv[1:], '', ('usage', 'help'))
if len(args) != 2 or ('--usage','') in opts or ('--help','') in opts:
print """
Usage: export-bugzilla.py GROUP BUGZILLA_GROUP
"""
sys.exit(1)
ourGroup = args[0]
bzGroup = args[1]
server = bugzilla.Bugzilla(url=BZSERVER, user=BZUSER, password=BZPASS,
cookiefile=None, tokenfile=None)
bugzilla_queue = BugzillaQueue.query.join('group').filter_by(
name=ourGroup)
no_bz_account = []
for entry in bugzilla_queue:
# Make sure we have a record for this user in bugzilla
if entry.action == 'r':
# Remove the user's bugzilla group
try:
server.updateperms(entry.email, 'rem', bzGroup)
except xmlrpclib.Fault, e:
if e.faultCode == 51:
# It's okay, not having this user is equivalent to setting
# them to not have this group.
pass
else:
raise
elif entry.action == 'a':
# Make sure the user exists
try:
server.getuser(entry.email)
except xmlrpclib.Fault, e:
if e.faultCode == 51:
# This user doesn't have a bugzilla account yet
# add them to a list and we'll let them know.
no_bz_account.append(entry)
continue
else:
print 'Error:', e, entry.email, entry.person.human_name
raise
server.updateperms(entry.email, 'add', bzGroup)
else:
print 'Unrecognized action code: %s %s %s %s %s' % (entry.action,
entry.email, entry.person.human_name, entry.person.username, entry.group.name)
continue
# Remove them from the queue
session.delete(entry)
session.flush()
# Mail the people without bugzilla accounts
if '$USER' in NOTIFYEMAIL:
for person in no_bz_account:
smtplib.SMTP(MAILSERVER)
msg = Message()
message = '''Hello %(name)s,
As a Fedora packager, we grant you permissions to make changes to bugs in
bugzilla to all Fedora bugs. This lets you work together with other Fedora
developers in an easier fashion. However, to enable this functionality, we
need to have your bugzilla email address stored in the Fedora Account System.
At the moment you have:
%(email)s
which bugzilla is telling us is not an account in bugzilla. If you could
please set up an account in bugzilla with this address or change your email
address on your Fedora Account to match an existing bugzilla account this would
let us go forward.
Note: this message is being generated by an automated script. You'll continue
getting this message until the problem is resolved. Sorry for the
inconvenience.
Thank you,
The Fedora Account System
%(admin_email)s
''' % {'name': person.person.human_name, 'email': person.email,
'admin_email': ADMINEMAIL}
msg.add_header('To', person.email)
msg.add_header('From', ADMINEMAIL)
msg.add_header('Subject', 'Fedora Account System and Bugzilla Mismatch')
msg.set_payload(message)
smtp = smtplib.SMTP(MAILSERVER)
smtp.sendmail(ADMINEMAIL, [person.email], msg.as_string())
smtp.quit()
recipients = [e for e in NOTIFYEMAIL if e != '$USER']
if recipients and no_bz_account:
smtplib.SMTP(MAILSERVER)
msg = Message()
people = []
for person in no_bz_account:
if person.person.status == 'Active':
people.append(' %(user)s -- %(name)s -- %(email)s' %
{'name': person.person.human_name, 'email': person.email,
'user': person.person.username})
if people:
people = '\n'.join(people)
message = '''
The following people are in the packager group but do not have email addresses
that are valid in bugzilla:
%s
''' % people
msg.add_header('From', ADMINEMAIL)
msg.add_header('To', ', '.join(recipients))
msg.add_header('Subject', 'Fedora Account System and Bugzilla Mismatch')
msg.set_payload(message)
smtp = smtplib.SMTP(MAILSERVER)
smtp.sendmail(ADMINEMAIL, recipients, msg.as_string())
smtp.quit()
|
esaezgil/aiohttp
|
aiohttp/client.py
|
Python
|
apache-2.0
| 26,142
| 0.000191
|
"""HTTP Client for asyncio."""
import asyncio
import base64
import hashlib
import os
import sys
import traceback
import warnings
from multidict import CIMultiDict, MultiDict, MultiDictProxy, istr
from yarl import URL
import aiohttp
from . import hdrs, helpers
from ._ws_impl import WS_KEY, WebSocketParser, WebSocketWriter
from .client_reqrep import ClientRequest, ClientResponse
from .client_ws import ClientWebSocketResponse
from .cookiejar import CookieJar
from .errors import WSServerHandshakeError
from .helpers import Timeout
__all__ = ('ClientSession', 'request', 'get', 'options', 'head',
'delete', 'post', 'put', 'patch', 'ws_connect')
PY_35 = sys.version_info >= (3, 5)
class ClientSession:
"""First-class interface for making HTTP requests."""
_source_traceback = None
_connector = None
def __init__(self, *, connector=None, loop=None, cookies=None,
headers=None, skip_auto_headers=None,
auth=None, request_class=ClientRequest,
response_class=ClientResponse,
ws_response_class=ClientWebSocketResponse,
version=aiohttp.HttpVersion11,
cookie_jar=None):
if connector is None:
connector = aiohttp.TCPConnector(loop=loop)
loop = connector._loop # never None
else:
if loop is None:
loop = connector._loop # never None
elif connector._loop is not loop:
raise ValueError("loop argument must agree with connector")
self._loop = loop
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
if cookie_jar is None:
cookie_jar = CookieJar(loop=loop)
self._cookie_jar = cookie_jar
        if cookies is not None:
self._cookie_jar.update_cookies(cookies)
self._connector = connector
self._default_auth = auth
self._version = version
# Convert to list of tuples
if headers:
headers = CIMultiDict(headers)
else:
headers = CIMultiDict()
self._default_headers = headers
if skip_auto_headers is not None:
self._skip_auto_headers = frozenset([istr(i)
for i in skip_auto_headers])
else:
self._skip_auto_headers = frozenset()
self._request_class = request_class
self._response_class = response_class
self._ws_response_class = ws_response_class
def __del__(self, _warnings=warnings):
if not self.closed:
self.close()
_warnings.warn("Unclosed client session {!r}".format(self),
ResourceWarning)
context = {'client_session': self,
'message': 'Unclosed client session'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
def request(self, method, url, *,
params=None,
data=None,
headers=None,
skip_auto_headers=None,
auth=None,
allow_redirects=True,
max_redirects=10,
encoding='utf-8',
version=None,
compress=None,
chunked=None,
expect100=False,
read_until_eof=True,
proxy=None,
proxy_auth=None,
timeout=5*60):
"""Perform HTTP request."""
return _RequestContextManager(
self._request(
method,
url,
params=params,
data=data,
headers=headers,
skip_auto_headers=skip_auto_headers,
auth=auth,
allow_redirects=allow_redirects,
max_redirects=max_redirects,
encoding=encoding,
version=version,
compress=compress,
chunked=chunked,
expect100=expect100,
read_until_eof=read_until_eof,
proxy=proxy,
proxy_auth=proxy_auth,
timeout=timeout))
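# A minimal usage sketch for this session API (illustrative only; assumes it
# runs inside an asyncio coroutine on this pre-async/await aiohttp version, and
# the URL is hypothetical):
#
#     session = ClientSession()
#     resp = yield from session.request('GET', 'http://example.com')
#     body = yield from resp.text()
#     yield from resp.release()
#     session.close()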
@asyncio.coroutine
def _request(self, method, url, *,
params=None,
data=None,
headers=None,
skip_auto_headers=None,
auth=None,
allow_redirects=True,
max_redirects=10,
encoding='utf-8',
version=None,
compress=None,
chunked=None,
expect100=False,
read_until_eof=True,
proxy=None,
proxy_auth=None,
timeout=5*60):
if version is not None:
warnings.warn("HTTP version should be specified "
"by ClientSession constructor", DeprecationWarning)
else:
version = self._version
if self.closed:
raise RuntimeError('Session is closed')
redirects = 0
history = []
# Merge with default headers and transform to CIMultiDict
headers = self._prepare_headers(headers)
if auth is None:
auth = self._default_auth
# It would be confusing if we support explicit Authorization header
# with `auth` argument
if (headers is not None and
auth is not None and
hdrs.AUTHORIZATION in headers):
raise ValueError("Can't combine `Authorization` header with "
"`auth` argument")
skip_headers = set(self._skip_auto_headers)
if skip_auto_headers is not None:
for i in skip_auto_headers:
skip_headers.add(istr(i))
if isinstance(proxy, str):
proxy = URL(proxy)
while True:
url = URL(url).with_fragment(None)
cookies = self._cookie_jar.filter_cookies(url)
req = self._request_class(
method, url, params=params, headers=headers,
skip_auto_headers=skip_headers, data=data,
cookies=cookies, encoding=encoding,
auth=auth, version=version, compress=compress, chunked=chunked,
expect100=expect100,
loop=self._loop, response_class=self._response_class,
proxy=proxy, proxy_auth=proxy_auth, timeout=timeout)
with Timeout(timeout, loop=self._loop):
conn = yield from self._connector.connect(req)
try:
resp = req.send(conn.writer, conn.reader)
try:
yield from resp.start(conn, read_until_eof)
except:
resp.close()
conn.close()
raise
except (aiohttp.HttpProcessingError,
aiohttp.ServerDisconnectedError) as exc:
raise aiohttp.ClientResponseError() from exc
except OSError as exc:
raise aiohttp.ClientOSError(*exc.args) from exc
self._cookie_jar.update_cookies(resp.cookies, resp.url)
# redirects
if resp.status in (301, 302, 303, 307) and allow_redirects:
redirects += 1
history.append(resp)
if max_redirects and redirects >= max_redirects:
resp.close()
break
else:
# TODO: close the connection if BODY is large enough
# Redirect with big BODY is forbidden by HTTP protocol
# but malformed server may send illegal response.
# Small BODIES with text like "Not Found" are still
# perfectly fine and should be accepted.
yield from resp.release()
# For 301 and 302, mimic IE behaviour, now changed in RFC.
|
dberc/tpzsimul.gems
|
jgraph/xact_mem.py
|
Python
|
gpl-2.0
| 140,056
| 0.019185
|
#!/s/std/bin/python
import sys, string, os, glob, re, mfgraph
#results_dir = "../results/isca07_final_version"
#results_dir = "/p/multifacet/projects/logtm_eager_lazy/ISCA07_results/old/old-11-7/"
results_dir = "../results/"
#results_dir = "/p/multifacet/projects/shore/HPCA-filters-results/non-smt/"
#results_dir = "/p/multifacet/projects/xact_memory/isca06_submission_results2"
make_excel_files = 0
def get_int_stat(file, stat):
grep_lines = mfgraph.grep(file, stat)
if (grep_lines == []):
return -1
line = string.split(grep_lines[0])
return int(line[1])
def get_float_stat(file, stat):
grep_lines = mfgraph.grep(file, stat)
line = string.split(grep_lines[0])
return float(line[1])
def get_runtime(benchmark):
data = []
files = glob.glob(results_dir + "/" + benchmark + "/*.stats")
for file in files:
procs = get_int_stat(file, "g_NUM_NODES")
cycles = get_int_stat(file, "Ruby_cycles")
#print "%dp:\t%d" % (procs, cycles)
data.append((procs, cycles))
return data
def get_stat(file, str):
grep_lines = mfgraph.grep(file, str)
line = (grep_lines[0]).split(':')
return int(line[1])
def get_count_stat(file, stat):
grep_lines = mfgraph.grep(file, stat)
line = string.split(grep_lines[0])
return int(line[6])
def get_average_stat(file, stat):
grep_lines = mfgraph.grep(file, stat)
if (grep_lines == []):
return -1
line = string.split(grep_lines[0])
return float(line[8])
def make_microbench_line(jgraphs, name, runs, bw, protocol_map, nest, label):
xlabel = "Processors"
ylabel = "Run Time"
proc_values = [1, 2, 3, 4,
5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31]
#proc_values = [1, 2, 3, 4, 5, 6, 7]
read_set = None
data = []
for run in runs:
#print run
protocol = protocol_map[run[2]]
line = [run[0]]
for procs in proc_values:
#print procs
lst = []
#read_set_str = "%s" % read_set
glob_str = "%s/%s/%s-%dp-default-%s-%dnx-40minopen-%d-*.stats" % (results_dir, run[1], run[1], procs, protocol, nest, bw)
files = glob.glob(glob_str)
if files == []:
#print "No files match: %s" % glob_str
#exit
continue
for file in files:
print file
ruby_cycles = get_float_stat(file, "Ruby_cycles")/1000000.0
#divide by procs?
print "ruby_cycles: %f" % ruby_cycles
lst.append(ruby_cycles)
cycles = mfgraph.mean(lst)
#print lst
conf = mfgraph.confidence_interval_95_percent(lst)
#print "95 conf: %f" % conf
line.append([procs, cycles, cycles - conf, cycles + conf])
data.append(line)
print data
jgraphs.append(mfgraph.line_graph(data,
title = name,
ylabel = "Execution Time (in millions of cycles)",
xlabel = "Threads",
xsize = 4.5,
ysize = 8.0,
line_thickness = 2.0,
legend_x = "90",
marktype = ["circle", "box", "triangle", "diamond"],
#marksize = [0.4, 0.4, 0.4, 0.5],
))
graph_out = mfgraph.line_graph(data,
title = "",
ylabel = "Execution Time (in millions of cycles)",
#ymax = 1.50,
ymin = 0,
xsize = 2.5,
ysize = 1.778,
line_thickness = 2.0,
marktype = ["circle", "box", "triangle", "diamond"],
marksize = [0.2, 0.3, 0.3, 0.4],
title_fontsize = "12",
label_fontsize = "8",
legend_fontsize = "9",
legend_x = "70",
colors = ["0 0 0",
".6 .6 .6",
".2 .2 .2",
".8 .8 .8",
".4 .4 .4"]
)
mfgraph.make_eps(graph_out, "%s_%s_line" % (name, label), "ttm_figures")
if make_excel_files:
mfgraph.make_excel_bar(name=name,
data=bars)
def make_simics_microbench_line(jgraphs, name, runs, bw, protocol_map, label):
xlabel = "Processors"
ylabel = "Run Time"
proc_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31]
#proc_values = [1, 2, 3, 4, 5, 6, 7]
read_set = None
data = []
for run in runs:
#print run
protocol = protocol_map[run[2]]
line = [run[0]]
for procs in proc_values:
#print procs
lst = []
#read_set_str = "%s" % read_set
glob_str = "%s/%s/%s*%dp-default-%s-1nx-%d-*.stats" % (results_dir, run[1], run[1], procs, protocol, bw)
files = glob.glob(glob_str)
if files == []:
print "No files match: %s" % glob_str
#exit
continue
for file in files:
#print file
ruby_cycles = get_float_stat(file, "simics_cycles")/1000000.0
#divide by procs?
print "ruby_cycles: %f" % ruby_cycles
lst.append(ruby_cycles)
cycles = mfgraph.mean(lst)
#print lst
conf = mfgraph.confidence_interval_95_percent(lst)
print "95 conf: %f" % conf
line.append([procs, cycles, cycles - conf, cycles + conf])
data.append(line)
#print data
jgraphs.append(mfgraph.line_graph(data,
title = name,
ylabel = "Execution Time (in millions of cycles)",
xlabel = "Threads",
xsize = 4.5,
ysize = 8.0,
line_thickness = 2.0,
legend_x = "90",
marktype = ["circle", "box", "triangle", "diamond"],
#marksize = [0.4, 0.4, 0.4, 0.5],
))
graph_out = mfgraph.line_graph(data,
title = "",
ylabel = "Execution Time (in millions of cycles)",
#ymax = 1.50,
ymin = 0,
xsize = 2.5,
ysize = 1.778,
line_thickness = 2.0,
marktype = ["circle", "box", "triangle", "diamond"],
marksize = [0.2, 0.3, 0.3, 0.4],
title_fontsize = "12",
label_fontsize = "8",
legend_fontsize = "9",
legend_x = "70",
colors = ["0 0 0",
".6 .6 .6",
".2 .2 .2",
|
beiko-lab/gengis
|
bin/Lib/site-packages/numpy/f2py/setupscons.py
|
Python
|
gpl-3.0
| 4,305
| 0.009988
|
#!/usr/bin/env python
"""
setup.py for installing F2PY
Usage:
python setup.py install
Copyright 2001-2005 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.32 $
$Date: 2005/01/30 17:22:14 $
Pearu Peterson
"""
__version__ = "$Id: setup.py,v 1.32 2005/01/30 17:22:14 pearu Exp $"
import os
import sys
from distutils.dep_util import newer
from numpy.distutils import log
from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
from __version__ import version
def configuration(parent_package='',top_path=None):
config = Configuration('f2py', parent_package, top_path)
config.add_data_dir('docs')
config.add_data_files('src/fortranobject.c',
'src/fortranobject.h',
'f2py.1'
)
config.make_svn_version_py()
def generate_f2py_py(build_dir):
f2py_exe = 'f2py'+os.path.basename(sys.executable)[6:]
if f2py_exe[-4:]=='.exe':
f2py_exe = f2py_exe[:-4] + '.py'
if 'bdist_wininst' in sys.argv and f2py_exe[-3:] != '.py':
f2py_exe = f2py_exe + '.py'
target = os.path.join(build_dir,f2py_exe)
if newer(__file__,target):
log.info('Creating %s', target)
f = open(target,'w')
f.write('''\
#!/usr/bin/env %s
# See http://cens.ioc.ee/projects/f2py2e/
import os, sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
try:
i=sys.argv.index("--"+mode)
del sys.argv[i]
break
except ValueError: pass
os.environ["NO_SCIPY_IMPORT"]="f2py"
if mode=="g3-numpy":
print >> sys.stderr, "G3 f2py support is not implemented, yet."
sys.exit(1)
elif mode=="2e-numeric":
from f2py2e import main
elif mode=="2e-numarray":
sys.argv.append("-DNUMARRAY")
from f2py2e import main
elif mode=="2e-numpy":
from numpy.f2py import main
else:
print >> sys.stderr, "Unknown mode:",`mode`
sys.exit(1)
main()
'''%(os.path.basename(sys.executable)))
f.close()
return target
config.add_scripts(generate_f2py_py)
return config
if __name__ == "__main__":
config = configuration(top_path='')
version = config.get_version()
print 'F2PY Version',version
config = config.todict()
if sys.version[:3]>='2.3':
config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\
"/F2PY-2-latest.tar.gz"
config['classifiers'] = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: NumPy License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: C',
'Programming Language :: Fortran',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Code Generators',
]
setup(version=version,
description = "F2PY - Fortran to Python Interface Generaton",
author = "Pearu Peterson",
author_email = "pearu@cens.ioc.ee",
maintainer = "Pearu Peterson",
maintainer_email = "pearu@cens.ioc.ee",
license = "BSD",
platforms = "Unix, Windows (mingw|cygwin), Mac OSX",
long_description = """\
The Fortran to Python Interface Generator, or F2PY for short, is a
command line tool (f2py) for generating Python C/API modules for
wrapping Fortran 77/90/95 subroutines, accessing common blocks from
Python, and calling Python functions from Fortran (call-backs).
Interfacing subroutines/data from Fortran 90/95 modules is supported.""",
url = "http://cens.ioc.ee/projects/f2py2e/",
keywords = ['Fortran','f2py'],
**config)
|
EnTeQuAk/dotfiles
|
sublime-text-3/Packages/isort/pies/dbm/__init__.py
|
Python
|
unlicense
| 184
| 0
|
from __future__ import absolute_import
from dbm import *
from ..version_info import PY2
if PY2:
from . import dumb, gnu, ndbm
from whichdb import *
from anydbm import *
|
markbenvenuto/buildbaron
|
analyzer/evg_log_file_analyzer.py
|
Python
|
apache-2.0
| 3,551
| 0.002534
|
#!/usr/bin/env python3
"""
Analyze a evergreen task log page
"""
import argparse
import json
import os
import sys
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.abspath(os.path.realpath(__file__))))
print(sys.path)
import faultinfo
else:
from . import faultinfo
DEFAULT_CONTEXT_LINES = 10
class EvgLogFileAnalyzer(object):
"""Analyze a non-timeout evergreen task log file"""
def __init__(self, logstr):
self.logstr = logstr
self.faults = []
self.contexts = []
self.lines = self.logstr.split("\n")
def analyze(self):
for idx in range(len(self.lines)):
line = self.lines[idx]
# All test failures contain this:
# ("Task completed - FAILURE.")
# All tests that fail usually have this message, so
# there is nothing gained by checking for it in the normal case when a test simply fails
if "Task completed - FAILURE" in line:
self.add_fault('task failure', idx, DEFAULT_CONTEXT_LINES, 0)
# -1073741819 = 0xC0000005 = Access Violation on Windows
if "(-1073741819)" in line:
self.add_fault('test crashed', idx, DEFAULT_CONTEXT_LINES, 5)
if "fatal: Could not read from remote repository." in line:
self.add_fault('git hub down', idx, DEFAULT_CONTEXT_LINES, 5)
self.check_oom(line, idx)
def check_oom(self, line, idx):
if "OOM (Out of memory) killed processes detected" in line and not "No OOM (Out of memory) killed processes detected" in line:
count = 1
for fidx in range(idx + 1, len(self.lines)):
if not ("oom-killer" in line or "Out of memory" in line or "Kill process" in line):
break
count += 1
context = '\n'.join(self.lines[idx:idx + count])
self.faults.append(faultinfo.FaultInfo("evergreen", "oom-killer", context, line))
def analyze_oom(self):
for idx in range(len(self.lines)):
line = self.lines[idx]
self.check_oom(line, idx)
def add_fault(self, category, line, before_line_count, after_line_count):
context = '\n'.join(self.lines[line - before_line_count:line + after_line_count])
self.faults.append(faultinfo.FaultInfo("evergreen", category, context, line))
def get_faults(self):
return self.faults
def get_contexts(self):
return self.contexts
def to_json(self):
d1 = {"faults": self.faults, "contexts": self.contexts}
return json.dumps(d1, cls=faultinfo.CustomEncoder)
def main():
parser = argparse.ArgumentParser(description='Process log file.')
parser.add_argument("files", type=str, nargs='+', help="the file
|
to read")
args = parser.parse_args()
for file in args.files:
with open(file, "rb") as lfh:
log_file_str = lfh.read().decode('utf-8')
analyzer = EvgLogFileAnalyzer(log_file_str)
analyzer.analyze()
faults = analyzer.get_faults()
if len(faults) == 0:
print("===========================")
print("Analysis failed for test: " + file)
print("===========================")
return
for f in analyzer.get_faults():
print(f)
print(analyzer.to_json())
f = json.loads(analyzer.to_json(), cls=faultinfo.CustomDecoder)
#print (f);
if __name__ == '__main__':
main()
|
DorianDepriester/mtex2abaqus
|
MTEX2abaqus/AbaqusImport.py
|
Python
|
mit
| 2,602
| 0.04804
|
import string
import csv
import os
from abaqusConstants import *
from part import *
from material import *
from section import *
from assembly import *
from load import *
from mesh import *
from visualization import *
def importEBSD(inpFileName):
while True:
fileName, file_extension = os.path.splitext(inpFileName)
# Load grain properties
try:
file = open(fileName+'.csv', "r")
reader = csv.DictReader(file,delimiter='\t',lineterminator='\n',quoting = csv.QUOTE_NONNUMERIC)
phase=[];Xx=[];Xy=[];Xz=[];Yx=[];Yy=[];Yz=[]
for row in reader:
phase.append(row['Phase'],)
Xx.append(row['Xx'],)
Xy.append(row['Xy'],)
Xz.append(row['Xz'],)
Yx.append(row['Yx'],)
Yy.append(row['Yy'],)
Yz.append(row['Yz'],)
file.close()
except IOError:
print 'Error:',fileName+'.csv','not found.'
break
mdbName=os.path.basename(fileName)
# Import INP file
try:
mdb.ModelFromInputFile(name=mdbName,inputFileName=inpFileName)
pk=mdb.models[mdbName].parts.keys()
partName=pk[0]
except IndexError:
print 'Error:',fileName+'.inp','not found.'
break
# Set the new part visible
p1 = mdb.models[mdbName].parts[partName]
session.viewports['Viewport: 1'].setValues(displayedObject=p1)
# Copy sets from assembly to part
a=mdb.models[mdbName].rootAssembly
sets=a.sets
sets_list=sets.keys()
p = mdb.models[mdbName].parts[partName]
for grainID in sets_list:
set_i=sets[grainID]
if grainID.startswith('GRAIN'):
IDs=[j.label for j in set_i.elements]
p.SetFromElementLabels(elementLabels=IDs,name=grainID)
# Assign properties to elements
phaseList=set(phase)
for i in list(phaseList):
mdb.models[mdbName].Material(name=i)
for i in range(0,len(phase)):
sectionID='GRAIN_{:d}'.format(i+1)
mdb.models[mdbName].HomogeneousSolidSection(name=sectionID, material=phase[i],thickness=None)
region = p.sets[sectionID]
p.SectionAssignment(region=region, sectionName=sectionID,offset=0.0,offsetType=MIDDLE_SURFACE, offsetField='',thicknessAssignment=FROM_SECTION)
datumName='ORIENT_{:d}'.format(i+1)
p.DatumCsysByThreePoints(name=datumName, coordSysType=CARTESIAN, origin=(.0,.0,.0), point1=(Xx[i], Xy[i], Xz[i]), point2=(Yx[i], Yy[i], Yz[i]))
id=p.features[datumName].id
orientation = p.datums[id]
p.MaterialOrientation(region=region,orientationType=SYSTEM,axis=AXIS_3,localCsys=orientation,fieldName='',additionalRotationType=ROTATION_NONE, angle=0.0,additionalRotationField='', stackDirection=STACK_3)
break
|
pdgilbert/Vcourse
|
lib/GUIutils.py
|
Python
|
gpl-2.0
| 2,664
| 0.033408
|
import tkinter
import logging
######################### Utility Functions #########################
def But(w, text='x', command='', side=tkinter.LEFT) :
b = tkinter.Button(w, text=text, command=command)
b.pack(side=side, padx=5, pady=5)
return(b)
def Drop(w, options=['zero', 'one', 'two'], default=0, command=None, font=("Helvetica", 10)) :
#command below needs to accept the selection, which is passed to it,
# eg, self.readRCWindow() will be passed (self, 'FX')
v = tkinter.StringVar(w)
v.set(options[default])
if command is None : b = tkinter.OptionMenu(w, v, *options)
else : b = tkinter.OptionMenu(w, v, *options, command=command)
b.config(font=font)
b.pack(side=tkinter.LEFT)
#b.config(font=("Helvetica", 10)) does not reset, default on next call does the reset
return v
def DROP(row, text, options=['zero', 'one', 'two'], default=0, command=None, font=("Helvetica", 10)) :
#command below needs to accept the selection, which is passed to it,
# eg, self.readRCWindow() will be passed (self, 'FX')
tkinter.Label(row, text= text, anchor='w').pack(side=tkinter.LEFT)
v = tkinter.StringVar(row)
v.set(options[default])
if command is None : b = tkinter.OptionMenu(row, v, *options)
else : b = tkinter.OptionMenu(row, v, *options, command=command)
b.config(font=font)
b.pack(side=tkinter.LEFT)
#b.config(font=("Helvetica", 10)) does not reset, default on next call does the reset
return v
def ROW(t, text, width=30, ebg=None, pad=5):
#ebg None means no entry field, otherwise color of entry field bg.
row = tkinter.Frame(t)
lab = tkinter.Label(row, width=width, text=text, anchor='w')
if ebg is None :
e = None
else :
e = tkinter.Entry(row, bg = ebg)
e.pack(side=tkinter.RIGHT, expand=tkinter.YES, fill=tkinter.X)
row.pack(side=tkinter.TOP, fill=tkinter.X, padx=pad, pady=pad)
lab.pack(side=tkinter.LEFT)
return e
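# Illustrative sketch of composing these helpers into a small window (the
# widget label and callback below are assumptions, not part of this module):
#
#     root = tkinter.Tk()
#     name_entry = ROW(root, 'Boat name', ebg='white')   # labelled entry row
#     But(root, text='OK', command=root.destroy)
#     root.mainloop()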
# foo is only used for option 2 step in plotWindow() but unfortunately must
# be global or it cannot be pickled for parallel operation.
def foo(n,h,p): return((n, gpsConnection(h,p).getGPS()))
def tkWarning(text, w= None, width=40):
# consider using
# from tkinter import messagebox
# tkinter.messagebox.showinfo("info name","This is a Test")
if w is not None : w.destroy()
logging.info('**** WARNING ***' + str(text))
t = tkinter.Toplevel()
t.wm_title("**** WARNING ***")
row = tkinter.Frame(t)
tkinter.Label(row, width=width, text=str(text), anchor='w').pack(side=tkinter.LEFT)
row.pack(side=tkinter.TOP, fill=tkinter.X, padx=5, pady=5)
|
rmyers/trove-dashboard
|
setup.py
|
Python
|
apache-2.0
| 673
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from setuptools import setup
setup(
setup_requires=['pbr'],
pbr=True,
)
|
Exgibichi/statusquo
|
test/functional/zmq_test.py
|
Python
|
mit
| 4,305
| 0.002091
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ API."""
import configparser
import os
import struct
from test_framework.test_framework import StatusquoTestFramework, SkipTest
from test_framework.util import (assert_equal,
bytes_to_hex_str,
)
class ZMQTest (StatusquoTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
def setup_nodes(self):
# Try to import python3-zmq. Skip this test if the import fails.
try:
import zmq
except ImportError:
raise SkipTest("python3-zmq module not available.")
# Check that statusquo has been built with ZMQ enabled
config = configparser.ConfigParser()
if not self.options.configfile:
self.options.configfile = os.path.dirname(__file__) + "/../config.ini"
config.read_file(open(self.options.configfile))
if not config["components"].getboolean("ENABLE_ZMQ"):
raise SkipTest("statusquod has not been built with zmq enabled.")
self.zmqContext = zmq.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.set(zmq.RCVTIMEO, 60000)
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
ip_address = "tcp://127.0.0.1:26121"
self.zmqSubSocket.connect(ip_address)
extra_args = [['-zmqpubhashtx=%s' % ip_address, '-zmqpubhashblock=%s' % ip_address], []]
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
def run_test(self):
try:
self._zmq_test()
finally:
# Destroy the zmq context
self.log.debug("Destroying zmq context")
self.zmqContext.destroy(linger=None)
def _zmq_test(self):
genhashes = self.nodes[0].generate(1)
self.sync_all()
self.log.info("Wait for tx")
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
assert_equal(topic, b"hashtx")
body = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) # must be sequence 0 on hashtx
self.log.info("Wait for block")
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) # must be sequence 0 on hashblock
blkhash = bytes_to_hex_str(body)
assert_equal(genhashes[0], blkhash) # blockhash from generate must be equal to the hash received over zmq
self.log.info("Generate 10 blocks (and 10 coinbase txes)")
n = 10
genhashes = self.nodes[1].generate(n)
self.sync_all()
zmqHashes = []
blockcount = 0
for x in range(n * 2):
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
if topic == b"hashblock":
zmqHashes.append(bytes_to_hex_str(body))
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount + 1)
blockcount += 1
for x in range(n):
assert_equal(genhashes[x], zmqHashes[x]) # blockhash from generate must be equal to the hash received over zmq
self.log.info("Wait for tx from second node")
# test tx from a second node
hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
# now we should receive a zmq msg because the tx was broadcast
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
assert_equal(topic, b"hashtx")
hashZMQ = bytes_to_hex_str(body)
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount + 1)
assert_equal(hashRPC, hashZMQ) # txid from sendtoaddress must be equal to the hash received over zmq
if __name__ == '__main__':
ZMQTest().main()
|
nijinashok/sos
|
sos/plugins/xfs.py
|
Python
|
gpl-2.0
| 1,029
| 0
|
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
from six.moves import zip
class Xfs(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""XFS filesystem
"""
plugin_name = 'xfs'
profiles = ('storage',)
def setup(self):
mounts = '/proc/mounts'
ext_fs_regex = r"^(/dev/.+).+xfs\s+"
for dev in zip(self.do_regex_find_all(ext_fs_regex, mounts)):
for e in dev:
parts = e.split(' ')
self.add_cmd_output("xfs_info %s" % (parts[1]))
self.add_cmd_output("xfs_admin -l -u %s" % (parts[1]))
self.add_copy_spec('/proc/fs/xfs')
# vim: set et ts=4 sw=4 :
|
kjordahl/xray
|
xray/core/dataset.py
|
Python
|
apache-2.0
| 72,806
| 0.00011
|
import functools
import warnings
from collections import Mapping, Sequence
from numbers import Number
import numpy as np
import pandas as pd
from . import ops
from . import utils
from . import common
from . import groupby
from . import indexing
from . import alignment
from . import formatting
from .. import conventions
from .alignment import align, partial_align
from .coordinates import DatasetCoordinates, Indexes
from .common import ImplementsDatasetReduce, BaseDataObject
from .utils import (Frozen, SortedKeysDict, ChainMap, maybe_wrap_array)
from .variable import as_variable, Variable, Coordinate, broadcast_variables
from .pycompat import (iteritems, itervalues, basestring, OrderedDict,
dask_array_type)
from .combine import concat
# list of attributes of pd.DatetimeIndex that are ndarrays of time info
_DATETIMEINDEX_COMPONENTS = ['year', 'month', 'day', 'hour', 'minute',
'second', 'microsecond', 'nanosecond', 'date',
'time', 'dayofyear', 'weekofyear', 'dayofweek',
'quarter']
def _get_virtual_variable(variables, key):
"""Get a virtual variable (e.g., 'time.year') from a dict of xray.Variable
objects (if possible)
"""
if not isinstance(key, basestring):
raise KeyError(key)
split_key = key.split('.', 1)
if len(split_key) != 2:
raise KeyError(key)
ref_name, var_name = split_key
ref_var = variables[ref_name]
if ref_var.ndim == 1:
date = ref_var.to_index()
elif ref_var.ndim == 0:
date = pd.Timestamp(ref_var.values)
else:
raise KeyError(key)
if var_name == 'season':
# TODO: move 'season' into pandas itself
seasons = np.array(['DJF', 'MAM', 'JJA', 'SON'])
month = date.month
data = seasons[(month // 3) % 4]
else:
data = getattr(date, var_name)
return ref_name, var_name, Variable(ref_var.dims, data)
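# --- Illustrative sketch added for clarity; not part of xray. ---
# Worked example of the month -> season bucketing used above: quarters start in
# December, so 12/1/2 map to 'DJF', 3/4/5 to 'MAM', and so on.
def _example_season_lookup():
    seasons = np.array(['DJF', 'MAM', 'JJA', 'SON'])
    months = np.array([12, 1, 2, 3, 6, 9])
    return seasons[(months // 3) % 4]   # -> ['DJF', 'DJF', 'DJF', 'MAM', 'JJA', 'SON']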
def _as_dataset_variable(name, var):
"""Prepare a variable for adding it to a Dataset
"""
try:
var = as_variable(var, key=name)
except TypeError:
raise TypeError('Dataset variables must be an array or a tuple of '
'the form (dims, data[, attrs, encoding])')
if name in var.dims:
        # convert the variable into an Index
if var.ndim != 1:
raise ValueError('an index variable must be defined with '
'1-dimensional data')
var = var.to_coord()
return var
def _align_variables(variables, join='outer'):
"""Align all DataArrays in the provided dict, leaving other values alone.
"""
alignable = [k for k, v in variables.items() if hasattr(v, 'indexes')]
aligned = align(*[variables[a] for a in alignable],
join=join, copy=False)
new_variables = OrderedDict(variables)
new_variables.update(zip(alignable, aligned))
return new_variables
def _expand_variables(raw_variables, old_variables={}, compat='identical'):
"""Expand a dictionary of variables.
Returns a dictionary of Variable objects suitable for inserting into a
Dataset._variables dictionary.
This includes converting tuples (dims, data) into Variable objects,
converting coordinate variables into Coordinate objects and expanding
DataArray objects into Variables plus coordinates.
Raises ValueError if any conflicting values are found, between any of the
new or old variables.
"""
new_variables = OrderedDict()
new_coord_names = set()
variables = ChainMap(new_variables, old_variables)
def maybe_promote_or_replace(name, var):
existing_var = variables[name]
if name not in existing_var.dims:
if name in var.dims:
variables[name] = var
else:
common_dims = OrderedDict(zip(existing_var.dims,
existing_var.shape))
common_dims.update(zip(var.dims, var.shape))
variables[name] = existing_var.expand_dims(common_dims)
new_coord_names.update(var.dims)
def add_variable(name, var):
var = _as_dataset_variable(name, var)
        if name not in variables:
variables[name] = var
new_coord_names.update(variables[name].dims)
else:
if not getattr(variables[name], compat)(var):
raise ValueError('conflicting value for variable %s:\n'
'first value: %r\nsecond value: %r'
% (name, variables[name], var))
if compat == 'broadcast_equals':
maybe_promote_or_replace(name, var)
for name, var in iteritems(raw_variables):
if hasattr(var, 'coords'):
# it's a DataArray
new_coord_names.update(var.coords)
for dim, coord in iteritems(var.coords):
if dim != name:
add_variable(dim, coord.variable)
var = var.variable
add_variable(name, var)
return new_variables, new_coord_names
def _calculate_dims(variables):
"""Calculate the dimensions corresponding to a set of variables.
Returns dictionary mapping from dimension names to sizes. Raises ValueError
if any of the dimension sizes conflict.
"""
dims = {}
last_used = {}
scalar_vars = set(k for k, v in iteritems(variables) if not v.dims)
for k, var in iteritems(variables):
for dim, size in zip(var.dims, var.shape):
if dim in scalar_vars:
raise ValueError('dimension %s already exists as a scalar '
'variable' % dim)
if dim not in dims:
dims[dim] = size
last_used[dim] = k
elif dims[dim] != size:
raise ValueError('conflicting sizes for dimension %r: '
'length %s on %r and length %s on %r' %
(dim, size, k, dims[dim], last_used[dim]))
return dims
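# --- Illustrative sketch added for clarity; not part of xray. ---
# Example of the dimension bookkeeping above, assuming the historical
# Variable(dims, data) constructor imported at the top of this module.
def _example_calculate_dims():
    variables = OrderedDict([
        ('t', Variable(('time',), np.arange(3))),
        ('tx', Variable(('time', 'space'), np.zeros((3, 2)))),
    ])
    return _calculate_dims(variables)   # -> {'time': 3, 'space': 2}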
def _merge_expand(aligned_self, other, overwrite_vars, compat):
possible_conflicts = dict((k, v) for k, v in aligned_self._variables.items()
if k not in overwrite_vars)
new_vars, new_coord_names = _expand_variables(other, possible_conflicts, compat)
replace_vars = aligned_self._variables.copy()
replace_vars.update(new_vars)
return replace_vars, new_vars, new_coord_names
def _merge_dataset(self, other, overwrite_vars, compat, join):
aligned_self, other = partial_align(self, other, join=join, copy=False)
replace_vars, new_vars, new_coord_names = _merge_expand(
aligned_self, other._variables, overwrite_vars, compat)
new_coord_names.update(other._coord_names)
return replace_vars, new_vars, new_coord_names
def _merge_dict(self, other, overwrite_vars, compat, join):
other = _align_variables(other, join='outer')
alignable = [k for k, v in other.items() if hasattr(v, 'indexes')]
aligned = partial_align(self, *[other[a] for a in alignable],
join=join, copy=False, exclude=overwrite_vars)
aligned_self = aligned[0]
other = OrderedDict(other)
other.update(zip(alignable, aligned[1:]))
return _merge_expand(aligned_self, other, overwrite_vars, compat)
def _assert_empty(args, msg='%s'):
if args:
raise ValueError(msg % args)
def as_dataset(obj):
"""Cast the given object to a Dataset.
Handles DataArrays, Datasets and dictionaries of variables. A new Dataset
object is only created in the last case.
"""
obj = getattr(obj, '_dataset', obj)
if not isinstance(obj, Dataset):
obj = Dataset(obj)
return obj
class Variables(Mapping):
def __init__(self, dataset):
self._dataset = dataset
def __iter__(self):
return (key for key in self._dataset._variables
if key not in self._dataset._coord_names)
def __len__(self):
return len(self._dataset._variables) - len(self._dataset._coord_names)
def __contains__(self,
|
durante987/nonogram_solver
|
tests/rules/test_init.py
|
Python
|
mit
| 3,997
| 0.001251
|
#!/usr/bin/env python3.8
import os
import sys
import unittest
SCRIPT_DIR = os.path.dirname(
os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(
os.path.normpath(os.path.join(SCRIPT_DIR, os.path.pardir, os.path.pardir)))
# pylint: disable=wrong-import-position
from nonogram.raster import BLACK
from nonogram.raster import UNKNOWN
from nonogram.raster import WHITE
from nonogram.raster.block import Block
from nonogram import rules
class TestRules(unittest.TestCase):
# pylint: disable=protected-access
def test_covering_blocks(self):
blocks = [
Block(start=1, end=10, length=4),
Block(start=6, end=9, length=4),
Block(start=3, end=7, length=4),
Block(start=2, end=4, length=4)
]
covering_blocks = rules._covering_blocks(blocks, start=2)
self.assertEqual([
Block(start=1, end=10, length=4),
Block(start=2, end=4, length=4)
], covering_blocks)
covering_blocks = rules._covering_blocks(blocks, start=3, end=5)
self.assertEqual([
Block(start=1, end=10, length=4),
Block(start=3, end=7, length=4)
], covering_blocks)
covering_blocks = rules._covering_blocks(blocks, start=0)
self.assertEqual([], covering_blocks)
def test_get_black_runs(self):
# mask = bytearray(map(ord, 'X.X ..X..X. .X'))
mask = bytearray([
BLACK, UNKNOWN, BLACK, WHITE, WHITE, UNKNOWN, UNKNOWN, BLACK,
UNKNOWN, UNKNOWN, BLACK, UNKNOWN, WHITE, UNKNOWN, BLACK
])
expected = [
Block(start=0, end=0, length=1),
Block(start=2, end=2, length=1),
Block(start=7, end=7, length=1),
Block(start=10, end=10, length=1),
Block(start=14, end=14, length=1)
]
self.assertEqual(expected, rules._get_black_runs(mask))
mask = bytearray([
UNKNOWN, BLACK, BLACK, WHITE, UNKNOWN, WHITE, UNKNOWN, UNKNOWN,
BLACK, BLACK
])
expected = [
Block(start=1, end=2, length=2),
Block(start=8, end=9, length=2)
]
self.assertEqual(expected, rules._get_black_runs(mask))
mask = bytearray([BLACK, BLACK, BLACK, BLACK])
expected = [Block(start=0, end=3, length=4)]
self.assertEqual(expected, rules._get_black_runs(mask))
mask = bytearray([WHITE, UNKNOWN, UNKNOWN, WHITE])
self.assertEqual([], rules._get_black_runs(mask))
mask = bytearray([BLACK, WHITE, BLACK, WHITE] + [BLACK] * 4 +
[UNKNOWN, BLACK])
expected = [
Block(start=0, end=0, length=1),
Block(start=2, end=2, length=1),
Block(start=4, end=7, length=4),
Block(start=9, end=9, length=1)
]
self.assertEqual(expected, rules._get_black_runs(mask))
def test_get_non_white_runs(self):
mask = bytearray(b' X. .....')
expected = [
Block(start=2, end=3, length=2),
Block(start=5, end=9, length=5)
]
self.assertEqual(expected, rules._get_non_white_runs(mask))
mask = bytearray(b'..X .X .')
expected = [
Block(start=0, end=2, length=3),
Block(start=5, end=6, length=2),
Block(start=9, end=9, length=1)
]
self.assertEqual(expected, rules._get_non_white_runs(mask))
mask = bytearray(b'. .X.X ')
expected = [
Block(start=0, end=0, length=1),
Block(start=5, end=8, length=4)
]
self.assertEqual(expected, rules._get_non_white_runs(mask))
mask = bytearray(b'. .X. ')
expected = [
Block(start=0, end=0, length=1),
Block(start=5, end=7, length=3)
]
self.assertEqual(expected, rules._get_non_white_runs(mask))
if __name__ == '__main__':
unittest.main()
|
DH-Box/dhbox
|
dhbox.py
|
Python
|
gpl-3.0
| 13,293
| 0.002558
|
import os, os.path, random, string, time, urllib2
from flask import Flask, flash, request, redirect, url_for, render_template, \
make_response, abort
import ast
from flask_sqlalchemy import SQLAlchemy
from flask_security import Security, SQLAlchemyUserDatastore, login_user, logout_user, \
UserMixin, RoleMixin, login_required, roles_required, current_user, LoginForm
from flaskext.markdown import Markdown
from wtforms.validators import DataRequired
from wtforms import TextField, Form
from werkzeug import generate_password_hash, check_password_hash
from werkzeug.contrib.fixers import ProxyFix
import schedule
from threading import Thread
import DockerBackend
# create application
app = Flask('dhbox')
app.wsgi_app = ProxyFix(app.wsgi_app)
Markdown(app)
# install_secret_key(app)
app.config.from_pyfile('config.cfg')
# app.template_folder = 'src/templates' if app.config['TESTING'] else 'dist/templates'
# app.static_folder = 'src/static' if app.config['TESTING'] else 'dist/static'
app.template_folder = 'dist/templates'
app.static_folder = 'dist/static'
# Create database connection object
db = SQLAlchemy(app)
all_apps = [
{'name': 'mallet', 'wiki-page': 'MALLET', 'display-name': 'MALLET'},
{'name': 'ntlk', 'wiki-page': 'NLTK', 'display-name': 'NLTK'},
{'name': 'filemanager', 'port': '8081', 'wiki-page': 'manager', 'display-name': 'File Manager'},
{'name': 'bash', 'port': '4200', 'wiki-page': 'Bash-shell', 'display-name': 'Command Line', 'height': 500},
{'name': 'rstudio', 'port': '8787', 'wiki-page': 'R-Studio', 'display-name': 'R Studio'},
{'name': 'brackets', 'port': '4444', 'wiki-page': 'Brackets', 'display-name': 'Brackets'},
{'name': 'apache', 'port': '80', 'hide': True},
{'name': 'jupyter', 'port': '8888', 'wiki-page': 'ipython', 'display-name': 'Jupyter Notebooks'},
# {'name': 'wordpress', 'port': '80', 'wiki-page': 'wordpress', 'display-name': 'WordPress'}
# {'name': 'website', 'port': '4000', 'wiki-page': 'webpage', 'display-name': 'Your Site'}
]
def get_app(key):
for app in all_apps:
if app['name'] == key:
return app
"""
MODELS
"""
roles_users = db.Table('roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
                       db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))
class Role(db.Model, RoleMixin):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), unique=True)
name = db.Column(db.String(255), unique=True)
pwdhash = db.Column(db.String(160))
active = db.Column(db.Boolean())
dhbox_duration = db.Column(db.Integer)
confirmed_at = db.Column(db.DateTime())
roles = db.relationship('Role', secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
def __init__(self, name, active, roles, email, password, dhbox_duration):
self.name = name
self.email = email.lower()
self.active = active
self.roles = roles
self.dhbox_duration = dhbox_duration
self.set_password(password)
def set_password(self, password):
self.pwdhash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.pwdhash, password)
class ExtendedLoginForm(LoginForm):
name = TextField('Name', [DataRequired()])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
user = User.query.filter_by(
name=self.name.data).first()
if user is None:
self.name.errors.append('Unknown username')
return False
if not user.check_password(self.password.data):
self.password.errors.append('Invalid password')
return False
self.user = user
return True
# Setup Flask-Security
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore, login_form=ExtendedLoginForm)
# Create an admin user to test with
def create_user_and_role():
first_user = User.query.filter(User.name == 'admin').first()
if not first_user:
user_email = app.config['ADMIN_EMAIL']
username = 'admin'
user_pass = app.config['ADMIN_PASS']
the_user = user_datastore.create_user(email=user_email, name=username, password=user_pass, dhbox_duration=1000000000)
check_admin_role = Role.query.filter(Role.name == 'admin').first()
if not check_admin_role:
the_role = user_datastore.create_role(name='admin', description='The administrator')
user_datastore.add_role_to_user(the_user, the_role)
else:
user_datastore.add_role_to_user(the_user, check_admin_role)
db.session.commit()
try:
DockerBackend.get_container_info(username)
print 'already have a container'
except Exception:
DockerBackend.setup_new_dhbox(username, user_pass, user_email)
def delete_user(user):
try:
user = User.query.filter(User.name == user).first()
db.session.delete(user)
db.session.commit()
except Exception, e:
print e
"""
URLS/VIEWS
"""
@app.route("/")
def index():
return render_template('index.html', institution=app.config['INSTITUTION'], demo=app.config['DEMO_ENABLED'])
@app.route("/signup")
def signup():
return render_template('signup.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/contact')
def contact():
return render_template('contact.html')
@app.route('/our_team')
def our_team():
return render_template('our_team.html')
@app.route('/news')
def news():
news_folder = 'src/templates/news/'
news_list = []
for file in os.listdir(news_folder):
with open(news_folder + file) as f:
content = f.read()
news_list.append(content)
return render_template('news.html', news_list=news_list)
@app.route('/port_4000')
def port_4000():
return render_template('port.html')
@app.route('/get_started')
def get_started():
return render_template('get_started.html')
@app.route("/login", methods=["GET", "POST"])
def login():
    form = LoginForm()
# if form.validate_on_submit():
# # login and validate the user...
# login_user(user)
# flash("Logged in successfully.", 'alert-success')
# return redirect(url_for("user_box", the_user=user.name) or url_for("index"))
return render_template("login.html", form=form)
@app.route('/admin')
@login_required
@roles_required('admin')
def admin():
containers = User.query.all()
containers_list = []
for container in containers:
uptime = DockerBackend.how_long_up(container.name)
time_left = DockerBackend.check_if_over_time(container)
time_left = DockerBackend.display_time(time_left)
containers_list.append({'name': container.name, 'uptime': uptime, 'time_left': time_left})
return render_template('admin.html', containers=containers_list)
@app.route('/demo', methods=["GET"])
def demonstration():
username = 'demo' + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
demo_user_object = user_datastore.create_user(email=username + '@demo.com', name=username, password='demo', dhbox_duration=3600)
db.session.commit()
login_user(demo_user_object)
DockerBackend.demo_dhbox(username)
return redirect('/dhbox/' + username)
@app.route('/dhbox/<the_user>')
@login_required
def user_box(the_user):
which_user = User.query.filter(User.name == str(the_user)).first()
    if current_user.__class__.__name__ == 'AnonymousUser':
return redirect(url_for("index"))
if which_user is None or current_user is None:
return redirect(url_for("index"))
login_user(which_user)
email_domain = which_user.email.split("@", 1)[1]
if email_domain == 'demo.com':
demo = True
else:
demo = False
try:
|
kaflesudip/TweetStats
|
update_tweets.py
|
Python
|
apache-2.0
| 2,656
| 0.00113
|
from twython import Twython
from models import Tweet
import datetime
import traceback
# import time
# first
APP_KEY = ''
APP_SECRET = ''
OAUTH_TOKEN = ''
OAUTH_TOKEN_SECRET = ''
# second
APP_KEY2 = ''
APP_SECRET2 = ''
OAUTH_TOKEN2 = ''
OAUTH_TOKEN_SECRET2 = ''
# third
APP_KEY3 = ''
APP_SECRET3 = ''
OAUTH_TOKEN3 = ''
OAUTH_TOKEN_SECRET3 = ''
def rotate_key():
global APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET
global APP_KEY2, APP_SECRET2, OAUTH_TOKEN2, OAUTH_TOKEN_SECRET2
global APP_KEY3, APP_SECRET3, OAUTH_TOKEN3, OAUTH_TOKEN_SECRET3
APP_KEY, APP_KEY2, APP_KEY3 = APP_KEY3, APP_KEY, APP_KEY2
APP_SECRET, APP_SECRET2, APP_SECRET3 = APP_SECRET3, APP_SECRET, APP_SECRET2
OAUTH_TOKEN, OAUTH_TOKEN2, OAUTH_TOKEN3 = OAUTH_TOKEN3, OAUTH_TOKEN, OAUTH_TOKEN2
OAUTH_TOKEN_SECRET, OAUTH_TOKEN_SECRET2, OAUTH_TOKEN_SECRET3 =\
OAUTH_TOKEN_SECRET3, OAUTH_TOKEN_SECRET, OAUTH_TOKEN_SECRET2
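# --- Illustrative note added for clarity; not part of the script. ---
# The rotation above cycles three credential sets so that each call promotes the
# "third" set into the active slot, e.g. with plain strings:
def _example_rotation_order():
    a, b, c = 'key1', 'key2', 'key3'
    a, b, c = c, a, b
    return a, b, c   # -> ('key3', 'key1', 'key2'); rotating again yields ('key2', 'key3', 'key1')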
def update_given_tweet(tweet_id):
twitter = Twython(
app_key=APP_KEY,
app_secret=APP_SECRET,
oauth_token=OAUTH_TOKEN,
oauth_token_secret=OAUTH_TOKEN_SECRET,
)
try:
status = twitter.show_status(id=tweet_id)
return status
except Exception as err:
# rotate_key()
print(err.msg)
if 'rate limit exceeded' in err.msg.lower() or 'max retries' in err.msg.lower():
rotate_key()
# twitter = Twython(
# app_key=APP_KEY,
# app_secret=APP_SECRET,
# oauth_token=OAUTH_TOKEN,
# oauth_token_secret=OAUTH_TOKEN_SECRET,
# )
traceback.print_tb(err.__traceback__)
return update_given_tweet(tweet_id)
traceback.print_tb(err.__traceback__)
return False
def update_database():
print("fetching")
    tweets = Tweet.objects(total_fetched=1, error_occured__ne=True)
print("updating")
i = 0
error_count = 0
for each_tweet in tweets:
print("loop")
data = update_given_tweet(each_tweet.tweet_id)
if not data:
error_count += 1
print("!!!!!!!!!error", error_count, "correct", i)
each_tweet.error_occured = True
each_tweet.save()
continue
elif data == 3:
continue
print("got data")
        data['fetched_timestamp'] = datetime.datetime.now()
data['fresh_tweet'] = False
data['update_count'] = 2
each_tweet.total_fetched = 2
each_tweet.tweets.append(data)
each_tweet.save()
print(i, "errors=", error_count)
i += 1
update_database()
|
omwdunkley/crazyflieROS
|
src/crazyflieROS/cflib/utils/callbacks.py
|
Python
|
gpl-2.0
| 1,843
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Callback objects used in the Crazyflie library
"""
__author__ = 'Bitcraze AB'
__all__ = ['Caller']
class Caller():
""" An object were callbacks can be registered and called """
def __init__(self):
""" Create the object """
self.callbacks = []
def add_callback(self, cb):
""" Register cb as a new callback. Will not register duplicates. """
        if cb not in self.callbacks:
self.callbacks.append(cb)
def remove_callback(self, cb):
""" Un-register cb from the callbacks """
self.callbacks.remove(cb)
def call(self, *args):
""" Call the callbacks registered with the arguments args """
for cb in self.callbacks:
cb(*args)
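# --- Illustrative usage sketch added for clarity; not part of the library. ---
# Registers a listener on a Caller and fans an event out to it.
if __name__ == "__main__":
    def on_data(value):
        print("received %s" % value)

    caller = Caller()
    caller.add_callback(on_data)
    caller.add_callback(on_data)   # duplicate registrations are ignored
    caller.call(42)                # prints "received 42" exactly once
    caller.remove_callback(on_data)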
|
sigproc/robotic_surgery
|
src/ros/crustcrawler_smart_arm/smart_arm_kinematics/nodes/test_ik_service.py
|
Python
|
mit
| 3,443
| 0.004066
|
#!/usr/bin/env python
# Copyright (c) 2010, Antons Rebguns. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author: Antons Rebguns
import roslib; roslib.load_manifest('smart_arm_kinematics')
import time
import rospy
from geometry_msgs.msg import PointStamped
from smart_arm_kinematics.srv import SmartArmIK
from std_msgs.msg import Float64
if __name__ == '__main__':
try:
rospy.init_node('test_ik_service', anonymous=True)
rospy.wait_for_service('smart_arm_ik_service')
shoulder_pan_pub = rospy.Publisher('shoulder_pan_controller/command', Float64)
shoulder_tilt_pub = rospy.Publisher('shoulder_pitch_controller/command', Float64)
elobow_tilt_pub = rospy.Publisher('elbow_flex_controller/command', Float64)
wrist_rotate_pub = rospy.Publisher('wrist_roll_controller/command', Float64)
rospy.sleep(1)
try:
ik_service = rospy.ServiceProxy('smart_arm_ik_service', SmartArmIK)
point = PointStamped()
point.header.stamp = rospy.Time.now()
point.header.frame_id = 'arm_base_link'
point.point.x = 0.15
point.point.y = 0.20
point.point.z = 0.0
resp = ik_service(point)
if resp:
rospy.loginfo('%s = %s' % (str(resp.success), str(resp.solutions)))
if resp.success:
rospy.logdebug("Publishing stuff")
sol1 = resp.solutions[0:4]
rospy.logdebug("%s" % str(sol1))
shoulder_pan_pub.publish(sol1[0])
shoulder_tilt_pub.publish(sol1[1])
elobow_tilt_pub.publish(sol1[2])
wrist_rotate_pub.publish(sol1[3])
except rospy.ServiceException, e:
rospy.logerr('Service call failed %s' % e)
except rospy.ROSInterruptException: pass
|
pferreir/indico
|
indico/modules/events/surveys/views.py
|
Python
|
mit
| 1,351
| 0.00074
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.modules.events.management.views import WPEventManagement
from indico.modules.events.views import WPConferenceDisplayBase, WPSimpleEventDisplayBase
from indico.web.views import WPJinjaMixin
class WPManageSurvey(WPEventManagement):
template_prefix = 'events/surveys/'
sidemenu_option = 'surveys'
bundles = ('module_events.surveys.js', 'module_events.surveys.css')
class WPSurveyResults(WPManageSurvey):
pass
class DisplaySurveyMixin(WPJinjaMixin):
template_prefix = 'events/surveys/'
base_class = None
def _get_body(self, params):
return WPJinjaMixin._get_page_content(self, params)
class WPDisplaySurveyConference(DisplaySurveyMixin, WPConferenceDisplayBase):
    template_prefix = 'events/surveys/'
base_class = WPConferenceDisplayBase
menu_entry_name = 'surveys'
bundles = ('module_events.surveys.js', 'module_events.surveys.css')
class WPDisplaySurveySimpleEvent(DisplaySurveyMixin, WPSimpleEventDisplayBase):
template_prefix = 'events/surveys/'
base_class = WPSimpleEventDisplayBase
bundles = ('module_events.surveys.js', 'module_events.surveys.css')
|
nikolay-fedotov/tempest
|
tempest/api/compute/admin/test_fixed_ips_negative.py
|
Python
|
apache-2.0
| 3,516
| 0
|
# Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
class FixedIPsNegativeTestJson(base.BaseV2ComputeAdminTest):
@classmethod
def resource_setup(cls):
super(FixedIPsNegativeTestJson, cls).resource_setup()
if CONF.service_available.neutron:
msg = ("%s skipped as neutron is available" % cls.__name__)
raise cls.skipException(msg)
cls.client = cls.os_adm.fixed_ips_client
cls.non_admin_client = cls.fixed_ips_client
        resp, server = cls.create_test_server(wait_until='ACTIVE')
resp, server = cls.servers_client.get_server(server['id'])
for ip_set in server['addresses']:
for ip in server['addresses'][ip_set]:
if ip['OS-EXT-IPS:type'] == 'fixed':
cls.ip = ip['addr']
break
if cls.ip:
break
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_list_fixed_ip_details_with_non_admin_user(self):
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.get_fixed_ip_details, self.ip)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_set_reserve_with_non_admin_user(self):
body = {"reserve": "None"}
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.reserve_fixed_ip,
self.ip, body)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_set_unreserve_with_non_admin_user(self):
body = {"unreserve": "None"}
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.reserve_fixed_ip,
self.ip, body)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_set_reserve_with_invalid_ip(self):
# NOTE(maurosr): since this exercises the same code snippet, we do it
# only for reserve action
body = {"reserve": "None"}
# NOTE(eliqiao): in Juno, the exception is NotFound, but in master, we
# change the error code to BadRequest, both exceptions should be
# accepted by tempest
self.assertRaises((exceptions.NotFound, exceptions.BadRequest),
self.client.reserve_fixed_ip,
"my.invalid.ip", body)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_fixed_ip_with_invalid_action(self):
body = {"invalid_action": "None"}
self.assertRaises(exceptions.BadRequest,
self.client.reserve_fixed_ip,
self.ip, body)
class FixedIPsNegativeTestXml(FixedIPsNegativeTestJson):
_interface = 'xml'
|
LIP-Computing/pelogw
|
src/epilw.py
|
Python
|
apache-2.0
| 950
| 0
|
#!/usr/bin/python
'''
Epilog wrapper for docker images submission to batch systems
Created on Oct 28, 2015
@author: mariojmdavid@gmail.com
'''
import time
import os
import peUtils
if __name__ == '__main__':
print '==========================================='
print '==========================================='
ti = time.time()
print 'I am the EPILOG'
param = peUtils.getOptions()
    comp_stdout = os.environ[param['global']['comp_stdout']]
comp_stderr = os.environ[param['global']['comp_stderr']]
sub_host = os.environ[param['global']['sub_host']]
sub_workdir = os.environ[param['global']['sub_workdir']]
    print 'STDOUT: %s' % comp_stdout
print 'STDERR: %s' % comp_stderr
os.system('scp -r -q %s %s:%s' % (comp_stdout, sub_host, sub_workdir))
os.system('scp -r -q %s %s:%s' % (comp_stderr, sub_host, sub_workdir))
os.system('rm -f *')
print '==========================================='
|
google/ctfscoreboard
|
scoreboard/mail.py
|
Python
|
apache-2.0
| 4,180
| 0
|
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from email.mime import text
import email.utils
import smtplib
import socket
import mailjet_rest
from scoreboard import main
app = main.get_app()
class MailFailure(Exception):
"""Inability to send mail."""
pass
def send(message, subject, to, to_name=None, sender=None, sender_name=None):
"""Send an email."""
sender = sender or app.config.get('MAIL_FROM')
sender_name = sender_name or app.config.get('MAIL_FROM_NAME') or ''
mail_provider = app.config.get('MAIL_PROVIDER')
if mail_provider is None:
app.logger.error('No MAIL_PROVIDER configured!')
raise MailFailure('No MAIL_PROVIDER configured!')
elif mail_provider == 'smtp':
_send_smtp(message, subject, to, to_name, sender, sender_name)
elif mail_provider == 'mailjet':
_send_mailjet(message, subject, to, to_name, sender, sender_name)
else:
app.logger.error('Invalid MAIL_PROVIDER configured!')
raise MailFailure('Invalid MAIL_PROVIDER configured!')
def _send_smtp(message, subject, to, to_name, sender, sender_name):
"""SMTP implementation of sending email."""
host = app.config.get('MAIL_HOST')
if not host:
raise MailFailure('SMTP Server Not Configured')
try:
server = smtplib.SMTP(host)
except (smtplib.SMTPConnectError, socket.error) as ex:
app.logger.error('Unable to send mail: %s', str(ex))
raise MailFailure('Error connecting to SMTP server.')
msg = text.MIMEText(message)
msg['Subject'] = subject
msg['To'] = email.utils.formataddr((to_name, to))
msg['From'] = email.utils.formataddr((sender_name, sender))
try:
if app.debug:
server.set_debuglevel(True)
server.sendmail(sender, [to], msg.as_string())
except (smtplib.SMTPException, socket.error) as ex:
app.logger.error('Unable to send mail: %s', str(ex))
raise MailFailure('Error sending mail to SMTP server.')
finally:
try:
server.quit()
except smtplib.SMTPException:
pass
def _send_mailjet(message, subject, to, to_name, sender, sender_name):
"""Mailjet implementation of sending email."""
api_key = app.config.get('MJ_APIKEY_PUBLIC')
api_secret = app.config.get('MJ_APIKEY_PRIVATE')
if not api_key or not api_secret:
app.logger.error('Missing MJ_APIKEY_PUBLIC/MJ_APIKEY_PRIVATE!')
return
# Note the data structures we use are api v3.1
client = mailjet_rest.Client(
auth=(api_key, api_secret),
api_url='https://api.mailjet.com/',
version='v3.1')
from_obj = {
"Email": sender,
}
if sender_name:
from_obj["Name"] = sender_name
to_obj = [{
"Email": to,
}]
if to_name:
to_obj[0]["Name"] = to_name
message = {
"From": from_obj,
"To": to_obj,
"Subject": subject,
"TextPart": message,
}
result = client.send.create(data={'Messages': [message]})
if result.status_code != 200:
app.logger.error(
'Error sending via mailjet: (%d) %r',
result.status_code, result.text)
raise MailFailure('Error sending via mailjet!')
try:
j = result.json()
except Exception:
app.logger.error('Error sending via mailjet: %r', result.text)
raise MailFailure('Error sending via mailjet!')
if j['Messages'][0]['Status'] != 'success':
app.logger.error('Error sending via mailjet: %r', j)
raise MailFailure('Error sending via mailjet!')
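# --- Illustrative usage sketch added for clarity; not part of the module. ---
# How a caller might send a notification and degrade gracefully on failure.
# The addresses below are placeholders.
def _example_notify_player():
    try:
        send('Your challenge was solved.', 'Scoreboard update',
             'player@example.com', to_name='Player One')
    except MailFailure as ex:
        app.logger.warning('Could not send notification: %s', ex)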
|
dakiri/splunk-app-twitter
|
twitter2/bin/oauthlib/oauth2/rfc6749/parameters.py
|
Python
|
apache-2.0
| 12,583
| 0.000397
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
"""
oauthlib.oauth2.rfc6749.parameters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains methods related to `Section 4`_ of the OAuth 2 RFC.
.. _`Section 4`: http://tools.ietf.org/html/rfc6749#section-4
"""
import json
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from oauthlib.common import add_params_to_uri, add_params_to_qs, unicode_type
from .errors import raise_from_error, MissingTokenError, MissingTokenTypeError
from .errors import MismatchingStateError, MissingCodeError
from .errors import InsecureTransportError
from .utils import list_to_scope, scope_to_list, is_secure_transport
def prepare_grant_uri(uri, client_id, response_type, redirect_uri=None,
scope=None, state=None, **kwargs):
"""Prepare the authorization grant request URI.
The client constructs the request URI by adding the following
parameters to the query component of the authorization endpoint URI
using the ``application/x-www-form-urlencoded`` format as defined by
[`W3C.REC-html401-19991224`_]:
:param response_type: To indicate which OAuth 2 grant/flow is required,
"code" and "token".
:param client_id: The client identifier as described in `Section 2.2`_.
:param redirect_uri: The client provided URI to redirect back to after
authorization as described in `Section 3.1.2`_.
:param scope: The scope of the access request as described by
`Section 3.3`_.
:param state: An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent
back to the client. The parameter SHOULD be used for
preventing cross-site request forgery as described in
`Section 10.12`_.
:param kwargs: Extra arguments to embed in the grant/authorization URL.
An example of an authorization code grant authorization URL:
.. code-block:: http
GET /authorize?response_type=code&client_id=s6BhdRkqt3&state=xyz
&redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb HTTP/1.1
Host: server.example.com
.. _`W3C.REC-html401-19991224`: http://tools.ietf.org/html/rfc6749#ref-W3C.REC-html401-19991224
.. _`Section 2.2`: http://tools.ietf.org/html/rfc6749#section-2.2
.. _`Section 3.1.2`: http://tools.ietf.org/html/rfc6749#section-3.1.2
.. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
.. _`section 10.12`: http://tools.ietf.org/html/rfc6749#section-10.12
"""
if not is_secure_transport(uri):
raise InsecureTransportError()
params = [(('response_type', response_type)),
(('client_id', client_id))]
if redirect_uri:
params.append(('redirect_uri', redirect_uri))
if scope:
params.append(('scope', list_to_scope(scope)))
if state:
params.append(('state', state))
for k in kwargs:
if kwargs[k]:
params.append((unicode_type(k), kwargs[k]))
return add_params_to_uri(uri, params)
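# --- Illustrative sketch added for clarity; not part of oauthlib. ---
# Builds an authorization URL matching the docstring example above; the
# client_id, redirect_uri and state values are placeholders.
def _example_prepare_grant_uri():
    return prepare_grant_uri(
        'https://server.example.com/authorize',
        client_id='s6BhdRkqt3', response_type='code',
        redirect_uri='https://client.example.com/cb',
        scope=['profile'], state='xyz')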
def prepare_token_request(grant_type, body='', **kwargs):
"""Prepare the access token request.
The client makes a request to the token endpoint by adding the
following parameters using the ``application/x-www-form-urlencoded``
format in the HTTP request entity-body:
:param grant_type: To indicate grant type being used, i.e. "password",
"authorization_code" or "client_credentials".
:param body: Existing request body to embed parameters in.
:param code: If using authorization code grant, pass the previously
obtained authorization code as the ``code`` argument.
:param redirect_uri: If the "redirect_uri" parameter was included in the
authorization request as described in
`Section 4.1.1`_, and their values MUST be identical.
:param kwargs: Extra arguments to embed in the request body.
An example of an authorization code token request body:
.. code-block:: http
grant_type=authorization_code&code=SplxlOBeZQQYbYS6WxSbIA
&redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb
.. _`Section 4.1.1`: http://tools.ietf.org/html/rfc6749#section-4.1.1
"""
params = [('grant_type', grant_type)]
if 'scope' in kwargs:
kwargs['scope'] = list_to_scope(kwargs['scope'])
for k in kwargs:
if kwargs[k]:
params.append((unicode_type(k), kwargs[k]))
return add_params_to_qs(body, params)
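# --- Illustrative sketch added for clarity; not part of oauthlib. ---
# Builds the authorization_code token request body shown in the docstring
# above; the code and redirect_uri values are placeholders.
def _example_prepare_token_request():
    return prepare_token_request(
        'authorization_code', code='SplxlOBeZQQYbYS6WxSbIA',
        redirect_uri='https://client.example.com/cb')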
def parse_authorization_code_response(uri, state=None):
"""Parse authorization grant response URI into a dict.
If the resource owner grants the access request, the authorization
server issues an authorization code and delivers it to the client by
adding the following parameters to the query component of the
redirection URI using the ``application/x-www-form-urlencoded`` format:
**code**
REQUIRED. The authorization code generated by the
authorization server. The authorization code MUST expire
shortly after it is issued to mitigate the risk of leaks. A
maximum authorization code lifetime of 10 minutes is
RECOMMENDED. The client MUST NOT use the authorization code
more than once. If an authorization code is used more than
once, the authorization server MUST deny the request and SHOULD
revoke (when possible) all tokens previously issued based on
that authorization code. The authorization code is bound to
the client identifier and redirection URI.
**state**
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
:param uri: The full redirect URL back to the client.
:param state: The state parameter from the authorization request.
For example, the authorization server redirects the user-agent by
sending the following HTTP response:
.. code-block:: http
HTTP/1.1 302 Found
Location: https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA
&state=xyz
"""
if not is_secure_transport(uri):
raise InsecureTransportError()
query = urlparse.urlparse(uri).query
params = dict(urlparse.parse_qsl(query))
if not 'code' in params:
raise MissingCodeError("Missing code parameter in response.")
if state and params.get('state', None) != state:
raise MismatchingStateError()
return params
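# --- Illustrative sketch added for clarity; not part of oauthlib. ---
# Extracts the authorization code from a redirect URL like the docstring
# example above; the code and state values are placeholders.
def _example_parse_code_response():
    redirect = ('https://client.example.com/cb'
                '?code=SplxlOBeZQQYbYS6WxSbIA&state=xyz')
    return parse_authorization_code_response(redirect, state='xyz')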
def parse_implicit_response(uri, state=None, scope=None):
"""Parse the implicit token response URI into a dict.
If the resource owner grants the access request, the authorization
server issues an access token and delivers it to the client by adding
the following parameters to the fragment component of the redirection
URI using the ``application/x-www-form-urlencoded`` format:
**access_token**
REQUIRED. The access token issued by the authorization server.
**token_type**
REQUIRED. The type of the token issued as described in
Section 7.1. Value is case insensitive.
**expires_in**
        RECOMMENDED. The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
**scope**
OPTIONAL, if identical to the scope requested by the client,
otherwise REQUIRED. The scope of the access token as described
by Section 3.3.
**state**
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
|
ryfeus/lambda-packs
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/keras/__init__.py
|
Python
|
mit
| 1,351
| 0.00074
|
"""Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.python.keras import Input
from tensorflow.python.keras import Model
from tensorflow.python.keras import Sequential
from tensorflow.tools.api.generator.api.keras import activations
from tensorflow.tools.api.generator.api.keras import applications
from tensorflow.tools.api.generator.api.keras import backend
from tensorflow.tools.api.generator.api.keras import callbacks
from tensorflow.tools.api.generator.api.keras import constraints
from tensorflow.tools.api.generator.api.keras import datasets
from tensorflow.tools.api.generator.api.keras import estimator
from tensorflow.tools.api.generator.api.keras import initializers
from tensorflow.tools.api.generator.api.keras import layers
from tensorflow.tools.api.generator.api.keras import losses
from tensorflow.tools.api.generator.api.keras import metrics
from tensorflow.tools.api.generator.api.keras import models
from tensorflow.tools.api.generator.api.keras import optimizers
from tensorflow.tools.api.generator.api.keras import preprocessing
from tensorflow.tools.api.generator.api.keras import regularizers
from tensorflow.tools.api.generator.api.keras import utils
from tensorflow.tools.api.generator.api.keras import wrappers
|
vsajip/django
|
tests/regressiontests/generic_inline_admin/tests.py
|
Python
|
bsd-3-clause
| 17,051
| 0.002932
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.contenttypes.generic import (
generic_inlineformset_factory, GenericTabularInline)
from django.forms.models import ModelForm
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
# local test models
from .admin import MediaInline, MediaPermanentInline
from .models import (Episode, EpisodeExtra, EpisodeMaxNum, Media,
EpisodePermanent, Category)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericAdminViewTest(TestCase):
urls = "regressiontests.generic_inline_admin.urls"
fixtures = ['users.xml']
def setUp(self):
# set TEMPLATE_DEBUG to True to ensure {% include %} will raise
# exceptions since that is how inlines are rendered and #9498 will
# bubble up if it is an issue.
self.original_template_debug = settings.TEMPLATE_DEBUG
settings.TEMPLATE_DEBUG = True
self.client.login(username='super', password='secret')
# Can't load content via a fixture (since the GenericForeignKey
# relies on content type IDs, which will vary depending on what
# other tests have been run), thus we do it here.
e = Episode.objects.create(name='This Week in Django')
self.episode_pk = e.pk
m = Media(content_object=e, url='http://example.com/podcast.mp3')
m.save()
self.mp3_media_pk = m.pk
m = Media(content_object=e, url='http://example.com/logo.png')
m.save()
self.png_media_pk = m.pk
def tearDown(self):
self.client.logout()
settings.TEMPLATE_DEBUG = self.original_template_debug
def testBasicAddGet(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/add/')
self.assertEqual(response.status_code, 200)
def testBasicEditGet(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk)
self.assertEqual(response.status_code, 200)
def testBasicAddPost(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
}
response = self.client.post('/generic_inline_admin/admin/generic_inline_admin/episode/add/', post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def testBasicEditPost(self):
"""
A smoke test to ensure POST on edit_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-0-id": "%d" % self.mp3_media_pk,
"generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3",
"generic_inline_admin-media-content_type-object_id-1-id": "%d" % self.png_media_pk,
"generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png",
"generic_inline_admin-media-content_type-object_id-2-id": "",
"generic_inline_admin-media-content_type-object_id-2-url": "",
}
url = '/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def testGenericInlineFormset(self):
EpisodeMediaFormSet = generic_inlineformset_factory(Media, can_delete=False, exclude=['description', 'keywords'], extra=3)
e = Episode.objects.get(name='This Week in Django')
# Works with no queryset
formset = EpisodeMediaFormSet(instance=e)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="text" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.mp3_media_pk)
        self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="text" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="text" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# A queryset can be used to alter display ordering
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.order_by('url'))
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="text" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="text" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="text" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# Works with a queryset that omits items
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.filter(url__endswith=".png"))
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><labe
|
giliam/sharbrary
|
discussion/tests.py
|
Python
|
gpl-2.0
| 7,655
| 0.012021
|
# coding: utf-8
from django.test import TestCase
from django.contrib.auth.models import User, Permission
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Group
from utils.tests.common_test_case import CommonTestCase, with_login_user
from discussion.models import Discussion, Message
class DiscussionTestCase(CommonTestCase):
@with_login_user()
def test_creation(self):
data = {
'title': 'Sujet de conversation',
}
response = self.client.post(reverse('discussion_new'), data)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, reverse('discussion_detail',args=[1]))
try:
discussion = Discussion.objects.get(**data)
except Discussion.DoesNotExist:
discussion = None
self.assertIsNotNone(discussion)
# Then check if the book has been created
response = self.client.get(reverse('discussion_list'))
self.assertQuerysetEqual(response.context['discussions'].all(),[repr(discussion)])
@with_login_user('bub')
def test_creation_no_right(self):
response = self.client.post(reverse('discussion_new'))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/sharing/login/?next=/discussion/discussion/new')
@with_login_user()
def test_deletion(self):
discussion = Discussion.objects.create(title='La vie et le reste')
response = self.client.get(reverse('discussion_list'))
self.assertQuerysetEqual(response.context['discussions'].all(),[repr(discussion)])
response = self.client.post(reverse('discussion_delete',args=[discussion.id]),{})
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, reverse('discussion_list'))
try:
discussion = Discussion.objects.get(title="La vie et le reste")
except Discussion.DoesNotExist:
discussion = None
self.assertIsNone(discussion)
# Then check if the book has been created
response = self.client.get(reverse('discussion_list'))
self.assertQuerysetEqual(response.context['discussions'].all(),[])
@with_login_user('bub')
def test_deletion_no_right(self):
discussion = Discussion.objects.create(title='La vie et le reste')
response = self.client.post(reverse('discussion_delete',args=[discussion.id]),{})
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/sharing/login/?next=' + reverse('discussion_delete',args=[discussion.id]))
@with_login_user()
def test_update(self):
discussion = Discussion.objects.create(title='La vie et le reste')
data = {
'title': 'Les etoiles',
}
response = self.client.post(reverse('discussion_edit',args=[discussion.id]),data)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, reverse('discussion_list'))
try:
discussion = Discussion.objects.get(**data)
except Discussion.DoesNotExist:
discussion = None
self.assertIsNotNone(discussion)
class MessageTestCase(CommonTestCase):
def setUp(self):
super(MessageTestCase,self).setUp()
self.discussion = Discussion.objects.create(title='La vie et le reste',author=self.bib)
@with_login_user()
def test_creation(self):
data = {
'message': 'Lorem ipsum lala bobium rasam est nivudae.',
}
response = self.client.post(reverse('message_new',args=[self.discussion.id]), data)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, reverse('discussion_detail',args=[self.discussion.id]))
try:
message = Message.objects.get(discussion=self.discussion,**data)
except Message.DoesNotExist:
message = None
self.assertIsNotNone(message)
# Then check if the book has been created
response = self.client.get(reverse('discussion_detail',args=[self.discussion.id]))
self.assertQuerysetEqual(response.context['messages_discussion'].all(),[repr(message)])
@with_login_user('bub')
def test_creation_no_right(self):
response = self.client.post(reverse('message_new',args=[self.discussion.id]))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/sharing/login/?next=' + reverse('message_new',args=[self.discussion.id]))
@with_login_user()
def test_deletion(self):
message = Message.objects.create(discussion=self.discussion,message='Les pommes de terre sont cuites.')
response = self.client.get(reverse('discussion_detail',args=[self.discussion.id]))
self.assertQuerysetEqual(response.context['messages_discussion'].all(),[repr(message)])
response = self.client.post(reverse('message_delete',args=[message.id]),{})
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, reverse('discussion_list'))
try:
message = Message.objects.get(discussion=self.discussion,message="Les pommes de terre sont cuites.")
except Message.DoesNotExist:
message = None
self.assertIsNone(message)
# Then check if the book has been created
response = self.client.get(reverse('discussion_detail',args=[self.discussion.id]))
self.assertQuerysetEqual(response.context['messages_discussion'].all(),[])
@with_login_user('bub')
def test_deletion_no_right(self):
message = Message.objects.create(discussion=self.discussion,message='Tout va bien ici !')
response = self.client.post(reverse('message_delete',args=[message.id]),{})
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/sharing/login/?next=' + reverse('message_delete',args=[message.id]))
@with_login_user('bib')
def test_deletion_not_my_message(self):
        message = Message.objects.create(discussion=self.discussion,message='Yoplait !',author=self.bob)
response = self.client.post(reverse('message_delete',args=[message.id]),{})
# only moderators can delete messages
self.assertEqual(response.status_code, 403)
@with_login_user('bib')
def test_update(self):
message = Message.objects.create(discussion=self.discussion,message='La vie et le reste',author=self.bib)
data = {
'message': 'Les etoiles',
}
response = self.client.post(reverse('message_edit',args=[message.id]),data)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, reverse('discussion_list'))
try:
message = Message.objects.get(**data)
except Message.DoesNotExist:
message = None
self.assertIsNotNone(message)
@with_login_user('bib')
def test_update_not_my_message(self):
message = Message.objects.create(discussion=self.discussion,message='Yoplait !',author=self.bob)
data = {
'message': 'Salut les amis, ça ne passera jamais de toute façon...',
}
response = self.client.post(reverse('message_edit',args=[message.id]),data)
self.assertEqual(response.status_code, 403)
try:
message = Message.objects.get(discussion=self.discussion,**data)
except Message.DoesNotExist:
message = None
self.assertIsNone(message)
|
pair-code/lit
|
lit_nlp/lib/wsgi_serving.py
|
Python
|
apache-2.0
| 4,652
| 0.004944
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""WSGI servers to power the LIT backend."""
import socket
import threading
from typing import Optional, Text, List
from wsgiref import validate
import wsgiref.simple_server
from absl import logging
import portpicker
from werkzeug import serving as werkzeug_serving
class BasicDevServer(object):
"""Basic development server; not recommended for deployment."""
def __init__(self, wsgi_app, port: int = 4321, host: Text = '127.0.0.1',
**unused_kw):
self._port = port
self._host = host
self._app = wsgi_app
self.can_act_as_model_server = True
def serve(self):
"""Start serving."""
logging.info(('\n\nStarting Server on port %d'
'\nYou can navigate to %s:%d\n\n'), self._port, self._host,
self._port)
werkzeug_serving.run_simple(
self._host,
self._port,
self._app,
use_debugger=False,
use_reloader=False)
class WsgiServerIpv6(wsgiref.simple_server.WSGIServer):
"""IPv6 based extension of the simple WSGIServer."""
address_family = socket.AF_INET6
class NotebookWsgiServer(object):
"""WSGI server for notebook environments."""
def __init__(self, wsgi_app, host: Text = 'localhost',
port: Optional[int] = None, **unused_kw):
"""Initialize the WSGI server.
Args:
wsgi_app: WSGI pep-333 application to run.
host: Host to run on, defaults to 'localhost'.
port: Port to run on. If not specified, then an unused one will be picked.
"""
self._app = wsgi_app
self._host = host
self._port = port
self._server_thread = None
self.can_act_as_model_server = False
@property
def port(self):
"""Returns the current port or error if the server is not started.
Raises:
RuntimeError: If server has not been started yet.
Returns:
The port being used by the server.
"""
if self._server_thread is None:
raise RuntimeError('Server not started.')
return self._port
def stop(self):
"""Stops the server thread."""
if self._server_thread is None:
return
self._stopping.set()
self._server_thread = None
self._stopped.wait()
def serve(self):
"""Starts a server in a thread using the WSGI application provided.
    Will wait until the thread has started. Calling with an already-serving
    application will simply return.
"""
if self._server_thread is not None:
return
if self._port is None:
self._port = portpicker.pick_unused_port()
started = threading.Event()
self._stopped = threading.Event()
self._stopping = threading.Event()
def build_server(started, stopped, stopping):
"""Closure to build the server function to be passed to the thread.
Args:
started: Threading event to notify when started.
stopped: Threading event to notify when stopped.
stopping: Threading event to notify when stopping.
Returns:
        A function that takes a port and WSGI app and notifies
about its status via the threading events provided.
"""
def server(port, wsgi_app):
"""Serve a WSGI application until stopped.
Args:
port: Port number to serve on.
wsgi_app: WSGI application to serve.
"""
try:
httpd = wsgiref.simple_server.make_server(self._host, port, wsgi_app)
except socket.error:
# Try IPv6
httpd = wsgiref.simple_server.make_server(
self._host, port, wsgi_app, server_class=WsgiServerIpv6)
started.set()
httpd.timeout = 30
while not stopping.is_set():
httpd.handle_request()
stopped.set()
return server
server = build_server(started, self._stopped, self._stopping)
server_thread = threading.Thread(
target=server, args=(self._port, self._app))
self._server_thread = server_thread
server_thread.start()
started.wait()
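For orientation, a minimal usage sketch of NotebookWsgiServer (not part of the original file; the WSGI app below is a made-up placeholder):
def _hello_app(environ, start_response):
    # Placeholder WSGI app used only to exercise the server above.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello from the notebook server']
notebook_server = NotebookWsgiServer(_hello_app)
notebook_server.serve()                  # blocks only until the serving thread is up
print('serving on port', notebook_server.port)
notebook_server.stop()                   # returns once the serving loop exits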
|
ygol/odoo
|
addons/hr_holidays/tests/__init__.py
|
Python
|
agpl-3.0
| 460
| 0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import test_access_rights
from . import test_automatic_leave_dates
from . import test_allocation_access_rights
from . import test_holidays_flow
from . import test_hr_leave_type
from . import test_accrual_allocations
from . import test_change_department
from . import test_leave_requests
from . import test_out_of_office
from . import test_company_leave
|
tfeldmann/tryagain
|
tasks.py
|
Python
|
mit
| 450
| 0
|
import os
from invoke import task
@task
def test():
os.system('coverage run --source tryagain -m py.test')
os.system('coverage report')
@task
def register(production=False):
target = 'pypi' if production else 'pypitest'
os.system('python3 setup.py register -r %s' % target)
@task
def upload(production=False):
target = 'pypi' if production else 'pypitest'
    os.system('python3 setup.py bdist_wheel upload -r %s' % target)
|
mancoast/CPythonPyc_test
|
fail/314_test_zlib.py
|
Python
|
gpl-3.0
| 23,273
| 0.001891
|
import unittest
from test import support
import binascii
import random
import sys
from test.support import precisionbigmemtest, _1G, _4G
zlib = support.import_module('zlib')
try:
import mmap
except ImportError:
mmap = None
class ChecksumTestCase(unittest.TestCase):
# checksum test cases
def test_crc32start(self):
self.assertEqual(zlib.crc32(b""), zlib.crc32(b"", 0))
self.assertTrue(zlib.crc32(b"abc", 0xffffffff))
def test_crc32empty(self):
self.assertEqual(zlib.crc32(b"", 0), 0)
self.assertEqual(zlib.crc32(b"", 1), 1)
self.assertEqual(zlib.crc32(b"", 432), 432)
def test_adler32start(self):
self.assertEqual(zlib.adler32(b""), zlib.adler32(b"
|
", 1))
self.assertTrue(zlib.adler32(b"abc", 0xffffffff))
def test_adler32empty(self):
self.assertEqual(zlib.adler32(b"", 0), 0)
self.assertEqual(zlib.adler32(b"", 1), 1)
self.assertEqual(zlib.adler32(b"", 432), 432)
def assertEqual32(self, seen, expected):
# 32-bit values masked -- checksums on 32- vs 64- bit machines
# This is important if bit 31 (0x08000000L) is set.
self.assertEqual(seen & 0x0FFFFFFFF, expected & 0x0FFFFFFFF)
def test_penguins(self):
self.assertEqual32(zlib.crc32(b"penguin", 0), 0x0e5c1a120)
self.assertEqual32(zlib.crc32(b"penguin", 1), 0x43b6aa94)
self.assertEqual32(zlib.adler32(b"penguin", 0), 0x0bcf02f6)
self.assertEqual32(zlib.adler32(b"penguin", 1), 0x0bd602f7)
self.assertEqual(zlib.crc32(b"penguin"), zlib.crc32(b"penguin", 0))
self.assertEqual(zlib.adler32(b"penguin"),zlib.adler32(b"penguin",1))
def test_crc32_adler32_unsigned(self):
foo = b'abcdefghijklmnop'
# explicitly test signed behavior
self.assertEqual(zlib.crc32(foo), 2486878355)
self.assertEqual(zlib.crc32(b'spam'), 1138425661)
self.assertEqual(zlib.adler32(foo+foo), 3573550353)
self.assertEqual(zlib.adler32(b'spam'), 72286642)
def test_same_as_binascii_crc32(self):
foo = b'abcdefghijklmnop'
crc = 2486878355
self.assertEqual(binascii.crc32(foo), crc)
self.assertEqual(zlib.crc32(foo), crc)
self.assertEqual(binascii.crc32(b'spam'), zlib.crc32(b'spam'))
# Issue #10276 - check that inputs >=4GB are handled correctly.
class ChecksumBigBufferTestCase(unittest.TestCase):
@unittest.skipUnless(mmap, "mmap() is not available.")
@unittest.skipUnless(sys.maxsize > _4G, "Can't run on a 32-bit system.")
@unittest.skipUnless(support.is_resource_enabled("largefile"),
"May use lots of disk space.")
def setUp(self):
with open(support.TESTFN, "wb+") as f:
f.seek(_4G)
f.write(b"asdf")
f.flush()
self.mapping = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
def tearDown(self):
self.mapping.close()
support.unlink(support.TESTFN)
def test_big_buffer(self):
self.assertEqual(zlib.crc32(self.mapping), 3058686908)
self.assertEqual(zlib.adler32(self.mapping), 82837919)
class ExceptionTestCase(unittest.TestCase):
# make sure we generate some expected errors
def test_badlevel(self):
# specifying compression level out of range causes an error
# (but -1 is Z_DEFAULT_COMPRESSION and apparently the zlib
# accepts 0 too)
self.assertRaises(zlib.error, zlib.compress, b'ERROR', 10)
def test_badargs(self):
self.assertRaises(TypeError, zlib.adler32)
self.assertRaises(TypeError, zlib.crc32)
self.assertRaises(TypeError, zlib.compress)
self.assertRaises(TypeError, zlib.decompress)
for arg in (42, None, '', 'abc', (), []):
self.assertRaises(TypeError, zlib.adler32, arg)
self.assertRaises(TypeError, zlib.crc32, arg)
self.assertRaises(TypeError, zlib.compress, arg)
self.assertRaises(TypeError, zlib.decompress, arg)
def test_badcompressobj(self):
# verify failure on building compress object with bad params
self.assertRaises(ValueError, zlib.compressobj, 1, zlib.DEFLATED, 0)
# specifying total bits too large causes an error
self.assertRaises(ValueError,
zlib.compressobj, 1, zlib.DEFLATED, zlib.MAX_WBITS + 1)
def test_baddecompressobj(self):
# verify failure on building decompress object with bad params
self.assertRaises(ValueError, zlib.decompressobj, -1)
def test_decompressobj_badflush(self):
# verify failure on calling decompressobj.flush with bad params
self.assertRaises(ValueError, zlib.decompressobj().flush, 0)
self.assertRaises(ValueError, zlib.decompressobj().flush, -1)
class BaseCompressTestCase(object):
def check_big_compress_buffer(self, size, compress_func):
_1M = 1024 * 1024
fmt = "%%0%dx" % (2 * _1M)
        # Generate 10MB worth of random data, and expand it by repeating it.
# The assumption is that zlib's memory is not big enough to exploit
# such spread out redundancy.
data = b''.join([binascii.a2b_hex(fmt % random.getrandbits(8 * _1M))
for i in range(10)])
data = data * (size // len(data) + 1)
try:
compress_func(data)
finally:
# Release memory
data = None
def check_big_decompress_buffer(self, size, decompress_func):
data = b'x' * size
try:
compressed = zlib.compress(data, 1)
finally:
# Release memory
data = None
data = decompress_func(compressed)
# Sanity check
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip(b'x')), 0)
finally:
data = None
class CompressTestCase(BaseCompressTestCase, unittest.TestCase):
# Test compression in one go (whole message compression)
def test_speech(self):
x = zlib.compress(HAMLET_SCENE)
self.assertEqual(zlib.decompress(x), HAMLET_SCENE)
def test_speech128(self):
# compress more data
data = HAMLET_SCENE * 128
x = zlib.compress(data)
self.assertEqual(zlib.compress(bytearray(data)), x)
for ob in x, bytearray(x):
self.assertEqual(zlib.decompress(ob), data)
def test_incomplete_stream(self):
        # A useful error message is given
x = zlib.compress(HAMLET_SCENE)
self.assertRaisesRegexp(zlib.error,
"Error -5 while decompressing data: incomplete or truncated stream",
zlib.decompress, x[:-1])
# Memory use of the following functions takes into account overallocation
@precisionbigmemtest(size=_1G + 1024 * 1024, memuse=3)
def test_big_compress_buffer(self, size):
compress = lambda s: zlib.compress(s, 1)
self.check_big_compress_buffer(size, compress)
@precisionbigmemtest(size=_1G + 1024 * 1024, memuse=2)
def test_big_decompress_buffer(self, size):
self.check_big_decompress_buffer(size, zlib.decompress)
@precisionbigmemtest(size=_4G + 100, memuse=1)
def test_length_overflow(self, size):
if size < _4G + 100:
self.skipTest("not enough free memory, need at least 4 GB")
data = b'x' * size
try:
self.assertRaises(OverflowError, zlib.compress, data, 1)
self.assertRaises(OverflowError, zlib.decompress, data)
finally:
data = None
class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
# Test compression object
def test_pair(self):
# straightforward compress/decompress objects
datasrc = HAMLET_SCENE * 128
datazip = zlib.compress(datasrc)
# should compress both bytes and bytearray data
for data in (datasrc, bytearray(datasrc)):
co = zlib.compressobj()
x1 = co.compress(data)
x2 = co.flush()
self.assertRaises(zlib.error, co.flush) # second flush should not work
self.assertEqual(x1 + x2, datazip)
for v1,
|
metabrainz/picard
|
test/test_api_helpers.py
|
Python
|
gpl-2.0
| 11,729
| 0.001108
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018 Wieland Hoffmann
# Copyright (C) 2018, 2020-2021 Laurent Monin
# Copyright (C) 2019-2022 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from unittest.mock import MagicMock
from test.picardtestcase import PicardTestCase
from picard.acoustid.manager import Submission
from picard.metadata import Metadata
from picard.webservice import WebService
from picard.webservice.api_helpers import (
AcoustIdAPIHelper,
APIHelper,
MBAPIHelper,
)
class APITest(PicardTestCase):
def setUp(self):
super().setUp()
self.host = "abc.com"
self.port = 80
self.api_path = "/v1/"
self.path_list = ['test', 'more', 'test']
self.complete_path = "/v1/test/more/test"
self.ws = MagicMock(auto_spec=WebService)
self.api = APIHelper(self.host, self.port, self.api_path, self.ws)
def _test_ws_function_args(self, ws_function):
self.assertGreater(ws_function.call_count, 0)
self.assertEqual(ws_function.call_args[0][0], self.host)
self.assertEqual(ws_function.call_args[0][1], self.port)
self.assertEqual(ws_function.call_args[0][2], self.complete_path)
def test_get(self):
self.api.get(self.path_list, None)
self._test_ws_function_args(self.ws.get)
def test_post(self):
self.api.post(self.path_list, None, None)
self._test_ws_function_args(self.ws.post)
def test_put(self):
self.api.put(self.path_list, None, None)
self._test_ws_function_args(self.ws.put)
def test_delete(self):
self.api.delete(self.path_list, None)
self._test_ws_function_args(self.ws.delete)
class MBAPITest(PicardTestCase):
def setUp(self):
super().setUp()
self.config = {'server_host': "mb.org", "server_port": 443}
self.set_config_values(self.config)
self.ws = MagicMock(auto_spec=WebService)
self.api = MBAPIHelper(self.ws)
def _test_ws_function_args(self, ws_function):
self.assertGreater(ws_function.call_count, 0)
self.assertEqual(ws_function.call_args[0][0], self.config['server_host'])
self.assertEqual(ws_function.call_args[0][1], self.config['server_port'])
self.assertIn("/ws/2/", ws_function.call_args[0][2])
def assertInPath(self, ws_function, path):
self.assertIn(path, ws_function.call_args[0][2])
def assertNotInPath(self, ws_function, path):
self.assertNotIn(path, ws_function.call_args[0][2])
def assertInQuery(self, ws_function, argname, value=None):
query_args = ws_function.call_args[1]['queryargs']
self.assertIn(argname, query_args)
self.assertEqual(value, query_args[argname])
def _test_inc_args(self, ws_function, arg_list):
self.assertInQuery(self.ws.get, 'inc', "+".join(arg_list))
def test_get_release(self):
inc_args_list = ['test']
self.api.get_release_by_id("1", None, inc=inc_args_list)
self._test_ws_function_args(self.ws.get)
self.assertInPath(self.ws.get, "/release/1")
self._test_inc_args(self.ws.get, inc_args_list)
def test_get_track(self):
inc_args_list = ['test']
self.api.get_track_by_id("1", None, inc=inc_args_list)
self._test_ws_function_args(self.ws.get)
self.assertInPath(self.ws.get, "/recording/1")
self._test_inc_args(self.ws.get, inc_args_list)
def test_get_collection(self):
inc_args_list = ["releases", "artist-credits", "media"]
self.api.get_collection("1", None)
self._test_ws_function_args(self.ws.get)
self.assertInPath(self.ws.get, "collection")
self.assertInPath(self.ws.get, "1/releases")
self._test_inc_args(self.ws.get, inc_args_list)
def test_get_collection_list(self):
self.api.get_collection_list(None)
self._test_ws_function_args(self.ws.get)
self.assertInPath(self.ws.get, "collection")
self.assertNotInPath(self.ws.get, "releases")
def test_put_collection(self):
self.api.put_to_collection("1", ["1", "2", "3"], None)
self._test_ws_function_args(self.ws.put)
self.assertInPath(self.ws.put, "collection/1/releases/1;2;3")
def test_delete_collection(self):
self.api.delete_from_collection("1", ["1", "2", "3", "4"] * 200, None)
collection_string = ";".join(["1", "2", "3", "4"] * 100)
self._test_ws_function_args(self.ws.delete)
self.assertInPath(self.ws.delete, "collection/1/releases/" + collection_string)
self.assertNotInPath(self.ws.delete, collection_string + ";" + collection_string)
self.assertEqual(self.ws.delete.call_count, 2)
def test_xml_ratings_empty(self):
ratings = dict()
xmldata = self.api._xml_ratings(ratings)
self.assertEqual(
xmldata,
'<?xml version="1.0" encoding="UTF-8"?>'
'<metadata xmlns="http://musicbrainz.org/ns/mmd-2.0#">'
'<recording-list></recording-list>'
'</metadata>'
)
def test_xml_ratings_one(self):
ratings = {("recording", 'a'): 1}
xmldata = self.api._xml_ratings(ratings)
self.assertEqual(
xmldata,
'<?xml version="1.0" encoding="UTF-8"?>'
'<metadata xmlns="http://musicbrainz.org/ns/mmd-2.0#">'
'<recording-list>'
'<recording id="a"><user-rating>20</user-rating></recording>'
'</recording-list>'
'</metadata>'
)
def test_xml_ratings_multiple(self):
ratings = {
("recording", 'a'): 1,
("recording", 'b'): 2,
("nonrecording", 'c'): 3,
}
xmldata = self.api._xml_ratings(ratings)
self.assertEqual(
xmldata,
'<?xml version="1.0" encoding="UTF-8"?>'
'<metadata xmlns="http://musicbrainz.org/ns/mmd-2.0#">'
'<recording-list>'
'<recording id="a"><user-rating>20</user-rating></recording>'
'<recording id="b"><user-rating>40</user-rating></recording>'
'</recording-list>'
'</metadata>'
)
def test_xml_ratings_encode(self):
ratings = {("recording", '<a&"\'>'): 0}
xmldata = self.api._xml_ratings(ratings)
self.assertEqual(
xmldata,
'<?xml version="1.0" encoding="UTF-8"?>'
'<metadata xmlns="http://musicbrainz.org/ns/mmd-2.0#">'
'<recording-list>'
'<recording id="<a&"\'>"><user-rating>0</user-rating></recording>'
'</recording-list>'
'</metadata>'
)
def test_xml_ratings_raises_value_error(self):
ratings = {("recording", 'a'): 'foo'}
self.assertRaises(ValueError, self.api._xml_ratings, ratings)
def test_collection_request(self):
releases = tuple("r"+str(i) for i in range(13))
generator = self.api._collection_request("test", releases, batchsize=5)
batch = next(generator)
self.assertEqual(batch, ('collection', 'test', 'releases', 'r0;r1;r2;r3;r4'))
batch = next(generator)
self.assertEqual(batch, ('collection', 'test', 'releases', 'r5;r6;r7;r8;r9'))
batch = next(generator)
self.assertEqual(batch, ('collection', 'test', 'releases', 'r10;r11;r12'))
with self.assertRaises(Sto
|
laisrael/Game-Tools-NPC-Generator
|
classgen.py
|
Python
|
mit
| 1,109
| 0.002705
|
import random
classes = [
    ('Barbarian', ['STR', 'CON', 'DEX', 'WIS', 'CHA', 'INT']),
('Bard', ['CHA', 'CON', 'STR', 'DEX', 'WIS', 'INT']),
('Cleric', ['WIS', 'CHA', 'CON', 'DEX', 'INT', 'STR']),
('Druid', ['WIS', 'CON', 'STR', 'DEX', 'INT', 'CHA']),
    ('Fighter', ['STR', 'CON', 'DEX', 'WIS', 'INT', 'CHA']),
('Monk', ['STR', 'WIS', 'CON', 'DEX', 'INT', 'CHA']),
('Paladin', ['CHA', 'WIS', 'STR', 'CON', 'DEX', 'INT']),
('Ranger', ['STR', 'DEX', 'WIS', 'CHA', 'CON', 'INT']),
('Rogue', ['DEX', 'CHA', 'CON', 'WIS', 'INT', 'STR']),
('Sorcerer', ['CHA', 'DEX', 'CON', 'WIS', 'INT', 'STR']),
('Wizard', ['INT', 'CON', 'DEX', 'WIS', 'CHA', 'STR'])
]
class Class():
def __init__(self, inClass):
if inClass == "rand":
#Classes are returned with a list of stats in order of where they will allocate stat points, from highest to lowest.
self.finalClass, self.statPref = random.choice(classes)
else:
self.finalClass, self.statPref = inClass
def generate(self):
return (self.finalClass, self.statPref)
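A short usage sketch (hypothetical, not part of the file): draw a random class, or build a specific one by passing a (name, stat_order) tuple.
random_class = Class("rand")
name, stat_order = random_class.generate()
print(name, stat_order)  # e.g. ('Wizard', ['INT', 'CON', 'DEX', 'WIS', 'CHA', 'STR'])
fighter = Class(('Fighter', ['STR', 'CON', 'DEX', 'WIS', 'INT', 'CHA']))
print(fighter.generate())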
|
cython-testbed/pandas
|
pandas/tests/plotting/test_deprecated.py
|
Python
|
bsd-3-clause
| 1,513
| 0
|
# coding: utf-8
import string
import pandas as pd
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import pytest
from numpy.random import randn
import pandas.tools.plotting as plotting
from pandas.tests.plotting.common import TestPlotBase
"""
Test cases for plot functions imported from deprecated
pandas.tools.plotting
"""
@td.skip_if_no_mpl
class TestDeprecatedNameSpace(TestPlotBase):
@pytest.mark.slow
@td.skip_if_no_scipy
def test_scatter_plot_legacy(self):
df = pd.DataFrame(randn(100, 2))
with tm.assert_produces_warning(FutureWarning):
plotting.scatter_matrix(df)
with tm.assert_produces_warning(FutureWarning):
pd.scatter_matrix(df)
@pytest.mark.slow
def test_boxplot_deprecated(self):
df = pd.DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
with tm.assert_produces_warning(FutureWarning):
plotting.boxplot(df, column=['one', 'two'],
by='indic')
@pytest.mark.slow
def test_radviz_deprecated(self, iris):
with tm.assert_produces_warning(FutureWarning):
plotting.radviz(frame=iris, class_column='Name')
@pytest.mark.slow
def test_plot_params(self):
with tm.assert_produces_warning(FutureWarning):
            pd.plot_params['xaxis.compat'] = True
|
armab/st2contrib
|
packs/dimensiondata/actions/create_vlan.py
|
Python
|
apache-2.0
| 826
| 0
|
from lib import actions
__all__ = [
'CreateVlanAction',
]
class CreateVlanAction(actions.BaseAction):
    def run(self, **kwargs):
action = kwargs['action']
del kwargs['action']
region = kwargs['region']
del kwargs['region']
network_domain_id = kwargs['network_domain_id']
del kwargs['network_domain_id']
        driver = self._get_compute_driver(region)
network_domain = driver.ex_get_network_domain(network_domain_id)
kwargs['network_domain'] = network_domain
result = self._do_function(driver, action, **kwargs)
# Wait to complete
driver.ex_wait_for_state('NORMAL', driver.ex_get_vlan,
poll_interval=2, timeout=1200,
vlan_id=result['id'])
return result
|
dwfreed/mitmproxy
|
examples/complex/tcp_message.py
|
Python
|
mit
| 917
| 0.001091
|
"""
tcp_message Inline Script Hook API Demonstration
------------------------------------------------
* modifies packets containing "foo" to "bar"
* prints various details for each packet.
example cmdline invocation:
mitmdump -T --host --tcp ".*" -q -s examples/tcp_message.py
"""
from mitmproxy.utils import strutils
def tcp_message(tcp_msg):
modified_msg = tcp_msg.message.replace("foo", "bar")
    is_modified = False if modified_msg == tcp_msg.message else True
tcp_msg.message = modified_msg
print(
"[tcp_message{}] from {} {} to {} {}:\r\n{}".format(
" (modified)" if is_modified else "",
"client" if tcp_msg.sender == tcp_msg.client_conn else "server",
tcp_msg.sender.address,
"server" if tcp_msg.receiver == tcp_msg.server_conn else "client",
            tcp_msg.receiver.address, strutils.bytes_to_escaped_str(tcp_msg.message))
)
|
richardbeare/SimpleITK
|
Examples/Python/CannyEdge.py
|
Python
|
apache-2.0
| 1,302
| 0
|
#!/usr/bin/env python
# =========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =========================================================================
from __future__ import print_function
import SimpleITK as sitk
import sys
import os
if len(sys.argv) < 2:
print("Usage: %s <input>" % (sys.argv[0]))
sys.exit(1)
image = sitk.Cast(sitk.ReadImage(sys.argv[1]), sitk.sitkFloat32)
edges = sitk.CannyEdgeDetection(image, lowerThreshold=200, upperThreshold=400,
variance=[4] * 3)
stats = sitk.StatisticsImageFilter()
stats.Execute(image)
if ("SITK_NOSHOW" not in os.environ):
sitk.Show(sitk.Maximum(image * 0.5, edges * stats.GetMaximum() * .5))
|
pndurette/gTTS
|
gtts/__init__.py
|
Python
|
mit
| 137
| 0
|
# -*- coding: utf-8 -*-
from .version import __version__  # noqa: F401
from .tts import gTTS, gTTSError
__all__ = ['gTTS', 'gTTSError']
|
guozengxin/codeeval
|
easy/fizzBuzz.py
|
Python
|
mit
| 537
| 0.001862
|
#!/usr/bin/env python
# https://www.codeeval.com/open_challenges/1/
import sys
def solve(X, Y, N):
r = []
for i in range(1, N + 1):
if i % X == 0 and i % Y == 0:
r.append('FB')
elif i % X == 0:
r.append('F')
elif i % Y == 0:
r.append('B')
else:
r.append(str(i))
print ' '.join(r)
def main():
for line in sys.stdin:
(X, Y, N) = line.strip().split(' ')
solve(int(X), int(Y), int(N))
if __name__ == '__main__':
main()
|
nickbjohnson4224/greyhat-crypto-ctf-2014
|
frontend/services.py
|
Python
|
mit
| 4,564
| 0.004163
|
import time
import json
import tornado.httpclient
http_client = tornado.httpclient.HTTPClient()
class HTTPServiceProxy(object):
def __init__(self, host='localhost', port=6999, cache_timeout=5.0):
self._host = host
self._port = port
self._cache_timeout = cache_timeout
self._cache = {}
self._cache_time = {}
def get(self, *path):
print 'http://%s:%d/%s' % (self._host, self._port, '/'.join(path))
if path in self._cache and \
self._cache_time[path] + self._cache_timeout > time.time():
return self._cache[path]
try:
response = http_client.fetch('http://%s:%d/%s' % (self._host, self._port, '/'.join(path)))
self._cache[path] = response.body
self._cache_time[path] = time.time()
return response.body
except tornado.httpclient.HTTPError as e:
if path in self._cache:
del self._cache[path]
return None
def post(self, *path, **kwargs):
url = 'http://%s:%d/%s' % (self._host, self._port, '/'.join(path))
print url
try:
request = tornado.httpclient.HTTPRequest(url, method='POST', body=json.dumps(kwargs))
response = http_client.fetch(request)
return response.body
except tornado.httpclient.HTTPError as e:
return None
class MonitorProxy(HTTPServiceProxy):
"""
Proxy object for the challenge monitor service.
"""
    def __init__(self):
super(MonitorProxy, self).__init__(host='localhost', port=6999, cache_timeout=0.0)
@property
def challenges(self):
return json.loads(self.get('list'))
    @property
def visible_challenges(self):
return json.loads(self.get('list_visible'))
def status(self, challenge):
try:
return json.loads(self.get('status')).get(challenge, None)
except TypeError:
return None
def show(self, challenge):
self.post('show', challenge)
def hide(self, challenge):
self.post('hide', challenge)
def start(self, challenge):
self.post('start', challenge)
def stop(self, challenge):
self.post('stop', challenge)
def metadata(self, challenge):
try:
return json.loads(self.get('metadata', challenge))
except TypeError:
return None
def fetch_file(self, challenge, filename):
return self.get('static_files', challenge, filename)
monitor = MonitorProxy()
class AuthProxy(HTTPServiceProxy):
"""
    Proxy object for the user authentication service.
"""
def __init__(self, host='127.0.0.1', port=6998, cache_timeout=1.0):
        super(AuthProxy, self).__init__(host=host, port=port, cache_timeout=cache_timeout)
@property
def users(self):
return json.loads(self.get('list'))
def create_user(self, user):
self.post('create_user', user)
def is_admin(self, user):
try:
return json.loads(self.post('get_tag', user, key='is_admin', default='false'))
except (ValueError, TypeError):
return False
def is_playing(self, user):
try:
return json.loads(self.post('get_tag', user, key='is_playing', default='true'))
except (ValueError, TypeError):
return False
def set_password(self, user, password):
self.post('set_password', user, password=password)
def check_password(self, user, password):
try:
return json.loads(self.post('check_password', user, password=password))
except TypeError:
return False
def set_tag(self, user, key, value):
self.post('set_tag', user, key=key, value=json.dumps(value))
def get_tag(self, user, key, default=''):
return self.post('get_tag', user, key=key, default=default)
auth = AuthProxy()
class ScoreboardProxy(HTTPServiceProxy):
"""
Proxy object for the scoreboard service.
"""
def __init__(self, host='127.0.0.1', port=6997, cache_timeout=1.0):
        super(ScoreboardProxy, self).__init__(host=host, port=port, cache_timeout=cache_timeout)
def capture(self, user, challenge):
self.post('capture', challenge, user=user)
def get_captures_by_user(self, user):
return json.loads(self.get('get_captures_by_user', user))
def get_captures_by_challenge(self, challenge):
return json.loads(self.get('get_captures_by_challenge', challenge))
scoreboard = ScoreboardProxy()
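A rough sketch of how the three singleton proxies above are meant to be driven from the frontend (hypothetical calls, Python 2 like the module itself; the user and challenge names are made up):
for challenge in monitor.visible_challenges:
    print challenge, monitor.status(challenge)
auth.create_user('alice')
auth.set_password('alice', 's3cret')
if auth.check_password('alice', 's3cret'):
    scoreboard.capture('alice', 'example-challenge')
print scoreboard.get_captures_by_user('alice')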
|
frol/django-mysql-fix
|
django_mysql_fix/backends/mysql/compiler.py
|
Python
|
mit
| 5,170
| 0.002901
|
from django.db.backends.mysql.compiler import SQLCompiler as BaseSQLCompiler
from django.db.backends.mysql.compiler import SQLInsertCompiler, \
SQLDeleteCompiler, SQLUpdateCompiler, SQLAggregateCompiler, \
SQLDateCompiler, SQLDateTimeCompiler
class SQLCompiler(BaseSQLCompiler):
STRAIGHT_INNER = 'STRAIGHT_JOIN'
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
        The method is overridden to save the result to reuse it in
get_from_clause().
"""
# We might want to notify people to not order by columns from different
# tables as there is no index across tables. They may create proxy
# model to do filtering with subquery.
result, params, group_by = super(SQLCompiler, self).get_ordering()
self.__ordering_group_by = group_by
return result, params, group_by
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
Patch query with STRAIGHT_JOIN if there is ordering and all joins in
query are INNER joins.
"""
straight_join_patch_applied = False
if self.__ordering_group_by \
and len(self.query.tables) > 1 \
and all(join_info.join_type is None \
or join_info.join_type == self.query.INNER
for join_info in self.query.alias_map.itervalues()):
# Get ordering table name from get_ordering()
# XXX: let's pretend that we believe in luck! :)
ordering_table = self.__ordering_group_by[0][0].split('.', 1)[0][1:-1]
# Save query tables and alias mapping to patch and restore them.
query_tables = _query_tables = self.query.tables
query_alias_map = self.query.alias_map
_query_alias_map = query_alias_map.copy()
try:
ordering_table_index = query_tables.index(ordering_table)
except ValueError:
# Is this possible? Fallback without patching
pass
else:
# STRAIGHT_JOIN forces MySQL read from the first table in
# a query, thus we must be sure that the first table is that
# we apply ordering to.
if ordering_table_index > 0:
_first_table = query_tables[0]
                    # Move ordering table to the beginning
_query_tables = [ordering_table] \
+ [table for table in query_tables if table != ordering_table]
_ordering_join_info = _query_alias_map[ordering_table]
# Fix JoinInfo
                    # XXX: It's insufficient, it recreates objects.
_query_alias_map[_first_table] = _query_alias_map[_first_table]\
._replace(
join_type=self.STRAIGHT_INNER,
join_cols=[join_cols[::-1]
for join_cols in _ordering_join_info.join_cols],
                            join_field=_ordering_join_info.join_field,
lhs_alias=ordering_table
)
_query_alias_map[ordering_table] = _ordering_join_info._replace(
join_type=None,
join_cols=((None, None), ),
join_field=None,
lhs_alias=None
)
                # Replace INNER joins with STRAIGHT joins
                # XXX: It's insufficient, it recreates objects.
for table in _query_tables[1:]:
_query_alias_map[table] = _query_alias_map[table]\
._replace(join_type=self.STRAIGHT_INNER)
# Patch query
self.query.tables = _query_tables
self.query.alias_map = _query_alias_map
straight_join_patch_applied = True
result, from_params = super(SQLCompiler, self).get_from_clause()
# Restore patched query if patched
if straight_join_patch_applied:
self.query.tables = query_tables
if ordering_table_index > 0:
self.query.alias_map = query_alias_map
return result, from_params
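Roughly, the intended effect on the emitted SQL looks like the pair below (illustrative only; table names, aliases and quoting are made up, the real SQL comes from Django itself):
# Without the patch, MySQL chooses the join order on its own:
default_sql = ("SELECT ... FROM `book` "
               "INNER JOIN `author` ON (`book`.`author_id` = `author`.`id`) "
               "ORDER BY `author`.`name`")
# With the patch, the ordering table is moved first and the joins become
# STRAIGHT_JOIN, so MySQL reads `author` in index order instead of filesorting:
patched_sql = ("SELECT ... FROM `author` "
               "STRAIGHT_JOIN `book` ON (`book`.`author_id` = `author`.`id`) "
               "ORDER BY `author`.`name`")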
|
Inboxen/Inboxen
|
inboxen/search/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 2,198
| 0.002275
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-03 15:23
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.postgres.search import SearchVector
from django.db import migrations, models
from django.db.models.expressions import Value
def unicode_damnit(data, charset="utf-8", errors="replace"):
"""copied over from inboxen.utils.emails"""
if isinstance(data, str):
return data
elif data is None:
return u""
try:
return str(bytes(data), charset, errors)
except LookupError:
return str(bytes(data), "ascii", errors)
def combine_index(field_a, field_b):
vector = SearchVector(Value(field_a, output_field=models.TextField()), config=settings.SEARCH_CONFIG, weight="A") + \
SearchVector(Value(field_b, output_field=models.TextField()), config=settings.SEARCH_CONFIG, weight="B")
return vector
def get_header(email_id, header_name, header_data_model):
try:
        header = header_data_model.objects.filter(
header__part__parent__isnull=True,
header__name__name=header_name,
header__part__email__id=email_id,
).first()
return unicode_damnit(header.data)
except AttributeError:
return ""
def populate_search_index(apps, schema_editor):
Inbox = apps.get_model("inboxen", "Inbox")
Email = apps.get_model("inboxen", "Email")
    HeaderData = apps.get_model("inboxen", "HeaderData")
for inbox in Inbox.objects.all().select_related("domain").iterator():
inbox.search_tsv = combine_index(inbox.description, "{}@{}".format(inbox.inbox, inbox.domain.domain))
inbox.save(update_fields=["search_tsv"])
for email in Email.objects.all().iterator():
email.search_tsv = combine_index(get_header(email.id, "Subject", HeaderData), get_header(email.id, "From", HeaderData))
email.save(update_fields=["search_tsv"])
class Migration(migrations.Migration):
atomic = False
dependencies = [
("inboxen", "0024_auto_20190225_1606"),
]
operations = [
migrations.RunPython(populate_search_index, reverse_code=lambda x, y: None),
]
|
gamechanger/deferrable
|
deferrable/queue/dockets.py
|
Python
|
mit
| 5,390
| 0.002226
|
from __future__ import absolute_import
import logging
from uuid import uuid1
import dockets.queue
import dockets.error_queue
from .base import Queue
class DocketsQueue(Queue):
def __init__(self, redis_client, queue_name, wait_time, timeout):
self.queue = dockets.queue.Queue(redis_client,
queue_name,
use_error_queue=True,
wait_time=wait_time,
timeout=timeout)
def make_error_queue(self):
        return DocketsErrorQueue(self.queue)
def _push(self, item):
push_kwargs = {}
if 'delay' in item:
push_kwargs['delay'] = item['delay'] or None
return self.queue.push(item, **push_kwargs)
def _push_batch(self, items):
result = []
for item in items:
try:
self._push(item)
result.append((item, True))
except Exception:
logging.exception("Error pushing item {}".format(item))
result.append((item, False))
return result
def _pop(self):
envelope = self.queue.pop()
if envelope:
return envelope, envelope.get('item')
return None, None
def _pop_batch(self, batch_size):
batch = []
for _ in range(batch_size):
envelope, item = self._pop()
if envelope:
batch.append((envelope, item))
else:
break
return batch
def _touch(self, envelope, seconds):
"""Dockets heartbeat is consumer-level and does not
utilize the envelope or seconds arguments."""
return self.queue._heartbeat()
def _complete(self, envelope):
return self.queue.complete(envelope)
def _complete_batch(self, envelopes):
# Dockets doesn't return any information from complete, so here we go...
for envelope in envelopes:
self._complete(envelope)
return [(envelope, True) for envelope in envelopes]
def _flush(self):
while True:
envelope, item = self._pop()
if envelope is None:
break
self._complete(envelope)
def _stats(self):
return {'available': self.queue.queued(),
'in_flight': self.queue.working(),
'delayed': self.queue.delayed()}
class DocketsErrorQueue(Queue):
FIFO = False
SUPPORTS_DELAY = False
RECLAIMS_TO_BACK_OF_QUEUE = False
def __init__(self, parent_dockets_queue):
self.queue = dockets.error_queue.ErrorQueue(parent_dockets_queue)
def _push(self, item):
"""This error ID dance is Dockets-specific, since we need the ID
to interface with the hash error queue. Other backends shouldn't
need to do this and should use the envelope properly instead."""
try:
error_id = item['error']['id']
except KeyError:
logging.warn('No error ID found for item, will generate and add one: {}'.format(item))
error_id = str(uuid1())
item.setdefault('error', {})['id'] = error_id
return self.queue.queue_error_item(error_id, item)
def _push_batch(self, items):
result = []
for item in items:
try:
self._push(item)
result.append((item, True))
except Exception:
logging.exception("Error pushing item {}".format(item))
result.append((item, False))
return result
def _pop(self):
"""Dockets Error Queues are not actually queues, they're hashes. There's no way
for us to implement a pure pop that doesn't expose us to the risk of dropping
data. As such, we're going to return the first error in that hash but not actually
remove it until we call `_complete` later on. This keeps our data safe but may
deliver errors multiple times. That should be okay."""
error_ids = self.queue.error_ids()
if error_ids:
error_id = error_ids[0]
error = self.queue.error(error_id)
return error, error
return None, None
def _pop_batch(self, batch_size):
"""Similar to _pop, but returns a list of tuples containing batch_size pops
from our queue.
Again, this does not actually pop from the queue until we call _complete on
each queued item"""
error_ids = self.queue.error_ids()
batch = []
if error_ids:
for error_id in error_ids[:batch_size]:
error = self.queue.error(error_id)
batch.append((error, error))
return batch
def _touch(self, envelope, seconds):
return None
def _complete(self, envelope):
error_id = envelope['error']['id']
if not error_id:
raise AttributeError('Error item has no id field: {}'.format(envelope))
return self.queue.delete_error(error_id)
def _complete_batch(self, envelopes):
return [(envelope, bool(self._complete(envelope))) for envelope in envelopes]
def _flush(self):
for error_id in self.queue.error_ids():
self.queue.delete_error(error_id)
def _stats(self):
return {'available': self.queue.length()}
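A minimal end-to-end sketch of the two backends above (assumes a local Redis and the dockets package; the queue name, payload and send() worker are made up, and the underscore methods shown are the backend hooks that the public Queue interface in .base presumably wraps):
import redis
client = redis.StrictRedis()
work_queue = DocketsQueue(client, 'emails', wait_time=1, timeout=60)
error_queue = work_queue.make_error_queue()
work_queue._push({'to': 'user@example.com', 'delay': None})
envelope, item = work_queue._pop()
if envelope is not None:
    try:
        send(item)  # hypothetical worker function
        work_queue._complete(envelope)
    except Exception:
        error_queue._push(item)  # an error id is generated if the item has none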
|
bkerster/utilities
|
cy_af2d/setup.py
|
Python
|
gpl-2.0
| 171
| 0.011696
|
from distutils.core import setup
from Cython.Build import cythonize
import numpy
setup(
ext_modules = cythonize("af2d.pyx"),
    include_dirs=[numpy.get_include()]
)
|
akshaykamath/StateReviewTrendAnalysisYelp
|
StateReviewTrendsPOC.py
|
Python
|
mit
| 4,899
| 0.002654
|
__author__ = 'Akshay'
"""
This file contains code to mine reviews and star ratings per state.
It is an additional POC done on Yelp data for visualising the number of 5-star reviews per state on a map.
For each business in a state, 5 reviews are taken and the review count is kept in a dictionary per state.
Use the resulting JSON to plot it onto the map.
For the actual map visualisation, please refer to the State Review Nightlife POC.
Since only 5 business reviews were taken per state, this still needs work.
"""
##############################################
from __future__ import division
import sys
reload(sys)
import json
import datetime
sys.setdefaultencoding('utf8')
state_5_star_dict = {}
state_4_star_dict = {}
state_3_star_dict = {}
state_2_star_dict = {}
state_1_star_dict = {}
state_business = {}
def create_set_for_business_with_cat(category):
business_count = 0
with open('Data\yelp_academic_dataset_business.json') as fp:
for line in fp.readlines():
temp = json.loads(line, encoding='utf-8')
categories = str(temp["categories"])
state = str(temp["state"])
if state == "ON" or state == "ELN" or state == "EDH" or state == "MLN" or state == "NTH" or state == "FIF":
continue
if state not in state_business:
state_business[state] = 0
if len(state_business.keys()) == 50:
break
if category in categories:
print state
business_id = str(temp["business_id"])
city = str(temp["city"])
name = str(temp["name"])
create_yelp_set(business_id, state, city, name)
print "set prepared."
def create_yelp_set(business_id, state, city, name):
file_write = open('Data\state_stars_date_business.txt', mode='a')
if state_business[state] == 5:
print state, " is already completed."
return
with open('Data\yelp_academic_dataset_review.json') as fp:
for line in fp.readlines():
temp = json.loads(line, encoding='utf-8')
if str(temp["business_id"]) == business_id:
state_business[state] += 1
star = str(temp["stars"])
date = str(temp["date"])
date_tm = datetime.datetime.strptime(date, "%Y-%m-%d").date()
file_write.write(business_id)
file_write.write('\t')
file_write.write(state)
file_write.write('\t')
file_write.write(star)
file_write.write('\t')
file_write.write(city)
file_write.write('\t')
file_write.write(name)
file_write.write('\t')
file_write.write(str(date_tm))
file_write.write('\n')
if state_business[state] == 5:
break
for key, value in state_5_star_dict.iteritems():
print key, value
file_write.close()
print "Done."
def state_review_trends():
count = 0
with open('Data\state_stars_date_business.txt') as fp:
for line in fp.readlines():
count += 1
tup = (line.split("\t")[0], line.split("\t")[1], line.split("\t")[2], line.split("\t")[3],
line.split("\t")[4], line.split("\t")[5])
state = tup[1]
star_rating = int(tup[2])
if int(star_rating) != 5:
continue
if state not in state_5_star_dict:
state_5_star_dict[state] = 0
if state not in state_4_star_dict:
                state_4_star_dict[state] = 0
if state not in state_3_star_dict:
state_3_star_dict[state] = 0
            if state not in state_2_star_dict:
state_2_star_dict[state] = 0
if state not in state_1_star_dict:
state_1_star_dict[state] = 0
if star_rating == 5:
state_5_star_dict[state] += 1
if star_rating == 4:
state_4_star_dict[state] += 1
if star_rating == 3:
state_3_star_dict[state] += 1
if star_rating == 2:
state_2_star_dict[state] += 1
if star_rating == 1:
state_1_star_dict[state] += 1
response = []
print "Number of 5 star reviews per state."
for key, value in state_5_star_dict.iteritems():
response.append({'id': key, 'value': value})
print key, value
json_data = json.dumps(response)
print json_data
print "Done."
print count
def main():
# Uncomment the line to run mining data.
# create_set_for_business_with_cat("Nightlife")
state_review_trends()
if __name__ == "__main__":
print "Execute Script!!"
main()
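The JSON printed by state_review_trends() is a flat list of {'id': state, 'value': count} objects, for example (counts made up):
example_output = [
    {"id": "NV", "value": 112},
    {"id": "AZ", "value": 87},
    {"id": "NC", "value": 45},
]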
|
FrontSide/Sizun
|
sizun/controllers/syntaxhandler.py
|
Python
|
mit
| 1,697
| 0.000589
|
"""
Sizun
MIT License
(C) 2015 David Rieger
"""
from flask import current_app as app
from .confighandler import ConfigHandler
class SyntaxHandler:
SYNTAXFILES_FOLDER = "config/syntax/"
SYNTAXFILES_APPDX = ".syn"
ELEMENTS_SECTION = "ELEMENTS"
def __init__(self, _settings):
        # Instantiate a Configuration Handler for the corresponding syntax file
self.language = _settings.get_language()
        self.app_path = _settings.get_apppath()
self.confighandler = ConfigHandler("{}/{}{}{}".format(
self.app_path,
self.SYNTAXFILES_FOLDER,
self.language,
self.SYNTAXFILES_APPDX))
def get_flowpath_regex(self):
"""
        Returns the regex by which if statements can be detected
        in the specified language's source code
"""
return self.confighandler.get(self.ELEMENTS_SECTION, "FLOWPATH")
def get_method_regex(self):
"""
        Returns the regex by which method starts can be detected
        in the specified language's source code
"""
return self.confighandler.get(self.ELEMENTS_SECTION, "METHOD")
def get_foreign_reference_regex(self):
"""
        Returns the regex by which foreign references can be detected
        in the specified language's source code
"""
return self.confighandler.get(self.ELEMENTS_SECTION, "FOREIGN_REFERENCE")
def get_message_chain_regex(self):
"""
        Returns the regex by which message chains can be detected
        in the specified language's source code
"""
return self.confighandler.get(self.ELEMENTS_SECTION, "MESSAGE_CHAIN")
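A rough usage sketch (the settings object below is hypothetical, and the .syn file, assumed here to be an INI-style file with an [ELEMENTS] section, is expected to exist under <app_path>/config/syntax/):
class _FakeSettings(object):
    def get_language(self):
        return "python"
    def get_apppath(self):
        return "/opt/sizun"
syntax = SyntaxHandler(_FakeSettings())
print(syntax.get_method_regex())  # e.g. a pattern matching "def <name>(" for Python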
|
tensorflow/graphics
|
tensorflow_graphics/projects/points_to_3Dobjects/transforms/transforms.py
|
Python
|
apache-2.0
| 12,632
| 0.00855
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transforms."""
import functools
from cvx2 import latest as cv2
import numpy as np
import tensorflow as tf
from tensorflow_graphics.projects.points_to_3Dobjects.utils import image as image_utils
from google3.third_party.tensorflow_models.object_detection.core import preprocessor
tf_data_augmentation = preprocessor
LIGHTING_EIGVAL = (0.2141788, 0.01817699, 0.00341571)
LIGHTING_EIGVEC = ((-0.58752847, -0.69563484, 0.4134035),
(-0.5832747, 0.00994535, -0.8122141),
(-0.560893, 0.7183267, 0.41158938))
def bgr_to_rgb(image):
return image[..., ::-1]
def rgb_to_bgr(image):
return image[..., ::-1]
def brightness(image, variance):
alpha = 1 + tf.random.uniform(
[1], dtype=tf.float32, minval=-variance, maxval=variance)[0]
return image * alpha
def contrast(image, image_grayscale_mean, variance):
alpha = 1 + tf.random.uniform(
[1], dtype=tf.float32, minval=-variance, maxval=variance)[0]
return image * alpha + image_grayscale_mean * (1 - alpha)
def saturation(image, image_grayscale, variance):
alpha = 1 + tf.random.uniform(
[1], dtype=tf.float32, minval=-variance, maxval=variance)[0]
return image * alpha + image_grayscale * (1 - alpha)
def lighting(image,
alpha_std=0.1,
eigval=LIGHTING_EIGVAL,
eigvec=LIGHTING_EIGVEC):
alpha = tf.random.normal([3], stddev=alpha_std, dtype=tf.float32)
return image + tf.tensordot(
tf.constant(eigvec), tf.constant(eigval) * alpha, axes=((1,), (0,)))
def color_augmentations(image, variance=0.4):
"""Color augmentations."""
if variance:
print(variance)
image_grayscale = tf.image.rgb_to_grayscale(bgr_to_rgb(image))
image_grayscale_mean = tf.math.reduce_mean(
image_grayscale, axis=[-3, -2, -1], keepdims=True)
brightness_fn = functools.partial(brightness, variance=variance)
contrast_fn = functools.partial(
contrast, image_grayscale_mean=image_grayscale_mean, variance=variance)
saturation_fn = functools.partial(
saturation, image_grayscale=image_grayscale, variance=variance)
function_order = tf.random.shuffle([0, 1, 2])
ii = tf.constant(0)
def _apply_fn(image, ii):
tmp_ii = function_order[ii]
image = tf.switch_case(
tmp_ii, {
0: lambda: brightness_fn(image),
1: lambda: contrast_fn(image),
2: lambda: saturation_fn(image)
})
ii = ii + 1
return image, ii
    (image, _) = tf.while_loop(lambda image, ii: tf.less(ii, 3),
                               _apply_fn,
                               [image, ii])
    image = lighting(image)
return image
def subtract_mean_and_normalize(image, means, std, random=False):
  if len(means) != len(std):
raise ValueError('len(means) and len(std) must match')
image = image / 255
if random:
image = color_augmentations(image)
image = (image - tf.constant(means)) / tf.constant(std)
return image
def _get_image_border(border, size):
i = tf.constant(1)
cond = lambda i: tf.math.less_equal(size - border // i, border // i)
body = lambda i: tf.multiply(i, 2)
r = tf.while_loop(cond, body, [i])
return border // r[0]
def compute_image_size_affine_transform(original_image_size,
input_image_size,
padding_keep_size=127,
random=False,
random_side_scale_range=None):
"""Computer affine transform."""
if input_image_size is None:
input_h = tf.bitwise.bitwise_or(original_image_size[-2],
padding_keep_size) + 1
input_w = tf.bitwise.bitwise_or(original_image_size[-1],
padding_keep_size) + 1
input_size = tf.cast(tf.stack([input_w, input_h]), tf.float32)
side_size = tf.cast(tf.stack([input_w, input_h]), tf.float32)
center = tf.cast(
tf.stack([original_image_size[-1] // 2, original_image_size[-2] // 2]),
tf.float32)
else:
input_size = tf.cast(tf.stack(input_image_size), tf.float32)
max_side = tf.reduce_max(original_image_size[-2:])
side_size = tf.cast(tf.stack([max_side, max_side]), tf.float32)
image_shape = tf.cast(original_image_size, tf.float32)
center = tf.stack([image_shape[-1] / 2., image_shape[-2] / 2.])
if random:
assert random_side_scale_range is not None, (
'Random random_side_scale_range has to be provided when computing '
'random affine transformation!')
scales = tf.range(*random_side_scale_range)
scale_ii = tf.random.categorical(
tf.ones_like(scales)[None, ...], 1, dtype=tf.int32)[0, 0]
side_size = side_size * scales[scale_ii]
h_border = _get_image_border(128, original_image_size[-2])
w_border = _get_image_border(128, original_image_size[-1])
center_x = tf.random.uniform([1],
dtype=tf.int32,
minval=w_border,
maxval=(original_image_size[-1] - w_border))[0]
center_y = tf.random.uniform([1],
dtype=tf.int32,
minval=h_border,
maxval=(original_image_size[-2] - h_border))[0]
center = tf.cast(tf.stack([center_x, center_y]), tf.float32)
return center, side_size, input_size
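The bitwise padding above rounds a side length up to the smallest multiple of (padding_keep_size + 1) that is strictly larger than it; a quick worked example with the default of 127:
size = 600
padded = (size | 127) + 1  # 600 | 127 == 639, so padded == 640 == 5 * 128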
def affine_transform(image,
original_image_size,
bounding_boxes,
instance_masks,
image_size,
padding_keep_size=127,
transform_gt_annotations=False,
random=False,
random_side_scale_range=None,
random_flip_probability=False):
"""Affine transform."""
# bounding_boxes: normalized coordinates with (ymin, xmin, ymax, xmax)
center, side_size, input_size = compute_image_size_affine_transform(
tf.shape(image)[:-1], image_size, padding_keep_size, random,
random_side_scale_range)
flipped = False
if random:
def _flip(flipped, image, center):
flipped = tf.math.logical_not(flipped)
image = image[..., ::-1, :]
center = tf.tensor_scatter_nd_update(center, tf.constant(
[[0]]), [tf.cast(tf.shape(image)[-2], center.dtype) - center[0]])
return flipped, image, center
def _no_flip(flipped, image, center):
return flipped, image, center
flipped, image, center = tf.cond(
tf.random.uniform([1], dtype=tf.float32)[0] < random_flip_probability,
lambda: _flip(flipped, image, center),
lambda: _no_flip(flipped, image, center))
if instance_masks is not None:
def _flip_mask(mask):
return mask[..., ::-1]
def _no_flip_mask(mask):
return mask
instance_masks = tf.cond(
flipped,
lambda: _flip_mask(instance_masks),
lambda: _no_flip_mask(instance_masks))
# affine_transform_image_np(image, center, side_size, input_size)
input_image_size_static = image.shape
[
image,
] = tf.py_function(affine_transform_image_np,
[image, center, side_size, input_size], [tf.float32])
if len(input_image_size_static) == 4:
image.set_shape([image.shape[0], None, None, image.shape[-1]])
else:
image.set_shape([None, None, image.shape[-1]])
if transform_gt_annotations:
bounding_boxes_shape = bounding_boxes.shape
[
bounding_boxes,
] = tf.py_function(_affine_transform_points_np,
|
sajuptpm/neutron-ipam
|
neutron/tests/unit/_test_extension_portbindings.py
|
Python
|
apache-2.0
| 17,806
| 0.000168
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 NEC Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Akihiro Motoki, NEC Corporation
#
import contextlib
import httplib
from oslo.config import cfg
from webob import exc
from neutron import context
from neutron.extensions import portbindings
from neutron.manager import NeutronManager
from neutron.tests.unit import test_db_plugin
class PortBindingsTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
# VIF_TYPE must be overridden according to plugin vif_type
VIF_TYPE = portbindings.VIF_TYPE_OTHER
# VIF_DETAILS must be overridden according to plugin vif_details
VIF_DETAILS = None
def _check_response_portbindings(self, port):
self.assertEqual(port[portbindings.VIF_TYPE], self.VIF_TYPE)
# REVISIT(rkukura): Consider reworking tests to enable ML2 to bind
if self.VIF_TYPE not in [portbindings.VIF_TYPE_UNBOUND,
portbindings.VIF_TYPE_BINDING_FAILED]:
# NOTE(r-mibu): The following six lines are just for backward
# compatibility. In this class, HAS_PORT_FILTER has been replaced
# by VIF_DETAILS which can be set expected vif_details to check,
# but all replacement of HAS_PORT_FILTER in successor has not been
# completed.
if self.VIF_DETAILS is None:
expected = getattr(self, 'HAS_PORT_FILTER', False)
vif_details = port[portbindings.VIF_DETAILS]
port_filter = vif_details[portbindings.CAP_PORT_FILTER]
self.assertEqual(expected, port_filter)
return
self.assertEqual(self.VIF_DETAILS, port[portbindings.VIF_DETAILS])
def _check_response_no_portbindings(self, port):
self.assertIn('status', port)
self.assertNotIn(portbindings.VIF_TYPE, port)
self.assertNotIn(portbindings.VIF_DETAILS, port)
def _get_non_admin_context(self):
return context.Context(user_id=None,
tenant_id=self._tenant_id,
is_admin=False,
read_deleted="no")
def test_port_vif_details(self):
with self.port(name='name') as port:
port_id = port['port']['id']
# Check a response of create_port
self._check_response_portbindings(port['port'])
# Check a response of get_port
ctx = context.get_admin_context()
port = self._show('ports', port_id, neutron_context=ctx)['port']
self._check_response_portbindings(port)
# By default user is admin - now test non admin user
ctx = self._get_non_admin_context()
non_admin_port = self._show(
'ports', port_id, neutron_context=ctx)['port']
self._check_response_no_portbindings(non_admin_port)
def test_ports_vif_details(self):
plugin = NeutronManager.get_plugin()
cfg.CONF.set_default('allow_overlapping_ips', True)
with contextlib.nested(self.port(), self.port()):
ctx = context.get_admin_context()
ports = plugin.get_ports(ctx)
self.assertEqual(len(ports), 2)
for port in ports:
self._check_response_portbindings(port)
# By default user is admin - now test non admin user
ctx = self._get_non_admin_context()
ports = self._list('ports', neutron_context=ctx)['ports']
self.assertEqual(len(ports), 2)
for non_admin_port in ports:
self._check_response_no_portbindings(non_admin_port)
def _check_port_binding_profile(self, port, profile=None):
        # For plugins which do not use the binding:profile attr
        # we just check that an operation on the port succeeds.
self.assertIn('id', port)
def _test_create_port_binding_profile(self, profile):
profile_arg = {portbindings.PROFILE: profile}
with self.port(arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
port_id = port['port']['id']
self._check_port_binding_profile(port['port'], profile)
port = self._show('ports', port_id)
self._check_port_binding_profile(port['port'], profile)
def test_create_port_binding_profile_none(self):
self._test_create_port_binding_profile(None)
def test_create_port_binding_profile_with_empty_dict(self):
self._test_create_port_binding_profile({})
def _test_update_port_binding_profile(self, profile):
profile_arg = {portbindings.PROFILE: profile}
with self.port() as port:
# print "(1) %s" % port
self._check_port_binding_profile(port['port'])
port_id = port['port']['id']
ctx = context.get_admin_context()
port = self._update('ports', port_id, {'port': profile_arg},
neutron_context=ctx)['port']
self._check_port_binding_profile(port, profile)
port = self._show('ports', port_id)['port']
self._check_port_binding_profile(port, profile)
def test_update_port_binding_profile_none(self):
self._test_update_port_binding_profile(None)
def test_update_port_binding_profile_with_empty_dict(self):
self._test_update_port_binding_profile({})
def test_port_create_portinfo_non_admin(self):
profile_arg = {portbindings.PROFILE: {'dummy': 'dummy'}}
with self.network(set_context=True, tenant_id='test') as net1:
with self.subnet(network=net1) as subnet1:
# succeed without binding:profile
with self.port(subnet=subnet1,
set_context=True, tenant_id='test'):
pass
# fail with binding:profile
try:
with self.port(subnet=subnet1,
expected_res_status=403,
arg_list=(portbindings.PROFILE,),
set_context=True, tenant_id='test',
**profile_arg):
pass
except exc.HTTPClientError:
pass
def test_port_update_portinfo_non_admin(self):
profile_arg = {portbindings.PROFILE: {'dummy': 'dummy'}}
with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1) as port:
# By default user is admin - now test non admin user
                # Note that 404 is returned when prohibited by policy.
# See comment for PolicyNotAuthorized except clause
# in update() in neutron.api.v2.base.Controller.
port_id = port['port']['id']
ctx = self._get_non_admin_context()
port = self._update('ports', port_id,
{'port': profile_arg},
expected_code=404,
neutron_context=ctx)
class PortBindingsHostTestCaseMixin(object):
fmt = 'json'
hostname = 'testhost'
def _check_response_portbindings_host(self, port):
self.assertEqual(port[portbindings.HOST_ID], self.hostname)
def _check_response_no_portbindings_host(self, port):
self.assertIn('status', port)
self.assertNotIn(portbindings.HOST_ID, port)
def test_port_vif_non_admin(self):
| aplicatii-romanesti/allinclusive-kodi-pi | .kodi/addons/plugin.video.kidsplace/brightcovePlayer.py | Python | apache-2.0 | 1,587 | 0.010082 |
import httplib
from pyamf import AMF0, AMF3
from pyamf import remoting
from pyamf.remoting.client import RemotingService
height = 1080
def build_amf_request(const, playerID, videoPlayer, publisherID):
env = remoting.Envelope(amfVersion=3)
env.bodies.append(
(
"/1",
remoting.Request(
target="com.brightcove.player.runtime.PlayerMediaFacade.findMediaById",
body=[const, playerID, videoPlayer, publisherID],
envelope=env
)
)
)
return env
def get_clip_info(const, playerID, videoPlayer, publisherID, playerKey):
conn = httplib.HTTPConnection("c.brightcove.com")
envelope = build_amf_request(const, playerID, videoPlayer, publisherID)
conn.request("POST", "/services/messagebroker/amf?playerKey=" + playerKey, str(remoting.encode(envelope).read()), {'content-type': 'application/x-amf'})
response = conn.getresponse().read()
response = remoting.decode(response).bodies[0][1].body
return response
def play(const, playerID, videoPlayer, publisherID, playerKey):
    rtmpdata = get_clip_info(const, playerID, videoPlayer, publisherID, playerKey)
streamName = ""
streamUrl = rtmpdata['FLVFullLengthURL'];
for item in sorted(rtmpdata['renditions'], key=lambda item:item['frameHeight'], reverse=False):
streamHeight = item['frameHeight']
if streamHeight <= height:
streamUrl = item['defaultURL']
streamName = streamName + rtmpdata['displayName']
return [streamName, streamUrl];
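# Usage sketch (hypothetical values; the real const/playerID/videoPlayer/
# publisherID/playerKey come from the Brightcove embed on the scraped page):
#   name, url = play(const, playerID, videoPlayer, publisherID, playerKey)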
| idurkan/mtg-piper | get_land_ids.py | Python | mit | 361 | 0.024931 |
import re
import sys
import pprint
def main(args):
text_path = args[0]
file_content = open(text_path, 'r').read()
link_id_expr = re.compile(r"multiverseid=(\d+)")
matches = link_id_expr.findall(file_content)
print "There were {0} matches in {1}".format(len(matches), text_path)
pprint.pprint(matches)
if __name__ == '__main__':
main(sys.argv[1:])
| JavierGarciaD/athena | mneme/db_updater.py | Python | gpl-3.0 | 8,366 | 0.000478 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@since: 2014-11-28
@author: Javier Garcia
@contact: javier.garcia@bskapital.com
@summary: Update the Master Securities SQLITE database from
    web sources and CSV files.
'''
# General imports
import pandas.io.data as web
import pandas as pd
import numpy as np
import datetime
import Quandl
import sqlite3
from pprint import pprint
import time
# useful links:
# http://www.pythoncentral.io/advanced-sqlite-usage-in-python/
# CONSTANTS
WORKING_ON = 'PC'
QUANDL_TOKEN = "ryDzS4euF3UoFtYwswQp"
if WORKING_ON == 'PC':
DATABASE = ('C:/Users/javgar119/Documents/Dropbox'
'/SEC_MASTER/securities_master.sqlite')
elif WORKING_ON == 'MAC':
DATABASE = ('//Users/Javier/Dropbox/SEC_MASTER'
'/securities_master.sqlite')
###############################################################################
def connect_to_database(db_address):
"""
    :param db_address: path to the SQLite database file
"""
try:
connection = sqlite3.connect(
db_address, detect_types=sqlite3.PARSE_DECLTYPES
| sqlite3.PARSE_COLNAMES)
return connection
except sqlite3.Error as err:
print('Error connecting database', err.args[0])
def load_to_sec_master(database_conn, list_to_insert, symbol):
"""
:param database:
:param list_to_insert:
"""
try:
pointer = database_conn.cursor()
# insert update values
pointer.executemany(
"INSERT OR REPLACE INTO prices VALUES (?,?,?,?,?,?,?,?,?,?,?)",
list_to_insert)
database_conn.commit()
# get last update date for symbol
last_update_qry = ('SELECT DATE(max(date)) AS price_date FROM prices'
' WHERE symbol = "{}"').format(symbol)
pointer.execute(last_update_qry)
last_update_eod = pointer.fetchone()[0]
# update info about last update date in symbols
sql_update_symbols = ('UPDATE symbols SET last_update_eod = "{}"'
' WHERE symbol = "{}"').format(last_update_eod,
symbol)
pointer.execute(sql_update_symbols)
database_conn.commit()
print('{} update {} lines up-to-date {}'
.format(symbol, len(list_to_insert), last_update_eod))
return True
except sqlite3.Error as err:
print('Error loading data', err.args[0])
return False
def prepare_list_for_loading(vendor_results, symbol, symbol_data):
"""
:param vendor_results:
:param symbol:
:param symbol_data:
"""
# print(vendor_results.tail(10))
try:
        # create an index by joining date + symbol; since SQLite does not
        # have clustered indexes, this is the workaround
idx = symbol + \
pd.Series(vendor_results.index, index=vendor_results.index).map(
lambda x: x.strftime('%Y%m%d'))
price_date = pd.Series(vendor_results.index,
index=vendor_results.index).map(
lambda x: datetime.date(x.year, x.month, x.day))
# add the series to the dataframe
vendor_results['idx'] = idx
vendor_results['created_date'] = datetime.date.today()
vendor_results['symbol'] = symbol
vendor_results['date'] = price_date
if 'open_interest' not in vendor_results.columns:
vendor_results['open_interest'] = None
if 'Adj Close' not in vendor_results.columns:
vendor_results['Adj Close'] = None
if 'Volume' not in vendor_results.columns:
vendor_results['Volume'] = None
# rename the columns
vendor_results = vendor_results.rename(
columns={'Open': 'open',
'High': 'high',
'Low': 'low',
'Close': 'close',
'Volume': 'volume',
'Adj Close': 'adj_close'})
# rearrange the columns
cols = ['idx', 'symbol',
'created_date',
'date', 'open',
'high', 'low',
'close', 'adj_close',
'volume', 'open_interest']
vendor_results = vendor_results[cols]
        # Even though the column is declared as NUMERIC, it does not accept
        # an integer. :(
        vendor_results['volume'] = vendor_results['volume'].astype(float)
output = vendor_results.itertuples(index=False)
data = list(output)
return data
except:
        print('Error prepare_list_for_loading()')
raise
def query_to_dictionary(query_result):
"""
helper creates a dictionary from the database query to the format
needed for the update function.
:param query_result: dict(symbol: (last_update_eod))
"""
# print('query_result', query_result)
ans = dict()
for row in query_result:
data_for_symbol = (row[1], row[2], row[3])
ans[row[0]] = data_for_symbol
return ans
def get_symbols_to_update(database_conn):
"""
INPUTS:
    :param database_conn: open connection to the securities database
RETURNS:
:return dictionary with info of securities to update
"""
try:
pointer = database_conn.cursor()
pointer.execute('''SELECT symbol, vendor_symbol, vendor,
last_update_eod FROM symbols WHERE to_update=1;''')
query_result = pointer.fetchall()
ans = query_to_dictionary(query_result)
return ans
except sqlite3.Error as err:
print('Error get_symbols_to_update()', err.args[0])
def vendor_query(vendor, symbol, from_date, to_date):
"""
Make a web query to data vendor
:param vendor: 'QUANDL', 'YAHOO'
:param symbol:
:param from_date:
:param to_date:
"""
try:
if vendor == "QUANDL":
web_qry_result = Quandl.get(symbol,
trim_start=from_date,
trim_end=to_date,
authtoken=QUANDL_TOKEN,
verbose=False)
elif vendor == "YAHOO":
web_qry_result = web.get_data_yahoo(symbol,
start=from_date,
end=to_date)
elif vendor == 'CSV':
raw_read = pd.read_csv(symbol, index_col='Date', parse_dates=True)
web_qry_result = raw_read[from_date:to_date]
return web_qry_result
except:
print('Error querying the vendor')
def run_update(db_address):
"""
Puts everything together
"""
time0 = time.time()
database_conn = connect_to_database(db_address)
symbols = get_symbols_to_update(database_conn)
result = False
for symbol, symbol_data in symbols.items():
print(symbol, ': updating...')
date_to = datetime.date.today() - datetime.timedelta(1)
if symbol_data[2] >= date_to:
result = True
print('{} up-to-date {}. No query was made.'
.format(symbol, symbol_data[2].strftime('%Y-%m-%d')))
else:
vendor = symbol_data[1]
date_from = symbol_data[2].strftime('%Y-%m-%d')
symbol_in_vendor = symbol_data[0]
vendor_return = vendor_query(vendor,
symbol_in_vendor,
date_from,
date_to)
list_ready_to_load = prepare_list_for_loading(vendor_return,
symbol,
symbol_data)
result = load_to_sec_master(database_conn,
list_ready_to_load,
symbol)
database_conn.close()
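# Typical entry point (an assumption for illustration; the excerpt above does
# not show a __main__ guard):
# if __name__ == '__main__':
#     run_update(DATABASE)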
| qutip/qutip | qutip/topology.py | Python | bsd-3-clause | 2,853 | 0.000701 |
__all__ = ['berry_curvature', 'plot_berry_curvature']
from qutip import (Qobj, tensor, basis, qeye, isherm, sigmax, sigmay, sigmaz)
import numpy as np
try:
import matplotlib.pyplot as plt
except:
pass
def berry_curvature(eigfs):
"""Computes the discretized Berry curvature on the two dimensional grid
of parameters. The function works well for cases with no band mixing.
Parameters
==========
eigfs : numpy ndarray
4 dimensional numpy ndarray where the first two indices are for the two
discrete values of the two parameters and the third is the index of the
occupied bands. The fourth dimension holds the eigenfunctions.
Returns
-------
b_curv : numpy ndarray
A two dimensional array of the discretized Berry curvature defined for
        the values of the two parameters defined in the eigfs.
"""
nparam0 = eigfs.shape[0]
nparam1 = eigfs.shape[1]
nocc = eigfs.shape[2]
b_curv = np.zeros((nparam0-1, nparam1-1), dtype=float)
for i in range(nparam0-1):
for j in range(nparam1-1):
rect_prd = np.identity(nocc, dtype=complex)
innP0 = np.zeros([nocc, nocc], dtype=complex)
innP1 = np.zeros([nocc, nocc], dtype=complex)
innP2 = np.zeros([nocc, nocc], dtype=complex)
innP3 = np.zeros([nocc, nocc], dtype=complex)
for k in range(nocc):
for l in range(nocc):
wf0 = eigfs[i, j, k, :]
wf1 = eigfs[i+1, j, l, :]
innP0[k, l] = np.dot(wf0.conjugate(), wf1)
wf1 = eigfs[i+1, j, k, :]
wf2 = eigfs[i+1, j+1, l, :]
innP1[k, l] = np.dot(wf1.conjugate(), wf2)
wf2 = eigfs[i+1, j+1, k, :]
wf3 = eigfs[i, j+1, l, :]
innP2[k, l] = np.dot(wf2.conjugate(), wf3)
wf3 = eigfs[i, j+1, k, :]
wf0 = eigfs[i, j, l, :]
innP3[k, l] = np.dot(wf3.conjugate(), wf0)
rect_prd = np.dot(rect_prd, innP0)
rect_prd = np.dot(rect_prd, innP1)
rect_prd = np.dot(rect_prd, innP2)
rect_prd = np.dot(rect_prd, innP3)
dett = np.linalg.det(rect_prd)
curl_z = np.angle(dett)
b_curv[i, j] = curl_z
return b_curv
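# Shape-only usage sketch (random data, purely to illustrate the expected
# layout of eigfs; a real calculation would fill it with occupied-band
# eigenvectors of a model Hamiltonian evaluated on the parameter grid):
#   eigfs = np.random.rand(10, 10, 1, 2) + 1j * np.random.rand(10, 10, 1, 2)
#   berry_curvature(eigfs).shape  # -> (9, 9)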
def plot_berry_curvature(eigfs):
"""Plots the discretized Berry curvature on the two dimensional grid
of parameters. The function works well for cases with no band mixing."""
b_curv = berry_curvature(eigfs)
fig, ax = plt.subplots()
ax.imshow(b_curv, origin="lower")
ax.set_title("Berry curvature")
ax.set_xlabel(r"$Parameter0$")
ax.set_ylabel(r"$Parameter1$")
fig.tight_layout()
fig.savefig("berry_curvature.pdf")
| BartMassey/nb-misc | arrayqueue.py | Python | mit | 1,163 | 0.000861 |
# Array-based circular queue implementation.
# Copyright © 2014 Bart Massey
# [This program is licensed under the "MIT License"]
# Please see the file COPYING in the source
# distribution of this software for license terms.
class Queue(object):
def __init__(self, max_size):
self.queue = [None] * (max_size + 1)
self.enq = 0
self.deq = 0
self.n = 0
def increase(self, n):
n += 1
if n >= len(self.queue):
n -= len(self.queue)
return n
    def enqueue(self, v):
assert not self.is_full()
self.queue[self.enq] = v
self.enq = self.increase(self.enq)
self.n += 1
def dequeue(self):
assert not self.is_empty()
v = self.queue[self.deq]
self.deq = self.increase(self.deq)
self.n -= 1
return v
def is_empty(self):
assert (self.enq == self.deq) == (self.n == 0)
return self.n == 0
def is_full(self):
        return self.increase(self.enq) == self.deq
def size(self):
return self.n
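# Quick usage sketch (illustrative only, not part of the original module):
#   q = Queue(3)
#   q.enqueue('a'); q.enqueue('b')
#   assert q.dequeue() == 'a' and q.size() == 1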
if __name__ == "__main__":
from queuetest import arrayqueuetest
arrayqueuetest(Queue)
| n4xh4ck5/wh01p | modules/getip/getip.py | Python | gpl-3.0 | 636 | 0.044025 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import socket
import requests
def GetIP(target):
timeout = 8
ip=""
valid_responses = ['200', '401', '403', '404', '301', '302']
try:
if str(requests.get('http://' + target,timeout = timeout).status_code) not in valid_responses:
if str(requests.get('https://' + target, timeout= timeout).status_code) in valid_responses:
                ip = socket.gethostbyname(target)
else:
domain_ip = '0.0.0.0'
print "The target is not up"
exit(0)
else:
ip = socket.gethostbyname(target)
except Exception as e:
print e
pass
return ip
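# Usage sketch (hypothetical host):
#   ip = GetIP("example.com")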
| tejasnikumbh/Algorithms | Warmup/AlternatingCharacgters.py | Python | bsd-2-clause | 1,034 | 0.011605 |
# Importing Libraries
import sys
'''
Function that generates the results for all the test cases. Iterates through
the test cases and delegates the work to getCount
'''
def genResults(cases):
results = []
for case in cases:
results.append(getCount(case))
return results
'''
Function that counts the number of deletions required. Marks all characters
that must be deleted and returns how many were marked.
'''
def getCount(strCase):
    strChars = list(strCase)
markList = [0]*len(strChars)
prev = strChars[0]
for i in range(1,len(strChars)):
if(strChars[i] == prev):
markList[i] = 1
else:
prev = strChars[i]
return sum(markList)
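# Worked example: getCount("AAABBB") marks indices 1, 2, 4 and 5, so it
# returns 4; four deletions leave the alternating string "AB".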
'''
Main Function for the program
'''
if __name__ == "__main__":
# Parsing in input
t = int(sys.stdin.readline())
cases = [x for x in sys.stdin.readlines()]
# Generating the results
    results = genResults(cases)
# Printing out results
for i in results:
print i
| antonyr/django-haystack | test_haystack/whoosh_tests/test_whoosh_backend.py | Python | bsd-3-clause | 45,377 | 0.002402 |
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
from datetime import timedelta
from decimal import Decimal
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.datetime_safe import date, datetime
from whoosh.fields import BOOLEAN, DATETIME, KEYWORD, NUMERIC, TEXT
from whoosh.qparser import QueryParser
from haystack import connections, indexes, reset_search_queries
from haystack.exceptions import SkipDocument, SearchBackendError
from haystack.inputs import AutoQuery
from haystack.models import SearchResult
from haystack.query import SearchQuerySet, SQ
from haystack.utils.loading import UnifiedIndex
from ..core.models import AFourthMockModel, AnotherMockModel, MockModel
from ..mocks import MockSearchResult
from .testcases import WhooshTestCase
class WhooshMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
name = indexes.CharField(model_attr='author')
pub_date = indexes.DateTimeField(model_attr='pub_date')
def get_model(self):
return MockModel
class WhooshMockSearchIndexWithSkipDocument(WhooshMockSearchIndex):
def prepare_text(self, obj):
if obj.author == 'daniel3':
raise SkipDocument
return obj.author
class WhooshAnotherMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
name = indexes.CharField(model_attr='author')
pub_date = indexes.DateTimeField(model_attr='pub_date')
def get_model(self):
return AnotherMockModel
def prepare_text(self, obj):
return obj.author
class AllTypesWhooshMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
name = indexes.CharField(model_attr='author', indexed=False)
pub_date = indexes.DateTimeField(model_attr='pub_date')
sites = indexes.MultiValueField()
seen_count = indexes.IntegerField(indexed=False)
is_active = indexes.BooleanField(default=True)
def get_model(self):
return MockModel
class WhooshMaintainTypeMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
month = indexes.CharField(indexed=False)
pub_date = indexes.DateTimeField(model_attr='pub_date')
def get_model(self):
return MockModel
def prepare_text(self, obj):
return "Indexed!\n%s" % obj.pk
def prepare_month(self, obj):
return "%02d" % obj.pub_date.month
class WhooshBoostMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(
document=True, use_template=True,
template_name='search/indexes/core/mockmodel_template.txt'
)
author = indexes.CharField(model_attr='author', weight=2.0)
editor = indexes.CharField(model_attr='editor')
pub_date = indexes.DateTimeField(model_attr='pub_date')
def get_model(self):
return AFourthMockModel
def prepare(self, obj):
data = super(WhooshBoostMockSearchIndex, self).prepare(obj)
if obj.pk % 2 == 0:
data['boost'] = 2.0
return data
class WhooshAutocompleteMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(model_attr='foo', document=True)
name = indexes.CharField(model_attr='author')
pub_date = indexes.DateTimeField(model_attr='pub_date')
text_auto = indexes.EdgeNgramField(model_attr='foo')
name_auto = indexes.EdgeNgramField(model_attr='author')
def get_model(self):
return MockModel
class WhooshSearchBackendTestCase(WhooshTestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(WhooshSearchBackendTestCase, self).setUp()
self.old_ui = connections['whoosh'].get_unified_index()
self.ui = UnifiedIndex()
self.wmmi = WhooshMockSearchIndex()
self.wmmidni = WhooshMockSearchIndexWithSkipDocument()
self.wmtmmi = WhooshMaintainTypeMockSearchIndex()
self.ui.build(indexes=[self.wmmi])
self.sb = connections['whoosh'].get_backend()
connections['whoosh']._index = self.ui
self.sb.setup()
self.raw_whoosh = self.sb.index
self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)
self.sb.delete_index()
self.sample_objs = MockModel.objects.all()
def tearDown(self):
connections['whoosh']._index = self.old_ui
super(WhooshSearchBackendTestCase, self).tearDown()
def whoosh_search(self, query):
self.raw_whoosh = self.raw_whoosh.refresh()
searcher = self.raw_whoosh.searcher()
return searcher.search(self.parser.parse(query), limit=1000)
def test_non_silent(self):
bad_sb = connections['whoosh'].backend('bad', PATH='/tmp/bad_whoosh', SILENTLY_FAIL=False)
bad_sb.use_file_storage = False
bad_sb.storage = 'omg.wtf.bbq'
try:
bad_sb.update(self.wmmi, self.sample_objs)
self.fail()
except:
pass
try:
bad_sb.remove('core.mockmodel.1')
self.fail()
except:
pass
try:
bad_sb.clear()
self.fail()
except:
pass
try:
bad_sb.search('foo')
self.fail()
except:
pass
def test_update(self):
self.sb.update(self.wmmi, self.sample_objs)
# Check what Whoosh thinks is there.
self.assertEqual(len(self.whoosh_search(u'*')), 23)
self.assertEqual([doc.fields()['id'] for doc in self.whoosh_search(u'*')], [u'core.mockmodel.%s' % i for i in range(1, 24)])
def test_update_with_SkipDocument_raised(self):
self.sb.update(self.wmmidni, self.sample_objs)
# Check what Whoosh thinks is there.
res = self.whoosh_search(u'*')
self.assertEqual(len(res), 14)
ids = [1, 2, 5, 6, 7, 8, 9, 11, 12, 14, 15, 18, 20, 21]
self.assertListEqual(
[doc.fields()['id'] for doc in res],
[u'core.mockmodel.%s' % i for i in ids]
)
def test_remove(self):
self.sb.update(self.wmmi, self.sample_objs)
self.assertEqual(self.sb.index.doc_count(), 23)
self.sb.remove(self.sample_objs[0])
self.assertEqual(self.sb.index.doc_count(), 22)
def test_clear(self):
self.sb.update(self.wmmi, self.sample_objs)
self.assertEqual(self.sb.index.doc_count(), 23)
self.sb.clear()
self.assertEqual(self.sb.index.doc_count(), 0)
self.sb.update(self.wmmi, self.sample_objs)
self.assertEqual(self.sb.index.doc_count(), 23)
self.sb.clear([AnotherMockModel])
self.assertEqual(self.sb.index.doc_count(), 23)
self.sb.clear([MockModel])
self.assertEqual(self.sb.index.doc_count(), 0)
self.sb.index.refresh()
self.sb.update(self.wmmi, self.sample_objs)
self.assertEqual(self.sb.index.doc_count(), 23)
self.sb.clear([AnotherMockModel, MockModel])
self.assertEqual(self.raw_whoosh.doc_count(), 0)
def test_search(self):
self.sb.update(self.wmmi, self.sample_objs)
self.assertEqual(len(self.whoosh_search(u'*')), 23)
# No query string should always yield zero results.
self.assertEqual(self.sb.search(u''), {'hits': 0, 'results': []})
# A one letter query string gets nabbed by a stopwords filter. Should
# always yield zero results.
self.assertEqual(self.sb.search(u'a'), {'hits': 0, 'results': []})
# Possible AttributeError?
# self.assertEqual(self.sb.search(u'a b'), {'hits': 0, 'results': [], 'spelling_suggestion': '', 'facets': {}})
self.assertEqual(self.sb.search(u'*')['hits'], 23)
self.assertEqual([result.pk for result in self.sb.search(u'*')['results']], [u'%s' % i for i in range(1, 24)])
self.assertEqual(self.sb.search(u'Indexe')['hits'], 23)
self.assertEqual(self.sb.search(u
| kobotoolbox/kpi | kpi/views/environment.py | Python | agpl-3.0 | 1,307 | 0.000765 |
# coding: utf-8
import constance
from django.conf import settings
from rest_framework.response import Response
from rest_framework.views import APIView
from kobo.static_lists import COUNTRIES, LANGUAGES, SECTORS
from kobo.apps.hook.constants import SUBMISSION_PLACEHOLDER
class EnvironmentView(APIView):
"""
GET-only view for certain server-provided configuration data
"""
CONFIGS_TO_EXPOSE = [
'TERMS_OF_SERVICE_URL',
'PRIVACY_POLICY_URL',
'SOURCE_CODE_URL',
'SUPPORT_EMAIL',
'SUPPORT_URL',
'COMMUNITY_URL',
]
def get(self, request, *args, **kwargs):
"""
Return the lowercased key and value of each setting in
`CONFIGS_TO_EXPOSE`, along with the static lists of sectors, countries,
all known languages, and languages for which the interface has
translations.
"""
data = {
key.lower(): getattr(constance.config, key)
for key in self.CONFIGS_TO_EXPOSE
}
data['available_sectors'] = SECTORS
data['available_countries'] = COUNTRIES
data['all_languages'] = LANGUAGES
data['interface_languages'] = settings.LANGUAGES
data['submission_placeholder'] = SUBMISSION_PLACEHOLDER
return Response(data)
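# Hypothetical URL wiring, for illustration only (the real kpi URLconf may
# differ):
#   from django.urls import path
#   urlpatterns = [path('environment/', EnvironmentView.as_view())]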
| xzackli/isocurvature_2017 | analysis/plot_isocurvature_spectra_effects/deriv_iso.py | Python | mit | 3,825 | 0.019869 |
# must use python 2
from classy import Class
import matplotlib.pyplot as plt
import numpy as np
import math
max_l = 5000
max_scalars = '5000'
ell = np.array( range(1, max_l+1) )
def getDl( pii1=0.5e-10, pii2=1e-9, pri1=1e-13 ):
# Define your cosmology (what is not specified will be set to CLASS default parameters)
params = {
'output': 'tCl lCl pCl',
'modes': 's', # scalar perturbations
'lensing': 'yes',
'ic': 'ad&cdi',
'l_max_scalars':max_scalars,
'P_k_ini type': 'two_scales',
'k1': 0.002,
'k2': 0.1,
'P_{RR}^1': 2.34e-9,
'P_{RR}^2': 2.115e-9,
'P_{II}^1' : pii1,
'P_{II}^2' : pii2,
'P_{RI}^1' : pri1}
cosmo = Class()
cosmo.set(params)
cosmo.compute()
# print(dir(cosmo)) # use this command to see what is in the cosmo
# It is a dictionary that contains the fields: tt, te, ee, bb, pp, tp
cls = cosmo.raw_cl(max_l) # Access the cl until l=1000
yy = np.array( cls['ee'][1:] )
zz = np.array( cls['tt'][1:] )
yz = np.array( cls['te'][1:] )
ee = ((ell)*(ell+1) * yy / (2 * math.pi))
tt = ((ell)*(ell+1) * zz / (2 * math.pi))
te = ((ell)*(ell+1) * yz / (2 * math.pi))
cosmo.struct_cleanup()
return tt, te, ee
# Print on screen to see the output
# print len(cls['tt'])
pii1 = 0.5e-10
pii2 = 1e-9
pri1 = 1e-13
dpii1 = pii1 / 10000.0
dpii2 = pii2 / 10000.0
dpri1 = pri1 / 10000.0
pii1_tt1, pii1_te1, pii1_ee1 = getDl( pii1 = pii1 - dpii1 )
pii1_tt2, pii1_te2, pii1_ee2 = getDl( pii1 = pii1 + dpii1 )
pii2_tt1, pii2_te1, pii2_ee1 = getDl( pii2 = pii2 - dpii2 )
pii2_tt2, pii2_te2, pii2_ee2 = getDl( pii2 = pii2 + dpii2 )
pri1_tt1, pri1_te1, pri1_ee1 = getDl( pri1 = pri1 - dpri1 )
pri1_tt2, pri1_te2, pri1_ee2 = getDl( pri1 = pri1 + dpri1 )
# plot something with matplotlib...
plt.plot( (pii1_tt2 - pii1_tt1)/(2 * dpii1), label='$P_{II}^1$', markersize=0 )
plt.plot( (pii2_tt2 - pii2_tt1)/(2 * dpii2), label='$P_{II}^2$', markersize=0 )
# plt.plot( (pri1_tt2 - pri1_tt1)/(2 * dpri1), label='$P_{RI}^1$', markersize=0 )
plt.title('TT Derivatives')
plt.ylabel(r'$d \mathcal{D}_l / d P_{II}^j$')
plt.xlabel(r'$l$')
plt.legend()
plt.savefig('tt.pdf')
plt.clf()
plt.plot( (pii1_te2 - pii1_te1)/(2 * dpii1), label='$P_{II}^1$', markersize=0 )
plt.plot( (pii2_te2 - pii2_te1)/(2 * dpii2), label='$P_{II}^2$', markersize=0 )
# plt.plot( (pri1_te2 - pri1_te1)/(2 * dpri1), label='$P_{RI}^1$', markersize=0 )
plt.title('TE Derivatives')
plt.ylabel(r'$d \mathcal{D}_l / d P_{II}^j$')
plt.xlabel(r'$l$')
plt.legend()
plt.savefig('te.pdf')
plt.clf()
plt.plot( (pii1_ee2 - pii1_ee1)/(2 * dpii1), label='$P_{II}^1$', markersize=0 )
plt.plot( (pii2_ee2 - pii2_ee1)/(2 * dpii2), label='$P_{II}^2$', markersize=0 )
# plt.plot( (pri1_ee2 - pri1_ee1)/(2 * dpri1), label='$P_{RI}^1$', markersize=0 )
plt.title('EE Derivatives')
plt.ylabel(r'$d \mathcal{D}_l / d P_{II}^j$')
plt.xlabel(r'$l$')
plt.legend()
plt.savefig('ee.pdf')
plt.clf()
plt.plot( (pii1_ee2 - pii1_ee1)/(2 * dpii1), label='$P_{II}^1$', markersize=0 )
plt.plot( (pii2_ee2 - pii2_ee1)/(2 * dpii2), label='$P_{II}^2$', markersize=0 )
# plt.plot( (pri1_ee2 - pri1_ee1)/(2 * dpri1), label='$P_{RI}^1$', markersize=0 )
plt.title('EE Derivatives')
plt.ylabel(r'$d \mathcal{D}_l / d P_{II}^j$')
plt.xlabel(r'$l$')
plt.yscale('log')
plt.legend()
plt.savefig('logee.pdf')
plt.clf()
plt.plot( (pii1_tt2 - pii1_tt1)/(2 * dpii1), label='$P_{II}^1$', markersize=0 )
plt.plot( (pii2_tt2 - pii2_tt1)/(2 * dpii2), label='$P_{II}^2$', markersize=0 )
# plt.plot( (pri1_tt2 - pri1_tt1)/(2 * dpri1), label='$P_{RI}^1$', markersize=0 )
plt.title('TT Derivatives')
plt.ylabel(r'$d \mathcal{D}_l / d P_{II}^j$')
plt.xlabel(r'$l$')
plt.yscale('log')
plt.legend()
plt.savefig('logtt.pdf')
plt.clf()
| kbarbary/cubefit | cubefit/__init__.py | Python | mit | 158 | 0 |
from .fitting import *
from .io import *
from .main import *
from .plotting import *
from .psf import *
from .utils import *
from .version import __version__