repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
PUNCH-Cyber/stoq | stoq/cli.py | Python | apache-2.0 | 10,981 | 0.001002 | #!/usr/bin/env python3
# Copyright 2014-2018 PUNCH Cyber Analytics Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import select
import asyncio
import argparse
import unittest
from pathlib import Path
from typing import Dict, Union
import stoq.tests as tests
from stoq.installer import StoqPluginInstaller
from stoq import Stoq, PayloadMeta, RequestMeta, __version__
def main() -> None:
about = f'stoQ :: v{__version__} :: an automated analysis framework'
# If $STOQ_HOME exists, set our base directory to that, otherwise
# use $HOME/.stoq
try:
stoq_home = str(
Path(os.getenv('STOQ_HOME', f'{str(Path.home())}/.stoq')).resolve(
strict=True
)
)
except FileNotFoundError as err:
print(f"$STOQ_HOME is invalid, exiting: {err}", file=sys.stderr)
sys.exit(1)
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=about,
epilog='''
Examples:
- Scan a file with installed plugins and dispatch rules:
$ %(prog)s scan mybadfile.exe
- Scan a file and force it to go through the yara plugin:
$ %(prog)s scan mybadfile.exe -s yara
- Ingest from PubSub, force all payloads through yara, trid, and exif,
then save results to file:
$ %(prog)s run -a yara trid exif -P pubsub -C file
- Monitor a directory (specified in dirmon.stoq) for newly created files
send them to workers, and archive all payloads into MongoDB:
$ %(prog)s run -P dirmon -A mongodb
- Install a plugin from a directory
$ %(prog)s install path/to/plugin_directory
''',
)
subparsers = parser.add_subparsers(title='commands', dest='command')
subparsers.required = True
scan = subparsers.add_parser('scan', help='Scan a given payload')
scan.add_argument(
'file',
nargs='?',
type=argparse.FileType('rb'),
default=sys.stdin.buffer,
help='File to scan, can also be provided from stdin',
)
run = subparsers.add_parser(
'run', help='Continually ingest and scan payloads from Provider plugins'
)
run.add_argument(
'-P', '--providers', nargs='+', help='Provider plugins to ingest payloads from'
)
# Add shared arguments so they still show up in the help dialog
for subparser in [scan, run]:
subparser.add_argument(
'-A',
'--dest-archivers',
nargs='+',
help='Archiver plugins to send payloads to',
)
subparser.add_argument(
'-S',
'--source-archivers',
nargs='+',
help='Archiver plugins to read payload from',
)
subparser.add_argument(
'-D',
'--decorators',
nargs='+',
help='Decorator plugins to send results to before saving',
)
subparser.add_argument(
'-C', '--connectors', nargs='+', help='Connector plugins to send results to'
)
subparser.add_argument(
'-R',
'--dispatchers',
nargs='+',
help='Dispatcher plugins to use send payloads to',
)
subparser.add_argument(
'-a',
'--always-dispatch',
nargs='+',
help='Worker plugins to always dispatch plugins to',
)
subparser.add_argument(
'-s',
'--start-dispatch',
nargs='+',
help='Worker plugins to add to the original payload dispatch',
)
subparser.add_argument(
'--max-recursion',
type=int,
default=None,
help='Maximum level of recursion into a payload and extracted payloads',
)
subparser.add_argument('--plugin-opts', nargs='+', help='Plugin options')
subparser.add_argument(
'--request-source',
default=None,
help='Source name to add to initial scan request',
)
subparser.add_argument(
'--request-extra',
nargs='+',
help='Key/value pair to add to initial scan request metadata',
)
subparser.add_argument(
'--plugin-dir', nargs='+', help='Directory(ies) containing stoQ plugins'
)
subparser.add_argument(
'--config-file',
default=f'{stoq_home}/stoq.cfg',
help='Path to stoQ configuration file',
)
subparser.add_argument(
'--log-level',
default=None,
choices=['debug', 'info', 'warning', 'error' 'crtical'],
help='Log level for stoQ events',
)
plugin_list = subparsers.add_parser('list', help='List available plugins')
plugin_list.add_argument(
'--plugin-dir', nargs='+', help='Directory(ies) containing stoQ plugins'
)
install = subparsers.add_parser('install', help='Install a given plugin')
install.add_argument(
'plugin_path', help='Directory or Github repo of the plugin to install'
)
install.add_argument(
'--install_dir',
default=os.path.join(stoq_home, 'plugins'),
help='Override the default plu | gin installation directory',
)
install.add_argument(
'--upgrade',
action='store_true',
help='Force the plugin to be upgraded if it already exists',
)
install.add_argument(
'--github', action='store_true', help='Install plugin from Github re | pository'
)
subparsers.add_parser('test', help='Run stoQ tests')
args = parser.parse_args()
plugin_opts: Union[Dict, None] = None
try:
if args.plugin_opts:
plugin_opts = {}
for arg in args.plugin_opts:
plugin_name, plugin_option = arg.split(':', 1)
opt, value = plugin_option.split('=', 1)
if value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
if plugin_name in plugin_opts:
plugin_opts[plugin_name].update({opt: value})
else:
plugin_opts[plugin_name] = {opt: value}
except AttributeError:
pass
except ValueError as err:
print(f'Failed parsing plugin option: {err}')
request_meta = RequestMeta()
try:
if args.request_source:
request_meta.source = args.request_source
if args.request_extra:
for arg in args.request_extra:
extra_key, extra_value = arg.split('=', 1)
if extra_value.lower() == 'true':
extra_value = True
elif extra_value.lower() == 'false':
extra_value = False
request_meta.extra_data[extra_key] = extra_value
except AttributeError:
pass
except ValueError as err:
print(f'Failed parsing request metadata option: {err}')
try:
if not os.path.isfile(args.config_file):
print(f'Warning: {args.config_file} does not exist, using stoQ defaults!')
except AttributeError:
pass
if args.command == 'scan':
with args.file as f:
# Verify that the file or stdin has some sort of data
if not select.select([f], [], [], 0.0)[0]:
print('Error: No content to scan was provided')
sys.exit(2)
content = f.read()
if not content:
print('Error: The provided content to scan was empty')
sys.exit(2)
|
introini/ourlist | users/urls.py | Python | bsd-2-clause | 1,039 | 0.005775 | from django.conf.urls import include, url
# from django.urls import path, include
from . import views
# from django.contrib.auth import views as auth_views
import search.views
app_name = 'users'
urlpatterns = [
url(r'^login/', views.Login.as_view(), name='login'),
url(r'^register/', views.register_view, name='register'),
url(r'^logout/', views.Logout.as_view(), name='logout'),
url(r'^friendship/', include('friendship.urls')),
url(r'^settings/(?P<pk>[0-9]+)/$', views.UserSettings.as_view(), name='user-settings-ai'),
url(r'^settings/(?P<pk>[0-9]+)/$', views.UserSettings.as_view(), name='user-settings-pi'),
url(r'^settings/fl=(?P<pk>[0-9]+)$ | ', views.user_settings_fl, name='user-settings-fl'),
url(r'^settings/fl=(?P<pk>[ | 0-9]+)&search=$', views.search_users, name='search-users'),
url(r'^settings/fl=(?P<pk>[0-9]+)&add=(?P<add_pk>[0-9]+)$', views.add_friend, name='add-friend'),
url(r'^settings/fl=(?P<pk>[0-9]+)&remove=(?P<remove_pk>[0-9]+)$', views.remove_friend, name='remove-friend'),
]
|
skodapetr/lbvs-environment | scripts/libs/data.py | Python | mit | 6,451 | 0.00062 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Provide ways to loading molecules for the test instances.
"""
import os
import re
__license__ = 'X11'
__DATA_DIRECTORY = os.path.dirname(os.path.realpath(__file__)) + '/../../data/'
__DATASET_DIRECTORY = __DATA_DIRECTORY + 'datasets/'
__MOLECULES_FILE_CACHE = {}
class DatasetReference(object):
"""Definition of a dataset reference.
"""
def __init__(self, dataset, selection, group):
self.dataset = dataset
self.selection = selection
self.group = group
class Molecules(object):
"""Object with molecules for a single test instance.
"""
def __init__(self):
self.test = []
self.train = {
"actives": [],
"inactives": []
}
def list_datasets(as_path=False):
"""
:param as_path:
:return: Datasets in the platform.
"""
datasets = [name for name in os.listdir(__DATASET_DIRECTORY)
if os.path.isdir(__DATASET_DIRECTORY + name)]
if as_path:
return [__DATASET_DIRECTORY + name for name in datasets]
else:
return datasets
def list_selections(dataset, as_path=False):
"""
:param dataset: Name of the dataset.
:param as_path:
:return: Selections in the dataset.
"""
directory = __DATASET_DIRECTORY + dataset + '/selections/'
selections = [name for name in os.listdir(directory)
if os.path.isdir(directory + name)]
if as_path:
return [directory + name for name in selections]
else:
return selections
def list_groups(dataset, selection, as_path=False):
"""
:param dataset: Name of the dataset.
:param selection: Name of the selection.
:param as_path:
:return: Groups in given selection and datasets.
"""
directory = __DATASET_DIRECTORY + dataset + '/selections/' + selection + '/'
if as_path:
return [directory + name for name in os.listdir(directory)]
else:
return os.listdir(directory)
def list_instances_from_reference(
dataset_reference, as_path=False):
return list_instances(dataset_reference.dataset,
dataset_reference.selection,
dataset_reference.group, as_path)
def list_instances(dataset, selection, group, as_path=False):
"""
:param dataset: Name of the dataset.
:param selection: Name of the selection.
:param group: Name of the group.
:param as_path:
:return: Instances for given dataset, selection and group.
"""
directory = __DATASET_DIRECTORY + dataset + '/selections/' + \
selection + '/' + group + '/'
instances_names = [name for name in os.listdir(directory)
if name.startswith("s_")]
if as_path:
return [directory + name for name in instances_names]
else:
return instances_names
def __load_molecules(path):
"""
:param path:
:return: Valid molecules from given file.
"""
global __MOLECULES_FILE_CACHE
if path in __MOLECULES_FILE_CACHE:
return __MOLECULES_FILE_CACHE[path]
if len(__MOLECULES_FILE_CACHE) > 2:
__MOLECULES_FILE_CACHE = {}
import rdkit
from rdkit import Chem
molecules = [molecule for molecule in rdkit.Chem.SDMolSupplier(str(path))
if molecule is not None]
__MOLECULES_FILE_CACHE[path] = molecules
return molecules
def load_molecules(dataset_reference, instance_data):
"""
:param dataset_reference:
:param instance_data: Data of the instance.
:return:
"""
sdf_directory = __DATASET_DIRECTORY + dataset_reference.dataset + \
'/molecules/sdf/'
molecules = {}
for file in instance_data['data']['files']:
sdf_path = sdf_directory + file + '.sdf'
for molecule in __load_molecules(sdf_path):
molecules[molecule.GetProp('_Name')] = molecule
result = Molecules()
for item in instance_data['data']['test']:
result.test.append(molecules[item['name']])
for item in instance_data['data']['train']['decoys']:
result.train["inactives"].append(molecules[item['name']])
for item in instance_data['data']['train']['ligands']:
result.train["actives"].append(molecules[item['name']])
return result
def resolve(dataset_filter='.*', selection_filter='.*', group_filter='.*'):
"""
:param dataset_filter:
:param selection_filter:
:param group_filter:
:return: Array of matches to given filters.
"""
result = []
re_dataset = re.compile(dataset_filter)
re_selection = re.compile(selection_filter)
re_group = re.compile(group_filter)
for dataset in list_datasets():
if not re_dataset.match(dataset):
continue
for selection in list_selections(dataset):
if not re_selection.match(selection):
continue
for group in list_groups(dataset, selection):
if not re_group.match(group):
continue
result.append(DatasetReference(dataset, selection, group))
return result
def dataset_to_path(dataset_reference):
"""
:param dataset_reference:
:return: Path to group directory.
"""
return __DATASET_DIRECTORY + dataset_reference.dataset + '/selections/' + \
dataset_reference.selection + '/' + dataset_reference.group
def list_collections(as_path=False):
"""
:param as_path:
:return: List of collections.
"""
if as_path:
return [__DATA_DIRECTORY + name
for name in os.listdir(__DATA_DIRECTORY + '/collections/')]
else:
return os.listdir(__DATA_DIRECTORY + '/collections/')
def list_datasets_for_collection(collection, | default_selection=None):
"""
:param collection:
:param default_selection:
:return: Groups of datasets.
"""
collection_dir = __DATA_DIRECTORY + '/collections/' + collection + '/'
result = {}
for name in os.lis | tdir(collection_dir):
datasets_in_collection = []
result[name] = datasets_in_collection
with open(collection_dir + name) as stream:
for line in stream:
line = line.rstrip().split(',')
datasets_in_collection.append([line[0], line[1], line[2]])
return result
if __name__ == '__main__':
raise Exception('This module should be used only as a library!')
|
Silvia333/ASleep | my_importData_small.py | Python | apache-2.0 | 1,639 | 0.009152 | import numpy as np
import pandas as pd
input_file = "3_floor.csv"
# comma delimited is the default
df = pd.read_csv(input_file, header = 0)
# for space delimited use:
# df = pd.read_csv(input_file, header = 0, delimiter = " ")
# for tab delimited use:
# df = pd.read_csv(input_file, header = 0, delimiter = "\t")
# put the original column names in a python list
original_headers = list(df.columns.values)
# remove the non-numeric columns
df = df._get_numeric_data()
# put the numeric column names in a python list
numeric_headers = list(df.columns.values)
# create a numpy array with the numeric values for input into scikit-learn
numpy_array = df.as_matrix()
# reverse the order of the columns
#numeric_headers.reverse()
#reverse_df = df[numeric_headers]
# throughput random forest regression
t = numpy_array[0:160, 3]
x = np.linspace(0, 159, 160)
xall = np.linspace(0, 181, 182)
xtest = np.linspace(160, 181, 22)
from sklearn.ensemble import RandomForestRegressor
#tfit = RandomForestRegressor(100).fit(x[:, None], t).predict(x[:, None])
tfit = | RandomForestRegressor(100).fit(numpy_array[0:160, 0:2 ], t).predict(numpy_array[16 | 0:182, 0:2])
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
#ax.errorbar(x, t, 0.3, fmt='*', label="Training traffic")
ax.plot(xtest, tfit, '-r', label="Predicted traffic")
ax.errorbar(xtest, numpy_array[160:182, 3], fmt='g-o', label="Test traffic")
#ax.set_ylabel('Throughput (kbits/second)')
#ax.set_xlabel('Time in hours')
#ax.set_title('Taffic Prediction with Random Forest Regression on 3rd floor')
#ax.legend(loc="upper left")
plt.savefig('0_floor_small.jpg', dpi=300)
plt.show()
|
hamish2014/optTune | optTune/paretoArchives/paretoArchive2D_multi_front.py | Python | gpl-3.0 | 11,121 | 0.012409 | """
Multi-objective pareto Front archive, features:
- stores current pareto front approximation.
- check designs domaninance status
- this version is designed for large pareto sets.
* quick dominance calculting algorithms
Only 2D Pareto Fronts values
"""
import numpy
def dominates(a,b):
"all(a <= b) and any(a < b), no longer used"
return (a[0] <= b[0] and a[1] <= b[1]) and (a[0]<b[0] or a[1]<b[1])
#return (a <= b).all() and (a < b).any() # to slow
#cmp_vals = [cmp(Av,Bv) for Av,Bv in zip(a,b)]
#return 1 not in cmp_vals and -1 in cmp_vals
class _paretoArchive_design:
"class containing information about the design."
def __init__(self, fv, xv):
self.fv = fv
self.xv = xv
def __eq__(self, b):
return (self.fv == b.fv).all() and (self.xv == b.xv).all()
class paretoArchive2D_multi_front:
def __init__(self, fronts=5, _offset=0, _frontDominating=None):
"""
make use of a sorted by f1 list for for sorting data.
"""
self.designs = []
self.searc | h_list = []
self.nod_inspected = 0 #nod = number of designs
self.nod_dominance_check_only = 0
self.nod_rejected = 0
# recusively create pareto front layers.
self.frontDominating = _frontDominating
self.offset = _offset
if _offset < fronts-1:
self.frontDominated = paretoArchive2D_multi_front(fronts, _offset+1, self)
else:
self.frontDominated = None
self.N = 0
def list_loc(self, fv_dim1):
| "binary search to locate comparison point."
search_list = self.search_list
lb, ub = 0, len(search_list)-1
while ub - lb > 1:
mp = (ub + lb)/2
if search_list[mp] < fv_dim1:
lb = mp #try make sure lb is always less than fv_dim1, and hence non dominated ...
else:
ub = mp
if search_list[ub] == fv_dim1 and search_list[lb] < fv_dim1:
return ub
else: #search_list[lb] == fv_dim1
return lb
def add_design(self, fv, xv, loc, adjust_bounds):
self.designs.insert(loc, _paretoArchive_design(fv,xv))
self.search_list.insert(loc, fv[0])
if adjust_bounds:
self.lower_bound = min(self.lower_bound, fv[0])
self.upper_bound = max(self.upper_bound, fv[0])
self.N = self.N + 1
def del_design(self, index):
if self.frontDominated <> None:
self.frontDominated.inspect_design(self.designs[index].xv, self.designs[index].fv)
del self.designs[index], self.search_list[index]
self.nod_rejected = self.nod_rejected + 1
self.N = self.N - 1
def inspect_design(self, xv, fv):
"""
inspects designs and returns True if design added, or False if the design in not added,
in other words it returns if the design is non-dominated (True) or domaninated(False)
"""
assert len(fv) == 2
self.nod_inspected = self.nod_inspected + 1
if len(self.designs) == 0:
self.designs = [_paretoArchive_design(fv,xv)]
self.search_list = [fv[0]]
self.lower_bound = fv[0]
self.upper_bound = fv[0]
self.N = 1
return True
if self.lower_bound <= fv[0] and fv[0] <= self.upper_bound:
ind = self.list_loc(fv[0])
if not dominates(self.designs[ind].fv, fv):
if fv[0] > self.designs[ind].fv[0]:
self.add_design(fv,xv,ind+1,False)
check_ind = ind+2
else:
self.add_design(fv,xv,ind,False)
check_ind = ind+1
while check_ind < len(self.designs) and fv[1] < self.designs[check_ind].fv[1]:
self.del_design(check_ind)
if check_ind == len(self.designs):
self.upper_bound = fv[0]
return True
else :
self.nod_rejected = self.nod_rejected + 1
if self.frontDominated <> None:
self.frontDominated.inspect_design(xv, fv)
return False
elif fv[0] < self.lower_bound:
self.add_design(fv,xv,0,True)
while 1 < len(self.designs) and fv[1] <= self.designs[1].fv[1]:
self.del_design(1)
if len(self.designs) == 1:
self.upper_bound = fv[0]
return True
else: # self.upper_bound < fv[0]
if fv[1] < self.designs[-1].fv[1]:
self.add_design(fv,xv,len(self.designs),True)
return True
else:
self.nod_rejected = self.nod_rejected + 1
if self.frontDominated <> None:
self.frontDominated.inspect_design(xv, fv)
return False
def inspect_multiple(self, xvals, fvals):
"inspect multiple designs many fvals and xvals. function helps to reduce expensive grid calculations"
return [self.inspect_design(xv,fv) for xv,fv in zip(xvals,fvals)]
def dominates(self, fv):
"check if front dominates fv"
assert len(fv) == 2
self.nod_dominance_check_only = self.nod_dominance_check_only + 1
if len(self.designs) == 0:
return False
if self.lower_bound <= fv[0] and fv[0] <= self.upper_bound:
ind = self.list_loc(fv[0])
return self.designs[ind].fv[1] < fv[1]
elif fv[0] < self.lower_bound:
return True
else:
return self.designs[-1].fv[1] < fv[1]
def lower_bounds(self):
return numpy.array([self.designs[0].fv[0], self.designs[-1].fv[1]])
def upper_bounds(self):
return numpy.array([self.designs[-1].fv[0], self.designs[0].fv[1]])
def hyper_volume(self, HPV_bound ):
'Calculated the hypervolume bound between, the pareto front and an HPV_bound'
start_ind = 0
#trimming points outside HPV_bounds
while self.designs[start_ind].fv[1] > HPV_bound[1] and start_ind + 1 < len(self.designs)-1 :
start_ind = start_ind + 1
end_ind = len(self.designs)-1
while self.designs[end_ind].fv[0] > HPV_bound[0] and 0 < end_ind :
end_ind = end_ind - 1
HPV = 0.0
for i in range(start_ind, end_ind + 1):
if i == start_ind:
wid = HPV_bound[1] - self.designs[i].fv[1]
else:
wid = self.designs[i-1].fv[1] - self.designs[i].fv[1]
HPV = HPV + wid * ( HPV_bound[0] - self.designs[i].fv[0])
return HPV
def __getstate__(self):
odict = self.__dict__.copy() # copy the dict since we change it
del odict['designs']
odict['design_fv'] = numpy.array([d.fv for d in self.designs])
odict['design_xv'] = numpy.array([d.xv for d in self.designs])
return odict
def __setstate__(self, dict):
dict['designs'] = [ _paretoArchive_design(fv,xv) for fv,xv in zip(dict['design_fv'],dict['design_xv']) ]
self.__dict__.update(dict)
def __eq__(self, b):
'very slow ...'
return all( b_design in self.designs for b_design in b.designs ) and all( d in b.designs for d in self.designs )
def __repr__(self):
return """<lossless 2D pareto Front archive: size: %i, designs inspected: %i, designs rejected: %i, dominance checks %i >""" % (len(self.designs), self.nod_inspected, self.nod_rejected, self.nod_dominance_check_only + self.nod_inspected )
def plot(self, key='go'):
designs = self.designs
xv = [d.fv[0] for d in designs]
yv = [d.fv[1] for d in designs]
import pylab
pylab.plot(xv,yv,key)
def plotAll(self, keysList):
assert type(keysList) == list
import pylab
designs = self.designs
xv = [d.fv[0] for d in designs]
yv = [d.fv[1] for d in designs]
import pylab
pylab.plot(xv,yv,keysList[0],label='Front %i' % self.offset)
if self.frontDominated <> None:
self.frontDominated.plotAll(keysList[1:] |
SuriyaaKudoIsc/olympia | apps/api/utils.py | Python | bsd-3-clause | 5,273 | 0.000948 | import re
from django.conf import settings
from django.utils.html import strip_tags
import amo
from amo.helpers import absolutify
from amo.urlresolvers import reverse
from amo.utils import urlparams, epoch
from tags.models import Tag
from versions.compare import version_int
# For app version major.minor matching.
m_dot_n_re = re.compile(r'^\d+\.\d+$')
def addon_to_dict(addon, disco=False, src='api'):
"""
Renders an addon in JSON for the API.
"""
v = addon.current_version
url = lambda u, | **kwargs: settings.SITE_URL + urlparams(u, **kwargs)
if disco:
learnmore = settings.SERVICES_URL + reverse('discovery.addons.detail',
args=[addon.slug])
learnmore = urlparams(learnmore, src='discovery-personalrec')
else:
learnmore = url(addon.get_url_path(), src=src)
d = {
| 'id': addon.id,
'name': unicode(addon.name) if addon.name else None,
'guid': addon.guid,
'status': amo.STATUS_CHOICES_API[addon.status],
'type': amo.ADDON_SLUGS_UPDATE[addon.type],
'authors': [{'id': a.id, 'name': unicode(a.name),
'link': absolutify(a.get_url_path(src=src))}
for a in addon.listed_authors],
'summary': (
strip_tags(unicode(addon.summary)) if addon.summary else None),
'description': strip_tags(unicode(addon.description)),
'icon': addon.icon_url,
'learnmore': learnmore,
'reviews': url(addon.reviews_url),
'total_dls': addon.total_downloads,
'weekly_dls': addon.weekly_downloads,
'adu': addon.average_daily_users,
'created': epoch(addon.created),
'last_updated': epoch(addon.last_updated),
'homepage': unicode(addon.homepage) if addon.homepage else None,
'support': unicode(addon.support_url) if addon.support_url else None,
}
if addon.is_persona():
d['theme'] = addon.persona.theme_data
if v:
d['version'] = v.version
d['platforms'] = [unicode(a.name) for a in v.supported_platforms]
d['compatible_apps'] = v.compatible_apps.values()
if addon.eula:
d['eula'] = unicode(addon.eula)
if addon.developer_comments:
d['dev_comments'] = unicode(addon.developer_comments)
if addon.takes_contributions:
contribution = {
'link': url(addon.contribution_url, src=src),
'meet_developers': url(addon.meet_the_dev_url(), src=src),
'suggested_amount': addon.suggested_amount,
}
d['contribution'] = contribution
if addon.type == amo.ADDON_PERSONA:
d['previews'] = [addon.persona.preview_url]
else:
d['previews'] = [p.as_dict(src=src) for p in addon.all_previews]
return d
def extract_from_query(term, filter, regexp, end_of_word_boundary=True):
"""
This pulls out a keyword filter from a search term and returns the value
for the filter and a new term with the filter removed.
E.g. term="yslow version:3", filter='version', regexp='\w+' will result in
a return value of: (yslow, 3).
"""
re_string = r'\b%s:\s*(%s)' % (filter, regexp)
if end_of_word_boundary:
re_string += r'\b'
match = re.search(re_string, term)
if match:
term = term.replace(match.group(0), '').strip()
value = match.group(1)
else:
value = None
return (term, value)
def extract_filters(term, opts=None):
"""
Pulls all the filtering options out of the term and returns a cleaned term
and a dictionary of filter names and filter values. Term filters override
filters found in opts.
"""
opts = opts or {}
filters = {}
params = {}
# Type filters.
term, addon_type = extract_from_query(term, 'type', '\w+')
addon_type = addon_type or opts.get('addon_type')
if addon_type:
try:
atype = int(addon_type)
if atype in amo.ADDON_SEARCH_TYPES:
filters['type'] = atype
except ValueError:
# `addon_type` is not a digit.
# Try to find it in `ADDON_SEARCH_SLUGS`.
atype = amo.ADDON_SEARCH_SLUGS.get(addon_type.lower())
if atype:
filters['type'] = atype
# Platform filters.
term, platform = extract_from_query(term, 'platform', '\w+')
params['platform'] = platform or opts.get('platform')
# Version filters.
term, version = extract_from_query(term, 'version', '[0-9.]+')
params['version'] = version or opts.get('version')
# Tag filters.
term, tag = extract_from_query(term, 'tag', '\w+')
if tag:
tag = Tag.objects.filter(tag_text=tag).values_list('tag_text',
flat=True)
if tag:
filters['tags__in'] = list(tag)
return (term, filters, params)
def filter_version(version, app_id):
"""
Returns filters that can be sent to ES for app version ranges.
If the version is a alpha, beta, or pre-release this does an exact match.
Otherwise it will query where max >= M.Na and min <= M.N.
"""
low = version_int(version)
return {'appversion.%s.min__lte' % app_id: low}
|
micove/libdesktop-agnostic | wafadmin/Tools/ccroot.py | Python | lgpl-2.1 | 13,478 | 0.058391 | #! /usr/bin/env python
# encoding: utf-8
import os,sys,re
import TaskGen,Task,Utils,preproc,Logs,Build,Options
from Logs import error,debug,warn
from Utils import md5
from TaskGen import taskgen,after,before,feature
from Constants import*
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import config_c
USE_TOP_LEVEL=False
win_platform=sys.platform in('win32','cygwin')
def get_cc_version(conf,cc,gcc=False,icc=False):
cmd=cc+['-dM','-E','-']
try:
p=Utils.pproc.Popen(cmd,stdin=Utils.pproc.PIPE,stdout=Utils.pproc.PIPE,stderr=Utils.pproc.PIPE)
p.stdin.write('\n')
out=p.communicate()[0]
except:
conf.fatal('could not determine the compiler version %r'%cmd)
out=str(out)
if gcc:
if out.find('__INTEL_COMPILER')>=0:
conf.fatal('The intel compiler pretends to be gcc')
if out.find('__GNUC__')<0:
conf.fatal('Could not determine the compiler type')
if icc and out.find('__INTEL_COMPILER')<0:
conf.fatal('Not icc/icpc')
k={}
if icc or gcc:
out=out.split('\n')
import shlex
for line in out:
lst=shlex.split(line)
if len(lst)>2:
key=lst[1]
val=lst[2]
k[key]=val
conf.env['CC_VERSION']=(k['__GNUC__'],k['__GNUC_MINOR__'],k['__GNUC_PATCHLEVEL__'])
return k
class DEBUG_LEVELS:
ULTRADEBUG="ultradebug"
DEBUG="debug"
RELEASE="release"
OPTIMIZED="optimized"
CUSTOM="custom"
ALL=[ULTRADEBUG,DEBUG,RELEASE,OPTIMIZED,CUSTOM]
def scan(self):
debug('ccroot: _scan_preprocessor(self, node, env, path_lst)')
if len(self.inputs)==1:
node=self.inputs[0]
(nodes,names)=preproc.get_deps(node,self.env,nodepaths=self.env['INC_PATHS'])
if Logs.verbose:
debug('deps: deps for %s: %r; unresolved %r'%(str(node),nodes,names))
return(nodes,names)
all_nodes=[]
all_names=[]
seen=[]
for node in self.inputs:
(nodes,names)=preproc.get_deps(node,self.env,nodepaths=self.env['INC_PATHS'])
if Logs.verbose:
debug('deps: deps for %s: %r; unresolved %r'%(str(node),nodes,names))
for x in nodes:
if id(x)in seen:continue
seen.append(id(x))
all_nodes.append(x)
for x in names:
if not x in all_names:
all_names.append(x)
return(all_nodes,all_names)
class ccroot_abstract(TaskGen.task_gen):
def __init__(self,*k,**kw):
if len(k)>1:
k=list(k)
if k[1][0]!='c':
k[1]='c'+k[1]
TaskGen.task_gen.__init__(self,*k,**kw)
def get_target_name(self):
tp='program'
for x in self.features:
if x in['cshlib','cstaticlib']:
tp=x.lstrip('c')
pattern=self.env[tp+'_PATTERN']
if not pattern:pattern='%s'
dir,name=os.path.split(self.target)
if win_platform and getattr(self,'vnum','')and'cshlib'in self.features:
name=name+'-'+self.vnum.split('.')[0]
return os.path.join(dir,pattern%name)
def install_implib(self):
bld=self.outputs[0].__class__.bld
bindir=self.install_path
if not len(self.outputs)==2:
raise ValueError('fail')
dll=self.outputs[0]
bld.install_as(bindir+os.sep+dll.name,dll.abspath(self.env),chmod=self.chmod,env=self.env)
implib=self.outputs[1]
libdir='${LIBDIR}'
if not self.env['LIBDIR']:
libdir='${PREFIX}/lib'
if sys.platform=='cygwin':
bld.symlink_as(libdir+'/'+implib.name,bindir+os.sep+dll.name,env=self.env)
else:
bld.install_as(libdir+'/'+implib.name,implib.abspath(self.env),env=self.env)
def install_shlib(self):
bld=self.outputs[0].__class__.bld
nums=self.vnum.split('.')
path=self.install_path
if not path:return
libname=self.outputs[0].name
name3=libname+'.'+self.vnum
name2=libname+'.'+nums[0]
name1=libname
filename=self.outputs[0].abspath(self.env)
bld.install_as(os.path.join(path,name3),filename,env=self.env)
bld.symlink_as(os.path.join(path,name2),name3)
bld.symlink_as(os.path.join(path,name1),name3)
def default_cc(self):
Utils.def_attrs(self,includes='',defines='',rpaths='',uselib='',uselib_local='',add_objects='',p_flag_vars=[],p_type_vars=[],compiled_tasks=[],link_task=None)
def apply_verif(self):
if not(self.source or getattr(self,'add_objects',None)):
raise Utils.WafError('no source files specified for %s'%self)
if not self.target:
raise Utils.WafError('no target for %s'%self)
def vars_target_cprogram(self):
self.default_install_path=self.env['BINDIR']or'${PREFIX}/bin'
self.default_chmod=O755
def vars_target_cstaticlib(self):
self.default_install_path=self.env['LIBDIR']or'${PREFIX}/lib${LIB_EXT}'
def vars_target_cshlib(self):
if win_platform:
self.default_install_path=self.env['BINDIR']or'${PREFIX}/bin'
self.default_chmod=O755
else:
self.default_install_path=self.env['LIBDIR']or'${PREFIX}/lib${LIB_EXT}'
def install_target_cstaticlib(self):
if not self.bld.is_install:return
self.link_task.install_path=self.install_path
def install_target_cshlib(self):
if getattr(self,'vnum','')and not win_platform:
self.link_task.vnum=self.vnum
self.link_task.install=install_shlib
def apply_incpaths(self):
lst=[]
for lib in self.to_list(self.uselib):
for path in self.env['CPPPATH_'+lib]:
if not path in lst:
lst.append(path)
if preproc.go_absolute:
for path in preproc.standard_includes:
if not path in lst:
lst.append(path)
for path in self.to_list(self.includes):
if not path in lst:
if preproc.go_absolute or not os.path.isabs(path):
lst.append(path)
else:
self.env.prepend_value('CPPPATH',path)
for path in lst:
node=None
if os.path.isabs(path):
if preproc.go_absolute:
node=self.bld.root.find_dir(path)
elif path[0]=='#':
node=self.bld.srcnode
if len(path)>1:
node=node.find_dir(path[1:])
else:
node=self.path.find_dir(path)
if node:
self.env.append_value('INC_PATHS',node)
if USE_TOP_LEVEL:
self.env.append_value('INC_PATHS',self.bld.srcnode)
def apply_type_vars(self):
for x in self.features:
if not x in['cprogram','cstaticlib','cshlib']:
continue
x=x.lstrip('c')
st=self.env[x+'_USELIB']
if st:self.uselib=self.uselib+' '+st
for var in self.p_type_vars:
compvar='%s_%s'%(x,var)
value=self.env[compvar]
if value:self.env.append_value(var,value)
def apply_link(self):
	"""Create the link task for this target and store it as self.link_task.

	The task class name is derived from the features: 'ar_link_static' for
	static libraries, 'cxx_link'/'cc_link' otherwise, with 'dll_' or 'vnum_'
	prefixes added for shared libraries depending on the platform.
	"""
	link=getattr(self,'link',None)
	if not link:
		if'cstaticlib'in self.features:link='ar_link_static'
		elif'cxx'in self.features:link='cxx_link'
		else:link='cc_link'
	if'cshlib'in self.features:
		if win_platform:
			link='dll_'+link
		elif getattr(self,'vnum',''):
			if sys.platform=='darwin':
				# version numbers are not handled via vnum tasks on OS X
				self.vnum=''
			else:
				link='vnum_'+link
	tsk=self.create_task(link)
	outputs=[t.outputs[0]for t in self.compiled_tasks]
	tsk.set_inputs(outputs)
	# repaired: this call was garbled in the source ("get_targe | t_name")
	tsk.set_outputs(self.path.find_or_declare(get_target_name(self)))
	tsk.chmod=self.chmod
	self.link_task=tsk
def apply_lib_vars(self):
	"""Resolve uselib_local dependencies and fold their flags into this target.

	Walks the local-library dependency graph breadth-first, adds LIB/STATICLIB
	entries and LIBPATH folders for each dependency, makes this link task run
	after theirs, and finally appends the per-uselib flag variables.
	"""
	env=self.env
	uselib=self.to_list(self.uselib)
	seen=[]
	names=self.to_list(self.uselib_local)[:]
	# repaired: the loop header was garbled in the source ("while names | :")
	while names:
		x=names.pop(0)
		if x in seen:
			continue
		y=self.name_to_obj(x)
		if not y:
			raise Utils.WafError("object '%s' was not found in uselib_local (required by '%s')"%(x,self.name))
		# enqueue transitive local dependencies
		if getattr(y,'uselib_local',None):
			lst=y.to_list(y.uselib_local)
			for u in lst:
				if not u in seen:
					names.append(u)
		y.post()
		seen.append(x)
		libname=y.target[y.target.rfind(os.sep)+1:]
		if'cshlib'in y.features or'cprogram'in y.features:
			env.append_value('LIB',libname)
		elif'cstaticlib'in y.features:
			env.append_value('STATICLIB',libname)
		if y.link_task is not None:
			# link after the dependency and relink when it changes
			self.link_task.set_run_after(y.link_task)
			dep_nodes=getattr(self.link_task,'dep_nodes',[])
			self.link_task.dep_nodes=dep_nodes+y.link_task.outputs
			tmp_path=y.link_task.outputs[0].parent.bldpath(self.env)
			if not tmp_path in env['LIBPATH']:env.prepend_value('LIBPATH',tmp_path)
		# inherit the dependency's external uselib entries
		morelibs=y.to_list(y.uselib)
		for v in morelibs:
			if v in uselib:continue
			uselib=[v]+uselib
		if getattr(y,'export_incdirs',None):
			cpppath_st=self.env['CPPPATH_ST']
			for x in self.to_list(y.export_incdirs):
				node=y.path.find_dir(x)
				if not node:
					raise Utils.WafError('object %s: invalid folder %s in export_incdirs'%(y.target,x))
				self.env.append_unique('INC_PATHS',node)
	# finally, append all flag variables (CFLAGS_x, LIB_x, ...) for each uselib
	for x in uselib:
		for v in self.p_flag_vars:
			val=self.env[v+'_'+x]
			if val:self.env.append_value(v,val)
def apply_objdeps(self):
if not getattr(self,'add_objects',None):return
seen=[]
names=self.to_list(self. |
Nekel-Seyew/Complex-3D-Vector-Fields | dlib-18.18/python_examples/max_cost_assignment.py | Python | apache-2.0 | 2,558 | 0.001955 | #!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This simple example shows how to call dlib's optimal linear assignment
# problem solver. It is an implementation of the famous Hungarian algorithm
# and is quite fast, operating in O(N^3) time.
#
# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
# You can install dlib using the command:
# pip install dlib
#
# Alternatively, if you want to compile dlib yourself then go into the dlib
# root folder and run:
# python setup.py install
# or
# python setup.py install --yes USE_AVX_INSTRUCTIONS
# if you have a CPU that supports AVX instructions, since this makes some
# things run faster.
#
# Compiling dlib should work on any operating system so long as you have
# CMake and boost-python installed. On Ubuntu, this can be done easily by
# running the command:
# sudo apt-get install libboost-python-dev cmake
#
import dlib

# Let's imagine you need to assign N people to N jobs.  Additionally, each
# person will make your company a certain amount of money at each job, but each
# person has different skills so they are better at some jobs and worse at
# others.  You would like to find the best way to assign people to these jobs.
# In particular, you would like to maximize the amount of money the group makes
# as a whole.  This is an example of an assignment problem and is what is solved
# by the dlib.max_cost_assignment() routine.

# So in this example, let's imagine we have 3 people and 3 jobs.  We represent
# the amount of money each person will produce at each job with a cost matrix.
# Each row corresponds to a person and each column corresponds to a job.  So for
# example, below we are saying that person 0 will make $1 at job 0, $2 at job 1,
# and $6 at job 2.
cost = dlib.matrix([[1, 2, 6],
                    [5, 3, 6],
                    [4, 5, 0]])

# To find out the best assignment of people to jobs we just need to call this
# function.
assignment = dlib.max_cost_assignment(cost)

# This prints optimal assignments:  [2, 0, 1]
# which indicates that we should assign the person from the first row of the
# cost matrix to job 2, the middle row person to job 0, and the bottom row
# person to job 1.
print("Optimal assignments: {}".format(assignment))

# This prints optimal cost:  16.0
# which is correct since our optimal assignment is 6+5+5.
print("Optimal cost: {}".format(dlib.assignment_cost(cost, assignment)))
|
jtauber/graded-reader | code/vocab-coverage-arbitrary.py | Python | mit | 3,511 | 0.002848 | #!/usr/bin/env python
"""
Output a table showing what percentage of targets can be read assuming
a certain percentage coverage (columns) and number of items learnt in
arbitrary order (rows).
The first input file should consist of lines of <target> <item> separated by
whitespace.
The second input file should consist of lines of the form "learn <item>" in
order they are learnt. All other lines will be ignored.
COVERAGE and ITEM_COUNTS below are configurable.
"""
# repaired: the __future__ import below was garbled in the source
from __future__ import print_function

## configurable settings

# list of coverage ratios we want to calculate for
# 0.001 is approximately "any"
COVERAGE = [0.001, 0.50, 0.75, 0.90, 0.95, 1.00]

# list of item counts we want to display for
ITEM_COUNTS = [100, 200, 500, 1000, 2000, 5000, 8000, 12000, 16000, 20000]

## load files

import sys

TARGET_ITEM_FILENAME = sys.argv[1]
LEARNING_PROGRAMME_FILENAME = sys.argv[2]

# target_item_list: list of (target, item) tuples to save us loading the file
# twice
target_item_list = []
for line in open(TARGET_ITEM_FILENAME):
    target_item_list.append(line.strip().split())

# items: map of item to learning order, as indicated by loaded learning
# programme
items = {}
count = 1
for line in open(LEARNING_PROGRAMME_FILENAME):
    if line.startswith("learn"):
        item = line.strip().split()[1]
        items[item] = count
        count += 1

## build target info

# targets - map of target to list of learning order of items in that target
from collections import defaultdict
targets = defaultdict(list)
for target, item in target_item_list:
    targets[target].append(items[item])

# sort the list of item learning orders
for target in targets:
    targets[target] = sorted(targets[target])

# so now if targets[X] = [5, 5, 20, 50],
# it means that target X consists of the 5th learnt item (twice), the 20th
# learnt and the 50th learnt it also means if you want to read 50% of this
# target (i.e. 2/4) you need to know up to the 5th learnt word and if you want
# to read 75% of this target (i.e. 3/4) you need to know up to the 20th most
# learnt word
## math helper functions
import math


def ceiling(num):
    """Return the smallest integer >= num.

    A plain ``def`` instead of the original ``lambda`` assignment (PEP 8
    E731); the int() wrapper keeps Python 2 compatibility, where math.ceil
    returns a float.
    """
    return int(math.ceil(num))


def percentage(n, d):
    """Return 100*n/d truncated (not rounded) to one decimal place."""
    return int(1000.0 * n / d) / 10.0
## calculate what's needed for each target
# needed - maps a given coverage ratio to a list which, for each target, gives
# the last learnt word number necessary to reach that coverage ratio for that
# target
needed = {}
for coverage in COVERAGE:
    needed[coverage] = [
        targets[target][
            # index of the item that pushes this target over the coverage ratio
            ceiling(coverage * len(targets[target])) - 1
        ] for target in targets
    ]

# in other words if needed[0.50] = [16, 44, 182, 34, 21, 36, 8, 48, 21, 26],
# that means that to achieve 50% coverage, the first target needs up to the
# 16th learnt word, the second target needs up to the 44th learnt word, the
# third target needs up to the 182nd learnt word, and so on...

## display table

# header
for coverage in COVERAGE:
    print("\t{:f}%".format(100 * coverage), end=" ")
print()

# one row per item count, one column per coverage ratio
for item_count in ITEM_COUNTS:
    print(item_count, end=" ")
    for coverage in COVERAGE:
        # how many targets require less than or equal to item_count to reach
        # the given coverage?
        num = len([freq for freq in needed[coverage] if freq <= item_count])
        print("\t{:f}%".format(percentage(num, len(targets))), end=" ")
    print()
|
jigargandhi/UdemyMachineLearning | Machine Learning A-Z Template Folder/Part 2 - Regression/Section 4 - Simple Linear Regression/j_data_preprocessing_template.py | Python | mit | 72 | 0.013889 | import numpy as np
import pandas as pd
# repaired: this import was garbled in the source ("import matplot | lib.pyplot")
import matplotlib.pyplot as plt
|
openstack/rally | tests/unit/plugins/task/sla/test_iteration_time.py | Python | apache-2.0 | 3,060 | 0 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from rally.plugins.task.sla import iteration_time
from rally.task import sla
from tests.unit import test
@ddt.ddt
class IterationTimeTestCase(test.TestCase):
    """Unit tests for the max_seconds_per_iteration SLA plugin."""

    @ddt.data((1, True), (1000, True), (0, False))
    @ddt.unpack
    def test_validate(self, config, valid):
        results = sla.SLA.validate(
            "max_seconds_per_iteration", None, None, config)
        if valid:
            self.assertEqual([], results)
        else:
            self.assertEqual(1, len(results))

    def test_result(self):
        sla1 = iteration_time.IterationTime(42)
        sla2 = iteration_time.IterationTime(3.62)
        for sla_inst in [sla1, sla2]:
            # repaired: this call was garbled in the source ("add | _iteration")
            sla_inst.add_iteration({"duration": 3.14})
            sla_inst.add_iteration({"duration": 6.28})
        self.assertTrue(sla1.result()["success"])   # 42 > 6.28
        self.assertFalse(sla2.result()["success"])  # 3.62 < 6.28
        self.assertEqual("Passed", sla1.status())
        # repaired: this line was garbled in the source ("sla2 | .status()")
        self.assertEqual("Failed", sla2.status())

    def test_result_no_iterations(self):
        sla_inst = iteration_time.IterationTime(42)
        self.assertTrue(sla_inst.result()["success"])

    def test_add_iteration(self):
        sla_inst = iteration_time.IterationTime(4.0)
        self.assertTrue(sla_inst.add_iteration({"duration": 3.14}))
        self.assertTrue(sla_inst.add_iteration({"duration": 2.0}))
        self.assertTrue(sla_inst.add_iteration({"duration": 3.99}))
        self.assertFalse(sla_inst.add_iteration({"duration": 4.5}))
        # once failed, the SLA stays failed even for durations under the limit
        self.assertFalse(sla_inst.add_iteration({"duration": 3.8}))

    @ddt.data([[1.0, 2.0, 1.5, 4.3],
               [2.1, 3.4, 1.2, 6.3, 7.2, 7.0, 1.],
               [1.1, 1.1, 2.2, 2.2, 3.3, 4.3]])
    def test_merge(self, durations):
        # merging several partial SLAs must equal one SLA fed all durations
        single_sla = iteration_time.IterationTime(4.0)
        for dd in durations:
            for d in dd:
                single_sla.add_iteration({"duration": d})
        slas = [iteration_time.IterationTime(4.0) for _ in durations]
        for idx, sla_inst in enumerate(slas):
            for duration in durations[idx]:
                sla_inst.add_iteration({"duration": duration})
        merged_sla = slas[0]
        for sla_inst in slas[1:]:
            merged_sla.merge(sla_inst)
        self.assertEqual(single_sla.success, merged_sla.success)
        self.assertEqual(single_sla.max_iteration_time,
                         merged_sla.max_iteration_time)
|
ksmit799/Toontown-Source | toontown/hood/MailboxInteractiveProp.py | Python | mit | 10,805 | 0.00435 | from direct.actor import Actor
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import Sequence, Func
from toontown.hood import InteractiveAnimatedProp
from toontown.hood import GenericAnimatedProp
from toontown.toonbase import ToontownGlobals, ToontownBattleGlobals, TTLocalizer
class MailboxInteractiveProp(InteractiveAnimatedProp.InteractiveAnimatedProp):
notify = DirectNotifyGlobal.directNotify.newCategory('MailboxInteractiveProp')
BattleCheerText = TTLocalizer.InteractivePropTrackBonusTerms[ToontownBattleGlobals.THROW_TRACK]
ZoneToIdles = {ToontownGlobals.ToontownCentral: (('tt_a_ara_ttc_mailbox_idle0',
3,
10,
'tt_a_ara_ttc_mailbox_idle0settle',
3,
10),
('tt_a_ara_ttc_mailbox_idleTake2',
1,
1,
None,
3,
10),
('tt_a_ara_ttc_mailbox_idleLook1',
1,
1,
None,
3,
10),
('tt_a_ara_ttc_mailbox_idleAwesome3',
1,
1,
None,
3,
10)),
ToontownGlobals.DonaldsDock: (('tt_a_ara_dod_mailbox_idle0',
3,
10,
'tt_a_ara_dod_mailbox_idle0settle',
3,
10),
('tt_a_ara_dod_mailbox_idle2',
1,
1,
None,
3,
10),
('tt_a_ara_dod_mailbox_idle1',
1,
1,
None,
3,
10),
('tt_a_ara_dod_mailbox_idleAwesome3',
1,
1,
None,
3,
10)),
ToontownGlobals.DaisyGardens: (('tt_a_ara_dga_mailbox_idle0',
3,
10,
'tt_a_ara_dga_mailbox_idle0settle',
3,
10),
('tt_a_ara_dga_mailbox_idleTake1',
1,
1,
None,
3,
10),
('tt_a_ara_dga_mailbox_idleLook2',
1,
1,
None,
3,
10),
('tt_a_ara_dga_mailbox_idleAwesome3',
1,
1,
None,
3,
10)),
ToontownGlobals.MinniesMelodyland: (('tt_a_ara_mml_mailbox_idle0',
3,
10,
'tt_a_ara_mml_mailbox_idle0settle',
3,
10),
('tt_a_ara_mml_mailbox_idleTake1',
1,
1,
None,
3,
| 10),
('tt_a_ara_mml_mailbox_idleLook2',
1,
1,
| None,
3,
10),
('tt_a_ara_mml_mailbox_idleAwesome3',
1,
1,
None,
3,
10)),
ToontownGlobals.TheBrrrgh: (('tt_a_ara_tbr_mailbox_idleShiver1',
1,
1,
None,
3,
10),
('tt_a_ara_tbr_mailbox_idleSneeze2',
1,
1,
None,
3,
10),
('tt_a_ara_tbr_mailbox_idleSpin0',
1,
1,
None,
3,
10),
('tt_a_ara_tbr_mailbox_idleAwesome3',
1,
1,
None,
3,
10)),
ToontownGlobals.DonaldsDreamland: (('tt_a_ara_ddl_mailbox_idleSleep0',
3,
10,
None,
0,
0),
('tt_a_ara_ddl_mailbox_idleShake2',
1,
1,
None,
0,
0),
('tt_a_ara_ddl_mailbox_idleSnore1',
1,
1,
None,
0,
0),
('tt_a_ara_ddl_mailbox_idleAwesome3',
1,
1,
None,
0,
0))}
ZoneToIdleIntoFightAnims = {ToontownGlobals.ToontownCentral: 'tt_a_ara_ttc_mailbox_idleIntoFight',
ToontownGlobals.DonaldsDock: 'tt_a_ara_dod_mailbox_idleIntoFight',
ToontownGlobals.DaisyGardens: 'tt_a_ara_dga_mailbox_idleIntoFight',
ToontownGlobals.MinniesMelodyland: 'tt_a_ara_mml_mailbox_idleIntoFight',
ToontownGlobals.TheBrrrgh: 'tt_a_ara_tbr_mailbox_idleIntoFight',
ToontownGlobals.DonaldsDreamland: 'tt_a_ara_ddl_mailbox_idleIntoFight'}
ZoneToVictoryAnims = {ToontownGlobals.ToontownCentral: 'tt_a_ara_ttc_mailbox_victoryDance',
ToontownGlobals.DonaldsDock: 'tt_a_ara_dod_mailbox_victoryDance',
|
jawilson/home-assistant | homeassistant/components/aftership/const.py | Python | apache-2.0 | 1,116 | 0.000896 | """Constants for the Aftership integration."""
from __future__ import annotations

from datetime import timedelta
from typing import Final

import voluptuous as vol

import homeassistant.helpers.config_validation as cv

DOMAIN: Final = "aftership"
ATTRIBUTION: Final = "Information provided by AfterShip"
ATTR_TRACKINGS: Final = "trackings"

BASE: Final = "https://track.aftership.com/"

CONF_SLUG: Final = "slug"
CONF_TITLE: Final = "title"
CONF_TRACKING_NUMBER: Final = "tracking_number"

DEFAULT_NAME: Final = "aftership"
UPDATE_TOPIC: Final = f"{DOMAIN}_update"

ICON: Final = "mdi:package-variant-closed"

MIN_TIME_BETWEEN_UPDATES: Final = timedelta(minutes=15)

SERVICE_ADD_TRACKING: Final = "add_tracking"
SERVICE_REMOVE_TRACKING: Final = "remove_tracking"

# Schema for the add_tracking service call.
# repaired: the vol.Optional(CONF_SLUG) line was garbled in the source
ADD_TRACKING_SERVICE_SCHEMA: Final = vol.Schema(
    {
        vol.Required(CONF_TRACKING_NUMBER): cv.string,
        vol.Optional(CONF_TITLE): cv.string,
        vol.Optional(CONF_SLUG): cv.string,
    }
)

# Schema for the remove_tracking service call.
# repaired: the vol.Required(CONF_SLUG) entry was garbled in the source
REMOVE_TRACKING_SERVICE_SCHEMA: Final = vol.Schema(
    {vol.Required(CONF_SLUG): cv.string, vol.Required(CONF_TRACKING_NUMBER): cv.string}
)
|
cor14095/probases1 | Back-end/csvHandler.py | Python | mit | 41,975 | 0.032281 | import csv
import json
import random
import os
import re
import itertools
import shutil
currentDatabase = ''
def showDatabases():
    """Return the names of all databases (the subdirectories of ./db)."""
    _, directories, _ = next(os.walk('./db'))
    return directories
def createDatabase(databaseName):
    """Create ./db/<name> with an empty metadata file; return a status message."""
    directory = './db/' + databaseName
    if os.path.exists(directory):
        return 'Database with name: "' + databaseName + '" already exists.'
    os.makedirs(directory)
    # seed the metadata file with an empty table registry
    metadataFile = {'tables': {}}
    with open('./db/' + databaseName + '/' + databaseName + 'Metadata.json', 'w') as output:
        json.dump(metadataFile, output)
    return "Database '" + databaseName + "' created succesfully."
def dropDatabase(databaseName):
    """Delete ./db/<name> and all of its contents; return a status message."""
    target = './db/' + databaseName
    if os.path.exists(target):
        shutil.rmtree(target)
        return "Database " + databaseName + " succesfully deleted."
    return "Database with name: " + databaseName + " doesnt exists."
def useDatabase(databaseName):
    """Select *databaseName* as the module-level current database if it exists."""
    global currentDatabase
    if not os.path.exists('./db/' + databaseName):
        return 'Database with name: "' + databaseName + '" doesnt exists.'
    currentDatabase = databaseName
    return "Changed to database: "
def showTables(currentDatabase):
    """Return the table names recorded in the database's metadata file.

    Uses a context manager so the metadata file handle is closed promptly;
    the original left it open until garbage collection.
    """
    path = './db/' + currentDatabase + '/' + currentDatabase + 'Metadata.json'
    with open(path, 'r') as metadata_file:
        metadata = json.load(metadata_file)
    return metadata['tables'].keys()
def changeDatabaseName(oldName, newName):
    """Rename a database directory and its metadata file; return a status message."""
    if newName in showDatabases():
        return "Error, a database with name " + newName + " already exists."
    os.rename('./db/' + oldName, './db/' + newName)
    # the metadata file is named after the database, so it must be renamed too
    os.rename('./db/' + newName + '/' + oldName + 'Metadata.json',
              './db/' + newName + '/' + newName + 'Metadata.json')
    return "Database: " + oldName + " changed name to: " + newName
# Column key kinds stored in each table's metadata 'key' field.
NO_KEY = 0
FOREIGN_KEY = 1
PRIMARY_KEY = 2

# tableSchemaExample = {'tableName':'table1', 'columns':[{'columnName':'column1', 'key':1, 'constraintTable':'table2', 'constraintColumn':'column1,'type':'int'},{'columnName':'column2', 'key':1, 'type':'date'}]}
def createTable(tableSchema, currentDatabase):
    """Create a table (data file, hash file, metadata entry) in the database.

    Returns a status/error string; the table must have exactly one primary
    key at most and at least one key of either kind.
    """
    if not os.path.isfile('./db/'+currentDatabase+'/'+tableSchema['tableName']+'.json'):
        #Check if table contains at least one type of key
        pkSum = 0
        fkSum = 0
        for column in tableSchema['columns']:
            if column['key'] == PRIMARY_KEY:
                #Sum to PK counter
                pkSum += 1
            elif column['key'] == FOREIGN_KEY:
                #Sum to FK counter
                fkSum += 1
                #Check if the constraint target table exists
                if not os.path.isfile(r'./db/'+currentDatabase+'/'+column['constraintTable']+'.json'):
                    return ("Error, constraint target table: "+column['constraintTable']+" doesnt exists in database: "+currentDatabase)
                    # NOTE(review): unreachable — the return above always fires first
                    return False
        #Table cannot have more than one primary key
        if(pkSum)>1:
            return ("Error, table cannot have more than one primary key.")
            # NOTE(review): unreachable
            return False
        #Table has to have at least one type of key
        if((pkSum+fkSum) < 1):
            return ("Error, table needs at least one type of key.")
            # NOTE(review): unreachable
            return False
        #Create file
        # NOTE(review): these two handles are never closed explicitly —
        # consider 'with open(...)' as done for the metadata file below.
        file = open('./db/'+currentDatabase+'/'+tableSchema['tableName']+'.json', 'w')
        file.write('{}')
        #Create hash file
        hashFile = open('./db/'+currentDatabase+'/'+tableSchema['tableName']+'.hash', 'w')
        initialHash = {}
        for column in tableSchema['columns']:
            initialHash[column['columnName']] = {}
        json.dump(initialHash, hashFile)
        #Insert info in metadata file
        input = open('./db/'+currentDatabase+'/'+currentDatabase+'Metadata.json', 'r')
        metadata = json.load(input)
        tempTables = metadata['tables']
        tempTables[tableSchema['tableName']] = {}
        tempTables[tableSchema['tableName']]['columns'] = tableSchema['columns']
        tempTables[tableSchema['tableName']]['lastIndex'] = -1
        tempTables[tableSchema['tableName']]['deletedRows'] = 0
        metadata['tables'] = tempTables
        #Write info in metadata file
        with open('./db/'+currentDatabase+'/'+currentDatabase+'Metadata.json', 'w') as output:
            json.dump(metadata, output)
        return ('Table succesfully created')
    else:
        return ('Table with name: '+tableSchema['tableName']+' already exists.')
def getType(columnName, tableName, metadata):
    """Return the declared type of *columnName* in *tableName*, or False if absent."""
    for columnInfo in metadata['tables'][tableName]['columns']:
        if columnInfo['columnName'] == columnName:
            return columnInfo['type']
    return False
def checkTypes(insertInfo, metadata):
    """Validate that every value in *insertInfo* matches its column's declared type.

    Returns True when all values match; prints "TYPE ERROR" and returns False
    on the first mismatch.  type() identity checks are kept on purpose: they
    reject bool for 'int' columns and int for 'float' columns, like the
    original.  (Two lines of the original were garbled by the data dump and
    are restored here.)
    """
    # compile the date pattern once instead of on every 'date' column
    dateExpresion = re.compile(r'^\d\d-\d\d-\d\d\d\d$')
    for i in range(len(insertInfo['columns'])):
        columnType = getType(insertInfo['columns'][i], insertInfo['tableName'], metadata)
        value = insertInfo['values'][i]
        if columnType == 'int':
            if type(value) != type(1):
                print("TYPE ERROR")
                return False
        if columnType == 'float':
            if type(value) != type(1.0):
                print("TYPE ERROR")
                return False
        if columnType == 'date':
            if not dateExpresion.match(value):
                print("TYPE ERROR")
                return False
        if columnType == 'string':
            if type(value) != type("a"):
                print("TYPE ERROR")
                return False
    return True
def checkConstraints(insertInfo, metadata, tableHash, currentDatabase):
    """Check FK existence and PK uniqueness for an insert; True or error string."""
    #Traverse every column in the table
    for column in metadata['tables'][insertInfo['tableName']]['columns']:
        value = insertInfo['values'][insertInfo['columns'].index(column['columnName'])]
        #If column is foreign key then check if it already exists in the respective table
        if column['key'] == FOREIGN_KEY:
            try:
                if value == "NULL":
                    #It cannot be NULL
                    return ("Error, column: "+column['columnName']+" cannot be NULL, as it is a foreign key.")
                else:
                    #Check if it exists in the respective table
                    #Open table file
                    # NOTE(review): handle never closed — consider 'with open(...)'.
                    constraintTableFile = open(r'./db/'+currentDatabase+'/'+column['constraintTable']+'.hash', 'r')
                    constraintTable = json.load(constraintTableFile)
                    #If it isnt
                    if not (value in constraintTable[column['constraintColumn']]):
                        return ("Error, "+str(value)+" in column "+column['columnName']+" doesnt exist in constraint table "+column['constraintTable']+" yet.")
                        # NOTE(review): unreachable
                        return False
            except:
                # NOTE(review): bare except also swallows I/O and JSON errors,
                # reporting them all as a missing-column error.
                #It has to come in the insertion statement
                return ("Error, column: "+column['columnName']+" is required, as it is a foreign key.")
                # NOTE(review): unreachable
                return False
        #If column is primary key then check if its unique in the respective table
        elif column['key'] == PRIMARY_KEY:
            # print("Value: "+str(value)+" column "+column['columnName'])
            if str(value) in tableHash[column['columnName']]:
                return ("Error, primary key "+str(value)+" already exists in column: "+column['columnName'])
                # NOTE(review): unreachable
                return False
    #If all the columns are good then return True
    return True
# insertInfoExample = {'tableName': 'table1', 'columns':['id','nombre','edad'], 'values':[1, 'Perry', 20]}
def insertRecord(insertInfo, currentDatabase):
#Perform parity check
if(len(insertInfo['columns']) != len(insertInfo['values'])):
return ('Error, values quantity doesnt match columns quantity')
return False
#Open metadata file
metadataFile = open('./db/'+currentDatabase+'/'+currentDatabase+'Metadata.json', 'r')
metadata = json.load(metadataFile)
#Check if table exists
if insertInfo['tableName'] not in metadata['tables']:
return ("Error, table: "+insertInfo['tableName']+" doesnt exists in database: "+currentDatabase)
return False
#Perform type checks
if(checkTypes(insertInfo, metadata) != True):
return ('Error, types dont match with the table types.')
return False
#Open hash file
tableHashFile = open('./db/'+currentDatabase+'/'+insertInfo['tableName']+'.hash', 'r')
tableHash = json.load(tableHashFile)
#Perform constraint check
constraintCheck = checkConstraints(insertInfo, metadata, tableHash, currentDatabase)
if(constraintCheck != True):
return constraintCheck
#Construct key-value pair to insert to table json file and store index in hash
resultingCSV = |
scalyr/scalyr-agent-2 | tests/image_builder/distributions/centos7/__init__.py | Python | apache-2.0 | 1,729 | 0.000578 | # Copyright 2014-2020 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the | License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
|
from scalyr_agent.__scalyr__ import get_install_root
from tests.utils.compat import Path
from tests.image_builder.distributions.fpm_package_builder import FpmPackageBuilder
from tests.utils.image_builder import AgentImageBuilder
from tests.image_builder.distributions.base import (
create_distribution_base_image_name,
create_distribution_image_name,
)
# Distribution identifier used to derive the Docker image tags below.
DISTRIBUTION_NAME = "centos7"


class CentOSBuilderBase(AgentImageBuilder):
    # Base CentOS 7 image: only the build prerequisites, no agent source.
    IMAGE_TAG = create_distribution_base_image_name(DISTRIBUTION_NAME)
    DOCKERFILE = Path(__file__).parent / "Dockerfile.base"
    # Files copied into the Docker build context.
    INCLUDE_PATHS = [
        Path(get_install_root(), "dev-requirements.txt"),
        Path(get_install_root(), "agent_build"),
    ]
class CentOSBuilder(AgentImageBuilder):
    # Full test image: layers the agent source on top of the base image.
    IMAGE_TAG = create_distribution_image_name(DISTRIBUTION_NAME)
    DOCKERFILE = Path(__file__).parent / "Dockerfile"
    # fpm (package builder) and the base image must exist before this build.
    REQUIRED_IMAGES = [FpmPackageBuilder, CentOSBuilderBase]
    REQUIRED_CHECKSUM_IMAGES = [CentOSBuilderBase]
    COPY_AGENT_SOURCE = True
    IGNORE_CACHING = True
|
shtrom/gtg | GTG/plugins/bugzilla/bugzilla.py | Python | gpl-3.0 | 4,140 | 0.000966 | # -*- coding: utf-8 -*-
# Copyright (c) 2009 - Guillaume Desmottes <gdesmott@gnome.org>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GObject
import re
import threading
import xmlrpc.client
from urllib.parse import urlparse
from .services import BugzillaServiceFactory
from .services import BugzillaServiceNotExist
from .notification import send_notification
# Raw string literals so the backslash escapes (\d, \., \?) reach the regex
# engine intact instead of triggering invalid-escape deprecation warnings.
bugIdPattern = re.compile(r'^\d+$')
bugURLPattern = re.compile(r'^(https?)://(.+)/show_bug\.cgi\?id=(\d+)$')
class GetBugInformationTask(threading.Thread):
    """Background thread that replaces a task's bug-URL title with bug details."""

    def __init__(self, task, **kwargs):
        ''' Initialize task data, where task is the GTG task object. '''
        self.task = task
        super().__init__(**kwargs)

    def parseBugUrl(self, url):
        """Split *url* into (scheme, hostname, {query-key: value})."""
        r = urlparse(url)
        # split('=', 1): only the first '=' separates key from value, so
        # query values that themselves contain '=' no longer break dict().
        queries = dict(item.split('=', 1) for item in r.query.split('&'))
        return r.scheme, r.hostname, queries

    def run(self):
        bug_url = self.task.get_title()
        # We only handle bug URL. When task's title is not a bug URL, stop
        # handling quietly.
        if bugURLPattern.match(bug_url) is None:
            return
        # repaired: this assignment was garbled in the source
        scheme, hostname, queries = self.parseBugUrl(bug_url)
        bug_id = queries.get('id', None)
        # Guard the None case: re.match(None) would raise TypeError.
        if bug_id is None or bugIdPattern.match(bug_id) is None:
            # FIXME: make some sensable action instead of returning silently.
            return
        try:
            bugzillaService = BugzillaServiceFactory.create(scheme, hostname)
        except BugzillaServiceNotExist:
            # Stop quietly when bugzilla cannot be found. Currently, I don't
            # assume that user enters a wrong hostname or just an unkown
            # bugzilla service.
            return
        try:
            bug = bugzillaService.getBug(bug_id)
        except xmlrpc.client.Fault as err:
            code = err.faultCode
            if code == 100:  # invalid bug ID
                title = 'Invalid bug ID #%s' % bug_id
            elif code == 101:  # bug ID not exist
                title = 'Bug #%s does not exist.' % bug_id
            elif code == 102:  # Access denied
                title = 'Access denied to bug %s' % bug_url
            else:  # unrecoganized error code currently
                title = err.faultString
            send_notification(bugzillaService.name, title)
        except Exception as err:
            # Exception objects have no .message attribute on Python 3; the
            # original 'err.message' raised AttributeError here.
            send_notification(bugzillaService.name, str(err))
        else:
            title = '#%s: %s' % (bug_id, bug.summary)
            GObject.idle_add(self.task.set_title, title)
            text = "%s\n\n%s" % (bug_url, bug.description)
            GObject.idle_add(self.task.set_text, text)
            tags = bugzillaService.getTags(bug)
            if tags is not None and tags:
                for tag in tags:
                    GObject.idle_add(self.task.add_tag, '@%s' % tag)
class BugzillaPlugin(object):
    """GTG plugin: expand quick-added bug URLs into detailed task entries."""

    def activate(self, plugin_api):
        self.plugin_api = plugin_api
        self.browser = plugin_api.get_browser()
        self.connect_id = self.browser.connect(
            "task-added-via-quick-add", self.task_added_cb)

    def task_added_cb(self, sender, task_id):
        # this is a gobject callback that will block the Browser.
        # decoupling with a thread. All interaction with task and tags objects
        # (anything in a Tree) must be done with gobject.idle_add (invernizzi)
        task = self.plugin_api.get_requester().get_task(task_id)
        bugTask = GetBugInformationTask(task)
        # Thread.setDaemon() is deprecated (Python 3.10+); set the attribute.
        bugTask.daemon = True
        bugTask.start()
|
samabhi/pstHealth | venv/lib/python2.7/site-packages/crispy_forms/tests/test_layout_objects.py | Python | mit | 16,205 | 0.000309 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.template import Context
from django.utils.translation import ugettext as _
from django.utils.translation import activate, deactivate
from .compatibility import get_template_from_string
from .conftest import only_bootstrap
from .forms import CheckboxesTestForm, TestForm
from crispy_forms.bootstrap import (
PrependedAppendedText, AppendedText, PrependedText, InlineRadios,
Tab, TabHolder, AccordionGroup, Accordion, Alert, InlineCheckboxes,
FieldWithButtons, StrictButton
)
from crispy_forms.helper import FormHelper
from crispy_forms.layout import (
Layout, HTML, Field, MultiWidgetField
)
from crispy_forms.utils import render_crispy_form
def test_field_with_custom_template():
    """A Field given template= must render through that custom template."""
    test_form = TestForm()
    test_form.helper = FormHelper()
    test_form.helper.layout = Layout(
        Field('email', template='custom_field_template.html')
    )
    html = render_crispy_form(test_form)
    # the custom template emits this marker heading
    assert '<h1>Special custom field</h1>' in html
def test_multiwidget_field():
    """MultiWidgetField must forward per-subwidget attrs to each subwidget."""
    template = get_template_from_string("""
        {% load crispy_forms_tags %}
        {% crispy form %}
    """)
    test_form = TestForm()
    test_form.helper = FormHelper()
    test_form.helper.layout = Layout(
        MultiWidgetField(
            'datetime_field',
            attrs=(
                {'rel': 'test_dateinput'},
                {'rel': 'test_timeinput', 'style': 'width: 30px;', 'type': "hidden"}
            )
        )
    )
    c = Context({'form': test_form})
    html = template.render(c)

    assert html.count('class="dateinput') == 1
    assert html.count('rel="test_dateinput"') == 1
    assert html.count('rel="test_timeinput"') == 1
    assert html.count('style="width: 30px;"') == 1
    # repaired: this assertion was garbled in the source
    assert html.count('type="hidden"') == 1
def test_field_type_hidden():
    """Field(type=..., data_*=...) must emit the overridden type and data attrs."""
    template = get_template_from_string("""
        {% load crispy_forms_tags %}
        {% crispy test_form %}
    """)
    test_form = TestForm()
    test_form.helper = FormHelper()
    # repaired: this assignment carried a stray "| " artifact in the source
    test_form.helper.layout = Layout(
        Field('email', type="hidden", data_test=12),
        Field('datetime_field'),
    )
    c = Context({
        'test_form': test_form,
    })
    html = template.render(c)

    # Check form parameters
    assert html.count('data-test="12"') == 1
    assert html.count('name="email"') == 1
    assert html.count('class="dateinput') == 1
    assert html.count('class="timeinput') == 1
def test_field_wrapper_class(settings):
    """wrapper_class must be appended to the pack-specific wrapper div class."""
    form = TestForm()
    form.helper = FormHelper()
    form.helper.layout = Layout(Field('email', wrapper_class="testing"))
    html = render_crispy_form(form)

    # each template pack uses a different wrapper class name
    if settings.CRISPY_TEMPLATE_PACK == 'bootstrap':
        assert html.count('class="control-group testing"') == 1
    elif settings.CRISPY_TEMPLATE_PACK == 'bootstrap3':
        assert html.count('class="form-group testing"') == 1
    elif settings.CRISPY_TEMPLATE_PACK == 'bootstrap4':
        assert html.count('class="form-group row testing"') == 1
def test_html_with_carriage_returns(settings):
    """Multiline HTML() content must keep its newlines in the rendered form."""
    test_form = TestForm()
    test_form.helper = FormHelper()
    test_form.helper.layout = Layout(
        HTML("""
            if (a==b){
                // some comment
                a+1;
                foo();
            }
        """)
    )
    html = render_crispy_form(test_form)

    # expected newline counts differ per template pack's surrounding markup
    if settings.CRISPY_TEMPLATE_PACK == 'uni_form':
        assert html.count('\n') == 23
    elif settings.CRISPY_TEMPLATE_PACK == 'bootstrap':
        assert html.count('\n') == 25
    else:
        assert html.count('\n') == 27
def test_i18n():
    """Lazy-translated strings inside HTML() must render in the active locale."""
    activate('es')
    form = TestForm()
    form.helper = FormHelper()
    form.helper.layout = Layout(
        HTML(_("Enter a valid value."))
    )
    html = render_crispy_form(form)
    # Spanish translation of the Django stock message
    assert "Introduzca un valor correcto" in html
    deactivate()
@only_bootstrap
class TestBootstrapLayoutObjects(object):
    def test_custom_django_widget(self):
        """Widgets inheriting RadioSelect/CheckboxSelectMultiple render like the originals."""
        class CustomRadioSelect(forms.RadioSelect):
            pass

        class CustomCheckboxSelectMultiple(forms.CheckboxSelectMultiple):
            pass

        # Make sure an inherited RadioSelect gets rendered as it
        form = CheckboxesTestForm()
        form.fields['inline_radios'].widget = CustomRadioSelect()
        form.helper = FormHelper()
        form.helper.layout = Layout('inline_radios')
        html = render_crispy_form(form)
        assert 'class="radio"' in html

        # Make sure an inherited CheckboxSelectMultiple gets rendered as it
        form.fields['checkboxes'].widget = CustomCheckboxSelectMultiple()
        form.helper.layout = Layout('checkboxes')
        html = render_crispy_form(form)
        assert 'class="checkbox"' in html
    def test_prepended_appended_text(self, settings):
        """Prepended/appended add-ons must render exactly once per field,
        using pack-specific markup; size css_class must land on both the
        input and the add-on span."""
        test_form = TestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            PrependedAppendedText('email', '@', 'gmail.com'),
            AppendedText('password1', '#'),
            PrependedText('password2', '$'),
        )
        html = render_crispy_form(test_form)
        # Check form parameters
        if settings.CRISPY_TEMPLATE_PACK == 'bootstrap':
            assert html.count('<span class="add-on">@</span>') == 1
            assert html.count('<span class="add-on">gmail.com</span>') == 1
            assert html.count('<span class="add-on">#</span>') == 1
            assert html.count('<span class="add-on">$</span>') == 1
        if settings.CRISPY_TEMPLATE_PACK in ['bootstrap3', 'bootstrap4']:
            assert html.count('<span class="input-group-addon">@</span>') == 1
            assert html.count(
                '<span class="input-group-addon">gmail.com</span>') == 1
            assert html.count('<span class="input-group-addon">#</span>') == 1
            assert html.count('<span class="input-group-addon">$</span>') == 1
        # Size modifier classes (input-lg / form-control-lg) are propagated.
        if settings.CRISPY_TEMPLATE_PACK == 'bootstrap3':
            test_form.helper.layout = Layout(
                PrependedAppendedText('email', '@', 'gmail.com',
                                      css_class='input-lg'), )
            html = render_crispy_form(test_form)
            assert '<input class="input-lg' in html
            assert '<span class="input-group-addon input-lg' in html
        if settings.CRISPY_TEMPLATE_PACK == 'bootstrap4':
            test_form.helper.layout = Layout(
                PrependedAppendedText('email', '@', 'gmail.com',
                                      css_class='form-control-lg'), )
            html = render_crispy_form(test_form)
            assert '<input class="form-control-lg' in html
            assert '<span class="input-group-addon' in html
def test_inline_radios(self, settings):
test_form = CheckboxesTestForm()
test_form.helper = FormHelper()
test_form.helper.layout = Layout(
InlineRadios('inline_radios')
)
html = render_crispy_form(test_form)
if settings.CRISPY_TEMPLATE_PACK == 'bootstrap':
assert html.count('radio inline"') == 2
elif settings.CRISPY_TEMPLATE_PACK in ['bootstrap3', 'bootstrap4']:
assert html.count('radio-inline"') == 2
def test_accordion_and_accordiongroup(self, settings):
test_form = TestForm()
test_form.helper = FormHelper()
test_form.helper.layout = Layout(
Accordion(
AccordionGroup(
'one',
'first_name'
),
AccordionGroup(
'two',
'password1',
'password2'
)
)
)
html = render_crispy_form(test_form)
if settings.CRISPY_TEMPLATE_PACK == 'bootstrap':
assert html.count('<div class="accordion"') == 1
assert html.count('<div class="accordion-group">') == 2
assert html.count('<div class="accordion-heading">') == 2
else:
assert html.count('<div class="panel panel-default"') == 2
assert html.count('<div class="panel- |
simo5/custodia | custodia/message/simple.py | Python | gpl-3.0 | 1,145 | 0 | # Copyright (C) 2015 Custodia Project Contributors - see LICENSE file
import json
from six import string_types
from custodia.message.common import InvalidMessage
from custodia.message.common import MessageHandler
class SimpleKey(MessageHandler):
    """Handles 'simple' messages"""

    def parse(self, msg, name):
        """Parses a simple message

        :param msg: the json-decoded value (or None on plain requests)
        :param name: the name associated with this message; stored for
            use by :meth:`reply`

        :raises InvalidMessage: if the message cannot be parsed or validated
        """
        # On requests we imply 'simple' if there is no input message
        if msg is None:
            return
        if not isinstance(msg, string_types):
            raise InvalidMessage("The 'value' attribute is not a string")
        self.name = name
        self.payload = msg

    def reply(self, output):
        # Wrap *output* in a json 'simple' envelope; directory listings
        # (names ending in '/') are returned unchanged.
        if self.name.endswith('/'):
            # directory listings are pass-through with simple messages
            return output
        return json.dumps({'type': 'simple', 'value': output},
                          separators=(',', ':'))
|
cemc/cscircles-wp-content | lesson_files/lesson9/if.py | Python | gpl-3.0 | 225 | 0 | x = int(input())
y = int(input())
print('In this test case x =', x, 'and y =', y)
# Remember the larger of the two input values.
if x >= y:
    print('(The maximum is x)')
    theMax = x
else:
    print('(The maximum is y)')
    theMax = y
print('The maximum is', theMax)
oneman/xmms2-oneman-old | wafadmin/Tools/vala.py | Python | lgpl-2.1 | 8,888 | 0.02644 | #!/usr/bin/env python
# encoding: utf-8
# Ali Sabil, 2007
import os.path, shutil
import Task, Runner, Utils, Logs, Build, Node
from TaskGen import extension, after, before
EXT_VALA = ['.vala', '.gs']
class valac_task(Task.Task):
	"""Waf task that compiles .vala/.gs sources to C with valac.

	Declared to run before the C/C++ compilation tasks that consume
	the generated .c files.
	"""
	vars = ("VALAC", "VALAC_VERSION", "VALAFLAGS")
	before = ("cc", "cxx")

	def run(self):
		"""Assemble and execute the valac command line for all inputs."""
		env = self.env
		inputs = [a.srcpath(env) for a in self.inputs]
		valac = env['VALAC']
		vala_flags = env.get_flat('VALAFLAGS')
		top_src = self.generator.bld.srcnode.abspath()
		top_bld = self.generator.bld.srcnode.abspath(env)
		# --quiet only exists in valac newer than 0.1.6
		if env['VALAC_VERSION'] > (0, 1, 6):
			cmd = [valac, '-C', '--quiet', vala_flags]
		else:
			cmd = [valac, '-C', vala_flags]
		if self.threading:
			cmd.append('--thread')
		if self.target_glib:
			cmd.append('--target-glib=%s' % self.target_glib)
		features = self.generator.features
		# Libraries additionally emit a header/vapi (and optionally a gir).
		if 'cshlib' in features or 'cstaticlib' in features:
			output_dir = self.outputs[0].bld_dir(env)
			cmd.append('--library ' + self.target)
			if env['VALAC_VERSION'] >= (0, 7, 0):
				cmd.append('--header ' + os.path.join(output_dir, self.target + '.h'))
				self.outputs.append(self.generator.path.find_or_declare(self.target + '.h'))
			cmd.append('--basedir ' + top_src)
			cmd.append('-d ' + top_bld)
			if env['VALAC_VERSION'] > (0, 7, 2) and hasattr(self, 'gir'):
				cmd.append('--gir=%s.gir' % self.gir)
		else:
			output_dir = self.outputs[0].bld_dir(env)
			cmd.append('-d %s' % output_dir)
		for vapi_dir in self.vapi_dirs:
			cmd.append('--vapidir=%s' % vapi_dir)
		for package in self.packages:
			cmd.append('--pkg %s' % package)
		for package in self.packages_private:
			cmd.append('--pkg %s' % package)
		cmd.append(" ".join(inputs))
		result = self.generator.bld.exec_command(" ".join(cmd))
		if not 'cprogram' in features:
			# generate the .deps file
			if self.packages:
				filename = os.path.join(self.generator.path.abspath(env), "%s.deps" % self.target)
				deps = open(filename, 'w')
				for package in self.packages:
					deps.write(package + '\n')
				deps.close()
			# handle vala 0.1.6 who doesn't honor --directory for the generated .vapi
			self._fix_output("../%s.vapi" % self.target)
			# handle vala >= 0.1.7 who has a weid definition for --directory
			self._fix_output("%s.vapi" % self.target)
			# handle vala >= 0.2.0 who doesn't honor --directory for the generated .gidl
			self._fix_output("%s.gidl" % self.target)
			# handle vala >= 0.3.6 who doesn't honor --directory for the generated .gir
			self._fix_output("%s.gir" % self.target)
			if hasattr(self, 'gir'):
				self._fix_output("%s.gir" % self.gir)
		first = None
		for node in self.outputs:
			if not first:
				first = node
			else:
				if first.parent.id != node.parent.id:
					# issue #483
					if env['VALAC_VERSION'] < (0, 7, 0):
						shutil.move(first.parent.abspath(self.env) + os.sep + node.name, node.abspath(self.env))
		return result

	def install(self):
		"""Install generated headers plus vapi/deps and gir files."""
		bld = self.generator.bld
		features = self.generator.features
		if self.attr("install_path") and ("cshlib" in features or "cstaticlib" in features):
			headers_list = [o for o in self.outputs if o.suffix() == ".h"]
			vapi_list = [o for o in self.outputs if (o.suffix() in (".vapi", ".deps"))]
			gir_list = [o for o in self.outputs if o.suffix() == ".gir"]
			for header in headers_list:
				top_src = self.generator.bld.srcnode
				package = self.env['PACKAGE']
				try:
					api_version = Utils.g_module.API_VERSION
				except AttributeError:
					# No explicit API_VERSION: derive it from VERSION
					# (0.x stays 0.x, anything else becomes MAJOR.0).
					version = Utils.g_module.VERSION.split(".")
					if version[0] == "0":
						api_version = "0." + version[1]
					else:
						api_version = version[0] + ".0"
				install_path = '${INCLUDEDIR}/%s-%s/%s' % (package, api_version, header.relpath_gen(top_src))
				bld.install_as(install_path, header, self.env)
			bld.install_files('${DATAROOTDIR}/vala/vapi', vapi_list, self.env)
			bld.install_files('${DATAROOTDIR}/gir-1.0', gir_list, self.env)

	def _fix_output(self, output):
		"""Move a file valac wrote outside the output directory back into
		it; missing files are ignored (best-effort by design)."""
		top_bld = self.generator.bld.srcnode.abspath(self.env)
		try:
			src = os.path.join(top_bld, output)
			dst = self.generator.path.abspath (self.env)
			shutil.move(src, dst)
		except:
			pass
@extension(EXT_VALA)
def vala_file(self, node):
	"""Register a .vala/.gs source with this generator's shared valac task.

	All vala files of one task generator are compiled by a single
	valac_task; the first call creates it and wires up packages, vapi
	directories and the expected C/header/vapi/gir output nodes.
	"""
	valatask = getattr(self, "valatask", None)
	# there is only one vala task and it compiles all vala files .. :-/
	if not valatask:
		valatask = self.create_task('valac')
		self.valatask = valatask
		self.includes = Utils.to_list(getattr(self, 'includes', []))
		valatask.packages = []
		valatask.packages_private = Utils.to_list(getattr(self, 'packages_private', []))
		valatask.vapi_dirs = []
		valatask.target = self.target
		valatask.threading = False
		valatask.install_path = self.install_path
		valatask.target_glib = None
		packages = Utils.to_list(getattr(self, 'packages', []))
		vapi_dirs = Utils.to_list(getattr(self, 'vapi_dirs', []))
		includes = []
		# Resolve local library dependencies: follow uselib_local
		# transitively, collecting packages, vapi dirs and include dirs.
		if hasattr(self, 'uselib_local'):
			local_packages = Utils.to_list(self.uselib_local)
			seen = []
			while len(local_packages) > 0:
				package = local_packages.pop()
				if package in seen:
					continue
				seen.append(package)
				# check if the package exists
				package_obj = self.name_to_obj(package)
				if not package_obj:
					raise Utils.WafError("object '%s' was not found in uselib_local (required by '%s')" % (package, self.name))
				package_name = package_obj.target
				package_node = package_obj.path
				package_dir = package_node.relpath_gen(self.path)
				for task in package_obj.tasks:
					for output in task.outputs:
						if output.name == package_name + ".vapi":
							valatask.set_run_after(task)
							if package_name not in packages:
								packages.append(package_name)
							if package_dir not in vapi_dirs:
								vapi_dirs.append(package_dir)
							if package_dir not in includes:
								includes.append(package_dir)
				if hasattr(package_obj, 'uselib_local'):
					lst = self.to_list(package_obj.uselib_local)
					lst.reverse()
					local_packages = [pkg for pkg in lst if pkg not in seen] + local_packages
		valatask.packages = packages
		for vapi_dir in vapi_dirs:
			try:
				valatask.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath())
				valatask.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath(self.env))
			except AttributeError:
				Logs.warn("Unable to locate Vala API directory: '%s'" % vapi_dir)
		self.includes.append(node.bld.srcnode.abspath())
		self.includes.append(node.bld.srcnode.abspath(self.env))
		for include in includes:
			try:
				self.includes.append(self.path.find_dir(include).abspath())
				self.includes.append(self.path.find_dir(include).abspath(self.env))
			except AttributeError:
				Logs.warn("Unable to locate include directory: '%s'" % include)
		if hasattr(self, 'threading'):
			valatask.threading = self.threading
			self.uselib = self.to_list(self.uselib)
			if not 'GTHREAD' in self.uselib:
				self.uselib.append('GTHREAD')
		if hasattr(self, 'target_glib'):
			valatask.target_glib = self.target_glib
		if hasattr(self, 'gir'):
			valatask.gir = self.gir
	env = valatask.env
	output_nodes = []
	c_node = node.change_ext('.c')
	output_nodes.append(c_node)
	self.allnodes.append(c_node)
	# Header/vapi/gir/gidl outputs depend on the installed valac version.
	if env['VALAC_VERSION'] < (0, 7, 0):
		output_nodes.append(node.change_ext('.h'))
	else:
		if not 'cprogram' in self.features:
			output_nodes.append(self.path.find_or_declare('%s.h' % self.target))
	if not 'cprogram' in self.features:
		output_nodes.append(self.path.find_or_declare('%s.vapi' % self.target))
		if env['VALAC_VERSION'] > (0, 7, 2):
			if hasattr(self, 'gir'):
				output_nodes.append(self.path.find_or_declare('%s.gir' % self.gir))
		elif env['VALAC_VERSION'] > (0, 3, 5):
			output_nodes.append(self.path.find_or_declare('%s.gir' % self.target))
		elif env['VALAC_VERSION'] > (0, 1, 7):
			output_nodes.append(self.path.find_or_declare('%s.gidl' % self.target))
		if valatask.packages:
			output_nodes.append(self.path.find_or_declare('%s.deps' % self.target))
	valatask.inputs.append(node)
	valatask.outputs.extend(output_nodes)
def detect(conf):
min_version = (0, 1, 6)
min_version_str = "%d.%d.%d" % min_version
valac = conf.find_program('valac', var='VALAC', mandatory=True)
if no |
uaprom-summer-2015/Meowth | project/utils.py | Python | bsd-3-clause | 368 | 0 | import re
from project.models import PageChunk
# Pre-compiled pattern extracting latitude, longitude and zoom from a
# Google-Maps-style URL fragment such as "@50.45,30.52,14z".
contacts_map_coordinates = \
    re.compile(
        r".*"
        r"@(?P<latitude>\-?[\d\.]+),"
        r"(?P<longitude>\-?[\d\.]+),"
        r"(?P<zoom>[\d\.]+)z"
        r".*"
    )
def inject_pagechunks():
    """Template-context helper exposing every PageChunk as {name: text}."""
    all_chunks = {}
    for chunk in PageChunk.query.all():
        all_chunks[chunk.name] = chunk.text
    return {"pagechunks": all_chunks}
|
saebyn/django-classifieds | classifieds/__init__.py | Python | bsd-3-clause | 17 | 0 | """
$Id$
"""
|
mu2019/heysqlware | demo/tables/atttb.py | Python | mit | 1,087 | 0.053628 | #!/usr/bin/env python
#!coding=utf-8
from sqlalchemy.ext.declarative import declarative | _base
from sqlalchemy import Table,Column,Integer,String,Numeric,MetaData,DateTime,Date
from ._base import BaseInit,Base
'''
員工刷卡原始數據表
'''
class ATTTB(BaseInit,Base):
    """員工刷卡原始數據表 -- raw employee time-clock swipe records."""
    __tablename__='ATTTB'
    TB001=Column(String(20),nullable=False,primary_key=True,doc='員工編號')
    TB002=Column(DateTime,nullable=False,primary_key=True,doc='打卡時間')
    TB003=Column(Date,nullable=False,doc='出勤日期')
    TB004=Column(String(30),default='',doc='員工姓名')
    TB005=Column(String(10),default='',doc='員工部門編號')
    # NOTE(review): TB006's doc duplicates TB005 ('員工部門編號');
    # given TB007/TB008 pair 編號/名稱, this is likely meant to be
    # '員工部門名稱' -- confirm before changing the metadata.
    TB006=Column(String(30),default='',doc='員工部門編號')
    TB007=Column(String(10),default='',doc='員工職務編號')
    TB008=Column(String(30),default='',doc='員工職務名稱')
    TB009=Column(String(10),default='',doc='卡鍾代號')
    TB010=Column(String(30),default='',doc='卡鍾名稱')
    TB011=Column(String(1),default='',doc='打卡上下班識別')
    TB012=Column(String(1),default='',doc='出勤狀態')
|
charlieRode/network_tools | test_socket_server1.py | Python | mit | 573 | 0.013962 | #!/usr/bin/env python
import socket_server, pytest, socket
# Address the server under test is expected to listen on.
address= ('127.0.0.1', 50000)
# Plain TCP client socket shared by the tests below.
tester_client= socket.socket(
    socket.AF_INET,
    socket.SOCK_STREAM,
    socket.IPPROTO_IP)
def test_bad_request_type():
    """Stub for the unsupported-method case: the request is built but
    never sent (the check is not implemented yet)."""
    request = "HEAD www.wombatlyfe.com HTTP/1.1\r\n"
def test_good_request():
    """A well-formed GET must not elicit a 'Bad Request' response."""
    request = "GET www.wombatlyfe.com HTTP/1.1\r\n"
    tester_client.connect(address)
    tester_client.sendall(request)
    message = tester_client.recv(1032)
    # Half-close our side so the server sees end-of-request, then tidy up.
    tester_client.shutdown(socket.SHUT_WR)
    tester_client.close()
    assert 'Bad Request' not in message
kivy/python-for-android | pythonforandroid/recipes/pyaml/__init__.py | Python | mit | 321 | 0.003115 | from pythonforandroid.recipe import P | ythonRecipe
class PyamlRecipe(PythonRecipe):
    """python-for-android build recipe for the `pyaml` package."""
    version = "15.8.2"
    # {version} is interpolated by the recipe machinery at download time.
    url = 'https://pypi.python.org/packages/source/p/pyaml/pyaml-{version}.tar.gz'
    depends = ["setuptools"]
    # NOTE(review): name of the installed top-level module -- confirm
    # 'yaml' is really what this distribution provides on the target.
    site_packages_name = 'yaml'
    call_hostpython_via_targetpython = False
recipe = PyamlRecipe()
|
twonds/punjab | punjab/httpb_client.py | Python | mit | 12,339 | 0 | import hashlib
import random
import urllib.parse
import os
from twisted.internet import defer, reactor, protocol
from twisted.python import log, failure
try:
from twisted.words.xish import domish, utility
except Exception:
from twisted.xish import domish, utility # noqa
from twisted.web import http
from twisted.words.protocols.jabber import xmlstream, client
# maybe use something else to seperate from punjab
from punjab.httpb import HttpbParse
TLS_XMLNS = 'urn:ietf:params:xml:ns:xmpp-tls'
SASL_XMLNS = 'urn:ietf:params:xml:ns:xmpp-sasl'
BIND_XMLNS = 'urn:ietf:params:xml:ns:xmpp-bind'
SESSION_XMLNS = 'urn:ietf:params:xml:ns:xmpp-session'
NS_HTTP_BIND = "http://jabber.org/protocol/httpbind"
class Error(Exception):
    """Base error carrying both a stanza-level and a punjab-level message."""
    stanza_error = ''
    punjab_error = ''
    msg = ''

    def __init__(self, msg=None):
        # A truthy message overrides all three class-level defaults.
        if msg:
            self.msg = self.stanza_error = self.punjab_error = msg

    def __str__(self):
        return self.stanza_error
class RemoteConnectionFailed(Error):
    # Mirrors the BOSH 'remote-connection-failed' terminal condition.
    msg = 'remote-connection-failed'
    stanza_error = 'remote-connection-failed'
class NodeNotFound(Error):
    # HTTP-style not-found error for BOSH endpoints.
    msg = '404 not found'
class NotAuthorized(Error):
    pass
class NotImplemented(Error):
    # NOTE(review): shadows the builtin `NotImplemented`; the name is
    # kept unchanged for backwards compatibility with existing callers.
    pass
# Exceptions raised by the client.
class HTTPBException(Exception):
    """Root of the BOSH client exception hierarchy."""

class HTTPBNetworkTerminated(HTTPBException):
    """Raised when the remote end terminates the BOSH session."""

    def __init__(self, body_tag, elements):
        # Keep the terminating <body/> element and any remaining stanzas.
        self.body_tag = body_tag
        self.elements = elements

    def __str__(self):
        return self.body_tag.toXml()
class XMPPAuthenticator(client.XMPPAuthenticator):
    """
    Authenticate against an xmpp server using BOSH.

    No behaviour is added over the twisted base class; the subclass
    exists as a hook point for BOSH-specific authentication tweaks.
    """
class QueryProtocol(http.HTTPClient):
    """HTTP client protocol that POSTs BOSH payloads and hands complete
    responses back to its factory."""
    noisy = False

    def connectionMade(self):
        # Announce ourselves to the factory, then immediately send the
        # body the factory was created with.
        self.factory.sendConnected(self)
        self.sendBody(self.factory.cb)

    def sendCommand(self, command, path):
        self.transport.write(
            ('%s %s HTTP/1.1\r\n' % (command, path)).encode('utf-8'))

    def sendBody(self, b, close=0):
        # Accept either a domish Element or a plain string payload.
        if isinstance(b, domish.Element):
            bdata = b.toXml().encode('utf-8')
        else:
            bdata = b.encode('utf-8')
        self.sendCommand('POST', self.factory.url)
        # NOTE(review): header names are bytes while some values are str;
        # presumably tolerated by twisted's HTTPClient -- confirm.
        self.sendHeader(b'User-Agent', 'Twisted/XEP-0124')
        self.sendHeader(b'Host', self.factory.host)
        self.sendHeader(b'Content-type', 'text/xml')
        self.sendHeader(b'Content-length', str(len(bdata)))
        self.endHeaders()
        self.transport.write(bdata)

    def handleStatus(self, version, status, message):
        # Anything other than HTTP 200 is reported to the factory.
        if status != b'200':
            self.factory.badStatus(status, message)

    def handleResponse(self, contents):
        self.factory.parseResponse(contents, self)

    def lineReceived(self, line):
        # Hand-rolled header parsing: the first line is the status line,
        # later lines are headers until the blank separator line.
        if self.firstLine:
            self.firstLine = 0
            line_split = line.split(None, 2)
            version = line_split[0]
            status = line_split[1]
            try:
                message = line_split[2]
            except IndexError:
                # sometimes there is no message
                message = ""
            self.handleStatus(version, status, message)
            return
        if line:
            key, val = line.decode('utf-8').split(':', 1)
            val = val.lstrip()
            self.handleHeader(key, val)
            if key.lower() == 'content-length':
                self.length = int(val)
        else:
            # Blank line: headers are done, collect the raw body next.
            self.__buffer = []
            self.handleEndHeaders()
            self.setRawMode()

    def handleResponseEnd(self):
        # Reset for a possible next response and flush the collected body.
        self.firstLine = 1
        if self.__buffer is not None:
            b = b''.join(self.__buffer)
            self.__buffer = None
            self.handleResponse(b)

    def handleResponsePart(self, data):
        self.__buffer.append(data)

    def connectionLost(self, reason):
        # Completion is handled via handleResponseEnd; nothing to do here.
        pass
class QueryFactory(protocol.ClientFactory):
    """ a factory to create http client connections.

    Owns the deferred for the outstanding request and translates parsed
    BOSH responses into callbacks/errbacks.
    """
    deferred = None
    noisy = False

    protocol = QueryProtocol

    def __init__(self, url, host, b):
        self.url, self.host = url, host
        self.deferred = defer.Deferred()
        self.cb = b

    def send(self, b):
        # Reuse the open connection for another body; each send gets a
        # fresh deferred for its own response.
        self.deferred = defer.Deferred()
        self.client.sendBody(b)
        return self.deferred

    def parseResponse(self, contents, protocol):
        self.client = protocol
        hp = HttpbParse(True)
        try:
            body_tag, elements = hp.parse(contents)
        except Exception:
            raise
        else:
            # type='terminate' means the remote session is gone; surface
            # it as an HTTPBNetworkTerminated failure.
            if body_tag.hasAttribute('type') \
                    and body_tag['type'] == 'terminate':
                nte = HTTPBNetworkTerminated(body_tag, elements)
                error = failure.Failure(nte)
                if self.deferred.called:
                    return defer.fail(error)
                else:
                    self.deferred.errback(error)
                return
            if self.deferred.called:
                return defer.succeed((body_tag, elements))
            else:
                self.deferred.callback((body_tag, elements))

    def sendConnected(self, q):
        self.q = q

    def clientConnectionLost(self, _, reason):
        # Fail the pending deferred (at most once) when the link drops.
        try:
            self.client = None
            if not self.deferred.called:
                self.deferred.errback(reason)
        except Exception:
            return reason

    # Connection failure is handled exactly like connection loss.
    clientConnectionFailed = clientConnectionLost

    def badStatus(self, status, message):
        # Non-200 HTTP status: fail the pending deferred once.
        if not self.deferred.called:
            self.deferred.errback(ValueError(status, message))
class Keys:
    """
    Generate keys according to
    XEP-0124 #15 "Protecting Insecure Sessions".

    Keys are consumed from the end of a SHA-1 hash chain; once the chain
    is exhausted a fresh one is generated and a re-key value handed out.
    """

    def __init__(self):
        self.k = []

    def _set_keys(self):
        # Build a hash chain of random length seeded with random bytes;
        # each entry is the SHA-1 hex digest of the previous one.
        chain = [hashlib.sha1(os.urandom(1024)).hexdigest()]
        for _ in range(random.randint(55, 255) - 1):
            chain.append(hashlib.sha1(chain[-1].encode('utf-8')).hexdigest())
        self.k = chain

    def getKey(self):
        """
        Return (key, newkey), where key is the next key to use and newkey
        is the next newkey value to use. If key or newkey are None, the next
        request doesn't require that value.
        """
        if not self.k:
            # First call: build the chain and hand out only a new key.
            self._set_keys()
            return None, self.k.pop()
        key = self.k.pop()
        if self.k:
            return key, None
        # Chain exhausted: regenerate and re-key.
        self._set_keys()
        return key, self.k.pop()
class Proxy:
"""A Proxy for making HTTP Binding calls.
Pass the URL of the remote HTTP Binding server to the constructor.
"""
def __init__(self, url):
"""
Parse the given url and find the host and port to connect to.
"""
parts = urllib.parse.urlparse(url)
self.url = urllib.parse.urlunparse(('', '')+parts[2:])
if self.url == "":
self.url = "/"
if ':' in parts[1]:
self.host, self.port = parts[1].split(':')
self.port = int(self.port)
else:
self.host, self.port = parts[1], None
self.secure = parts[0] == 'https'
    def connect(self, b):
        """
        Make a connection to the web server and send along the data.

        Returns the factory's deferred, which fires with the parsed
        response. Defaults to port 443 for https and 80 for http when no
        explicit port was given.
        """
        self.factory = QueryFactory(self.url, self.host, b)

        if self.secure:
            # Imported lazily so plain-http use doesn't require pyOpenSSL.
            from twisted.internet import ssl
            self.rid = reactor.connectSSL(self.host,
                                          self.port or 443,
                                          self.factory,
                                          ssl.ClientContextFactory())
        else:
            self.rid = reactor.connectTCP(self.host,
                                          self.port or 80, self.factory)
        return self.factory.deferred
def send(self, b):
""" Send data to the web server. """
# if keepalive is off we need a new query factory
# TODO - put a check to reuse the factory, right now we open a new one.
d = self.connect(b)
r |
AliLozano/django-messages-extends | messages_extends/urls.py | Python | mit | 358 | 0.005587 | # -*- coding: utf-8 -*-
"""urls.py: messages extends"""
fr | om django.conf.urls import url
from messages_extends.views import message_ma | rk_all_read, message_mark_read
# Read-state endpoints: mark one persistent message, or all of the
# current user's messages, as read.
urlpatterns = [
    url(r'^mark_read/(?P<message_id>\d+)/$', message_mark_read, name='message_mark_read'),
    url(r'^mark_read/all/$', message_mark_all_read, name='message_mark_all_read'),
]
|
argonemyth/sentry | tests/sentry/api/endpoints/test_project_group_index.py | Python | bsd-3-clause | 12,617 | 0.00103 | from __future__ import absolute_import
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.utils import timezone
from mock import patch
from sentry.models import Group, GroupBookmark, GroupSeen, GroupStatus
from sentry.testutils import APITestCase
from sentry.testutils.helpers import parse_link_header
class GroupListTest(APITestCase):
    """Exercises cursor-based pagination of the project group index."""

    def _parse_links(self, header):
        # links come in {url: {...attrs}}, but we need {rel: {...attrs}}
        links = {}
        for url, attrs in parse_link_header(header).iteritems():
            links[attrs['rel']] = attrs
            attrs['href'] = url
        return links

    def test_simple_pagination(self):
        # Two groups one second apart so date ordering is deterministic.
        project = self.project
        now = timezone.now()
        group1 = self.create_group(
            checksum='a' * 32,
            last_seen=now - timedelta(seconds=1),
        )
        group2 = self.create_group(
            checksum='b' * 32,
            last_seen=now,
        )
        self.login_as(user=self.user)
        url = reverse('sentry-api-0-project-group-index', kwargs={
            'organization_slug': self.project.organization.slug,
            'project_slug': self.project.slug,
        })
        # Page one (newest first, limit=1) must be group2.
        response = self.client.get(url + '?sort_by=date&limit=1', format='json')
        assert response.status_code == 200
        assert len(response.data) == 1
        assert response.data[0]['id'] == str(group2.id)

        links = self._parse_links(response['Link'])
        assert links['previous']['results'] == 'false'
        assert links['next']['results'] == 'true'
        print(links['next']['cursor'])
        # Follow the next cursor: page two must be group1.
        response = self.client.get(links['next']['href'], format='json')
        assert response.status_code == 200
        assert len(response.data) == 1
        assert response.data[0]['id'] == str(group1.id)

        links = self._parse_links(response['Link'])
        assert links['previous']['results'] == 'true'
        assert links['next']['results'] == 'false'
        print(links['previous']['cursor'])
        # Walking back via the previous cursor returns page one again.
        response = self.client.get(links['previous']['href'], format='json')
        assert response.status_code == 200
        assert len(response.data) == 1
        assert response.data[0]['id'] == str(group2.id)

        links = self._parse_links(response['Link'])
        assert links['previous']['results'] == 'false'
        assert links['next']['results'] == 'true'
        print(links['previous']['cursor'])
        # Stepping past the front of the list yields an empty page.
        response = self.client.get(links['previous']['href'], format='json')
        assert response.status_code == 200
        assert len(response.data) == 0

        # A group created afterwards must be reachable through the empty
        # page's previous cursor.
        group3 = self.create_group(
            checksum='c' * 32,
            last_seen=now + timedelta(seconds=1),
        )

        links = self._parse_links(response['Link'])
        assert links['previous']['results'] == 'false'
        assert links['next']['results'] == 'true'
        print(links['previous']['cursor'])
        response = self.client.get(links['previous']['href'], format='json')
        assert response.status_code == 200
        assert len(response.data) == 1
        assert response.data[0]['id'] == str(group3.id)
class GroupUpdateTest(APITestCase):
    def test_global_resolve(self):
        """Bulk-resolving with ?status=unresolved only touches groups that
        are currently unresolved in this project."""
        group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
        group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
        group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
        group4 = self.create_group(
            project=self.create_project(slug='foo'),
            checksum='b' * 32, status=GroupStatus.UNRESOLVED)

        self.login_as(user=self.user)
        url = reverse('sentry-api-0-project-group-index', kwargs={
            'organization_slug': self.project.organization.slug,
            'project_slug': self.project.slug,
        })
        response = self.client.put(url + '?status=unresolved', data={
            'status': 'resolved',
        }, format='json')
        assert response.status_code == 200, response.data
        assert response.data == {
            'status': 'resolved',
        }

        # the previously resolved entry should not be included
        new_group1 = Group.objects.get(id=group1.id)
        assert new_group1.status == GroupStatus.RESOLVED
        assert new_group1.resolved_at is None

        # The only unresolved group in this project is actually resolved.
        new_group2 = Group.objects.get(id=group2.id)
        assert new_group2.status == GroupStatus.RESOLVED
        assert new_group2.resolved_at is not None

        # the muted entry should not be included
        new_group3 = Group.objects.get(id=group3.id)
        assert new_group3.status == GroupStatus.MUTED
        assert new_group3.resolved_at is None

        # Groups belonging to another project must be untouched.
        new_group4 = Group.objects.get(id=group4.id)
        assert new_group4.status == GroupStatus.UNRESOLVED
        assert new_group4.resolved_at is None

    def test_selective_status_update(self):
        """Explicit ?id= parameters restrict the bulk update to exactly
        those groups."""
        group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
        group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
        group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
        group4 = self.create_group(
            project=self.create_project(slug='foo'),
            checksum='b' * 32, status=GroupStatus.UNRESOLVED)

        self.login_as(user=self.user)
        # NOTE(review): group4 is passed under a bogus 'group4' query
        # parameter (not 'id'), so the endpoint never sees it as a target.
        url = '{url}?id={group1.id}&id={group2.id}&group4={group4.id}'.format(
            url=reverse('sentry-api-0-project-group-index', kwargs={
                'organization_slug': self.project.organization.slug,
                'project_slug': self.project.slug,
            }),
            group1=group1,
            group2=group2,
            group4=group4,
        )
        response = self.client.put(url, data={
            'status': 'resolved',
        }, format='json')
        assert response.status_code == 200
        assert response.data == {
            'status': 'resolved',
        }

        # Already-resolved group keeps a null resolved_at.
        new_group1 = Group.objects.get(id=group1.id)
        assert new_group1.resolved_at is None

        new_group2 = Group.objects.get(id=group2.id)
        assert new_group2.resolved_at is not None
        assert new_group2.status == GroupStatus.RESOLVED

        # Not listed in ?id=: must stay muted/unresolved.
        new_group3 = Group.objects.get(id=group3.id)
        assert new_group3.resolved_at is None
        assert new_group3.status == GroupStatus.MUTED

        new_group4 = Group.objects.get(id=group4.id)
        assert new_group4.resolved_at is None
        assert new_group4.status == GroupStatus.UNRESOLVED

    def test_set_bookmarked(self):
        """isBookmarked=true creates GroupBookmark rows only for the
        groups named via ?id=."""
        group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
        group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
        group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
        group4 = self.create_group(
            project=self.create_project(slug='foo'),
            checksum='b' * 32, status=GroupStatus.UNRESOLVED)

        self.login_as(user=self.user)
        # Same bogus 'group4' parameter trick as above: group4 excluded.
        url = '{url}?id={group1.id}&id={group2.id}&group4={group4.id}'.format(
            url=reverse('sentry-api-0-project-group-index', kwargs={
                'organization_slug': self.project.organization.slug,
                'project_slug': self.project.slug,
            }),
            group1=group1,
            group2=group2,
            group4=group4,
        )
        response = self.client.put(url, data={
            'isBookmarked': 'true',
        }, format='json')
        assert response.status_code == 200
        assert response.data == {
            'isBookmarked': True,
        }

        bookmark1 = GroupBookmark.objects.filter(group=group1, user=self.user)
        assert bookmark1.exists()

        bookmark2 = GroupBookmark.objects.filter(group=group2, user=self.user)
        assert bookmark2.exists()

        bookmark3 = GroupBookmark.objects.filter(group=group3, user=self.user)
        assert not bookmark3.exists()

        bookmark4 = GroupBookmark.objects.filter(group=group4, user=self.user)
        assert not bookmark4.exists()
def test_set_has_seen(self):
project = self.project
group1 = self.cre |
uranusjr/django | tests/m2m_through_regress/models.py | Python | bsd-3-clause | 3,268 | 0.000612 | from django.contrib.auth.models import User
from django.db import models
# Forward declared intermediate model
class Membership(models.Model):
    # Through-model linking Person <-> Group with a per-membership price.
    person = models.ForeignKey('Person', models.CASCADE)
    group = models.ForeignKey('Group', models.CASCADE)
    price = models.IntegerField(default=100)
    def __str__(self):
        return "%s is a member of %s" % (self.person.name, self.group.name)
# using custom id column to test ticket #11107
class UserMembership(models.Model):
    """Through-model for Group.user_members using a custom id column
    (regression coverage for ticket #11107)."""
    id = models.AutoField(db_column='usermembership_id', primary_key=True)
    user = models.ForeignKey(User, models.CASCADE)
    group = models.ForeignKey('Group', models.CASCADE)
    price = models.IntegerField(default=100)

    def __str__(self):
        return "%s is a user and member of %s" % (self.user.username, self.group.name)
class Person(models.Model):
    name = models.CharField(max_length=128)
    def __str__(self):
        return self.name
class Group(models.Model):
    name = models.CharField(max_length=128)
    # Membership object defined as a class
    members = models.ManyToManyField(Person, through=Membership)
    # Second m2m through a model with a custom id column (ticket #11107).
    user_members = models.ManyToManyField(User, through='UserMembership')
    def __str__(self):
        return self.name
# A set of models that use a non-abstract inherited model as the 'through' model.
class A(models.Model):
    a_text = models.CharField(max_length=20)
class ThroughBase(models.Model):
    # Concrete (non-abstract) base: Through inherits both FKs via MTI.
    a = models.ForeignKey(A, models.CASCADE)
    b = models.ForeignKey('B', models.CASCADE)
class Through(ThroughBase):
    extra = models.CharField(max_length=20)
class B(models.Model):
    b_text = models.CharField(max_length=20)
    a_list = models.ManyToManyField(A, through=Through)
# Using to_field on the through model
class Car(models.Model):
    make = models.CharField(max_length=20, unique=True, null=True)
    drivers = models.ManyToManyField('Driver', through='CarDriver')
    def __str__(self):
        return "%s" % self.make
class Driver(models.Model):
    name = models.CharField(max_length=20, unique=True, null=True)
    def __str__(self):
        return "%s" % self.name
    class Meta:
        ordering = ('name',)
class CarDriver(models.Model):
    # FKs target the unique natural keys (to_field) rather than the pks.
    car = models.ForeignKey('Car', models.CASCADE, to_field='make')
    driver = models.ForeignKey('Driver', models.CASCADE, to_field='name')
    def __str__(self):
        return "pk=%s car=%s driver=%s" % (str(self.pk), self.car, self.driver)
# Through models using multi-table inheritance
class Event(models.Model):
    name = models.CharField(max_length=50, unique=True)
    people = models.ManyToManyField('Person', through='IndividualCompetitor')
    # Same target model through a proxy of the same through-model.
    special_people = models.ManyToManyField(
        'Person',
        through='ProxiedIndividualCompetitor',
        related_name='special_event_set',
    )
    teams = models.ManyToManyField('Group', through='CompetingTeam')
class Competitor(models.Model):
    # Shared MTI base holding the Event FK for both through models below.
    event = models.ForeignKey(Event, models.CASCADE)
class IndividualCompetitor(Competitor):
    person = models.ForeignKey(Person, models.CASCADE)
class CompetingTeam(Competitor):
    team = models.ForeignKey(Group, models.CASCADE)
class ProxiedIndividualCompetitor(IndividualCompetitor):
    class Meta:
        proxy = True
|
wathsalav/xos | xos/core/models/role.py | Python | apache-2.0 | 815 | 0.009816 | import os
import datetime
from django.db import models
from core.models import PlCoreBase
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes | import generic
class Role(PlCoreBase):
    # role_type is the human-readable name; `role` stores the matching
    # keystone role id once provisioned (hence nullable).
    role_type = models.CharField(max_length=80, verbose_name="Name")
    role = models.CharField(max_length=80, verbose_name="Keystone role id", null=True, blank=True)
    description = models.CharField(max_length=120, verbose_name="Description")
    # content_type scopes the role to a particular model class.
    content_type = models.ForeignKey(ContentType, verbose_name="Role Scope")
    def __unicode__(self): return u'%s:%s' % (self.content_type,self.role_type)
    # NOTE(review): save/delete are pure pass-throughs; presumably kept
    # as stable hook points -- could otherwise be removed.
    def save(self, *args, **kwds):
        super(Role, self).save(*args, **kwds)
    def delete(self, *args, **kwds):
        super(Role, self).delete(*args, **kwds)
|
coala-analyzer/coala-bears | tests/general/LicenseHeaderBearTest.py | Python | agpl-3.0 | 2,350 | 0 | imp | ort os
from queue import Queue
from bears.general.LicenseHeaderBear impo | rt LicenseHeaderBear
from coalib.testing.LocalBearTestHelper import LocalBearTestHelper
from coalib.results.Result import Result
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
def get_testfile_path(name):
return os.path.join(os.path.dirname(__file__),
'licenseheader_test_files',
name)
def load_testfile(name):
with open(get_testfile_path(name)) as f:
output = f.readlines()
return output
class LicenseHeaderBearTest(LocalBearTestHelper):
def setUp(self):
self.section = Section('name')
self.uut = LicenseHeaderBear(self.section, Queue())
def test_copyright_without_author(self):
file_contents = load_testfile('CopyrightWithoutAuthor.java')
self.check_validity(self.uut, file_contents)
def test_copyright_with_given_author(self):
file_contents = load_testfile('copyright_with_given_author.txt')
self.section.append(Setting('author_name', 'The coala developers'))
self.check_validity(
self.uut,
file_contents)
def test_copyright_with_different_author(self):
file_contents = load_testfile('copyright_with_different_author.txt')
self.section.append(Setting('author_name', 'The coala developers'))
self.check_results(
self.uut,
file_contents,
[Result.from_values('LicenseHeaderBear',
'Copyright notice with different/no author '
'present.',
file=get_testfile_path('copyright_with_diff'
'erent_author.txt'))],
filename=get_testfile_path('copyright_with_'
'different_author.txt'))
def test_no_copyright(self):
file_contents = load_testfile('no_copyright.py')
self.check_results(
self.uut,
file_contents,
[Result.from_values('LicenseHeaderBear',
'Copyright notice not present.',
file=get_testfile_path('no_copyright.py'))],
filename=get_testfile_path('no_copyright.py'))
|
wilbuick/django-ttdb | ttdb/__init__.py | Python | bsd-3-clause | 194 | 0.005155 | f | rom .decorators import use_template_database
from .testcases import TemplateDBTestCase
from .testcases import TemplateDBTransactionTestCase
from .testcases import TemplateDBLiveServerTestCase
| |
release-monitoring/anitya | anitya/tests/test_alembic.py | Python | gpl-2.0 | 1,640 | 0.00122 | # (c) 2017 - Copyright Red Hat Inc
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Authors:
# Pierre-Yves Chibon <pingou@pingoured.fr>
"""This test module contains tests for the migration system."""
import os
import subprocess
import unittest
REPO_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
class TestAlembic(unittest.TestCase):
"""This test class contains tests pertaining to alembic."""
def test_alembic_history(self):
"""Enforce a linear alembic history.
This test runs the `alembic history | grep ' (head), '` command,
and ensure it returns only one line.
"""
pr | oc1 = | subprocess.Popen(
["alembic", "history"], cwd=REPO_PATH, stdout=subprocess.PIPE
)
proc2 = subprocess.Popen(
["grep", " (head), "], stdin=proc1.stdout, stdout=subprocess.PIPE
)
stdout = proc2.communicate()[0]
stdout = stdout.strip().split(b"\n")
self.assertEqual(len(stdout), 1)
proc1.communicate()
|
TNT-Samuel/Coding-Projects | DNS Server/Source/Lib/site-packages/dask/dataframe/__init__.py | Python | gpl-3.0 | 848 | 0 | from __future__ import print_function, division, absolute_import
from .core import (DataFrame, Series, Index, _Frame, map_partitions,
repartition, to_delayed, to_datet | ime, to_timedelta)
from .groupby import Aggregation
from .io im | port (from_array, from_pandas, from_bcolz,
from_dask_array, read_hdf, read_sql_table,
from_delayed, read_csv, to_csv, read_table,
demo, to_hdf, to_records, to_bag, read_json, to_json)
from .optimize import optimize
from .multi import merge, concat
from . import rolling
from ..base import compute
from .reshape import get_dummies, pivot_table, melt
from .utils import assert_eq
from .io.orc import read_orc
try:
from .io import read_parquet, to_parquet
except ImportError:
pass
try:
from .core import isna
except ImportError:
pass
|
aljosa/django-tinymce | tests/manage.py | Python | mit | 492 | 0.002033 | #!/usr/bin/env python
import o | s
i | mport sys
try:
os.environ["DJANGO_SETTINGS_MODULE"] = "tests.settings"
test_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, test_dir)
except ImportError:
pass
else:
from django.core.management import execute_from_command_line
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "%s.settings" % __package__)
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
|
MapQuest/mapquest-osm-server | src/python/dbmgr/dbm_stats.py | Python | mit | 3,450 | 0.00029 | # Copyright (c) 2011 AOL Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Manage statistics.
"""
import sys
import threading
import apiserver.const as C
_timer_delay = 1 # Number of seconds between reports.
_stats = {} # Hash maps tracking the collected statistics.
_prevstats = {}
_timer = None # Timer object.
_is_active = None # Run state.
_lock = None
def _display_stats():
"Display statistics"
global _lock, _prevstats
def _format(prefix, absval, incr):
"""Helper function."""
s = ""
if absval:
s += ("%s: %%(_%sv)d" % (prefix.upper(), prefix))
if incr:
s += ("(+%%(_%s)d)" % prefix)
s += " "
return s
# Retrieve the previous and current counts.
_c = _prevstats[C.CHANGESET]
_n = _prevstats[C.NODE]
_w = _prevstats[C.WAY]
_r = _prevstats[C.RELATION]
_lock.acquire()
_cv = _stats[C.CHANGESET]
_nv = _stats[C.NODE]
_wv = _stats[C.WAY]
_rv = _stats[C.RELATION]
_prevstats.update(_stats)
_lock.release()
# Compute incremental changes.
_c = _cv - _c
_n = _nv - _n
_w = _wv - _w
_r = _rv - _r
# Compute the format string.
s = _format('c', _cv, _c)
s += _format('n', _nv, _n)
| s += _format('w', _wv, _w)
s += _for | mat('r', _rv, _r)
print s % locals()
def _stats_timer():
"Invoke the actual display helper and re-arm the timer."
_display_stats()
global _timer
if _is_active:
_timer = threading.Timer(_timer_delay, _stats_timer)
_timer.start()
def init_statistics(config, options):
"Initialize the module."
global _stats, _prevstats
for n in [C.CHANGESET, C.NODE, C.WAY, C.RELATION]:
_stats[n] = _prevstats[n] = 0
global _lock
_lock = threading.Lock()
if options.verbose:
global _is_active, _timer
_is_active = True
_timer = threading.Timer(_timer_delay, _stats_timer)
_timer.daemon = True
_timer.start()
def fini_statistics(options):
"Shutdown the module."
global _is_active
_is_active = False
if _timer:
_timer.cancel()
if options.verbose:
_display_stats()
def increment_stats(namespace):
global _lock, _stats
_lock.acquire()
_stats[namespace] = _stats[namespace] + 1
_lock.release()
|
odoousers2014/LibrERP | l10n_ch_payment_slip/wizard/bvr_import.py | Python | agpl-3.0 | 13,953 | 0.00258 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Financial contributors: Hasa SA, Open Net SA,
# Prisme Solutions Informatique SA, Quod SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import time
import re
from openerp.tools.translate import _
from openerp.osv.orm import TransientModel, fields
from openerp.osv.osv import except_osv
from openerp.tools import mod10r
REF = re.compile('[^0-9]')
class BvrImporterWizard(TransientModel):
_name = 'bvr.import.wizard'
_columns = {'file': fields.binary('BVR File')}
def _reconstruct_invoice_ref(self, cursor, user, reference, context=None):
"""Try to get correct invoice/invoice line form ESV/BVR reference"""
id_invoice = False
# On fait d'abord une recherche sur toutes les factures
# we now search for an invoice
user_obj = self.pool['res.users']
user_current = user_obj.browse(cursor, user, user)
cursor.execute("SELECT inv.id, inv.number from account_invoice "
"AS inv where inv.company_id = %s and type='out_invoice'",
(user_current.company_id.id,))
result_invoice = cursor.fetchall()
for inv_id, inv_name in result_invoice:
inv_name = REF.sub('0', str(inv_name))
if inv_name == reference:
id_invoice = inv_id
break
if id_invoice:
cursor.execute('SELECT l.id'
' FROM account_move_line l, account_invoice i'
' WHERE l.move_id = i.move_id AND l.reconcile_id is NULL '
' AND i.id IN %s', (tuple([id_invoice]),))
inv_line = []
for id_line in cursor.fetchall():
inv_line.append(id_line[0])
return inv_line
else:
return []
def _parse_lines(self, cursor, uid, inlines, context=None):
"""Parses raw v11 line and populate records list with dict"""
records = []
total_amount = 0
total_cost = 0
find_total = False
for lines in inlines:
if not lines: # manage new line at end of file
continue
(line, lines) = (lines[:128], lines[128:])
record = {}
if line[0:3] in ('999', '995'):
if find_total:
raise except_osv(_('Error'),
_('Too much total record found!'))
find_total = True
if lines:
raise except_osv(_('Error'),
_('Record found after total record!'))
amount = float(line[39:49]) + (float(line[49:51]) / 100)
cost = float(line[69:76]) + (float(line[76:78]) / 100)
if line[2] == '5':
amount *= -1
cost *= -1
if round(amount - total_amount, 2) >= 0.01 \
or round(cost - total_cost, 2) >= 0. | 01:
raise except_osv(_('Error'),
_('Total record different from the computed!'))
if int(line[51:63]) != len(records):
raise except_osv(_('Error'),
_('Number record different from the computed!'))
else:
record = {
| 'reference': line[12:39],
'amount': float(line[39:47]) + (float(line[47:49]) / 100),
'date': time.strftime('%Y-%m-%d', time.strptime(line[65:71], '%y%m%d')),
'cost': float(line[96:98]) + (float(line[98:100]) / 100),
}
if record['reference'] != mod10r(record['reference'][:-1]):
raise except_osv(_('Error'),
_('Recursive mod10 is invalid for reference: %s') % record['reference'])
if line[2] == '5':
record['amount'] *= -1
record['cost'] *= -1
total_amount += record['amount']
total_cost += record['cost']
records.append(record)
return records
#deprecated
def _create_voucher_from_record(self, cursor, uid, record,
statement, line_ids, context=None):
"""Create a voucher with voucher line"""
context.update({'move_line_ids': line_ids})
voucher_obj = self.pool.get('account.voucher')
move_line_obj = self.pool.get('account.move.line')
voucher_line_obj = self.pool.get('account.voucher.line')
line = move_line_obj.browse(cursor, uid, line_ids[0])
partner_id = line.partner_id and line.partner_id.id or False
if not partner_id:
return False
move_id = line.move_id.id
result = voucher_obj.onchange_partner_id(cursor, uid, [],
partner_id,
statement.journal_id.id,
abs(record['amount']),
statement.currency.id,
'receipt',
statement.date,
context=context)
voucher_res = {'type': 'receipt',
'name': record['reference'],
'partner_id': partner_id,
'journal_id': statement.journal_id.id,
'account_id': result.get('account_id', statement.journal_id.default_credit_account_id.id),
'company_id': statement.company_id.id,
'currency_id': statement.currency.id,
'date': record['date'] or time.strftime('%Y-%m-%d'),
'amount': abs(record['amount']),
'period_id': statement.period_id.id
}
voucher_id = voucher_obj.create(cursor, uid, voucher_res, context=context)
voucher_line_dict = False
if result['value']['line_cr_ids']:
for line_dict in result['value']['line_cr_ids']:
move_line = move_line_obj.browse(cursor, uid, line_dict['move_line_id'], context)
if move_id == move_line.move_id.id:
voucher_line_dict = line_dict
if voucher_line_dict:
voucher_line_dict.update({'voucher_id': voucher_id})
voucher_line_obj.create(cursor, uid, voucher_line_dict, context=context)
return voucher_id
def _get_account(self, cursor, uid, line_ids, record, context=None):
"""Get account from move line or from property"""
property_obj = self.pool.get('ir.property')
move_line_obj = self.pool.get('account.move.line')
account_id = False
if line_ids:
for line in move_line_obj.browse(cursor, uid, line_ids, context=context):
return line.account_id.id
if not account_id and not line_ids:
name = "property_account_receivable"
if record['amount'] < 0:
name = "property_account_payable"
account_id |
ruibarreira/linuxtrail | usr/lib/python2.7/dist-packages/numpy/lib/stride_tricks.py | Python | gpl-3.0 | 4,203 | 0.001665 | """
Utilities that manipulate strides to achieve desirable effects.
An explanation of strides can be found in the "ndarray.rst" file in the
NumPy reference guide.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
__all__ = ['broadcast_arrays']
class DummyArray(object):
""" Dummy object that just exists to hang __array_interface__ dictionaries
and possibly keep alive a reference to a base array.
"""
def __init__(self, interface, base=None):
self.__array_interface__ = interface
self.base = base
def as_strided(x, shape=None, strides=None):
""" Make an ndarray from the given array with the given shape and strides.
"""
interface = dict(x.__array_interface__)
if shape is not None:
interface['shape'] = tuple(shape)
if strides is not None:
interface['strides'] = tuple(strides)
array = np.asarray(DummyArray(interface, base=x))
# Make sure dtype is correct in case of custom dtype
if array.dtype.kind == 'V':
array.dtype = x.dtype
return array
def broadcast_arrays(*args):
"""
Broadcast any number of arrays against each other.
Parameters
----------
`*args` : array_likes
The arrays to broadcast.
Returns
-------
broadcasted : list of arrays
These arrays are views on the original arrays. They are typically
not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location. If you
need to write to the arrays, make copies first.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> y = np.array([[1],[2],[3]])
>>> np.broadcast_arrays(x, y)
[array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]), array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])]
Here is a useful idiom for getting contiguous copies instead of
non-contiguous views.
>>> [np.array(a) for a in np.broadcast_arrays(x, y)]
[array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]), array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])]
"""
args = [np.asarray(_m) for _m in args]
shapes = [x.shape for x in args]
if len(set(shapes)) == 1:
# Common case where nothing needs to be broadcasted.
return args
shapes = [list(s) for s in shapes]
strides = [list(x.strides) for x in args]
nds = [len(s) for s in shapes]
biggest = max(nds)
# Go through each array and prepend dimensions of length 1 to each of the
# shapes in order to make the number of dimensions equal.
for i in range(len(args)):
diff = biggest - nds[i]
if diff > 0:
shapes[i] = [1] * diff + shapes[i]
strides[ | i] = [0] * diff + strides[i]
# Chech each dimension for compatibility. A dimension length of 1 is
# acc | epted as compatible with any other length.
common_shape = []
for axis in range(biggest):
lengths = [s[axis] for s in shapes]
unique = set(lengths + [1])
if len(unique) > 2:
# There must be at least two non-1 lengths for this axis.
raise ValueError("shape mismatch: two or more arrays have "
"incompatible dimensions on axis %r." % (axis,))
elif len(unique) == 2:
# There is exactly one non-1 length. The common shape will take this
# value.
unique.remove(1)
new_length = unique.pop()
common_shape.append(new_length)
# For each array, if this axis is being broadcasted from a length of
# 1, then set its stride to 0 so that it repeats its data.
for i in range(len(args)):
if shapes[i][axis] == 1:
shapes[i][axis] = new_length
strides[i][axis] = 0
else:
# Every array has a length of 1 on this axis. Strides can be left
# alone as nothing is broadcasted.
common_shape.append(1)
# Construct the new arrays.
broadcasted = [as_strided(x, shape=sh, strides=st) for (x, sh, st) in
zip(args, shapes, strides)]
return broadcasted
|
apikler/VideoStore | site/manage.py | Python | bsd-2-clause | 254 | 0 | #!/usr/bin/env python2
import os
import sys
if __name__ == "_ | _main__":
os.environ.setdefault("DJA | NGO_SETTINGS_MODULE", "videostore.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
cafecivet/django_girls_tutorial | Scripts/gunicorn_paster-script.py | Python | gpl-2.0 | 338 | 0.002959 | #!C:\Users\mbradford\Documents\django_ | projects\mysite\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'gunicorn==19.1.1','console_scripts','gunicorn_paster'
__requires__ = | 'gunicorn==19.1.1'
import sys
from pkg_resources import load_entry_point
sys.exit(
load_entry_point('gunicorn==19.1.1', 'console_scripts', 'gunicorn_paster')()
)
|
itsallvoodoo/csci-school | CSCI220/Week 13 - APR09-13/Board.py | Python | apache-2.0 | 4,833 | 0.046348 | from graphics import *
class Board:
def __init__(self,w,h):
self.win = GraphWin('Scrabble',w,h)
self.win.setCoords(-30,-30,180,180)
self.markers = []
tws_j = [0,0,0,7,7,14,14,14]
tws_i = [0,7,14,0,14,0,7,14]
dls_j = [0,0,2,2,3,3,3,6,6,6,6,7,7,8,8,8,8,11,11,11,12,12,14,14]
| dl | s_i = [3,11,6,8,0,7,14,2,6,8,12,3,11,2,6,8,12,0,7,14,6,8,3,11]
dws_j = [1,1,2,2,3,3,4,4,10,10,11,11,12,12,13,13]
dws_i = [1,13,2,12,3,11,4,10,4,10,3,11,2,12,1,13]
tls_j = [1,1,5,5,5,5,9,9,9,9,13,13]
tls_i = [5,9,1,5,9,13,1,5,9,13,5,9]
for i in range(15):
for j in range(15):
if self.find_ij(i,j,tws_i,tws_j):
self.markers.append(TripleWordScoreMarker(i,j,self.win))
elif i == 7 and j == 7:
self.markers.append(StartMarker(i,j,self.win))
elif self.find_ij(i,j,dls_i,dls_j):
self.markers.append(DoubleLetterScoreMarker(i,j,self.win))
elif self.find_ij(i,j,tls_i,tls_j):
self.markers.append(TripleLetterScoreMarker(i,j,self.win))
elif self.find_ij(i,j,dws_i,dws_j):
self.markers.append(DoubleWordScoreMarker(i,j,self.win))
else:
self.markers.append(BlankMarker(i,j,self.win))
# Linear search
def find_ij(self,i,j,list_i,list_j):
for z in range(len(list_i)):
if i == list_i[z] and j == list_j[z]:
return True
return False
def get_marker_clicked(self,p):
for marker in self.markers:
if marker.clicked(p):
return marker
return None
def score(self,markers,tiles):
sc = 0
mult = 1
for i in range(len(markers)):
if isinstance(markers[i],DoubleWordScoreMarker):
mult = mult*2
elif isinstance(markers[i],TripleWordScoreMarker):
mult = mult*3
if isinstance(markers[i],DoubleLetterScoreMarker):
sc = sc + tiles[i]*2
elif isinstance(markers[i],TripleLetterScoreMarker):
sc = sc + tiles[i]*3
else:
sc = sc + tiles[i]
sc = mult*sc
return sc
def __del__(self):
self.win.close()
class Marker:
def clicked(self,p):
p1 = self.rect.getP1()
p2 = self.rect.getP2()
big_x = max([p1.getX(),p2.getX()])
small_x = min([p1.getX(),p2.getX()])
big_y = max([p1.getY(),p2.getY()])
small_y = min([p1.getY(),p2.getY()])
x = p.getX()
y = p.getY()
if y <= big_y and y >= small_y and x <= big_x and x >= small_x:
return True
return False
class BlankMarker(Marker):
def __init__(self,i,j,win):
self.rect = Rectangle(Point(10*i,10*j),Point(10*i+10,10*j+10))
self.rect.draw(win)
class StartMarker(Marker):
def __init__(self,i,j,win):
self.rect = Rectangle(Point(10*i,10*j),Point(10*i+10,10*j+10))
self.rect.draw(win)
self.rect.setFill('purple')
Text(Point((10*i+10*i+10)/2,(10*j+10*j+10)/2),"@").draw(win)
class TripleWordScoreMarker(Marker):
def __init__(self,i,j,win):
self.rect = Rectangle(Point(10*i,10*j),Point(10*i+10,10*j+10))
self.rect.draw(win)
self.rect.setFill('red')
Text(Point((10*i+10*i+10)/2,(10*j+10*j+10)/2),"x3").draw(win)
class DoubleWordScoreMarker(Marker):
def __init__(self,i,j,win):
self.rect = Rectangle(Point(10*i,10*j),Point(10*i+10,10*j+10))
self.rect.draw(win)
self.rect.setFill('purple')
Text(Point((10*i+10*i+10)/2,(10*j+10*j+10)/2),"x2").draw(win)
class DoubleLetterScoreMarker(Marker):
def __init__(self,i,j,win):
self.rect = Rectangle(Point(10*i,10*j),Point(10*i+10,10*j+10))
self.rect.draw(win)
self.rect.setFill('cyan')
Text(Point((10*i+10*i+10)/2,(10*j+10*j+10)/2),"DL").draw(win)
class TripleLetterScoreMarker(Marker):
def __init__(self,i,j,win):
self.rect = Rectangle(Point(10*i,10*j),Point(10*i+10,10*j+10))
self.rect.draw(win)
self.rect.setFill('blue')
Text(Point((10*i+10*i+10)/2,(10*j+10*j+10)/2),"TL").draw(win)
def main():
b = Board(500,500)
# Get three markers
word_markers = []
tiles = [1,2,1]
while len(word_markers) < 3:
p = b.win.getMouse()
marker = b.get_marker_clicked(p)
if marker != None:
word_markers.append(marker)
print(b.score(word_markers,tiles))
if __name__=="__main__":
main()
|
SuFizz/Dealer-Loves-Code | starter_first.py | Python | apache-2.0 | 14,255 | 0.023641 | import urllib
import twython
def Crowd_twitter(query):
consumer_key = '*****';
consumer_secret = '*****';
access_token = '******';
access_token_secret = '******';
client_args = {'proxies': {'https': 'http://10.93.0.37:3333'}}
t = twython.Twython(app_key=consumer_key,
app_secret=consumer_secret,
oauth_token=access_token,
oauth_token_secret=access_token_secret,
client_args = client_args)
# query=raw_input("What do you want to search for?");
# query.replace(" ","+");
output = t.search(q=query, result_type='popular', count=10) #purposely restricted to 10 users to protect from Spamming the Twitter server which could cause blacklisting of our server
#print output;
aggregater = []
for i in range(10):
aggregater.append(output[u'statuses'][i][u'text']);
happy = open("positive-words.txt",'r')
sad = open("negative-words.txt",'r')
ha = happy.readlines()
sa = sad.readlines()
happy.close()
sad.close()
for i in range(len(ha)):
ha[i]=ha[i].rstrip()
for i in range(len(sa)):
sa[i]=sa[i].rstrip()
#Put basic sentiment analysis on tweet
posi = 0;
negi = 0;
for i in range(10):
for j in range(len(ha)):
if(ha[j] in aggregater[i]):
posi += 1;
for j in range(len(sa)):
if(sa[j] in aggregater[i]):
negi += 1;
#print "<!DOCTYPE html>\n<html>\n<title>Crowd likes!</title>"
if posi > negi:
return "<h1>CROWD LOVES IT!!:-)</h1>"
elif posi<negi:
return "<h1>CROWD DOESN'T LIKE IT!! :-( </h1>"
else:
return "<h1>CROWD CAN'T DECIDE :-| !!</h1>"
def buildwebpage(product_fk,product_cr,product_am,product_eb,search_query):
# return images,links,names,prices
print "<!DOCTYPE html>\n<html>";
print "\n<h1><em><ul>WELCOME TO DEALERSITE - ONE STOP FOR ALL YOUR SHOPPING</ul></em></h1>\n<body>"
print "<h1>THIS IS WHAT THE CROWD THINKS OF "+search_query+":</h1>"
print Crowd_twitter(search_query)
print "\n<h1>AMAZON</h1>";
for i in range(3):
print "\n<h2>"+product_am[2][i]+"</h2>"
print "<img border=\"0\" src=\""+product_am[0][i]+"\" alt=\"Amazon\">"
print "<a href=\""+product_am[1][i]+"\">CLICK THIS TO TAKE YOU TO AMAZONS PAGE TO BUY THE PRODUCT</a>"
print "\n<p>PRICE : Rs."+product_am[3][i]+"</p>";
print "\n<h1>EBAY</h1>";
for i in range(3):
print "\n<h2>"+product_eb[2][i]+"</h2>"
print "<img border=\"0\" src=\""+product_eb[0][i]+"\" alt=\"EBay\">"
print "<a href=\""+product_eb[1][i]+"\">CLICK THIS TO TAKE YOU TO EBAYS PAGE TO BUY THE PRODUCT</a>"
print "\n<p>PRICE : Rs."+product_eb[3][i]+"</p>";
print "\n<h1>FLIPKART</h1>";
for i in range(3):
print "\n<h2>"+product_fk[2][i]+"</h2>"
print "<img border=\"0\" src=\""+product_fk[0][i]+"\" alt=\"Flipkart\">"
print "<a href=\""+product_fk[1][i]+"\">CLICK THIS TO TAKE YOU TO FLIPKARTS PAGE TO BUY THE PRODUCT</a>"
print "\n<p>PRICE : Rs."+product_fk[3][i]+"</p>";
print "\n<h1>CROMA RETAIL</h1>";
for i in range(3):
print "\n<h2>"+product_cr[2][i]+"</h2>"
print "<img border=\"0\" src=\""+product_cr[0][i]+"\" alt=\"CROMA\">"
print "<a href=\""+product_cr[1][i]+"\">CLICK THIS TO TAKE YOU TO CROMA PAGE TO BUY THE PRODUCT</a>"
print "\n<p>PRICE : "+product_cr[3][i]+"</p>";
print "<a href=\"/comparison.html\"><em><b>CLICK HERE FOR A COMPARISON OF DIFFERENT BRANDS</b></em></a>"
# print "<a href=\"/crowd.html\">CLICK HERE FOR WHAT THE CROWD THINKS OF THE PRODUCT</a>"
print "</body>\n</html>"
def link_fk_actu(product_image):
Flipkart_query = "http://www.flipkart.com/all-categories/pr?p%5B%5D=sort%3Drelevance&sid=search.flipkart.com&q=";
# print "\n\n\n\n\nLINK FK ACTUAL";
# print product_image;
names = [];
for i in range(3):
ind = product_image[i].index("data-pid=")+len("data-pid=\"");
indend = product_image[i].index("data-tracking-products",ind) - 2;
names.append(Flipkart_query + product_image[i][ind:indend]);
return names;
def price_fk(product_image):
# print "\n\n\n\n\nPRICE FK";
# print product_image;
names = [];
for i in range(3):
indend = product_image[i].index(";;");
ind = product_image[i].rfind(";",0,indend-1);
names.append(product_image[i][ind+1:indend]);
return names;
def name_fk(product_image):
# print "\n\n\n\n\nNAME FK";
# print product_image;
| names = [];
for i in range(3):
ind = product_image[i].index("alt")+len("alt=\"");
names.append(product_image[i][ind:].split()[0]);
# product_image[i][ind:indend]);
return names;
def link_fk(product_link):
# print | "\n\n\n\n\nLINK FK";
# print product_link;
beg_string = "www.flipkart.com";
links = [];
for i in range(3):
ind = product_link[i].index("a href=")+len("a href=\"");
indend = product_link[i].index("class") - 2;
links.append(beg_string+product_link[i][ind:indend]);
return links;
def image_fk(product_image):
img = [];
counter = 0;
for i in range(len(product_image)):
# print product_image[i];
try:
ind = product_image[i].index("data-src")+len("data-src=\"");
ind_end1 = 10000;
ind_end2 = 10000;
try:
ind_end1 = product_image[i].index("\"",ind);
except ValueError:
ind_end2 = product_image[i].index("\'",ind);
if ind_end2 < ind_end1:
ind_end = ind_end2;
else:
ind_end = ind_end1;
img.append(product_image[i][ind:ind_end]);
++counter;
except ValueError:
ind = product_image[i].index("src=")+len("src=\"");
ind_end1 = 10000;
ind_end2 = 10000;
try:
ind_end1 = product_image[i].index("\"",ind);
except ValueError:
ind_end2 = product_image[i].index("\'",ind);
if ind_end2 < ind_end1:
ind_end = ind_end2;
else:
ind_end = ind_end1;
img.append(product_image[i][ind:ind_end]);
++counter;
if counter == 3:
break;
return img[:3];
def process_fk(fk_lines):
product_image = [];
product_name = [];
product_otherone = [];
flag = 0;
counter = 0;
prev_line = "";
linenum = 0;
for l in fk_lines:
# print l;
# if "<div class=\'product" in l:
# flag = 1;
linenum += 1;
if "<div class='product" in l:
product_name.append(l);
flag = 1;
# if flag == 0 and "<img src=" in l:
# flag =1;
# continue;
if flag == 1 and "<img src=" in l:
product_image.append(l);
product_otherone.append(prev_line);
++counter;
if(counter==12):
break;
flag = 0;
prev_line = l;
product_image = product_image[1:11];
product_name = product_name[1:11];
product_otherone = product_otherone[0:10];
if(len(product_name)>=10):
teer = link_fk_actu(product_name);
else:
teer = link_fk(product_otherone);
return image_fk(product_image),teer,name_fk(product_image),price_fk(product_name);
#####################################################################################################
def process_am(am_lines):
# print am_lines;
links = [];
images = [];
names = [];
prices = [];
flag = 0;
counter = 0;
#urllib has a very strange behaviour when retrieving webpages - The server hands out slightly difficult code to parse.
flag = 0;
for l in am_lines:
# print 1;
try:
if ("<div id=\"srProductTitle" in l) and ("<a href=\"" in l) and ("src=\"" in l) and ("<br clear=\"all\" />" in l):
# print l;
# break;
ind =l |
Hawaii-Smart-Energy-Project/Maui-Smart-Grid | test/test_msg_types.py | Python | bsd-3-clause | 1,301 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Daniel Zhang (張道博)'
__copyright__ = 'Copyright (c) 2014, University of Hawaii Smart Energy Project'
__license__ = 'https://raw.github' \
'.com/Hawaii-Smart-Energy-Project/Maui-Smart-Grid/master/BSD' \
'-LICENSE.txt'
import unittest
from sek.logger import SEKLogger
from msg_types import MSGAggregationTypes
class MSGTypesTester(unittest.TestCase):
"""
Unit tests for MSG Aggregation Types.
"""
def setUp(self):
self.logger = SEKLogger(__name__, 'DEBUG')
def test_aggregation_types(self):
self.assertTrue(MSGAggregationTypes.weather in MSGAggregationTypes)
self.assertTrue(MSGAggregationTypes.egauge in MSGAggregationTypes)
self.assertTrue(MSGAggregationTypes.circuit in MSGAggregationTypes)
self.assertTrue(MSGAggregationTypes.irradiance in MSGAggregationTypes)
def tearDown(self):
pass
if __name__ == '__main__':
RUN_SELECTED_TESTS = True
if RUN_SELECTED_TESTS:
selected_tests = ['test_aggregation_types']
| mySuite | = unittest.TestSuite()
for t in selected_tests:
mySuite.addTest(MSGTypesTester(t))
unittest.TextTestRunner().run(mySuite)
else:
unittest.main()
|
apyrgio/synnefo | snf-admin-app/synnefo_admin/admin/resources/ips/actions.py | Python | gpl-3.0 | 2,190 | 0.000457 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from collections import OrderedDict
from synnefo.logic import ips
from synnefo_admin.admin.actions import AdminAction, noop
from synnefo_admin.admin.utils import update_actions_rbac, send_admin_email
class IPAction(AdminAction):
"""Class for actions on ips. Derived from AdminAction.
Pre-determined Attributes:
target: ip
"""
def __init__(self, name, f, **kwargs):
"""Initialize the class with provided values."""
AdminAction.__init__(self, name=name, target='ip', f=f, **kwargs)
def check_ip_action(action):
"""Check if an action can apply to an IP.
This is a wrapper for ` | validate_ip_action` of the ips module, that handles
the tupples returned by it.
"""
def check(ip, action):
res, _ = ips.validate_ip_action(ip, action)
return res
return lambda ip: check(ip, action)
def generate_actions():
"""Create a list of actions on ips."""
actions = OrderedDict()
|
actions['destroy'] = IPAction(name='Destroy', c=check_ip_action("DELETE"),
f=ips.delete_floating_ip, karma='bad',
caution_level='dangerous',)
actions['reassign'] = IPAction(name='Reassign to project', f=noop,
karma='neutral', caution_level='dangerous',)
actions['contact'] = IPAction(name='Send e-mail', f=send_admin_email,)
update_actions_rbac(actions)
return actions
cached_actions = generate_actions()
|
cortext/crawtextV2 | ~/venvs/crawler/lib/python2.7/site-packages/lxml/html/diff.py | Python | mit | 30,500 | 0.003213 | import difflib
from lxml import etree
from lxml.html import fragment_fromstring
import re
__all__ = ['html_annotate', 'htmldiff']
try:
from html import escape as html_escape
except ImportError:
from cgi import escape as html_escape
try:
_unicode = unicode
except NameError:
# Python 3
_unicode = str
try:
basestring
except NameError:
# Python 3
basestring = str
############################################################
## Annotation
############################################################
def default_markup(text, version):
return '<span title="%s">%s</span>' % (
html_escape(_unicode(version), 1), text)
def html_annotate(doclist, markup=default_markup):
"""
doclist should be ordered from oldest to newest, like::
>>> version1 = 'Hello World'
>>> version2 = 'Goodbye World'
>>> print(html_annotate([(ver | sion1, 'version 1'),
... (version2, 'version 2')]))
<span title="version 2">Goodbye</span> <span title="version 1">World</span>
The documents must be *fragments* (str/UTF8 or unicode), not
complete documents
The markup argument is a | function to markup the spans of words.
This function is called like markup('Hello', 'version 2'), and
returns HTML. The first argument is text and never includes any
markup. The default uses a span with a title:
>>> print(default_markup('Some Text', 'by Joe'))
<span title="by Joe">Some Text</span>
"""
# The basic strategy we have is to split the documents up into
# logical tokens (which are words with attached markup). We then
# do diffs of each of the versions to track when a token first
# appeared in the document; the annotation attached to the token
# is the version where it first appeared.
tokenlist = [tokenize_annotated(doc, version)
for doc, version in doclist]
cur_tokens = tokenlist[0]
for tokens in tokenlist[1:]:
html_annotate_merge_annotations(cur_tokens, tokens)
cur_tokens = tokens
# After we've tracked all the tokens, we can combine spans of text
# that are adjacent and have the same annotation
cur_tokens = compress_tokens(cur_tokens)
# And finally add markup
result = markup_serialize_tokens(cur_tokens, markup)
return ''.join(result).strip()
def tokenize_annotated(doc, annotation):
"""Tokenize a document and add an annotation attribute to each token
"""
tokens = tokenize(doc, include_hrefs=False)
for tok in tokens:
tok.annotation = annotation
return tokens
def html_annotate_merge_annotations(tokens_old, tokens_new):
"""Merge the annotations from tokens_old into tokens_new, when the
tokens in the new document already existed in the old document.
"""
s = InsensitiveSequenceMatcher(a=tokens_old, b=tokens_new)
commands = s.get_opcodes()
for command, i1, i2, j1, j2 in commands:
if command == 'equal':
eq_old = tokens_old[i1:i2]
eq_new = tokens_new[j1:j2]
copy_annotations(eq_old, eq_new)
def copy_annotations(src, dest):
"""
Copy annotations from the tokens listed in src to the tokens in dest
"""
assert len(src) == len(dest)
for src_tok, dest_tok in zip(src, dest):
dest_tok.annotation = src_tok.annotation
def compress_tokens(tokens):
"""
Combine adjacent tokens when there is no HTML between the tokens,
and they share an annotation
"""
result = [tokens[0]]
for tok in tokens[1:]:
if (not result[-1].post_tags and
not tok.pre_tags and
result[-1].annotation == tok.annotation):
compress_merge_back(result, tok)
else:
result.append(tok)
return result
def compress_merge_back(tokens, tok):
""" Merge tok into the last element of tokens (modifying the list of
tokens in-place). """
last = tokens[-1]
if type(last) is not token or type(tok) is not token:
tokens.append(tok)
else:
text = _unicode(last)
if last.trailing_whitespace:
text += last.trailing_whitespace
text += tok
merged = token(text,
pre_tags=last.pre_tags,
post_tags=tok.post_tags,
trailing_whitespace=tok.trailing_whitespace)
merged.annotation = last.annotation
tokens[-1] = merged
def markup_serialize_tokens(tokens, markup_func):
"""
Serialize the list of tokens into a list of text chunks, calling
markup_func around text to add annotations.
"""
for token in tokens:
for pre in token.pre_tags:
yield pre
html = token.html()
html = markup_func(html, token.annotation)
if token.trailing_whitespace:
html += token.trailing_whitespace
yield html
for post in token.post_tags:
yield post
############################################################
## HTML Diffs
############################################################
def htmldiff(old_html, new_html):
## FIXME: this should take parsed documents too, and use their body
## or other content.
""" Do a diff of the old and new document. The documents are HTML
*fragments* (str/UTF8 or unicode), they are not complete documents
(i.e., no <html> tag).
Returns HTML with <ins> and <del> tags added around the
appropriate text.
Markup is generally ignored, with the markup from new_html
preserved, and possibly some markup from old_html (though it is
considered acceptable to lose some of the old markup). Only the
words in the HTML are diffed. The exception is <img> tags, which
are treated like words, and the href attribute of <a> tags, which
are noted inside the tag itself when there are changes.
"""
old_html_tokens = tokenize(old_html)
new_html_tokens = tokenize(new_html)
result = htmldiff_tokens(old_html_tokens, new_html_tokens)
result = ''.join(result).strip()
return fixup_ins_del_tags(result)
def htmldiff_tokens(html1_tokens, html2_tokens):
""" Does a diff on the tokens themselves, returning a list of text
chunks (not tokens).
"""
# There are several passes as we do the differences. The tokens
# isolate the portion of the content we care to diff; difflib does
# all the actual hard work at that point.
#
# Then we must create a valid document from pieces of both the old
# document and the new document. We generally prefer to take
# markup from the new document, and only do a best effort attempt
# to keep markup from the old document; anything that we can't
# resolve we throw away. Also we try to put the deletes as close
# to the location where we think they would have been -- because
# we are only keeping the markup from the new document, it can be
# fuzzy where in the new document the old text would have gone.
# Again we just do a best effort attempt.
s = InsensitiveSequenceMatcher(a=html1_tokens, b=html2_tokens)
commands = s.get_opcodes()
result = []
for command, i1, i2, j1, j2 in commands:
if command == 'equal':
result.extend(expand_tokens(html2_tokens[j1:j2], equal=True))
continue
if command == 'insert' or command == 'replace':
ins_tokens = expand_tokens(html2_tokens[j1:j2])
merge_insert(ins_tokens, result)
if command == 'delete' or command == 'replace':
del_tokens = expand_tokens(html1_tokens[i1:i2])
merge_delete(del_tokens, result)
# If deletes were inserted directly as <del> then we'd have an
# invalid document at this point. Instead we put in special
# markers, and when the complete diffed document has been created
# we try to move the deletes around and resolve any problems.
result = cleanup_delete(result)
return result
def expand_tokens(tokens, equal=False):
"""Given a list of tokens, return a generator of the chunks of
text for the data in the tokens.
"""
fo |
WuPei/cv_reconstructor | Polygon.py | Python | mit | 486 | 0.00823 | # CS4243: Computer Vision and Pattern Recognition
| # Zhou Bin
# 29th, Oct, 2014
import numpy as np
from Vertex import Vertex
class Polygon:
def __init__(self, newVertexList, newTexelList):
| # Create list to store all vertex
self.Vertex = []
for i in newVertexList:
self.Vertex.append(i)
# Create list to store all texel value
self.Texel = []
for i in newTexelList:
self.Texel.append(i)
|
Axilent/sharrock | sharrock_multiversion_example/descriptors/one.py | Python | bsd-3-clause | 208 | 0.004808 | """
1.0 version of the API.
| """
from sharrock.desc | riptors import Descriptor
version = '1.0'
class MultiversionExample(Descriptor):
"""
This is the first version of this particular function.
"""
|
tboyce021/home-assistant | homeassistant/components/switcher_kis/__init__.py | Python | apache-2.0 | 6,458 | 0.001858 | """Home Assistant Switcher Component."""
from asyncio import QueueEmpty, TimeoutError as Asyncio_TimeoutError, wait_for
from datetime import datetime, timedelta
import logging
from typing import Dict, Optional
from aioswitcher.api import SwitcherV2Api
from aioswitcher.bridge import SwitcherV2Bridge
from aioswitcher.consts import COMMAND_ON
import voluptuous as vol
from homeassistant.auth.permissions.const import POLICY_EDIT
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.const import ATTR_ENTITY_ID, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback, split_entity_id
from homeassistant.exceptions import Unauthorized, UnknownUser
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.discovery import async_listen_platform, async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import (
ContextType,
DiscoveryInfoType,
EventType,
HomeAssistantType,
ServiceCallType,
)
from homeassistant.loader import bind_hass
_LOGGER = logging.getLogger(__name__)
DOMAIN = "switcher_kis"
CONF_AUTO_OFF = "auto_off"
CONF_TIMER_MINUTES = "timer_minutes"
CONF_DEVICE_ID = "device_id"
CONF_DEVICE_PASSWORD = "device_password"
CONF_PHONE_ID = "phone_id"
DATA_DEVICE = "device"
SIGNAL_SWITCHER_DEVICE_UPDATE = "switcher_device_update"
ATTR_AUTO_OFF_SET = "auto_off_set"
ATTR_ELECTRIC_CURRENT = "electric_current"
ATTR_REMAINING_TIME = "remaining_time"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_PHONE_ID): cv.string,
vol.Required(CONF_DEVICE_ID): cv.string,
vol.Required(CONF_DEVICE_PASSWORD): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_SET_AUTO_OFF_NAME = "set_auto_off"
SERVICE_SET_AUTO_OFF_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(CONF_AUTO_OFF): cv.time_period_str,
}
)
SERVICE_TURN_ON_WITH_TIMER_NAME = "turn_on_with_timer"
SERVICE_TURN_ON_WITH_TIMER_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TIMER_MINUTES): vol.All(
cv.positive_int, vol.Range(min=1, max=90)
),
}
)
@bind_hass
async def _validate_edit_permission(
hass: HomeAssistantType, context: ContextType, entity_id: str
) -> None:
"""Use for validating user control permissions."""
splited = split_entity_id(entity_id)
if splited[0] != SWITCH_DOMAIN or not splited[1].startswith(DOMAIN):
raise Unauthorized(context=context, entity_id=entity_id, permission=POLICY_EDIT)
user = await hass.auth.async_get_user(context.user_id)
if user is None:
raise UnknownUser(context=context, entity_id=entity_id, permission=POLICY_EDIT)
if not user.permissions.check_entity(entity_id, POLICY_EDIT):
raise Unauthorized(context=context, entity_id=entity_id, permission=POLICY_EDIT)
async def async_setup(hass: HomeAssistantType, config: Dict) -> bool:
"""Set up the switcher component."""
phone_id = config[DOMAIN][CONF_PHONE_ID]
device_id = config[DOMAIN][CONF_DEVICE_ID]
device_password = config[DOMAIN][CONF_DEVICE_PASSWORD]
v2bridge = SwitcherV2Bridge(hass.loop, phone_id, device_id, device_password)
await v2bridge.start()
async def async_stop_bridge(event: EventType) -> None:
"""On Home Assistant stop, gracefully stop the bridge if running."""
await v2bridge.stop()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_bridge)
try:
device_data = await wait_for(v2bridge.queue.get(), timeout=10.0)
except (Asyncio_TimeoutError, RuntimeError):
_LOGGER.exception("Failed to get response from device")
await v2bridge.stop()
return False
hass.data[DOMAIN] = {DATA_DEVICE: device_data}
async def async_switch_platform_discovered(
platform: str, discovery_info: DiscoveryInfoType
) -> None:
"""Use for registering services after switch platform is discovered."""
if platform != DOMAIN:
return
async def async_set_auto_off_service(service: ServiceCallType) -> None:
"""Use for handling setting device auto-off service calls."""
await _validate_edit_permission(
hass, service.context, service.data[ATTR_ENTITY_ID]
)
async with SwitcherV2Api(
hass.loop, device_data.ip_addr, phone_id, device_id, device_password
) as swapi:
await swapi.set_auto_shutdown(service.data[CONF_AUTO_OFF])
async def async_turn_on_with_timer_service(service: ServiceCallType) -> None:
"""Use for handling turning device on with a timer service calls."""
await _validate_edit_permission(
hass, service.context, service.data[ATTR_ENTITY_ID]
)
async with SwitcherV2Api(
hass.loop, device_data.ip_addr, phone_id, device_id, device_password
) as swapi:
await swapi.control_device(COMMAND_ON, service.data[CONF_TIMER_MINUTES])
hass.services.async_register(
DOMAIN,
SERVICE_SET_AUTO_OFF_NAME,
async_set_auto_off_service,
schem | a=SERVICE_SET_AUTO_OFF_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_TURN_ON_WITH_TIMER_NAME,
async_turn_on_with_timer_service,
schema=SERVICE_TURN_ON_WITH_TIMER_SCHEMA,
)
async_listen_platform(hass, SWITCH_DOMAIN, async_switch_platform_discovered)
hass.async_create_task(async_load_platform(hass, SWITCH_DOMAIN, | DOMAIN, {}, config))
@callback
def device_updates(timestamp: Optional[datetime]) -> None:
"""Use for updating the device data from the queue."""
if v2bridge.running:
try:
device_new_data = v2bridge.queue.get_nowait()
if device_new_data:
async_dispatcher_send(
hass, SIGNAL_SWITCHER_DEVICE_UPDATE, device_new_data
)
except QueueEmpty:
pass
async_track_time_interval(hass, device_updates, timedelta(seconds=4))
return True
|
opendatakosovo/election-results-visualizer | runserver.py | Python | gpl-2.0 | 745 | 0 | import argparse
from erv import create_app
# Create the flask app.
app = | create_app()
# Run the app
if __name__ == '__main__':
# Define the arguments.
parser = argparse.ArgumentParser()
parser.add_argument(
'--host',
default='0.0.0.0',
help='Host to bind to: [%(default)s].')
parser.add_argument(
'--port',
type=int,
default=app.config['SERVER_PORT'],
help='Port to listen to: [%(default)s].')
parser.add_argument(
| '--debug',
action='store_true',
default=False,
help='Debug mode: [%(default)s].')
# Parse arguemnts and run the app.
args = parser.parse_args()
app.run(debug=args.debug, host=args.host, port=args.port)
|
llou/panopticon | panopticon/core/util/comparer.py | Python | gpl-3.0 | 5,798 | 0.005347 | # comparer.py is part of Panopticon.
# Panopticon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Panopticon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Panopticon. If not, see <http://www.gnu.org/licenses/>.
from difflib import SequenceMatcher
from panopticon.core.exceptions import PanopticonError
class PanopticonTreeComparerError(PanopticonError):
pass
def error(message):
raise PanopticonTreeComparerError(message)
INSERTED = "insert"
DELETED = "delete"
CHANGED = "change"
def equal(obj1, obj2):
return obj1 == obj2
def compare_dicts(first, second, compare_function=equal):
skf = set(first.keys())
sks = set(second.keys())
insertedk = skf.difference(sks)
deletedk = sks.difference(skf)
commonk = tuple(skf.intersection(sks))
cvf = map(lambda x:first[x], commonk)
cvs = map(lambda x:second[x], commonk)
changedk = []
for k, vf, vs in zip(commonk, cvf, cvs):
if type(vf) != type(vs):
deletedk.add(k)
insertedk.add(k)
elif vf != vs:
changedk.append(k)
return tuple(insertedk), tuple(deletedk), tuple(changedk)
def compare_tuples(first, second):
matcher = SequenceMatcher(a=second, b=first)
inserted = []
deleted = []
for tag, i1, i2, j1, j2 in matcher.get_opcodes():
if tag == "insert":
inserted.extend(range(j1, j2))
if tag == "delete":
deleted.extend(range(i1, i2))
if tag == "replace":
inserted.extend(range(j1, j2))
deleted.extend(range(i1, i2))
return tuple(inserted), tuple(deleted)
def compare_sets(first, second):
inserted = tuple(first - second)
deleted = tuple(second - first)
return inserted, deleted
PATH_SEP = "/"
class TreeComparer(object):
@classmethod
def type_checker(cls, first, second):
return False
@classmethod
def compare(cls, first, second, name="", parent=None):
for comp in cls.comparers:
if comp.type_checker(first, second):
return comp(first, second, name=name, parent=parent).get_changes()
error("Unable to find a comparer to match '%s' with '%s'" % (str(first),
(second)))
def __init__(self, first, second, name="", parent=None):
self.first, self.second = self.coerce_type(first, second)
self.name = name
self.parent = parent
if self.is_root:
self.path = PATH_SEP
elif self.is_subroot:
self.path = PATH_SEP + self.name
else:
self.path = self.parent.path + PATH_SEP + self.name
@property
def is_root(self):
return self.parent is None
@property
def is_subroot(self):
return not self.is_root and self.parent.is_root
def coerce_type(self, first, second):
return first, second
def get_changes(self):
raise NotImplementedError()
def get_path(self, name):
if self.is_root:
return PATH_SEP + name
else:
return self.path + PATH_SEP + name
class TreeStringComparer(TreeComparer):
coercible_types = str, int, float
@classmethod
def type_checker(cls, first, second):
return type(first) in cls.coercible_types and type(second) in cls.coercible_types
@classmethod
def coerce_type(self, first, second):
return str(first), str(second)
def get_changes(self):
return []
class UniqueTypeTreeComparer(TreeComparer):
tree_type = object
@classmethod
def type_checker(cls, first, second):
return isinstance(first, cls.tree_type) and isinstance(second, cls.tree_type)
class TreeDictComparer(UniqueTypeTreeComparer):
tree_type = dict
def get_changes(self):
result = []
inserted, deleted, changed = compare_dicts(self.first, self.second)
for name in inserted:
result.append((INSERTED, self.get_path(n | ame), self.first[name]))
for name | in deleted:
result.append((DELETED, self.get_path(name), self.second[name]))
for name in changed:
result.append((CHANGED, self.get_path(name), self.first[name]))
cs = (self.compare(self.first[name], self.second[name],
name=name, parent=self))
result.extend(cs)
return result
class TreeTupleComparer(UniqueTypeTreeComparer):
tree_type = tuple
def get_changes(self):
result = []
inserted, deleted = compare_tuples(self.first, self.second)
for i in inserted:
result.append((INSERTED, self.get_path(str(i)), self.first[i]))
for i in deleted:
result.append((DELETED, self.get_path(str(i)), self.second[i]))
return result
class TreeSetComparer(UniqueTypeTreeComparer):
tree_type = set
def get_changes(self):
result = []
inserted, deleted = compare_sets(self.first, self.second)
for value in inserted:
result.append((INSERTED, self.get_path(str(value)), value))
for value in deleted:
result.append((DELETED, self.get_path(str(value)), value))
return result
TreeComparer.comparers = (TreeStringComparer, TreeDictComparer,
TreeTupleComparer,TreeSetComparer)
def compare_trees(first, second):
return TreeComparer.compare(first, second)
|
cubledesarrollo/django-cuble-project | project_name/project_name/settings/production.py | Python | mit | 2,222 | 0.005851 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from os import environ
from .base import *
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
def get_env_setting(setting):
""" Get the environment setting or return exception """
try:
return environ[setting]
except KeyError:
error_msg = "Set the %s env variable" % setting
raise ImproperlyConfigured(error_msg)
########## HOST CONFIGURATION
# See: https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production
ALLOWED_HOSTS = []
########## END HOST CONFIGURATION
########## EMAIL CONFIGURATION
# | See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.gmail.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-password
EMAIL_HOST_PASSWORD = enviro | n.get('EMAIL_HOST_PASSWORD', '')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-user
EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', 'your_email@example.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = environ.get('EMAIL_PORT', 587)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-use-tls
EMAIL_USE_TLS = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
DATABASES = {}
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {}
########## END CACHE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = get_env_setting('SECRET_KEY')
########## END SECRET CONFIGURATION |
aldenjenkins/foobargamingwebsite | bans/apps.py | Python | bsd-3-clause | 124 | 0 | from __future__ import unicode | _literals
from django.apps import AppConf | ig
class BansConfig(AppConfig):
name = 'bans'
|
MetaSUB/ModuleUltra | moduleultra/daemon/config.py | Python | mit | 3,078 | 0.00065 |
from yaml import load
from os import environ
from os.path import join, isfile
from ..module_ultra_repo import ModuleUltraRepo
from ..module_ultra_config import ModuleUltraConfig
class RepoDaemonConfig:
"""Represent a MU repo to the MU daemon."""
def __init__(self, **kwargs):
self.repo_name = kwargs['repo_name']
self.repo_path = kwargs['repo_path']
self.pipeli | nes = kwargs['pipelines']
def get_repo(self):
"""Return the MU repo that this represents."""
return ModuleUltraRepo(self.repo_path)
def get_pipeline_list(self):
"""Return a list of (pipe_name, version)."""
return [(pipe['name'], pipe['version']) for pipe in self.pipelines]
def get_pipeline_tolerance(self, pipe_name):
| """Return tolerance for the pipeline."""
for pipe in self.pipelines:
if pipe['name'] == pipe_name:
return pipe.get('tolerance', 0)
def get_pipeline_endpts(self, pipe_name):
"""Return a list of endpts or None."""
return None
def get_pipeline_excluded_endpts(self, pipe_name):
"""Return a list of excluded endpts or None."""
return None
class DaemonConfig:
"""Store config information for the MU daemon."""
def __init__(self, repos, total_jobs=10, run_local=True, pipeline_configs={}):
self.repos = repos
self.total_jobs = int(total_jobs)
self.run_local = run_local
self.pipeline_configs = pipeline_configs
def list_repos(self):
"""Return a list of RepoDaemonConfigs."""
repo_configs = []
for repo_name, repo_path, pipelines in self.repos:
repo_configs.append(RepoDaemonConfig(**{
'repo_name': repo_name,
'repo_path': repo_path,
'pipelines': pipelines,
}))
return repo_configs
def get_pipeline_run_config(self, pipe_name, pipe_version):
"""Return a filepath for the config to be used or None."""
return None
@classmethod
def get_daemon_config_filename(ctype):
try:
return environ['MODULE_ULTRA_DAEMON_CONFIG']
except KeyError:
config_dir = ModuleUltraConfig.getConfigDir()
config_filename = join(config_dir, 'daemon_config.yaml')
if isfile(config_filename):
return config_filename
assert False, "No daemon config found"
@classmethod
def load_from_yaml(ctype, yaml_filename=None):
yaml_filename = yaml_filename if yaml_filename else ctype.get_daemon_config_filename()
raw_config = load(open(yaml_filename))
raw_repos = raw_config['repos']
repo_list = [
(raw_repo['name'], raw_repo['path'], raw_repo['pipelines'])
for raw_repo in raw_repos
]
return DaemonConfig(
repo_list,
total_jobs=raw_config.get('num_jobs', 10),
run_local=raw_config.get('run_on_cluster', True),
pipeline_configs=raw_config.get('pipeline_configs', {})
)
|
kato-masahiro/particle_filter_on_episode | PFoE_module/.test_particles_resampling.py | Python | mit | 889 | 0.012739 | #coding:utf-8
"""
functionモジュールのparticle_resampling関数をテストする
"""
from functions import particles_resampling
import pfoe
robot1 = pfoe.Robot(sensor=4,choice=3,particle_num=100)
#case1:パーティクルの分布・重みは等分
for i in range(100):
robot1.particles.d | istribution[i] = i % 5
robot1.particles.weight[i] = 1.0 / 100.0
robot1.particles = particles_resampling(robot1.particles,5)
print robot1.particles.weight
print robot1.particles.distribution
#case2:パーティクルの分布は等分、重みはイベント0に集中
for i in range(100):
robot1.particles.distri | bution[i] = i % 5
if i % 5 == 0:
robot1.particles.weight[i] = 1.0 / 20.0
else:
robot1.particles.weight[i] = 0.0
robot1.particles = particles_resampling(robot1.particles,5)
print robot1.particles.weight
print robot1.particles.distribution
|
jjdmol/LOFAR | CEP/Imager/AWImager2/src/addImagingInfo.py | Python | gpl-3.0 | 16,822 | 0.01183 | # addImagingInfo.py: Python function to add meta info to a CASA image
# Copyright (C) 2012
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# $Id$
#
# @author Ger van Diepen
import os
import pyrap.tables as pt
import lofar.parmdb as pdb
import lofar.get_rms_noise as grn
import numpy as np
""" Add a subtable of an MS to the image """
def addSubTable (image, msName, subName, removeColumns=[]):
# Make a selection of all rows/columns of the MS subtable
sel = pt.taql ("select * from '" + msName + "/" + subName + "'")
# Remove the required columns.
if len(removeColumns) > 0:
sel.removecols (removeColumns)
# Strip LOFAR_ from column names
for col in sel.colnames():
if len(col) > 6 and col[:6] == "LOFAR_":
sel.renamecol (col, col[6:])
# Copy the subtable to the image and add it as a subtable.
# Always prefix subtable name with LOFAR_.
subNameOut = subName;
if len(subNameOut) < 6 or subNameOut[:6] != "LOFAR_":
subNameOut = "LOFAR_" + subNameOut
subtab = sel.copy (image.name() + "/" + subNameOut, deep=True)
image.putkeyword ("ATTRGROUPS." + subNameOut, subtab)
| print "Added subtable", subNameOut, "containing", subtab.nrows(), "rows"
subtab.close()
sel.close()
""" Create the empty LOFAR_QUALITY subtable """
def addQualityTable (image, usedCounts, visCounts):
# Create the table using TaQL.
tab = pt.taql ("create table '" + | image.name() + "/LOFAR_QUALITY' " +
"QUALITY_MEASURE string, VALUE string, FLAG_ROW bool")
# Get the rms noise of I,Q,U,V as list of tuples.
noises = grn.get_rms_noise (image.name())
for noise in noises:
row = tab.nrows()
tab.addrows (2)
tab.putcell ("QUALITY_MEASURE", row, "RMS_NOISE_"+noise[0])
tab.putcell ("VALUE", row, str(noise[1]))
tab.putcell ("FLAG_ROW", row, False)
perc = 100.
nvis = 1.0 * visCounts.sum()
if nvis > 0:
# Get flagged percentage to 2 decimals.
perc = int(10000. * (1 - usedCounts.sum() / nvis) + 0.5) / 100.
tab.putcell ("QUALITY_MEASURE", row+1, "PERC_FLAGGED_VIS")
tab.putcell ("VALUE", row+1, str(perc)[:5])
tab.putcell ("FLAG_ROW", row+1, False)
tab.flush()
image.putkeyword ("ATTRGROUPS." + "LOFAR_QUALITY", tab)
print "Added subtable LOFAR_QUALITY containing", tab.nrows(), "rows"
tab.close()
""" Create the LOFAR_ORIGIN subtable and fill from all MSs """
def addOriginTable (image, msNames):
# Concatenate the OBSERVATION subtables of all MSs.
obsNames = [name + "/OBSERVATION" for name in msNames]
obstab = pt.table(obsNames, ack=False)
# Select and rename the required columns.
# Some columns are not in the LOFAR_OBSERVATION table. Create them by
# selecting a similarly typed column and fill them later.
selstr = "LOFAR_OBSERVATION_ID as OBSERVATION_ID"
selstr += ",LOFAR_SUB_ARRAY_POINTING as SUB_ARRAY_POINTING"
selstr += ",LOFAR_SUB_ARRAY_POINTING as SUBBAND"
selstr += ",LOFAR_SUB_ARRAY_POINTING as NUM_CHAN"
selstr += ",LOFAR_SUB_ARRAY_POINTING as NTIME_AVG"
selstr += ",LOFAR_SUB_ARRAY_POINTING as NCHAN_AVG"
selstr += ",LOFAR_OBSERVATION_FREQUENCY_MIN as CHANNEL_WIDTH"
selstr += ",LOFAR_OBSERVATION_FREQUENCY_MIN as EXPOSURE"
selstr += ",LOFAR_OBSERVATION_FREQUENCY_MIN as FREQUENCY_MIN"
selstr += ",LOFAR_OBSERVATION_FREQUENCY_MAX as FREQUENCY_MAX"
selstr += ",LOFAR_OBSERVATION_FREQUENCY_CENTER as FREQUENCY_CENTER"
selstr += ",LOFAR_OBSERVATION_START as START"
selstr += ",LOFAR_OBSERVATION_END as END"
selstr += ",FLAG_ROW"
sel = obstab.select(selstr)
# Copy the subtable to the image and add it as a subtable.
subtab = sel.copy (image.name() + "/" + "LOFAR_ORIGIN", deep=True)
subtab = pt.table (image.name() + "/" + "LOFAR_ORIGIN", readonly=False,
ack=False)
obstab.close()
image.putkeyword ("ATTRGROUPS." + "LOFAR_ORIGIN", subtab)
# Set the correct units of columns to update.
subtab.putcolkeyword ("CHANNEL_WIDTH", "QuantumUnits", ["Hz"])
subtab.putcolkeyword ("EXPOSURE", "QuantumUnits", ["s"])
subtab.putcolkeyword ("START", "MEASINFO", {"Ref":"UTC", "type":"epoch"})
subtab.putcolkeyword ("END", "MEASINFO", {"Ref":"UTC", "type":"epoch"})
# Update the columns not in OBSERVATION table.
# Get EXPOSURE from first row in main tables.
# Get NUM_CHAN from SPECTRAL_WINDOW subtables.
# Calculate CHANNEL_WIDTH (convert from MHz to Hz).
# Get SUBBAND from MS name.
for i in range(len(msNames)):
t = pt.table(msNames[i], ack=False)
subtab.putcell ("EXPOSURE", i, t.getcell("EXPOSURE", 0))
t1 = pt.table(t.getkeyword("SPECTRAL_WINDOW"), ack=False)
numchan = t1.getcell("NUM_CHAN", 0)
subtab.putcell ("NUM_CHAN", i, numchan)
freqs = t1.getcell("CHAN_FREQ", 0);
fwidths = t1.getcell("CHAN_WIDTH", 0);
sfreq = freqs[0] - 0.5*fwidths[0];
efreq = freqs[-1] + 0.5*fwidths[-1]
subtab.putcell ("FREQUENCY_MIN", i, sfreq);
subtab.putcell ("FREQUENCY_MAX", i, efreq);
subtab.putcell ("FREQUENCY_CENTER", i, t1.getcell("REF_FREQUENCY",0));
subtab.putcell ("CHANNEL_WIDTH", i, fwidths[0])
# Determine the averaging factors.
avgfreq = 1
avgtime = 1
if ("LOFAR_FULL_RES_FLAG" in t.colnames()):
avgfreq = t.getcolkeyword ("LOFAR_FULL_RES_FLAG", "NCHAN_AVG")
avgtime = t.getcolkeyword ("LOFAR_FULL_RES_FLAG", "NTIME_AVG")
subtab.putcell ("NCHAN_AVG", i, avgfreq)
subtab.putcell ("NTIME_AVG", i, avgtime)
t1.close()
# Determine nr of data points flagged
t.close()
subband = 0
inx = msNames[i].find ("SB")
if inx>= 0:
try:
subband = int(msNames[i][inx+2:inx+5])
except:
pass
subtab.putcell ("SUBBAND", i, subband)
# Ready
subtab.close()
sel.close()
print "Added subtable LOFAR_ORIGIN containing", len(msNames), "rows"
""" Create the LOFAR_SOURCE subtable and fill from the SourceDB """
def addSourceTable (image, sourcedbName, minTime, maxTime):
# Create the table using TaQL.
tab = pt.taql ("create table '" + image.name() + "/LOFAR_SOURCE' " +
"SOURCE_ID int, \TIME double, INTERVAL double, " +
"NUM_LINES int, NAME string, " +
"DIRECTION double shape=[2], " +
"PROPER_MOTION double shape=[2], " +
"FLUX double shape=[4], " +
"SPINX double, REF_FREQUENCY double, " +
"SHAPE double shape=[3]")
tab.putcolkeyword ("TIME", "QuantumUnits", ["s"])
tab.putcolkeyword ("INTERVAL", "QuantumUnits", ["s"])
tab.putcolkeyword ("DIRECTION", "QuantumUnits", ["rad"])
tab.putcolkeyword ("PROPER_MOTION", "QuantumUnits", ["rad/s"])
tab.putcolkeyword ("FLUX", "QuantumUnits", ["Jy"])
tab.putcolkeyword ("REF_FREQUENCY", "QuantumUnits", ["MHz"])
tab.putcolkeyword ("SHAPE", "QuantumUnits", ["rad", "rad", "rad"])
tab.putcolkeyword ("TIME", "MEASINFO", {"Ref":"UTC", "type":"epoch"})
tab.putcolkeyword ("DIRECTION", "MEASINFO", {"Ref":"J2000", "type":"direction"})
tab.flush()
image.putkeyword ("ATTRGROUPS." + "LOFAR_SO |
radiasoft/optics | code_drivers/shadow/driver/shadow_driver_setting.py | Python | apache-2.0 | 330 | 0 | from optics.driver.abstract_driver_setting import AbstractDriverSetting
class ShadowDriverSetting(AbstractDriverSetting):
def __init__(self):
from code_drivers.shadow.driver.shadow_driver imp | ort ShadowDri | ver
AbstractDriverSetting.__init__(self,
driver=ShadowDriver())
|
AAFC-MBB/galaxy-cloudman-playbook | roles/galaxyprojectdotorg.galaxy/files/makepyc.py | Python | mit | 506 | 0 | #!/usr/bin/en | v python
import sys
import compileall
from os import walk, unlink
from os.path import join, splitext, exists
assert sys.argv[1], "usage: makepyc /path/to/lib"
for root, dirs, files in walk(sys.argv[1]):
for name in files:
if name.endswith('.pyc'):
pyc = join(root, name)
py = splitext(pyc)[0] + '.py'
if not exists(py):
print 'Removing orphaned', pyc, '...'
unlin | k(pyc)
compileall.compile_dir(sys.argv[1])
|
dsavoiu/kafe2 | examples/004_constraints/generate_data.py | Python | gpl-3.0 | 1,268 | 0.003155 | import numpy as np
import matplotlib.pyplot as plt
from kafe2 import XYContainer
err_val_x = 0.001
err_val_y = 0.01
num_datapoints = 121
l, delta_l = 10.0, np.random.randn() * 0.001
r, delta_r = 0.052, np.random.randn() * 0.001
g_e = 9.780 # gravitational pull at the equator
y_0, delta_y_0 = 0.6, np.random.randn() * 0.006 # 0.01 relative
c, delta_c = 0.01, np.random.randn() * 0.0005
x = np.linspace(start=0.0, stop=60.0, num=num_datapoints, endpoint=True)
delta_x = np.random.randn(num_datapoints) * err_val_x
print("T: %s" % (2.0 * np.pi * np.sqrt(l / g)))
print("M: %s" % (4/3 * np.pi * r ** 3 * 7874))
def damped_harmonic_oscillator(x, y_0, l, r, g, c):
l_total = l + r
omega_0 = np.sqrt(g / l_total)
omega_d = np.sqrt(omega_0 ** 2 - c ** 2)
return y_0 * np.exp(-c * x) | * (np.cos(omega_d * x) + c / omega_d * np.sin(omega_d * x))
y = damped_harmonic_oscillator(
x + delta_x,
y_0 + delta_y_0,
l + delta_l,
r + delta_r,
g_e,
c + delta_c
)
y += np.random.randn(num_datapoints) * err_val_y
# Optional: plot the data
#plt.plot(_x, _y, '+')
#plt.show()
data = XYContainer(x_data=x, y_data=y)
data.add_error(axis='x', err_val=err_val_x)
data.add_error(axis='y', err_va | l=err_val_y)
data.to_file(filename='data.yml')
|
moodpulse/l2 | users/management/commands/price_import.py | Python | mit | 1,880 | 0.002307 | from decimal import Decimal
from django.core.management.base import BaseCommand
from openpyxl import load_workbook
from contracts.models import PriceName, PriceCoast
from directory.models import Researches
class Command(BaseCommand):
def add_arguments(self, parser):
"""
:param path - файл с картами пациентов + диагноз Д-учета
"""
parser.add_argument('path', type=str)
def handle(self, *args, **kwargs):
"""
Испорт цен услуг
Если услуга(id) существует записать в новый ф-л уже существующие, иначе создать новую запись
:param args:
:param kwargs:
:return:
"""
fp = kwargs["path"]
self.stdout.write("Path: " + fp)
wb = load_workbook(filename=fp)
ws = wb[wb.sheetnames[0]]
starts = False
identify = 0
price_code = 0
coast = 0
for row in ws.rows:
cells = [str(x.value) for x in row]
if not starts:
if "id" in cells and "код_прайс" in cells and "цена" in cells:
starts = True
identify = cells.index("id")
price_code = cells.index("код_прайс")
coast = cells.index("цена")
else:
| price_obj = PriceName.objects.filter(pk=int(cells[price_code]) | ).first()
research_obj = Researches.objects.filter(pk=int(cells[identify])).first()
if cells[coast]:
coast_value = Decimal(cells[coast])
if price_obj and research_obj:
PriceCoast.objects.update_or_create(price_name=price_obj, research=research_obj, defaults={'coast': coast_value})
|
c4goldsw/shogun | examples/undocumented/python_modular/kernel_comm_word_string_modular.py | Python | gpl-3.0 | 1,453 | 0.037853 | #!/usr/bin/env python
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_dna('../data/fm_train_dna.dat')
testdat = lm.load_dna('../data/fm_test_dna.dat')
parameter_list = [[traindat,testdat,4,0,False, False],[t | raindat,testdat,4,0,False,False]]
def kernel_comm_word_string_modular (fm_train_dna=traindat, fm_test_dna=testdat, order=3, gap=0, reverse = False, use_sign = False):
from modshogun import CommWordStringKernel
from modshogun import StringWordFeatures, StringCharFeatures, DNA
from modshogun import SortWordString
charfeat=StringCharFeature | s(DNA)
charfeat.set_features(fm_train_dna)
feats_train=StringWordFeatures(charfeat.get_alphabet())
feats_train.obtain_from_char(charfeat, order-1, order, gap, reverse)
preproc=SortWordString()
preproc.init(feats_train)
feats_train.add_preprocessor(preproc)
feats_train.apply_preprocessor()
charfeat=StringCharFeatures(DNA)
charfeat.set_features(fm_test_dna)
feats_test=StringWordFeatures(charfeat.get_alphabet())
feats_test.obtain_from_char(charfeat, order-1, order, gap, reverse)
feats_test.add_preprocessor(preproc)
feats_test.apply_preprocessor()
kernel=CommWordStringKernel(feats_train, feats_train, use_sign)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('CommWordString')
kernel_comm_word_string_modular(*parameter_list[0])
|
tensorflow/tensorflow | tensorflow/compiler/mlir/tfr/define_op_template.py | Python | apache-2.0 | 1,903 | 0.003678 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the Lic | ense.
"""A template to define composite ops."""
# pylint: disable=g-direct-tensorflow-import
import os
import sys
from absl import app
from tensorflow.compiler.ml | ir.tfr.python.composite import Composite
from tensorflow.compiler.mlir.tfr.python.op_reg_gen import gen_register_op
from tensorflow.compiler.mlir.tfr.python.tfr_gen import tfr_gen_from_module
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
'output', None,
'Path to write the genereated register op file and MLIR file.')
flags.DEFINE_bool('gen_register_op', True,
'Generate register op cc file or tfr mlir file.')
flags.mark_flag_as_required('output')
@Composite('TestRandom', derived_attrs=['T: numbertype'], outputs=['o: T'])
def _composite_random_op():
pass
def main(_):
if FLAGS.gen_register_op:
assert FLAGS.output.endswith('.cc')
generated_code = gen_register_op(sys.modules[__name__], '_composite_')
else:
assert FLAGS.output.endswith('.mlir')
generated_code = tfr_gen_from_module(sys.modules[__name__], '_composite_')
dirname = os.path.dirname(FLAGS.output)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(FLAGS.output, 'w') as f:
f.write(generated_code)
if __name__ == '__main__':
app.run(main=main)
|
CT-Data-Collaborative/ctdata-wagtail-cms | ctdata/migrations/0040_auto_20161129_1118.py | Python | mit | 1,900 | 0.002105 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-11-29 11:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('ctdata', '0039_auto_20161123_1818'),
]
operations = [
migrations.CreateModel(
name='AcademyResourceTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='ctdata.DataAcademyResource')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ctdata_academyresourcetag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.RemoveField(
model_name='dataacademyliveevent',
name='size_limit',
),
migrations.AddField(
model_name='dataacademyabstractevent',
name='size_limit',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='dataacademyabstractevent',
name='eventbrite_event_id',
field=models.CharField(default=None, max_length=50, null=True),
| ),
migrations.AddField(
model_name='dataacademyresource',
name='tags',
field=modelcluster.contrib.taggit.Clu | sterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='ctdata.AcademyResourceTag', to='taggit.Tag', verbose_name='Tags'),
),
]
|
yunity/foodsaving-backend | karrot/bootstrap/tests/test_api.py | Python | agpl-3.0 | 4,392 | 0.001138 | from unittest.mock import ANY, patch
from django.test import override_settings
from geoip2.errors import AddressNotFoundError
from rest_framework import status
from rest_framework.test import APITestCase
from karrot.groups.factories import GroupFactory
from karrot.users.factories import UserFactory
from karrot.utils.geoip import ip_to_city
from karrot.utils.tests.fake import faker
OVERRIDE_SETTINGS = {
'SENTRY_CLIENT_DSN': faker.name(),
'SENTRY_ENVIRONMENT': faker.name(),
'FCM_CLIENT_API_KEY': faker.name(),
'FCM_CLIENT_MESSAGING_SENDER_ID': faker.name(),
'FCM_CLIENT_PROJECT_ID': faker.name(),
'FCM_CLIENT_APP_ID': faker.name(),
}
class TestConfigAPI(APITestCase):
def test_default_config(self):
response = self.client.get('/api/config/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data, {
'fcm': {
'api_key': None,
'messaging_sender_id': None,
'project_id': None,
'app_id': None,
},
'sentry': {
'dsn': None,
'environment': 'production',
},
}, response.data
)
@override_settings(**OVERRIDE_SETTINGS)
def test_config_with_overrides(self):
response = self.client.get('/api/config/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data, {
'fcm': {
'api_key': OVERRIDE_SETTINGS['FCM_CLIENT_API_KEY'],
'messaging_sender_id': OVERRIDE_SETTINGS['FCM_CLIENT_MESSAGING_SENDER_ID'],
'project_id': OVERRIDE_SETTINGS['FCM_CLIENT_PROJECT_ID'],
'app_id': OVERRIDE_SETTINGS['FCM_CLIENT_APP_ID'],
},
'sentry': {
'dsn': OVERRIDE_SETTINGS['SENTRY_CLIENT_DSN'],
'environment': OVERRIDE_SETTINGS['SENTRY_ENVIRONMENT'],
},
}, response.data
)
class TestBootstrapAPI(APITestCase):
def setUp(self):
self.user = UserFactory()
self.member = UserFactory()
self.group = GroupFactory(members=[self.member], application_questions='')
self.url = '/api/bootstrap/'
self.client_ip = '2003:d9:ef08:4a00:4b7a:7964:8a3c:a33e'
ip_to_city.cache_clear() # prevent getting cached mock values
def tearDown(self):
ip_to_city.cache_clear()
def test_as_anon(self):
with self.assertNumQueries(1):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['server'], ANY)
self.assertEqual(response.data['config'], ANY)
self.assertEqual(response.data['user'], None)
self.assertEqual(response.data['geoip'], None)
self.assertEqual(response.data['groups'], ANY)
@patch('karrot.utils.geoip.geoip')
def test_with_geoip(self, geoip):
lat_lng = [float(val) for val in faker.latlng()]
city = {'latitude': lat_lng[0], 'longitude': lat_lng[1], 'country_code': 'AA', 'time_zone': 'Europe/Berlin'}
geoip.city.return_value = city
response = self.client.get(self.url, HTTP_X_FORWARDED_FOR=self.client_ip)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
dict(response.data['geoip']), {
'lat': city['latitude'],
'lng': city['longitude'],
'country_code': city['country_code'],
'timezone': city['time_zone'],
}
)
@patch('karrot.utils.geoip.geoip')
def test_without_geoip(self, geoip):
geoip.city.side_effect = AddressNotFoundError
response = self.client.get(self.url, HTTP_X_FORWARDED_FOR=self.client_ip)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIsNone(response.data['geoip']) |
def test_when_logged_in(self):
self.client.force_login(user=self.user)
with self.assertNumQueries(2):
response = self.client.get(self.url)
| self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['user']['id'], self.user.id)
|
cihangxie/cleverhans | examples/nips17_adversarial_competition/validation_tool/validate_submission_lib.py | Python | mit | 14,955 | 0.006286 | """Helper library which performs validation of the submission."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import json
import logging
import os
import re
import subprocess
import numpy as np
from PIL import Image
from six import iteritems
from six import PY3
EXTRACT_COMMAND = {
'.zip': ['unzip', '${src}', '-d', '${dst}'],
'.tar': ['tar', 'xvf', '${src}', '-C', '${dst}'],
'.tar.gz': ['tar', 'xvzf', '${src}', '-C', '${dst}'],
}
ALLOWED_SUBMISSION_TYPES = ['attack', 'targeted_attack', 'defense']
REQUIRED_METADATA_JSON_FIELDS = ['entry_point', 'container',
'container_gpu', 'type']
CMD_VARIABLE_RE = re.compile('^\\$\\{(\\w+)\\}$')
BATCH_SIZE = 100
IMAGE_NAME_PATTERN = 'IMG{0:04}.png'
ALLOWED_EPS = [4, 8, 12, 16]
MAX_SUBMISSION_SIZE_ZIPPED = 8*1024*1024*1024 # 8 GiB
MAX_SUBMISSION_SIZE_UNPACKED = 16*1024*1024*1024 # 16 GiB
MAX_DOCKER_IMAGE_SIZE = 8*1024*1024*1024 # 8 GiB
def get_extract_command_template(filename):
"""Returns extraction command based on the filename extension."""
for k, v in iteritems(EXTRACT_COMMAND):
if filename.endswith(k):
return v
return None
def shell_call(command, **kwargs):
"""Calls shell command with parameter substitution.
Args:
command: command to run as a list of tokens
**kwargs: dirctionary with substitutions
Returns:
whether command was successful, i.e. returned 0 status code
Example of usage:
shell_call(['cp', '${A}', '${B}'], A='src_file', B='dst_file')
will call shell command:
cp src_file dst_file
"""
command = list(command)
for i in range(len(command)):
m = CMD_VARIABLE_RE.match(command[i])
if m:
var_id = m.group(1)
if var_id in kwargs:
command[i] = kwargs[var_id]
return subprocess.call(command) == 0
def make_directory_writable(dirname):
"""Makes directory readable and writable by everybody.
Args:
dirname: name of the directory
Returns:
True if operation was successfull
If you run something inside Docker container and it writes files, then
these files will be written as root user with restricted permissions.
So to be able to read/modify these files outside of Docker you have to change
permissions to be world readable and writable.
"""
retval = shell_call(['docker', 'run', '-v',
'{0}:/output_dir'.format(dirname),
'busybox:1.27.2',
'chmod', '-R', 'a+rwx', '/output_dir'])
if not retval:
logging.error('Failed to change permissions on directory: %s', dirname)
return retval
def load_defense_output(filename):
"""Loads output of defense from given file."""
result = {}
with | open(filename) as f:
for row in csv.reader(f):
try:
image_filename = row[0]
if not image_filename.endswith('.png'):
image_filename += '.png'
label = int(row[1])
except (IndexError, ValueError):
continue
result[image_filename] = label
return result
class SubmissionValidator(object):
"""Class which performs validation of the submission."""
def __init__(self, temp_dir, use_gpu):
"""Initializes inst | ance of SubmissionValidator.
Args:
temp_dir: temporary working directory
use_gpu: whether to use GPU
"""
self._temp_dir = temp_dir
self._use_gpu = use_gpu
self._tmp_extracted_dir = os.path.join(self._temp_dir, 'tmp_extracted')
self._extracted_submission_dir = os.path.join(self._temp_dir, 'extracted')
self._sample_input_dir = os.path.join(self._temp_dir, 'input')
self._sample_output_dir = os.path.join(self._temp_dir, 'output')
def _prepare_temp_dir(self):
"""Cleans up and prepare temporary directory."""
shell_call(['rm', '-rf', os.path.join(self._temp_dir, '*')])
# NOTE: we do not create self._extracted_submission_dir
# this is intentional because self._tmp_extracted_dir or it's subdir
# will be renames into self._extracted_submission_dir
os.mkdir(self._tmp_extracted_dir)
os.mkdir(self._sample_input_dir)
os.mkdir(self._sample_output_dir)
# make output dir world writable
shell_call(['chmod', 'a+rwX', '-R', self._sample_output_dir])
def _extract_submission(self, filename):
"""Extracts submission and moves it into self._extracted_submission_dir."""
# verify filesize
file_size = os.path.getsize(filename)
if file_size > MAX_SUBMISSION_SIZE_ZIPPED:
logging.error('Submission archive size %d is exceeding limit %d',
file_size, MAX_SUBMISSION_SIZE_ZIPPED)
return False
# determime archive type
exctract_command_tmpl = get_extract_command_template(filename)
if not exctract_command_tmpl:
logging.error('Input file has to be zip, tar or tar.gz archive; however '
'found: %s', filename)
return False
# extract archive
submission_dir = os.path.dirname(filename)
submission_basename = os.path.basename(filename)
logging.info('Extracting archive %s', filename)
retval = shell_call(
['docker', 'run',
'--network=none',
'-v', '{0}:/input_dir'.format(submission_dir),
'-v', '{0}:/output_dir'.format(self._tmp_extracted_dir),
'busybox:1.27.2'] + exctract_command_tmpl,
src=os.path.join('/input_dir', submission_basename),
dst='/output_dir')
if not retval:
logging.error('Failed to extract submission from file %s', filename)
return False
if not make_directory_writable(self._tmp_extracted_dir):
return False
# find submission root
root_dir = self._tmp_extracted_dir
root_dir_content = [d for d in os.listdir(root_dir) if d != '__MACOSX']
if (len(root_dir_content) == 1
and os.path.isdir(os.path.join(root_dir, root_dir_content[0]))):
logging.info('Looks like submission root is in subdirectory "%s" of '
'the archive', root_dir_content[0])
root_dir = os.path.join(root_dir, root_dir_content[0])
# Move files to self._extracted_submission_dir.
# At this point self._extracted_submission_dir does not exist,
# so following command will simply rename root_dir into
# self._extracted_submission_dir
if not shell_call(['mv', root_dir, self._extracted_submission_dir]):
logging.error('Can''t move submission files from root directory')
return False
return True
def _verify_submission_size(self):
submission_size = 0
for dirname, _, filenames in os.walk(self._extracted_submission_dir):
for f in filenames:
submission_size += os.path.getsize(os.path.join(dirname, f))
logging.info('Unpacked submission size: %d', submission_size)
if submission_size > MAX_SUBMISSION_SIZE_UNPACKED:
logging.error('Submission size exceeding limit %d',
MAX_SUBMISSION_SIZE_UNPACKED)
return submission_size <= MAX_SUBMISSION_SIZE_UNPACKED
def _load_and_verify_metadata(self, submission_type):
"""Loads and verifies metadata.
Args:
submission_type: type of the submission
Returns:
dictionaty with metadata or None if metadata not found or invalid
"""
metadata_filename = os.path.join(self._extracted_submission_dir,
'metadata.json')
if not os.path.isfile(metadata_filename):
logging.error('metadata.json not found')
return None
try:
with open(metadata_filename, 'r') as f:
metadata = json.load(f)
except IOError as e:
logging.error('Failed to load metadata: %s', e)
return None
for field_name in REQUIRED_METADATA_JSON_FIELDS:
if field_name not in metadata:
logging.error('Field %s not found in metadata', field_name)
return None
# Verify submission type
if submission_type != metadata['type']:
logging.error('Invalid submission type in metadata, expected "%s", '
'actual "%s"', submission_type, metadata['type'])
return None
# Check submission entry point
entry_point = metadata['entry_point']
|
jmesteve/saas3 | openerp/addons_extra/l10n_es_aeat/aeat_report.py | Python | agpl-3.0 | 6,565 | 0.005638 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2011
# Pexego Sistemas Informáticos. (http://pexego.es) All Rights Reserved
# Luis Manuel Angueira Blanco (Pexego)
# Copyright (C) 2013
# Ignacio Ibeas - Acysos S.L. (http://acysos.com) All Rights Reserved
# Migración a OpenERP 7.0
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp.osv import orm, fields
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
import time
import re
class l10n_es_aeat_report(orm.Model):
_name = "l10n.es.aeat.report"
_description = "AEAT report base module"
def on_change_company_id(self, cr, uid, ids, company_id):
"""
Loads some company data (the VAT number) when the selected
company changes.
"""
company_vat = ''
if company_id:
company = self.pool['res.company'].browse(cr, uid, company_id)
if company.partner_id and company.partner_id.vat:
# Remove the ES part from spanish vat numbers
# (ES12345678Z => 12345678Z)
company_vat = re.match("(ES){0,1}(.*)",
company.partner_id.vat).groups()[1]
return { 'value': { 'company_vat': company_vat } }
_columns = {
'company_id': fields.many2one('res.company', 'Company', required=True,
readonly=True, states={'draft': [('readonly', False)]}),
'number': fields.char('Declaration number', size=13, required=True,
readonly=True),
'previous_number' : fields.char('Previous declaration number',
size=13, states={'done':[('readonly',True)]}),
'representative_vat': fields.char('L.R. VAT number', size=9,
help="Legal Representative VAT number.",
states={'calculated':[('required',True)],
'confirmed':[('readonly',True)]}),
'fiscalyear_ | id': fields.many2one('account.fiscalyear', 'Fiscal year',
required=True, readonly=True,
states={'draft': [('readonly', False)]}) | ,
'company_vat': fields.char('VAT number', size=9, required=True,
readonly=True, states={'draft': [('readonly', False)]}),
'type': fields.selection([('N', 'Normal'),
('C', 'Complementary'),
('S', 'Substitutive')], 'Statement Type',
states={'calculated':[('required',True)],
'done':[('readonly',True)]}),
'support_type': fields.selection(
[('C', 'DVD'),
('T', 'Telematics')], 'Support Type',
states={'calculated':[('required',True)],
'done':[('readonly',True)]}),
'calculation_date': fields.datetime("Calculation date"),
'state' : fields.selection([('draft', 'Draft'),
('calculated', 'Processed'),
('done', 'Done'),
('cancelled', 'Cancelled')],
'State', readonly=True),
}
_defaults = {
'company_id': lambda self, cr, uid, context=None: (
self.pool['res.company']._company_default_get(cr, uid,
'l10n.es.aeat.report', context=context)),
'type': 'N',
'support_type': 'T',
'state': 'draft',
}
def button_calculate(self, cr, uid, ids, context=None):
res = self.calculate(cr, uid, ids, context=context)
self.write(cr, uid, ids,
{'state': 'calculated',
'calculation_date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
return res
def button_recalculate(self, cr, uid, ids, context=None):
self.write(cr, uid, ids,
{'calculation_date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
return self.calculate(cr, uid, ids, context=context)
def calculate(self, cr, uid, ids, context=None):
return True
def button_confirm(self, cr, uid, ids, context=None):
"""Set report status to done."""
self.write(cr, uid, ids, {'state': 'done'}, context=context)
return True
def button_cancel(self, cr, uid, ids, context=None):
"""Set report status to cancelled."""
self.write(cr, uid, ids, {'state': 'cancelled'}, context=context)
return True
def button_recover(self, cr, uid, ids, context=None):
"""Set report status to draft and reset calculation date."""
self.write(cr, uid, ids, {'state': 'draft', 'calculation_date': None})
return True
def button_export(self, cr, uid, ids, context=None):
for report in self.browse(cr, uid, ids, context=context):
export_obj = self.pool["l10n.es.aeat.report.%s.export_to_boe" %report.number]
export_obj.export_boe_file(cr, uid, report, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
for item in self.browse(cr, uid, ids):
if item.state not in ['draft', 'cancelled']:
raise orm.except_orm(_('Error!'),
_("Only reports in 'draft' or "
"'cancelled' state can be removed"))
return super(l10n_es_aeat_report, self).unlink(cr, uid, ids,
context=context)
|
wrouesnel/ansible | lib/ansible/modules/cloud/amazon/elasticache.py | Python | gpl-3.0 | 20,816 | 0.001729 | #!/usr/bin/python
#
# Copyright (c) 2017 Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: elasticache
short_description: Manage cache clusters in Amazon Elasticache.
description:
- Manage cache clusters in Amazon Elasticache.
- Returns information about the specified cache cluster.
version_added: "1.4"
requirements: [ boto3 ]
author: "Jim Dalton (@jsdalton)"
options:
state:
description:
- C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. C(rebooted) will reboot the cluster,
resulting in a momentary outage.
choices: ['present', 'absent', 'rebooted']
required: true
name:
description:
- The cache cluster identifier
required: true
engine:
description:
- Name of the cache engine to be used.
required: false
default: memcached
choices: ['redis', 'memcached']
cache_engine_version:
description:
- The version number of the cache engine
required: false
default: None
node_type:
description:
- The compute and memory capacity of the nodes in the cache cluster
required: false
default: cache.m1.small
num_nodes:
description:
- The initial number of cache nodes that the cache cluster will have. Required when state=present.
required: false
cache_port:
description:
- The port number on which each of the cache nodes will accept connections
required: false
default: None
cache_parameter_group:
description:
- The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group
for the specified engine will be used.
required: false
default: None
version_added: "2.0"
aliases: [ 'parameter_group' ]
cache_subnet_group:
description:
- The subnet group name to associate with. Only use if inside a vpc. Required if inside a vpc
required: false
default: None
version_added: "2.0"
security_group_ids:
description:
- A list of vpc security group names to associate with this cache cluster. Only use if inside a vpc
required: false
default: None
version_added: "1.6"
cache_security_groups:
description:
- A list of cache security group names to associate with this cache cluster. Must be an empty list if inside a vpc
required: false
default: None
zone:
description:
- The EC2 Availability Zone in which the cache cluster will be created
required: false
defa | ult: None
wait:
description:
- Wait for cache cluster result before returning
required: false
default: yes
choices: [ "yes", "no" ]
hard_ | modify:
description:
- Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state
required: false
default: no
choices: [ "yes", "no" ]
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic example
- elasticache:
name: "test-please-delete"
state: present
engine: memcached
cache_engine_version: 1.4.14
node_type: cache.m1.small
num_nodes: 1
cache_port: 11211
cache_security_groups:
- default
zone: us-east-1d
# Ensure cache cluster is gone
- elasticache:
name: "test-please-delete"
state: absent
# Reboot cache cluster
- elasticache:
name: "test-please-delete"
state: rebooted
"""
from time import sleep
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, HAS_BOTO3, camel_dict_to_snake_dict
try:
import boto3
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
class ElastiCacheManager(object):
"""Handles elasticache creation and destruction"""
EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
def __init__(self, module, name, engine, cache_engine_version, node_type,
num_nodes, cache_port, cache_parameter_group, cache_subnet_group,
cache_security_groups, security_group_ids, zone, wait,
hard_modify, region, **aws_connect_kwargs):
self.module = module
self.name = name
self.engine = engine.lower()
self.cache_engine_version = cache_engine_version
self.node_type = node_type
self.num_nodes = num_nodes
self.cache_port = cache_port
self.cache_parameter_group = cache_parameter_group
self.cache_subnet_group = cache_subnet_group
self.cache_security_groups = cache_security_groups
self.security_group_ids = security_group_ids
self.zone = zone
self.wait = wait
self.hard_modify = hard_modify
self.region = region
self.aws_connect_kwargs = aws_connect_kwargs
self.changed = False
self.data = None
self.status = 'gone'
self.conn = self._get_elasticache_connection()
self._refresh_data()
def ensure_present(self):
"""Ensure cache cluster exists or create it if not"""
if self.exists():
self.sync()
else:
self.create()
def ensure_absent(self):
"""Ensure cache cluster is gone or delete it if not"""
self.delete()
def ensure_rebooted(self):
"""Ensure cache cluster is gone or delete it if not"""
self.reboot()
def exists(self):
"""Check if cache cluster exists"""
return self.status in self.EXIST_STATUSES
def create(self):
"""Create an ElastiCache cluster"""
if self.status == 'available':
return
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
return
if self.status == 'deleting':
if self.wait:
self._wait_for_status('gone')
else:
msg = "'%s' is currently deleting. Cannot create."
self.module.fail_json(msg=msg % self.name)
kwargs = dict(CacheClusterId=self.name,
NumCacheNodes=self.num_nodes,
CacheNodeType=self.node_type,
Engine=self.engine,
EngineVersion=self.cache_engine_version,
CacheSecurityGroupNames=self.cache_security_groups,
SecurityGroupIds=self.security_group_ids,
CacheParameterGroupName=self.cache_parameter_group,
CacheSubnetGroupName=self.cache_subnet_group)
if self.cache_port is not None:
kwargs['Port'] = self.cache_port
if self.zone is not None:
kwargs['PreferredAvailabilityZone'] = self.zone
try:
self.conn.create_cache_cluster(**kwargs)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
self._refresh_data()
self.changed = True
if self.wait:
self._wait_for_status('available')
return True
def delete(self):
"""Destroy an ElastiCache cluster"""
if self.status == 'gone':
return
if self.status == 'deleting':
if self.wait:
self._wait_for_status('gone')
return
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
msg = "'%s' is currently %s. Cannot delete."
self.module.fail_json(msg=msg % (se |
gnocchixyz/gnocchi | gnocchi/storage/__init__.py | Python | apache-2.0 | 29,952 | 0.000033 | # -*- encoding: utf-8 -*-
#
# Copyright © 2016-2018 Red Hat, Inc.
# Copyright © 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import operator
import daiquiri
import numpy
from oslo_config import cfg
import six
from gnocchi import carbonara
from gnocchi import utils
OPTS = [
cfg.StrOpt('driver',
default='file',
help='Storage driver to use'),
]
LOG = daiquiri.getLogger(__name__)
ATTRGETTER_METHOD = operator.attrgetter("method")
ATTRGETTER_GRANULARITY = operator.attrgetter("granularity")
class StorageError(Exception):
    """Base class for all storage driver errors."""
    pass
class MetricDoesNotExist(StorageError):
    """Raised when a requested metric cannot be found in storage."""

    def __init__(self, metric):
        # Keep the metric around so jsonify() can report it to API clients.
        self.metric = metric
        super(MetricDoesNotExist, self).__init__(
            "Metric %s does not exist" % metric)

    def jsonify(self):
        """Return a JSON-serializable description of this error."""
        detail = {"metric": self.metric}
        return {"cause": "Metric does not exist", "detail": detail}
class AggregationDoesNotExist(StorageError):
    """Error raised when the aggregation method doesn't exists for a metric."""
    def __init__(self, metric, method, granularity):
        # metric: the metric being queried
        # method: aggregation method name (e.g. "mean")
        # granularity: timespan rendered to seconds for display via
        #   utils.timespan_total_seconds()
        self.metric = metric
        self.method = method
        self.granularity = granularity
        super(AggregationDoesNotExist, self).__init__(
            "Aggregation method '%s' at granularity '%s' "
            "for metric %s does not exist" %
            (method, utils.timespan_total_seconds(granularity), metric))
    def jsonify(self):
        """Return a JSON-serializable description of this error."""
        return {
            "cause": "Aggregation does not exist",
            "detail": {
                # FIXME(jd) Pecan does not use our JSON renderer for errors
                # So we need to convert this
                "granularity": utils.timespan_total_seconds(self.granularity),
                "aggregation_method": self.method,
            },
        }
class MetricAlreadyExists(StorageError):
    """Error raised when this metric already exists."""
    def __init__(self, metric):
        # Retained so callers can inspect which metric collided.
        self.metric = metric
        super(MetricAlreadyExists, self).__init__(
            "Metric %s already exists" % metric)
@utils.retry_on_exception_and_log("Unable to initialize storage driver")
def get_driver(conf):
    """Return the configured driver.

    Resolves the driver class registered under the 'gnocchi.storage'
    namespace (selected by the [storage] config group) and instantiates it
    with that group. Initialization failures are retried and logged by the
    decorator.
    """
    return utils.get_driver_class('gnocchi.storage', conf.storage)(
        conf.storage)
class Statistics(collections.defaultdict):
    """Counter mapping (missing keys start at 0) with a timing helper."""

    class StatisticsTimeContext(object):
        """Context manager accumulating elapsed seconds under "<name> time"."""

        def __init__(self, stats, name):
            self.stats = stats
            self.name = name + " time"

        def __enter__(self):
            stopwatch = utils.StopWatch()
            stopwatch.start()
            self.sw = stopwatch
            return self

        def __exit__(self, exc_type, exc_value, exc_tb):
            self.stats[self.name] += self.sw.elapsed()

    def __init__(self):
        # int() == 0, so every counter starts at zero.
        super(Statistics, self).__init__(int)

    def time(self, name):
        """Return a context manager that times its block under *name*."""
        return self.StatisticsTimeContext(self, name)
class StorageDriver(object):
# NOTE(sileht): By default we use threads, but some driver can disable
# threads by setting this to utils.sequencial_map
MAP_METHOD = staticmethod(utils.parallel_map)
def __init__(self, conf):
self.statistics = Statistics()
@staticmethod
def upgrade():
pass
def _get_splits(self, metrics_aggregations_keys, version=3):
results = collections.defaultdict(
lambda: colle | ctions.defaultdict(list))
for metric, aggregation, split in self.MAP_METHOD(
lambda m, k, a, v: (m, a, self._get_splits_unbatched(m, k, a, v)), # noqa
((metric, key, aggregation, version)
for metric, aggregations_and_keys
in six.iteritems(metrics_aggregations_keys)
| for aggregation, keys
in six.iteritems(aggregations_and_keys)
for key in keys)):
results[metric][aggregation].append(split)
return results
    @staticmethod
    def _get_splits_unbatched(metric, timestamp_key, aggregation, version=3):
        """Fetch one serialized split for a metric (per-driver hook).

        :param metric: The metric to read the split for.
        :param timestamp_key: The split key (see `carbonara.SplitKey`)
            identifying which time range to fetch.
        :param aggregation: The aggregation the split belongs to.
        :param version: Storage engine data format version.
        """
        raise NotImplementedError
@staticmethod
def _get_or_create_unaggregated_timeseries_unbatched(metric, version=3):
"""Get the unaggregated timeserie of metrics.
If the metrics does not exist, it is created.
:param metric: A metric.
:param version: The storage format version number.
"""
raise NotImplementedError
    def _get_or_create_unaggregated_timeseries(self, metrics, version=3):
        """Get the unaggregated timeserie of metrics.

        If the metrics does not exist, it is created.

        :param metrics: A list of metrics.
        :param version: The storage format version number.
        :return: dict mapping each metric to its fetched value; per-metric
            failures are suppressed by utils.return_none_on_failure,
            presumably yielding None entries rather than aborting the batch.
        """
        # NOTE(review): zipping metrics against the mapped results relies on
        # MAP_METHOD returning results in input order -- confirm for all
        # MAP_METHOD implementations (parallel_map / sequencial_map).
        return dict(
            six.moves.zip(
                metrics,
                self.MAP_METHOD(
                    utils.return_none_on_failure(
                        self._get_or_create_unaggregated_timeseries_unbatched),
                    ((metric, version) for metric in metrics))))
@staticmethod
def _store_unaggregated_timeseries_unbatched(metric, data, version=3):
"""Store unaggregated timeseries.
:param metric: A metric.
:param data: The data to store.
:param version: Storage engine data format version
"""
raise NotImplementedError
    def _store_unaggregated_timeseries(self, metrics_and_data, version=3):
        """Store unaggregated timeseries.

        :param metrics_and_data: A list of (metric, serialized_data) tuples
        :param version: Storage engine data format version
        """
        # Each (metric, data) pair is written independently; per-metric
        # failures are suppressed (return_none_on_failure), presumably so one
        # bad metric does not abort the whole batch.
        self.MAP_METHOD(
            utils.return_none_on_failure(
                self._store_unaggregated_timeseries_unbatched),
            ((metric, data, version) for metric, data in metrics_and_data))
@staticmethod
def _store_metric_splits_unbatched(metric, key, aggregation, data, offset,
version=3):
"""Store a metric split.
:param metric: A metric.
:param key: The `carbonara.SplitKey`.
:param aggregation: The `carbonara.Aggregation`.
:param data: The actual data to write.
:param offset: The offset to write to.
:param version: Storage engine format version.
"""
raise NotImplementedError
    def _store_metric_splits(self, metrics_keys_aggregations_data_offset,
                             version=3):
        """Store metric splits.

        Store a bunch of splits for some metrics.

        :param metrics_keys_aggregations_data_offset: A dict where keys are
                                                      `storage.Metric` and
                                                      values are a list of
                                                      (key, aggregation,
                                                       data, offset) tuples.
        :param version: Storage engine format version.
        """
        # Flatten the per-metric dict into one write task per split and let
        # MAP_METHOD fan them out to the unbatched driver hook.
        self.MAP_METHOD(
            self._store_metric_splits_unbatched,
            ((metric, key, aggregation, data, offset, version)
             for metric, keys_aggregations_data_offset
             in six.iteritems(metrics_keys_aggregations_data_offset)
             for key, aggregation, data, offset
             in keys_aggregations_data_offset))
@staticmethod
def _list_split_keys_unbatched(self, metric, aggregations, version=3):
"""List split keys for a metric.
:param metric: The metric to look key for.
:param aggregations: List of Aggregations to look fo |
Bryukh-Checkio-Tasks/checkio-task-mono-captcha | verification/referee.py | Python | gpl-2.0 | 221 | 0 | from checkio.sig | nals import ON_CONNECT
from checkio import api
from checkio.referees.io import CheckiOReferee
from tests import TESTS
api.add_liste | ner(
ON_CONNECT,
CheckiOReferee(
tests=TESTS).on_ready)
|
neuroelectro/neuroelectro_org | article_text_mining/deprecated/db_add_full_text_wiley.py | Python | gpl-2.0 | 10,680 | 0.009551 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 07 14:13:18 2013
@author: Shreejoy
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 20 09:54:11 2012
@author: Shreejoy
"""
import os
import os.path
import re
import struct
import gc
from matplotlib.pylab import *
from xml.etree.ElementTree import XML
from urllib import quote_plus, quote
from urllib2 import Request, urlopen, URLError, HTTPError
from xml.etree.ElementTree import XML
import json
from pprint import pprint
from bs4 import BeautifulSoup
import time
from HTMLParser import HTMLParseError
def get_full_text_links():
    """Page through a Wiley Online Library search, collecting full-text links.

    Walks the search results NUMHITS at a time, scrapes each results page
    for 'Full Article (HTML)' anchors, and keeps only links whose DOI
    resolves to exactly one PubMed id.

    Returns a list of (href, pmid) tuples.

    NOTE(review): Python 2 code (print statements, `except Exception, e`).
    maxURLTries and pdfLinks are assigned but never used here.
    """
    NUMHITS = 200
    firstInd = 1
    maxURLTries = 5
    waitTime = 10
    #totalArticles = NUMHITS + firstInd + 1 # just set this later when it gets searched
    # Hard-coded result count for this particular query; presumably taken
    # from a manual run of the search -- re-check before reusing.
    totalArticles = 56712
    #totalArticles = 3694
    searchLinkBase = 'http://onlinelibrary.wiley.com/advanced/search/results/reentry?scope=allContent&dateRange=between&inTheLastList=6&startYear=1996&endYear=2013&queryStringEntered=false&searchRowCriteria[0].queryString=neuron+membrane+potential&searchRowCriteria[0].fieldName=all-fields&searchRowCriteria[0].booleanConnector=and&searchRowCriteria[1].fieldName=all-fields&searchRowCriteria[1].booleanConnector=and&searchRowCriteria[2].fieldName=all-fields&searchRowCriteria[2].booleanConnector=and&start=%s&resultsPerPage=%s&ordering=relevancy&publicationFacet=journal'
    #searchLinkBase = 'http://onlinelibrary.wiley.com/advanced/search/results/reentry?scope=allContent&dateRange=between&inTheLastList=6&startYear=1996&endYear=2013&queryStringEntered=false&searchRowCriteria[0].queryString=neuron+membrane+potential&searchRowCriteria[0].fieldName=all-fields&searchRowCriteria[0].booleanConnector=and&searchRowCriteria[1].queryString=European+Journal+of+Neuroscience&searchRowCriteria[1].fieldName=publication-title&searchRowCriteria[1].booleanConnector=or&searchRowCriteria[2].fieldName=all-fields&searchRowCriteria[2].booleanConnector=and&start=%s&resultsPerPage=%s&ordering=relevancy'
    # 'http://jn.physiology.org/search?tmonth=Mar&pubdate_year=&submit=yes&submit=yes&submit=Submit&andorexacttitle=and&format=condensed&firstpage=&fmonth=Jan&title=&tyear=2012'
    # searchLinkBase = 'http://jn.physiology.org/search?tmonth=Mar&pubdate_year=&submit=yes&submit=yes&submit=Submit&andorexacttitle=and&format=condensed&firstpage=&fmonth=Jan&title=&tyear=2012&hits=' + str(NUMHITS) + '&titleabstract=&flag=&journalcode=jn&volume=&sortspec=date&andorexacttitleabs=and&author2=&andorexactfulltext=and&author1=&fyear=1997&doi=&fulltext=%22input%20resistance%22%20AND%20neuron&FIRSTINDEX=' + str(firstInd)
    fullTextLinks = []
    pdfLinks = []
    while firstInd + NUMHITS <= totalArticles:
        print 'searching %d of %d articles' % (firstInd, totalArticles)
        try:
            # searchLinkFull = 'http://jn.physiology.org/search?tmonth=Mar&pubdate_year=&submit=yes&submit=yes&submit=Submit&andorexacttitle=and&format=condensed&firstpage=&fmonth=Jan&title=&tyear=2012&hits=' + str(NUMHITS) + '&titleabstract=&flag=&journalcode=jn&volume=&sortspec=date&andorexacttitleabs=and&author2=&andorexactfulltext=and&author1=&fyear=1997&doi=&fulltext=%22input%20resistance%22%20AND%20neuron&FIRSTINDEX=' + str(firstInd)
            # searchLinkFull = 'http://www.jneurosci.org/search?tmonth=Mar&pubdate_year=&submit=yes&submit=yes&submit=Submit&andorexacttitle=and&format=condensed&firstpage=&fmonth=Jan&title=&tyear=2012&hits=' + str(NUMHITS) + '&titleabstract=&volume=&sortspec=date&andorexacttitleabs=and&author2=&tocsectionid=all&andorexactfulltext=and&author1=&fyear=1997&doi=&fulltext=input%20resistance%20neuron&FIRSTINDEX=' + str(firstInd)
            searchLinkFull = searchLinkBase % (firstInd, NUMHITS)
            handle = urlopen(searchLinkFull) # open the url
            data = handle.read() # read the data
            soup = BeautifulSoup(data)
        except Exception, e:
            # Any fetch/parse failure skips this page entirely (the page is
            # never retried, so a transient error loses NUMHITS results).
            print 'skipping'
            print e
            continue
        for link in soup.find_all('a'):
            # print link.get('rel')
            if link.string == 'Full Article (HTML)':
                currLink = link.get('href')
                # do a check to see if
                pmid = get_pmid_from_doi(currLink)
                if len(pmid) == 1:
                    fullTextLinks.append((currLink, pmid[0]))
        firstInd += NUMHITS
        # Throttle between pages to be polite to the publisher's servers.
        print 'now waiting %d secs before next search' % waitTime
        time.sleep(waitTime)
    return fullTextLinks
MAXURLTRIES = 2
def get_full_text_from_link(fullTextLink, pmid):
os.chdir('C:\Users\Shreejoy\Desktop\wiley_html')
# actually try to get full text
success = False
numTries = 0
waitTimeLong = .5
waitTimeShort = 2
link = 'http://onlinelibrary.wiley.com' + fullTextLink
request = Request(link)
while numTries < MAXURLTRIES and success == False:
try:
fullText = urlopen(request).read()
#print 'file opened successfully'
# fullText get succeeded!
soup = BeautifulSoup(fullText)
fullTextTag = soup.find(id = "fulltext")
accessDeniedTag = soup.find(id = "accessDenied")
if accessDeniedTag is None:
titleTag = soup.find(id="articleTitle")
articleTitle = titleTag.h1.text
titleEncoded = | articleTitle.encode("iso-8859-15", "replace")
# | save full text to a file
fileName = make_html_filename(titleEncoded, pmid)
if os.path.isfile(fileName):
print 'found identical file'
pass
else:
# file doesn't exist
f = open(fileName, 'wb')
f.write(str(fullTextTag))
f.close()
print 'found unique file'
success = True
time.sleep(waitTimeShort)
else:
print 'access denied to full text'
print link
# full text not available for some reason
break
except Exception, e:
print e
# if e.code == 403:
# #print '%s failed cause access restricted' % (articleTitle)
# fullText = False
# pmid = False
# break
# else:
print link + ' failed %s times' % numTries
numTries += 1
print 'now waiting %d secs before trying search again' % (waitTimeLong*numTries)
time.sleep(waitTimeLong*numTries)
if numTries == MAXURLTRIES:
fullText = False
pmid = False
def get_full_text_from_link_all(fullTextLinkListTuple):
cnt = 0
for fullTextLinkList in fullTextLinkListTuple:
print '%d of %d articles' % (cnt, len(fullTextLinkListTuple))
link = fullTextLinkList[0]
pmid = fullTextLinkList[1]
get_full_text_from_link(link, pmid)
cnt += 1
def get_full_text_from_link_all_dict(fullTextLinkDict):
cnt = 0
num_articles = len(fullTextLinkDict)
for pmid in fullTextLinkDict.keys():
print '%d of %d articles' % (cnt, num_articles)
link = fullTextLinkDict[pmid]
get_full_text_from_link(link, pmid)
cnt += 1
MAXURLTRIES = 2
MAXTITLELEN = 100
def make_html_filename(title, pmid):
    """Build a filesystem-safe '<pmid>_<title>.html' filename.

    Whitespace becomes underscores, every other non-alphanumeric character
    is dropped, and the stem is capped at MAXTITLELEN characters.
    """
    combined = '%s_%s' % (pmid, title)
    combined = re.sub(r'\s', '_', combined)
    # Dropping everything outside [a-zA-Z0-9_] is equivalent to joining
    # re.findall('[a-zA-Z0-9_]', ...).
    safe = re.sub(r'[^a-zA-Z0-9_]', '', combined)
    # Slicing clamps to the string length automatically, so no min() needed.
    return safe[:MAXTITLELEN] + '.html'
def get_pmid_from_doi(doiStr):
doiSearchStr = re.sub('/doi/', '', doiStr)
doiSearchStr = re.sub('/full', '', doiSearchStr)
searchLink = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=%s[aid]' % (doiSearchStr)
try:
handle = urlopen(searchLink)
data = handl |
rocketrip/django-jsonfield | jsonfield/fields.py | Python | mit | 5,921 | 0.001351 | import copy
from django.db import models
from django.utils.translation import ugettext_lazy as _
try:
from django.utils import six
except ImportError:
import six
try:
import json
except ImportError:
from django.utils import simplejson as json
from django.forms import fields
try:
from django.forms.utils import ValidationError
except ImportError:
from django.forms.util import ValidationError
from .subclassing import SubfieldBase
from .encoder import JSONEncoder
class JSONFormFieldBase(object):
    """Mixin adding JSON parsing/validation to Django form fields."""
    def __init__(self, *args, **kwargs):
        # Extra keyword arguments forwarded verbatim to json.loads().
        self.load_kwargs = kwargs.pop('load_kwargs', {})
        super(JSONFormFieldBase, self).__init__(*args, **kwargs)
    def to_python(self, value):
        """Parse submitted text as JSON.

        Blank/whitespace-only input maps to '' (empty form value); invalid
        JSON raises ValidationError. Non-string values are returned
        unchanged (already deserialized).
        """
        if isinstance(value, six.string_types):
            if not value.strip():
                return ''
            try:
                return json.loads(value, **self.load_kwargs)
            except ValueError:
                raise ValidationError(_("Enter valid JSON"))
        return value
    def clean(self, value):
        """Clean *value*, preserving the distinction between '' (submitted
        empty) and None (missing) for non-required fields."""
        if not value and not self.required:
            if value == '':
                return ''
            return None
        # Trap cleaning errors & bubble them up as JSON errors
        try:
            return super(JSONFormFieldBase, self).clean(value)
        except TypeError:
            raise ValidationError(_("Enter valid JSON"))
class JSONFormField(JSONFormFieldBase, fields.CharField):
    """Form field counterpart of JSONField (textarea-style input)."""
    pass
class JSONCharFormField(JSONFormFieldBase, fields.CharField):
    """Form field counterpart of JSONCharField (single-line input)."""
    pass
class JSONFieldBase(six.with_metaclass(SubfieldBase, models.Field)):
    def __init__(self, *args, **kwargs):
        # Serialization options for json.dumps(); the default produces the
        # most compact representation via the custom JSONEncoder.
        self.dump_kwargs = kwargs.pop('dump_kwargs', {
            'cls': JSONEncoder,
            'separators': (',', ':')
        })
        # Deserialization options forwarded to json.loads().
        self.load_kwargs = kwargs.pop('load_kwargs', {})
        super(JSONFieldBase, self).__init__(*args, **kwargs)
def pre_init(self, value, obj):
"""Convert a string value to JSON only if it needs to be deserialized.
SubfieldBase metaclass has been modified to call this method instead of
to_python so that we can check the obj state and determine if it needs to be
deserialized"""
if obj._state.adding:
# Make sure the primary key actually exists on the object before
# checking if it's empty. This is a special case for South datamigrations
# see: https://github.com/bradjasper/django-jsonfield/issues/52
if isinstance(value, six.string_types):
try:
return json.loads(value, **self.load_kwargs)
except ValueError:
raise ValidationError(_("Enter valid JSON"))
except AttributeError:
# south fake meta class doesn't create proper attributes
# see this:
# https://github.com/bradjasper/django-jsonfield/issues/52
pass
return value
def to_python(self, value):
"""The SubfieldBase metaclass calls pre_init instead of to_python, however to_python
is still necessary for Django's deserializer"""
return value
    def get_db_prep_value(self, value, connection, prepared=False):
        """Convert JSON object to a string.

        NULL (when the field allows it) and the empty string pass through
        unchanged so they stay distinguishable from the JSON values
        'null' and '""' in the database.
        """
        if self.null and value is None:
            return None
        if value == '':
            return value
        return json.dumps(value, **self.dump_kwargs)
    def value_to_string(self, obj):
        """Serialize this field's value on *obj* for Django serializers."""
        value = self._get_val_from_obj(obj)
        # connection is unused by get_db_prep_value here, so None is fine.
        return self.get_db_prep_value(value, None)
def value_from_object(self, obj):
value = super(JSONFieldBase, self).value_from_object(obj)
if self.null and value is None:
return None
if value == '':
return ''
return self.dumps_for_display(value)
    def dumps_for_display(self, value):
        """Serialize *value* for form/admin display (subclasses may override)."""
        return json.dumps(value, **self.dump_kwargs)
def formfield(self, **kwargs):
if "form_class" not in kwargs:
kwargs["form_class"] = self.form_class
| field | = super(JSONFieldBase, self).formfield(**kwargs)
if isinstance(field, JSONFormFieldBase):
field.load_kwargs = self.load_kwargs
if not field.help_text:
field.help_text = "Enter valid JSON"
return field
def get_default(self):
"""
Returns the default value for this field.
The default implementation on models.Field calls force_unicode
on the default, which means you can't set arbitrary Python
objects as the default. To fix this, we just return the value
without calling force_unicode on it. Note that if you set a
callable as a default, the field will still call it. It will
*not* try to pickle and encode it.
"""
if self.has_default():
if callable(self.default):
return self.default()
return copy.deepcopy(self.default)
# If the field doesn't have a default, then we punt to models.Field.
return super(JSONFieldBase, self).get_default()
class JSONField(JSONFieldBase, models.TextField):
    """JSONField is a generic textfield that serializes/deserializes JSON objects"""
    # Form field used when this model field is rendered in a Django form.
    form_class = JSONFormField
    def dumps_for_display(self, value):
        """Pretty-print with a 2-space indent; dump_kwargs may override it."""
        kwargs = {"indent": 2}
        kwargs.update(self.dump_kwargs)
        return json.dumps(value, **kwargs)
class JSONCharField(JSONFieldBase, models.CharField):
"""JSONCharField is a generic textfield that serializes/deserializes JSON objects,
stored in the database like a CharField, which enables it to be used
e.g. in unique keys"""
form_class = JSONCharFormField
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^jsonfield\.fields\.(JSONField|JSONCharField)"])
except ImportError:
pass
|
openstack/trove | trove/tests/unittests/backup/test_backup_models.py | Python | apache-2.0 | 26,428 | 0 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from unittest.mock import DEFAULT
from unittest.mock import MagicMock
from unittest.mock import patch
from swiftclient.client import ClientException
from trove.backup import models
from trove.backup import state
from trove.common import context
from trove.common import exception
from trove.common import timeutils
from trove.common import utils
from trove.db.models import DatabaseModelBase
from trove.instance import models as instance_models
from trove.taskmanager import api
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util
def _prep_conf(current_time):
    """Build a unique (context, instance_id) pair for a test run.

    The supplied timestamp is embedded in both the tenant and instance ids
    so successive test runs never collide on identifiers.
    """
    current_time = str(current_time)
    _context = context.TroveContext(tenant='TENANT-' + current_time)
    instance_id = 'INSTANCE-' + current_time
    return _context, instance_id
BACKUP_NAME = 'WORKS'
BACKUP_NAME_2 = 'IT-WORKS'
BACKUP_NAME_3 = 'SECOND-LAST-ONE'
BACKUP_NAME_4 = 'LAST-ONE-FULL'
BACKUP_NAME_5 = 'LAST-ONE-INCREMENTAL'
BACKUP_NAME_6 = 'LAST-ONE-DELETED'
BACKUP_STATE = state.BackupState.NEW
BACKUP_STATE_COMPLETED = state.BackupState.COMPLETED
BACKUP_DESC = 'Backup test'
BACKUP_FILENAME = '45a3d8cb-ade8-484c-a8a5-0c3c7286fb2f.xbstream.gz'
BACKUP_LOCATION = 'https://hpcs.com/tenant/database_backups/' + BACKUP_FILENAME
class BackupCreateTest(trove_testtools.TestCase):
    def setUp(self):
        """Initialize the test DB and a per-run context/instance id pair."""
        super(BackupCreateTest, self).setUp()
        util.init_db()
        self.context, self.instance_id = _prep_conf(timeutils.utcnow())
        # Flipped to True by tests that persist a backup record, so tearDown
        # knows whether there is anything to clean up.
        self.created = False
    def tearDown(self):
        """Remove the backup DB record if a test actually created one."""
        super(BackupCreateTest, self).tearDown()
        # Only tests that set self.created leave a row behind for this tenant.
        if self.created:
            models.DBBackup.find_by(
                tenant_id=self.context.project_id).delete()
@patch.object(api.API, 'get_client', MagicMock(return_value=MagicMock()))
def test_create(self):
instance = MagicMock()
with patch.object(instance_models.BuiltInstance, 'load',
return_value=instance):
instance.validate_can_perform_action = MagicMock(
return_value=None)
instance.datastore_version = MagicMock()
instance.datastore_version.id = 'datastore-id-999'
instance.cluster_id = None
with patch.multiple(models.Backup,
validate_can_perform_action=DEFAULT,
verify_swift_auth_token=DEFAULT):
with patch.object(api.API, 'create_backup',
MagicMock(return_value=None)):
bu = models.Backup.create(self.context, self.instance_id,
BACKUP_NAME, BACKUP_DESC)
self.created = True
self.assertEqual(BACKUP_NAME, bu.name)
self.assertEqual(BACKUP_DESC, bu.description)
self.assertEqual(self.instance_id, bu.instance_id)
self.assertEqual(state.BackupState.NEW, bu.state)
db_record = models.DBBackup.find_by(id=bu.id)
self.assertEqual(bu.id, db_record['id'])
self.assertEqual(BACKUP_NAME, db_record['name'])
self.assertEqual(BACKUP_DESC, db_record['description'])
self.assertEqual(self.instance_id,
db_record['instance_id'])
self.assertEqual(state.BackupState.NEW,
db_record['state'])
self.assertEqual(ins | tance.datastore_version.id,
db_record['datastore_version_id'])
@patch.object(api.API, 'get_client', MagicMock(return_value=MagicMock()))
def test | _create_incremental(self):
instance = MagicMock()
parent = MagicMock(spec=models.DBBackup)
with patch.object(instance_models.BuiltInstance, 'load',
return_value=instance):
instance.validate_can_perform_action = MagicMock(
return_value=None)
instance.validate_can_perform_action = MagicMock(
return_value=None)
instance.datastore_version = MagicMock()
instance.datastore_version.id = 'datastore-id-999'
instance.cluster_id = None
with patch.multiple(models.Backup,
validate_can_perform_action=DEFAULT,
verify_swift_auth_token=DEFAULT,
get_by_id=MagicMock(return_value=parent)):
with patch.object(api.API, 'create_backup',
MagicMock(return_value=None)):
incremental = models.Backup.create(
self.context,
self.instance_id,
BACKUP_NAME,
BACKUP_DESC,
parent_id='parent_uuid')
self.created = True
db_record = models.DBBackup.find_by(id=incremental.id)
self.assertEqual(incremental.id,
db_record['id'])
self.assertEqual(BACKUP_NAME,
db_record['name'])
self.assertEqual(BACKUP_DESC,
db_record['description'])
self.assertEqual(self.instance_id,
db_record['instance_id'])
self.assertEqual(state.BackupState.NEW,
db_record['state'])
self.assertEqual('parent_uuid',
db_record['parent_id'])
self.assertEqual(instance.datastore_version.id,
db_record['datastore_version_id'])
    def test_create_instance_not_found(self):
        """Backup.create must raise NotFound for a nonexistent instance."""
        self.assertRaises(exception.NotFound, models.Backup.create,
                          self.context, self.instance_id,
                          BACKUP_NAME, BACKUP_DESC)
def test_create_incremental_not_found(self):
instance = MagicMock()
with patch.object(instance_models.BuiltInstance, 'load',
return_value=instance):
instance.validate_can_perform_action = MagicMock(
return_value=None)
instance.cluster_id = None
with patch.object(models.Backup, 'validate_can_perform_action',
return_value=None):
with patch.object(models.Backup, 'verify_swift_auth_token',
return_value=None):
self.assertRaises(exception.NotFound, models.Backup.create,
self.context, self.instance_id,
BACKUP_NAME, BACKUP_DESC,
parent_id='BAD')
    def test_create_instance_not_active(self):
        """Backup.create must propagate UnprocessableEntity when the
        instance refuses the action (e.g. it is not in an active state)."""
        instance = MagicMock()
        with patch.object(instance_models.BuiltInstance, 'load',
                          return_value=instance):
            instance.validate_can_perform_action = MagicMock(
                side_effect=exception.UnprocessableEntity)
            self.assertRaises(exception.UnprocessableEntity,
                              models.Backup.create,
                              self.context, self.instance_id,
                              BACKUP_NAME, BACKUP_DESC)
def test_create_backup_swift_token_invalid(self):
instance = MagicMock(cluster_id=None |
slaughterjames/etendard | shellplate.py | Python | gpl-3.0 | 3,659 | 0.006832 | '''
Etendard v0.4 - Copyright 2012 James Slaughter,
This file is part of Etendard v0.4.
Etendard v0.4 is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Etendard v0.4 is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Etendard v0.4. If not, see <http://www.gnu.org/licenses/>.
'''
'''
shellplate.py - This file is responsible for generating an exploit template and then
saving it to a file pre-populated with arguments from the command line
'''
#No python imports
#programmer generated imports
from fileio import fileio
'''
shellplate
Class: This class is responsible for generating an exploit template and then
saving it to a file pre-populated with arguments from th | e command line
'''
class shellplate:
'''
Constructor
'''
def __init__(self):
| '''
Not used
'''
'''
CreateTemplate()
Function: - Creates a Python template that an exploit can be built from
- Saves the template to disk for further modification
- Returns to etendard
'''
def CreateTemplate(self, target, protocol, port, filename):
FIO = fileio()
templateshellcode = (
"#!/usr/bin/python\n"
+ "#Exploit Description \n"
+ "\n"
+ "\n"
+"# python imports\n"
+ "import os\n"
+ "import sys\n"
+ "import time\n"
+ "import socket\n"
+ "import struct\n"
+ "\n"
+ "shellcode = ()"
+ "\n"
+ "\n"
+ "def Exploit(target, port):\n"
+ " sockAddr = (" + target + ", " + str(port) + "))\n"
+ " tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n"
+ " tsock.connect(sockAddr)\n"
+ " response = tsock.recv(1024)\n"
+ "\n"
+ " #payload = input payload\n"
+ "\n"
+ " payload += ' '\n"
+ " tsock.send(payload)\n"
+ "\n"
+ "if __name__ == '__main__':\n"
+ " try:\n"
+ " target = sys.argv[1]\n"
+ " port = sys.argv[2]\n"
+ " except IndexError:\n"
+ " print 'Usage: %s <target> <port>' % sys.argv[0]\n"
+ " sys.exit(-1)\n"
+ " "
+ " Exploit(target, port)\n")
if len(filename) < 3:
filename = 'template.py'
ret = 0
ret = FIO.WriteFile(filename, templateshellcode)
return ret
|
2e2a/l-rex | apps/item/migrations/0003_alter_itemfeedback_scale_values.py | Python | gpl-3.0 | 644 | 0.001553 | # Generated by Django 3.2 on 2021-04-22 04:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [ |
('lrex_item', '0002_auto_20201210_0817'),
]
operations = [
migrations.AlterField(
model_name='itemfeedback',
| name='scale_values',
field=models.TextField(help_text='Scale values, separated by commas (e.g. "1,3"). If a label contains a comma itself, escape it with "\\" (e.g. "A,B,Can\'t decide\\, I like both"). The feedback will be shown to the participant if one of these ratings is selected.', max_length=10000),
),
]
|
forslund/mycroft-core | mycroft/skills/mycroft_skill/event_container.py | Python | apache-2.0 | 6,352 | 0 | from inspect import signature
from mycroft.messagebus import Message
from mycroft.metrics import Stopwatch, report_timing
from mycroft.util.log import LOG
from ..skill_data import to_alnum
def unmunge_message(message, skill_id):
    """Restore message keywords by removing the Letterified skill ID.

    Args:
        message (Message): Intent result message
        skill_id (str): skill identifier

    Returns:
        Message without clear keywords
    """
    if isinstance(message, Message) and isinstance(message.data, dict):
        prefix = to_alnum(skill_id)
        munged_keys = [key for key in message.data if key.startswith(prefix)]
        for key in munged_keys:
            # Strip the skill-id prefix to recover the original keyword.
            message.data[key[len(prefix):]] = message.data.pop(key)
    return message
def get_handler_name(handler):
"""Name (including class if available) of handler function.
Args:
handler (function): Function to be named
Returns:
string: handler name as string
"""
if '__self__' in dir(handler) and 'name' in dir(handler.__self__):
return handler.__self__.nam | e + '.' + handler.__name__
else:
return handler.__name__
def create_wrapper(handler, skill_id, on_start, on_end, on_error):
    """Create the default skill handler wrapper.

    This wrapper handles things like metrics, reporting handler start/stop
    and errors.

    Args:
        handler (callable): method/function to call
        skill_id (str): skill_id for associated skill
        on_start (function): function to call before executing the handler
        on_end (function): function to call after executing the handler
        on_error (function): function to call for error reporting

    Returns:
        Wrapped callable
    """
    def wrapper(message):
        stopwatch = Stopwatch()
        try:
            # TODO: Fix for real in mycroft-messagebus-client
            # Makes sure the message type is consistent with the type declared
            # in mycroft.messagebus and isinstance will work.
            message = Message(message.msg_type,
                              data=message.data,
                              context=message.context)
            message = unmunge_message(message, skill_id)
            if on_start:
                on_start(message)
            with stopwatch:
                # Zero-argument handlers are invoked without the message.
                if len(signature(handler).parameters) == 0:
                    handler()
                else:
                    handler(message)
        except Exception as e:
            if on_error:
                on_error(e)
        finally:
            # on_end and timing metrics run whether or not the handler raised.
            if on_end:
                on_end(message)
            # Send timing metrics
            context = message.context
            if context and 'ident' in context:
                report_timing(context['ident'], 'skill_handler', stopwatch,
                              {'handler': handler.__name__,
                               'skill_id': skill_id})
    return wrapper
def create_basic_wrapper(handler, on_error=None):
    """Wrap *handler* so any exception it raises is routed to *on_error*.

    The returned wrapper inspects the handler's signature: zero-argument
    handlers are invoked bare, anything else receives the message.

    Args:
        handler (callable): method/function to call
        on_error (function): function to call to report error.

    Returns:
        Wrapped callable
    """
    def wrapper(message):
        try:
            if signature(handler).parameters:
                handler(message)
            else:
                handler()
        except Exception as e:
            # Without an on_error callback the exception is swallowed.
            if on_error:
                on_error(e)
    return wrapper
class EventContainer:
    """Container tracking messagebus handlers.

    This container tracks events added by a skill, allowing unregistering
    all events on shutdown.
    """
    def __init__(self, bus=None):
        # bus may be attached later via set_bus(); events is the list of
        # (name, handler) pairs currently registered on the bus.
        self.bus = bus
        self.events = []
    def set_bus(self, bus):
        """Attach the messagebus client used for (un)registration."""
        self.bus = bus
    def add(self, name, handler, once=False):
        """Create event handler for executing intent or other event.

        Args:
            name (string): IntentParser name
            handler (func): Method to call
            once (bool, optional): Event handler will be removed after it has
                                   been run once.
        """
        def once_wrapper(message):
            # Remove registered one-time handler before invoking,
            # allowing them to re-schedule themselves.
            self.remove(name)
            handler(message)
        if handler:
            if once:
                # One-shot: register the wrapper, and track the wrapper (not
                # the raw handler) so remove() can find it.
                self.bus.once(name, once_wrapper)
                self.events.append((name, once_wrapper))
            else:
                self.bus.on(name, handler)
                self.events.append((name, handler))
            LOG.debug('Added event: {}'.format(name))
    def remove(self, name):
        """Removes an event from bus emitter and events list.

        Args:
            name (string): Name of Intent or Scheduler Event

        Returns:
            bool: True if found and removed, False if not found
        """
        LOG.debug("Removing event {}".format(name))
        removed = False
        # Iterate over a copy since we mutate self.events in the loop.
        for _name, _handler in list(self.events):
            if name == _name:
                try:
                    self.events.remove((_name, _handler))
                except ValueError:
                    LOG.error('Failed to remove event {}'.format(name))
                    pass
                removed = True
        # Because of function wrappers, the emitter doesn't always directly
        # hold the _handler function, it sometimes holds something like
        # 'wrapper(_handler)'. So a call like:
        # self.bus.remove(_name, _handler)
        # will not find it, leaving an event handler with that name left behind
        # waiting to fire if it is ever re-installed and triggered.
        # Remove all handlers with the given name, regardless of handler.
        if removed:
            self.bus.remove_all_listeners(name)
        return removed
    def __iter__(self):
        return iter(self.events)
    def clear(self):
        """Unregister all registered handlers and clear the list of registered
        events.
        """
        for e, f in self.events:
            self.bus.remove(e, f)
        self.events = []  # Remove reference to wrappers
|
jyeatman/dipy | dipy/viz/fvtk.py | Python | bsd-3-clause | 53,175 | 0.000414 | ''' Fvtk module implements simple visualization functions using VTK.
The main idea is the following:
A window can have one or more renderers. A renderer can have none, one or more actors. Examples of actors are a sphere, line, point etc.
You basically add actors in a renderer and in that way you can visualize the forementioned objects e.g. sphere, line ...
Examples
---------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> a=fvtk.axes()
>>> fvtk.add(r,a)
>>> #fvtk.show(r)
For more information on VTK there many neat examples in
http://www.vtk.org/Wiki/VTK/Tutorials/External_Tutorials
'''
from __future__ import division, print_function, absolute_import
from dipy.utils.six.moves import xrange
import types
import numpy as np
from dipy.core.ndindex import ndindex
# Conditional import machinery for vtk
from ..utils.optpkg import optional_package
# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
colors, have_vtk_colors, _ = optional_package('vtk.util.colors')
cm, have_matplotlib, _ = optional_package('matplotlib.cm')
if have_matplotlib:
get_cmap = cm.get_cmap
else:
from dipy.data import get_cmap
# a track buffer used only with picking tracks
track_buffer = []
# indices buffer for the tracks
ind_buffer = []
# tempory renderer used only with picking tracks
tmp_ren = None
if have_vtk:
major_version = vtk.vtkVersion.GetVTKMajorVersion()
# Create a text mapper and actor to display the results of picking.
textMapper = vtk.vtkTextMapper()
tprop = textMapper.GetTextProperty()
tprop.SetFontFamilyToArial()
tprop.SetFontSize(10)
# tprop.BoldOn()
# tprop.ShadowOn()
tprop.SetColor(1, 0, 0)
textActor = vtk.vtkActor2D()
textActor.VisibilityOff()
textActor.SetMapper(textMapper)
# Create a cell picker.
picker = vtk.vtkCellPicker()
def ren():
    '''Create a renderer.

    Returns
    -------
    v : vtkRenderer() object
        Renderer.

    Examples
    --------
    >>> from dipy.viz import fvtk
    >>> import numpy as np
    >>> r=fvtk.ren()
    >>> lines=[np.random.rand(10,3)]
    >>> c=fvtk.line(lines, fvtk.colors.red)
    >>> fvtk.add(r,c)
    >>> #fvtk.show(r)
    '''
    return vtk.vtkRenderer()
def add(ren, a):
    ''' Add an actor or volume to the renderer.

    Volumes must go through AddVolume(); every other prop is added via
    AddActor().
    '''
    attach = ren.AddVolume if isinstance(a, vtk.vtkVolume) else ren.AddActor
    attach(a)
def rm(ren, a):
    ''' Remove a specific actor from the renderer.

    Parameters
    ----------
    ren : vtkRenderer
    a : vtkActor
        The actor to detach.
    '''
    # Fixed: the statement was corrupted by a stray "| " token
    # ("ren | .RemoveActor(a)"), which is a syntax error.
    ren.RemoveActor(a)
def clear(ren):
    ''' Remove all actors from the renderer

    Parameters
    ----------
    ren : vtkRenderer
        Renderer to empty; every prop (actor/volume) is detached.
    '''
    ren.RemoveAllViewProps()
def rm_all(ren):
    ''' Remove all actors from the renderer

    Convenience alias that simply delegates to :func:`clear`.
    '''
    clear(ren)
def _arrow(pos=(0, 0, 0), color=(1, 0, 0), scale=(1, 1, 1), opacity=1):
    ''' Internal helper building a single vtk arrow actor (used by axes).

    Note: ``pos`` is accepted but currently unused; the arrow is created
    at the origin and only colored/scaled.
    '''
    source = vtk.vtkArrowSource()
    # arrow.SetTipLength(length)
    mapper = vtk.vtkPolyDataMapper()
    # VTK 6 renamed SetInput -> SetInputData for polydata inputs.
    if major_version <= 5:
        mapper.SetInput(source.GetOutput())
    else:
        mapper.SetInputData(source.GetOutput())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    prop = actor.GetProperty()
    prop.SetColor(color)
    prop.SetOpacity(opacity)
    actor.SetScale(scale)
    return actor
def axes(scale=(1, 1, 1), colorx=(1, 0, 0), colory=(0, 1, 0), colorz=(0, 0, 1),
         opacity=1):
    """ Create an actor with the coordinate's system axes where
    red = x, green = y, blue = z.

    Parameters
    ----------
    scale : tuple (3,)
        axes size e.g. (100, 100, 100)
    colorx : tuple (3,)
        x-axis color. Default red (1, 0, 0).
    colory : tuple (3,)
        y-axis color. Default green (0, 1, 0).
    colorz : tuple (3,)
        z-axis color. Default blue (0, 0, 1).
    opacity : float
        axes opacity in [0, 1].

    Returns
    -------
    vtkAssembly
    """
    arrowx = _arrow(color=colorx, scale=scale, opacity=opacity)
    arrowy = _arrow(color=colory, scale=scale, opacity=opacity)
    arrowz = _arrow(color=colorz, scale=scale, opacity=opacity)
    # the source arrow points along +x; rotate the copies onto y and z
    arrowy.RotateZ(90)
    arrowz.RotateY(-90)
    ass = vtk.vtkAssembly()
    ass.AddPart(arrowx)
    ass.AddPart(arrowy)
    ass.AddPart(arrowz)
    return ass
def _lookup(colors):
    ''' Internal function
    Creates a lookup table with given colors.

    Parameters
    ------------
    colors : array, shape (N,3) or (3,)
        Colormap where every triplet encodes red, green and blue, with
        each channel in [0, 1].

    Returns
    ----------
    vtkLookupTable

    Note (review): a 0-d input (bare scalar) would leave N unbound and
    raise NameError below; callers are expected to pass (3,) or (N,3).
    '''
    colors = np.asarray(colors, dtype=np.float32)
    if colors.ndim > 2:
        raise ValueError('Incorrect shape of array in colors')
    # a single RGB triplet yields a one-entry table
    if colors.ndim == 1:
        N = 1
    if colors.ndim == 2:
        N = colors.shape[0]
    lut = vtk.vtkLookupTable()
    lut.SetNumberOfColors(N)
    lut.Build()
    if colors.ndim == 2:
        scalar = 0
        for (r, g, b) in colors:
            lut.SetTableValue(scalar, r, g, b, 1.0)
            scalar += 1
    if colors.ndim == 1:
        lut.SetTableValue(0, colors[0], colors[1], colors[2], 1.0)
    return lut
def streamtube(lines, colors, opacity=1, linewidth=0.15, tube_sides=8,
lod=True, lod_points=10 ** 4, lod_points_size=5):
""" Uses streamtubes to visualize polylines
Parameters
----------
lines : list
list of N curves represented as 2D ndarrays
colors : array (N, 3) or tuple (3,)
opacity : float
linewidth : float
tube_sides : int
lod : bool
use vtkLODActor rather than vtkActor
lod_points : int
number of points to be used when LOD is in effect
lod_points_size : int
size of points when lod is in effect
Examples
--------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> lines=[np.random.rand(10, 3), np.random.rand(20, 3)]
>>> colors=np.random.rand(2, 3)
>>> c=fvtk.streamtube(lines, colors)
>>> fvtk.add(r,c)
>>> #fvtk.show(r)
Notes
-----
Streamtubes can be heavy on GPU when loading many streamlines and therefore,
you may experience slow rendering time depending on system GPU. A solution
to this problem is to reduce the number of points in each streamline. In Dipy
we provide an algorithm that will reduce the number of points on the straighter
parts of the streamline but keep more points on the curvier parts. This can
be used in the following way
from dipy.tracking.distances import approx_polygon_track
lines = [approx_polygon_track(line, 0.2) for line in lines]
"""
points = vtk.vtkPoints()
colors = np.asarray(colors)
if colors.ndim == 1:
colors = np.tile(colors, (len(lines), 1))
# Create the polyline.
streamlines = vtk.vtkCellArray()
cols = vtk.vtkUnsignedCharArray()
cols.SetName("Cols")
cols.SetNumberOfComponents(3)
len_lines = len(lines)
prior_line_shape = 0
for i in range(len_lines):
line = lines[i]
streamlines.InsertNextCell(line.shape[0])
for j in range(line.shape[0]):
points.InsertNextPoint(*line[j])
streamlines.InsertCellPoint(j + prior_line_shape)
color = (255 * colors[i]).astype('ubyte')
cols.InsertNextTuple3(*color)
prior_line_shape += line.shape[0]
profileData = vtk.vtkPolyData()
profileData.SetPoints(points)
profileData.SetLines(streamlines)
profileData.GetPointData().AddArray(cols)
# Add thickness to the resulting line.
profileTubes = vtk.vtkTubeFilter()
profileTubes.SetNumberOfSides(tube_sides)
if major_version <= 5:
profileTubes.SetInput(profileData)
else:
profileTubes.SetInputData(profileData)
#profileTubes.SetInput(profileData)
profileTubes.SetRadius(linewidth)
profileMapper = vtk.vtkPolyDataMapper()
profileMapper.SetInputConnection(profileTubes.GetOutputPort())
profileMapper.ScalarVisibilityOn()
profileMapper.SetScalarModeToUsePointFieldData()
profileMapper.SelectColorArray("Cols")
profileMapper.GlobalImmediateModeRendering |
CCLab/Raw-Salad | scripts/db/budget/budgeutr.py | Python | bsd-3-clause | 11,394 | 0.014785 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
import:
Budżet środków europejskich w układzie tradycyjnym
flat structure (each data unit is a separate doc in the collection)
parenting is archieved through 'parent' key
bulk of files:
- this file (budgeutr.py)
- data file CSV, produced from XLS (for example, budgeutr.csv)
- conf file with mongo connection settings
type python budgeutr.py -h for instructions
"""
import getpass
import os
import optparse
import csv
import pymongo
import simplejson as json
from ConfigParser import ConfigParser
collection_budgtr= 'dd_budg2011_tr'
#-----------------------------
def get_db_connect(fullpath, dbtype):
    """Read MongoDB connection settings from an ini-style config file.

    Parameters:
        fullpath: path to the configuration file.
        dbtype: name of the config section holding the settings.

    Returns:
        dict with keys 'host', 'port' (int), 'database', 'username' and
        'password' ('password' is None when the option is absent).
    """
    defaults = {
        'basedir': fullpath
    }
    cfg = ConfigParser(defaults)
    cfg.read(fullpath)
    connect_dict = {}
    connect_dict['host'] = cfg.get(dbtype, 'host')
    connect_dict['port'] = cfg.getint(dbtype, 'port')
    connect_dict['database'] = cfg.get(dbtype, 'database')
    connect_dict['username'] = cfg.get(dbtype, 'username')
    # has_option() replaces the former bare "except:", which could mask
    # unrelated errors (bad types, interpolation failures) as a merely
    # missing password.
    if cfg.has_option(dbtype, 'password'):
        connect_dict['password'] = cfg.get(dbtype, 'password')
    else:
        connect_dict['password'] = None
    return connect_dict
#-----------------------------
def sort_format(src):
    """
    Zero-pad every dash-separated numeric component of src to 4 digits,
    e.g. '1-2-3' -> '0001-0002-0003'.  Components that are not integers
    are passed through unchanged.
    """
    def pad(component):
        try:
            return '%04d' % int(component)
        except (ValueError, TypeError):
            return component

    return '-'.join(pad(component) for component in src.split('-'))
#-----------------------------
def db_insert(data_bulk, db, collname, clean_first=False):
    """Insert a bulk of documents into db[collname].

    When clean_first is True the collection is emptied beforehand.
    Returns the collection's resulting document count.
    """
    collection = db[collname]
    if clean_first:
        collection.remove()
    collection.insert(data_bulk)
    return collection.find().count()
#-----------------------------
def complete_elt_a(src):
    """Copy the integer-typed fields (the totals) from src into a new dict.

    The set of integer fields is taken from schema_dict()['type'].
    """
    int_fields = [key for key, typ in schema_dict()["type"].items()
                  if typ == 'int']
    return {key: src[key] for key in int_fields}
#-----------------------------
def fill_elt_a(idef, curr_type):
    """Build the skeleton document for a level-'a' (program) node.

    The node has no parent and is not a leaf; its 'type' combines the
    given type label with the id.
    """
    idef_str = str(idef)
    return {
        "idef": idef_str,
        "idef_sort": sort_format(idef_str),
        "parent": None,
        "parent_sort": None,
        "level": "a",
        "type": " ".join([curr_type, idef_str]),
        "leaf": False
    }
#-----------------------------
def fill_elt_b(src, idef_b, parent, db):
    """Build a level-'b' ('czesc') document under program `parent`.

    The czesc number may carry a '/NN' suffix selecting a voivodeship
    (looked up in teryt_dict()); a missing name is resolved from the
    traditional-budget collection.
    """
    # filling czesc
    idef= "-".join([str(parent), str(idef_b)])
    idef_sort= sort_format(idef)
    # filling Czesc from the collection of traditional budget
    czesc_idef= src["czesc"].strip()
    suppl_idef= ""
    if "/" in czesc_idef:
        czesc_idef= src["czesc"].split("/")[0].strip()
        suppl_idef= src["czesc"].split("/")[1].strip()
    if len(src['name']) > 0:
        elm_name= src['name']
    else:
        # no name in the CSV row: reuse the name recorded for the same
        # czesc in the traditional-budget collection
        elm_name= db[collection_budgtr].find_one({ "level" : "a", "idef" : czesc_idef }, { "_id": 0, "name": 1 })["name"]
    if len(suppl_idef) > 0:
        elm_name= ": ".join([ elm_name.encode('utf-8'), teryt_dict()[suppl_idef] ])
    return {
        "idef": idef,
        "idef_sort": idef_sort,
        "parent": str(parent),
        "parent_sort": sort_format(str(parent)),
        # NOTE(review): the replace() below looks like a no-op; its first
        # argument was most likely a non-breaking space lost in
        # transcription -- verify against the original source.
        "name": elm_name.replace(" ", " ").strip(),
        "level": "b",
        "type": " ".join(["Część", src["czesc"]]).strip(),
        "leaf": True
    }
#-----------------------------
def fill_docs(budg_data, db):
    """Format parsed CSV rows (list of dicts) into flat mongo documents.

    Adds the hierarchy keys: idef, idef_sort, parent, parent_sort, level,
    leaf.  Level 'a' documents are programs; level 'b' documents are their
    'czesc' children.

    NOTE(review): 'row_dict', 'levels' and 'max_level' are assigned but
    never used.  Also, if the very first row were a totals row,
    'row_dict_a' would be referenced before assignment -- the input file
    is assumed to start with an ordinary program record.
    """
    # format parsed data (list of dicts) for upload
    # add keys: idef, idef_sort, parent, parent_sort, level, leaf
    out= []
    row_dict= {}
    levels= ['a', 'b', 'c', 'd', 'e', 'f', 'g']
    max_level= 0
    idef_level_a= 0
    nss= True # whether a program of NSS (Narodowa Strategia Spójności)
    for row_doc in budg_data:
        if len(row_doc['type'].strip()) > 0: # type is not empty, meaning it's either a program name or 'total' field
            idef_level_b= 0
            # NOTE(review): this mixed-case needle only matches because the
            # code runs under Python 2, where str.upper() on byte strings
            # uppercases ASCII only and leaves the UTF-8 'ół' bytes
            # untouched.  Under Python 3 this branch would never fire.
            if "OGółEM" in row_doc['name'].strip().upper():
                row_dict_a.update(complete_elt_a(row_doc))
                row_dict_a["czesc"]= None # no czesc at the level of program
                out.append(row_dict_a)
            elif "OGÓŁEM" in row_doc['name'].strip().upper() and "NSS" in row_doc['name'].strip().upper(): # NSS total
                nss= False
            elif "OGÓŁEM PROGRAMY" in row_doc['name'].strip().upper(): # grand total
                row_dict_a= row_doc.copy()
                row_dict_a['nss']= False
                row_dict_a.update(fill_elt_a("9999", "Total"))
                row_dict_a.update(complete_elt_a(row_doc))
                row_dict_a["type"]= "Total"
                row_dict_a["name"]= "OGÓŁEM"
                row_dict_a["leaf"]= True
                row_dict_a["czesc"]= None
                out.append(row_dict_a)
            else: # ordinary record - start filling it, will fill values upon meeting 'OGółEM'
                idef_level_a += 1
                row_dict_a= row_doc.copy()
                # cleaning names before insert
                row_dict_a['name']= row_dict_a['name'].replace('\n', ' ')
                row_dict_a['name']= row_dict_a['name'].replace('Ŝ', 'ż')
                # NOTE(review): the first replace() argument was most likely
                # a non-breaking space lost in transcription.
                row_dict_a['name']= row_dict_a['name'].replace(" ", " ").strip()
                row_dict_a['nss']= nss
                row_dict_a.update(fill_elt_a(idef_level_a, row_dict_a['type'])) # idef, parent, leaf, level, etc.
                # immediately creating and saving elt level b
                idef_level_b += 1
                row_dict_b= row_doc.copy()
                row_dict_b.update(fill_elt_b(row_doc, idef_level_b, idef_level_a, db)) # idef, parent, leaf, level, etc.
                out.append(row_dict_b)
        else: # type is empty, dealing with czesc here
            idef_level_b += 1
            row_dict_b= row_doc.copy()
            row_dict_b.update(fill_elt_b(row_doc, idef_level_b, idef_level_a, db)) # idef, parent, leaf, level, etc.
            row_dict_b['nss']= nss
            out.append(row_dict_b)
    return out
#-----------------------------
def csv_parse(csv_read):
    """Parse budget CSV rows into a list of dicts keyed and typed by
    schema_dict().

    Each output dict maps schema aliases (column index -> key) to values
    coerced according to the schema's type table; untyped columns are
    numbers when possible, strings otherwise.

    NOTE(review): the first loop below iterates *all* rows (there is no
    break), so if csv_read is a one-shot iterator (e.g. csv.reader) the
    second loop will see nothing.  Verify csv_read is a list/sequence.
    """
    out= []
    schema= schema_dict()
    dbkey_alias= schema["alias"] # dict of aliases -> document keys in db
    dbval_types= schema["type"] # dict of types -> values types in db
    for row in csv_read:
        keys= tuple(row)
        keys_len= len(keys)
        row= iter(row)
    for row in csv_read:
        i= 0
        dict_row= {} # this holds the data of the current row
        for field in row:
            new_key= [v for k, v in dbkey_alias.iteritems() if i == int(k)][0]
            new_type= None
            if new_key in dbval_types:
                new_type= dbval_types[new_key]
            if new_type == "string":
                dict_row[new_key] = str(field)
            elif new_type == "int":
                # empty cells count as zero
                # (fixed: this line was corrupted by a stray "| " token)
                if field.strip() == '':
                    dict_row[new_key] = 0
                else:
                    dict_row[new_key] = int(field)
            elif new_type == "float":
                # accept a comma as the decimal separator
                # (fixed: this line was corrupted by a stray "| " token)
                if ',' in field:
                    field= field.replace(',', '.')
                dict_row[new_key]= float(field)
            elif new_type is None:
                try:
                    dict_row[new_key]= float(field) # then if it is a number
                    if dict_row[new_key].is_integer(): # it can be integer
                        dict_row[new_key] = int(field)
                except ValueError:
                    dict_row[new_key] = field # no, it is a string
            #additional fields
            dict_row['parent']= None
            i += 1
        out.append(dict_row)
    return out
def schema_dict():
    """Static schema: CSV column aliases and value types for budget rows.

    'alias' maps column index (as string) -> document key;
    'type' maps document key -> coercion type for csv_parse().
    """
    alias = {
        "0": "type",
        "1": "name",
        "2": "czesc",
        "3": "v_eu",
    }
    value_types = {
        "czesc": "string",
        "v_eu": "int",
    }
    return {"alias": alias, "type": value_types}
def teryt_dict():
return {
"02": "Dolnośląskie",
"04": "Kujawsko-pomorskie",
"06": "Lubelskie",
"08": "Lubuskie",
"10": "Łódzkie",
|
bradmontgomery/django-janitor | janitor/__init__.py | Python | mit | 72 | 0 | __version__ = '0.5.0'
# Fixed: this assignment was corrupted by stray "| " separator tokens
# ("defaul | t_ap | p_config"), which is a syntax error.
default_app_config = 'janitor.apps.JanitorConfig'
|
CapstoneGrader/codeta | codeta/models/user.py | Python | mit | 6,342 | 0.000946 | from flask.ext.login import UserMixin, AnonymousUserMixin
from codeta import app, auth, logger
from codeta.models.course import Course
class User(UserMixin):
    """Code TA user account backed by the Users table.

    Mixes in Flask-Login's UserMixin for session support and bundles the
    CRUD and authentication helpers used by the application.
    """

    def __init__(self, user_id, username, password, email, fname, lname,
                 active=True, courses=None):
        # 'courses' is kept for interface compatibility only (it was a
        # mutable default before); the authoritative course list is always
        # (re)loaded from the database via update_courses().
        self.user_id = user_id
        self.username = username
        self.password = password
        self.email = email
        self.fname = fname
        self.lname = lname
        self.active = active
        self.update_courses()

    def is_authenticated(self):
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        # Flask-Login requires a unicode user id.
        return unicode(self.user_id)

    def __repr__(self):
        return '<User %r>' % (self.username)

    def get_courses(self):
        """Return the cached list of Course objects."""
        return self.courses

    def get_course_titles(self):
        """Return the titles of the courses the user is enrolled in."""
        # plain list comprehension instead of the previous side-effect
        # comprehension that appended into a separate list
        return [c.title for c in self.courses]

    def add_course(self, course):
        """Append a course to the cached course list."""
        self.courses.append(course)

    def update_courses(self):
        """Get a fresh list of courses from the database."""
        self.courses = Course.get_courses(self.username)

    def create(self):
        """Register this user in the database.

        Hashes the password, inserts the row and, on success, stores the
        new user_id and the password hash on the instance.

        Returns the new user_id (falsy on failure).
        """
        pw_hash = auth.hash_password(self.password)
        sql = ("""
            insert into Users
                (username, password, email, first_name, last_name)
            values
                (%s, %s, %s, %s, %s)
            returning
                user_id
            """)
        data = (
            self.username,
            pw_hash,
            self.email,
            self.fname,
            self.lname,
        )
        user_id = app.db.exec_query(sql, data, 'commit', 'returning')
        if user_id:
            self.user_id = user_id
            self.password = pw_hash
            logger.debug("Created new user_id: %s | username: %s" % (user_id, self.username))
        else:
            # fixed: referenced the undefined local 'username' here, which
            # raised NameError on the failure path
            logger.debug("Failed to create username: %s" % (self.username))
        return user_id

    def read(self):
        """Refresh this instance's fields from its database row.

        Returns the row dict, or a falsy value when user_id does not
        exist (in which case the instance is left untouched).
        """
        sql = ("""
            select
                *
            from
                Users
            where
                user_id = (%s)
            """)
        data = (
            int(self.user_id),
        )
        user = app.db.exec_query(sql, data, 'fetchall', 'return_dict')
        if user:
            user = user[0]
            self.user_id = int(user['user_id'])
            self.username = user['username']
            self.password = user['password']
            self.email = user['email']
            self.fname = user['first_name']
            self.lname = user['last_name']
        return user

    def update(self):
        """Write this instance's current fields back to the database.

        Returns the commit result (truthy on success).
        """
        sql = ("""
            update Users set
                password = (%s),
                email = (%s),
                first_name = (%s),
                last_name = (%s)
            where
                user_id = (%s)
            """)
        data = (
            self.password,
            self.email,
            self.fname,
            self.lname,
            int(self.user_id),
        )
        commit = app.db.exec_query(sql, data, 'commit')
        if commit:
            logger.debug("Successfully updated user: %s" % (self.username))
        else:
            logger.debug("Failed to update user: %s" % (self.username))
        return commit

    @staticmethod
    def auth_user(username, password):
        """Authenticate a user.

        Returns a User object when the credentials are correct, None on a
        wrong password, or a falsy value when the username is unknown.
        """
        # NOTE(review): logging the plaintext password is a security risk;
        # consider removing it from this debug message.
        logger.debug("User: %s - Pass: %s - auth attempt. " % (username, password))
        # fixed: the closing of this SQL string was corrupted by a stray
        # "| " token that would have ended up inside the query text
        sql = ("""
            select
                *
            from
                Users
            where
                username = (%s)
            """)
        data = (
            username,
        )
        user = app.db.exec_query(sql, data, 'fetchall', 'return_dict')
        if user:
            user = user[0]
            if auth.check_password(password, user['password']):
                # fixed: the 'user_id' key was corrupted ('use | r_id')
                user = User(
                    int(user['user_id']),
                    user['username'],
                    user['password'],
                    user['email'],
                    user['first_name'],
                    user['last_name'])
                logger.debug("User: %s - auth success." % (username))
            else:
                user = None
                logger.debug("User: %s - auth failure." % (username))
        return user

    @staticmethod
    def get_user(user_id):
        """Load a User by id.

        Returns a User object when found, otherwise a falsy value.
        """
        sql = ("""
            select
                *
            from
                Users
            where
                user_id = (%s)
            """)
        data = (
            int(user_id),
        )
        user = app.db.exec_query(sql, data, 'fetchall', 'return_dict')
        if user:
            user = user[0]
            user = User(
                int(user['user_id']),
                user['username'],
                user['password'],
                user['email'],
                user['first_name'],
                user['last_name'])
        return user

    @staticmethod
    def check_username(username):
        """Check whether a username already exists in the database.

        Returns the username when found, otherwise None.
        """
        sql = ("""
            select
                username
            from
                Users
            where
                username = (%s)
            """)
        data = (
            username,
        )
        username = app.db.exec_query(sql, data, 'fetchall', 'return_dict')
        if username:
            return username[0].get('username')
        else:
            return None
|
d0u9/examples | .ycm_extra_conf.py | Python | gpl-2.0 | 6,088 | 0.021025 | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=gnu98',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c',
'-I',
'.',
# For Linux
'-isystem','/usr/lib/gcc/x86_64-linux-gnu/4.8',
'-isystem','/usr/local/include',
'-isystem','/usr/include',
'-I','/usr/include/c++/4.9',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  """Return the absolute path of the directory containing this file."""
  here = os.path.abspath(__file__)
  return os.path.dirname(here)
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
  """Return a copy of compiler flags with relative paths made absolute.

  Handles both the split form ('-I', 'path') and the joined form
  ('-Ipath') of the include/sysroot style flags.  When no working
  directory is given the flags are returned unchanged (as a new list).
  """
  if not working_directory:
    return list(flags)

  path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']
  absolute = []
  next_is_path = False
  for flag in flags:
    rewritten = flag
    if next_is_path:
      next_is_path = False
      if not flag.startswith('/'):
        rewritten = os.path.join(working_directory, flag)
    for prefix in path_flags:
      if flag == prefix:
        # split form: the *next* flag is the path to anchor
        next_is_path = True
        break
      if flag.startswith(prefix):
        # joined form: anchor the path embedded in this flag
        relative = flag[len(prefix):]
        rewritten = prefix + os.path.join(working_directory, relative)
        break
    if rewritten:
      absolute.append(rewritten)
  return absolute
def IsHeaderFile(filename):
  """True when filename carries a C/C++ header extension."""
  return os.path.splitext(filename)[1] in ('.h', '.hxx', '.hpp', '.hh')
def GetCompilationInfoForFile(filename):
  """Look up compile flags for filename in the compilation database.

  The compile_commands.json generated by CMake has no entries for header
  files, so for a header we try each sibling source file (same basename,
  one of SOURCE_EXTENSIONS) and reuse its flags when one exists.
  Returns None when a header has no usable sibling.
  """
  if not IsHeaderFile(filename):
    return database.GetCompilationInfoForFile(filename)

  basename = os.path.splitext(filename)[0]
  for extension in SOURCE_EXTENSIONS:
    candidate = basename + extension
    if os.path.exists(candidate):
      info = database.GetCompilationInfoForFile(candidate)
      if info.compiler_flags_:
        return info
  return None
def FlagsForFile( filename, **kwargs ):
  """YCM entry point: return the compile flags to use for `filename`.

  Uses the compilation database when one was loaded; otherwise falls
  back to the static 'flags' list, anchored at this file's directory.
  """
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )
    # NOTE: This is just for YouCompleteMe; it's highly likely that your project
    # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
    # ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
    try:
      final_flags.remove( '-stdlib=libc++' )
    except ValueError:
      pass
  else:
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
  return {
    'flags': final_flags,
    'do_cache': True
  }
|
mdlaskey/DeepLfD | src/deep_lfd/synthetic/synthetic_rope.py | Python | gpl-3.0 | 3,594 | 0.01419 | import numpy as np
import cv2
from scipy import interpolate
from random import randint
import IPython
from alan.rgbd.basic_imaging import cos,sin
from alan.synthetic.synthetic_util import rand_sign
from alan.core.points import Point
"""
generates rope using non-holonomic car model dynamics (moves with turn radius)
generates labels at ends of rope
parameters:
h, w of image matrix
l, w of rope
returns:
image matrix with rope drawn
[left label, right label]
"""
def get_rope_car(h = 420, w = 420, rope_l_pixels = 800 , rope_w_pixels = 8, pix_per_step = 10, steps_per_curve = 10, lo_turn_delta = 5, hi_turn_delta = 10):
    """Generate a synthetic rope image via non-holonomic car dynamics.

    Returns:
        image: (h, w) matrix with the rope drawn in white (255)
        labels: [left endpoint, right endpoint], each [x, y, angle]
        status: 1 for an accepted sample, -1 for a rejected one
    """
    # randomize start pose: x, y, heading (degrees)
    init_pos = np.array([randint(0, w - 1), randint(0, h - 1), randint(0, 360)])
    all_positions = np.array([init_pos])
    # dependent parameter (use float division)
    # (fixed: this line was corrupted by a stray "| " token)
    num_curves = int(rope_l_pixels/(steps_per_curve * pix_per_step * 1.0))
    # point generation: each curve has a fixed random turn rate
    for c in range(num_curves):
        turn_delta = rand_sign() * randint(lo_turn_delta, hi_turn_delta)
        for s in range(steps_per_curve):
            curr_pos = all_positions[-1]
            delta_pos = np.array([pix_per_step * cos(curr_pos[2]), pix_per_step * sin(curr_pos[2]), turn_delta])
            # (fixed: this line was corrupted by a stray "| " token)
            all_positions = np.append(all_positions, [curr_pos + delta_pos], axis = 0)
    # center the points (avoid leaving image bounds)
    mid_x_points = (min(all_positions[:,0]) + max(all_positions[:,0]))/2.0
    mid_y_points = (min(all_positions[:,1]) + max(all_positions[:,1]))/2.0
    for pos in all_positions:
        pos[0] -= (mid_x_points - w/2.0)
        pos[1] -= (mid_y_points - h/2.0)
    # draw rope
    image = np.zeros((h, w))
    prev_pos = all_positions[0]
    for curr_pos in all_positions[1:]:
        cv2.line(image, (int(prev_pos[0]), int(prev_pos[1])), (int(curr_pos[0]), int(curr_pos[1])), 255, rope_w_pixels)
        prev_pos = curr_pos
    # get endpoint labels, sorted by x
    labels = [all_positions[0], all_positions[-1]]
    if labels[0][0] > labels[1][0]:
        labels = [labels[1], labels[0]]
    #labels = [[l[0], l[1], l[2] + 90] for l in labels]
    # Ignoring Rotation for Now
    labels = [[l[0], l[1], 0] for l in labels]
    # rejection sampling
    for num_label in range(2):
        c_label = labels[num_label]
        # case 1 - endpoint not inside the image bounds
        if check_bounds(c_label, [w, h]) == -1:
            return image, labels, -1
        # case 2 - endpoint on top of another rope segment
        if check_overlap(c_label, [w, h], image, rope_w_pixels) == -1:
            return image, labels, -1
    return image, labels, 1
def check_bounds(label, bounds):
    """Return -1 when label's (x, y) lies within 5 px of (or beyond) any
    image edge; otherwise 0."""
    margin = 5
    for axis in (0, 1):
        coord = label[axis]
        if coord < margin or coord > bounds[axis] - 1 - margin:
            return -1
    return 0
def check_overlap(label, bounds, image, rope_w_pixels):
    """Return -1 when the endpoint at `label` sits on top of another rope
    segment (its neighbourhood is mostly white pixels); otherwise 0."""
    lo = [int(max(0, label[axis] - rope_w_pixels)) for axis in range(2)]
    hi = [int(min(bounds[axis] - 1, label[axis] + rope_w_pixels)) for axis in range(2)]
    white_fraction_sum = 0
    for col in range(lo[0], hi[0]):
        for row in range(lo[1], hi[1]):
            white_fraction_sum += (image[row][col] / 255.0)
    # if more than 60% of the (2w x 2w) neighbourhood is white, the
    # endpoint is probably lying on the rope
    threshold = 0.6 * (hi[1] - lo[1]) * (hi[0] - lo[0])
    if white_fraction_sum > threshold:
        return -1
    return 0
|
kenshay/ImageScript | Script_Runner/PYTHON/Lib/ctypes/_aix.py | Python | gpl-3.0 | 12,565 | 0.002149 | """
Lib/ctypes.util.find_library() support for AIX
Similar approach as done for Darwin support by using separate files
but unlike Darwin - no extension such as ctypes.macholib.*
dlopen() is an interface to AIX initAndLoad() - primary documentation at:
https://www.ibm.com/support/knowledgecenter/en/ssw_aix_61/com.ibm.aix.basetrf1/dlopen.htm
https://www.ibm.com/support/knowledgecenter/en/ssw_aix_61/com.ibm.aix.basetrf1/load.htm
AIX supports two styles for dlopen(): svr4 (System V Release 4) which is common on posix
platforms, but also a BSD style - aka SVR3.
From AIX 5.3 Difference Addendum (December 2004)
2.9 SVR4 linking affinity
Nowadays, there are two major object file formats used by the operating systems:
XCOFF: The COFF enhanced by IBM and others. The original COFF (Common
Object File Format) was the base of SVR3 and BSD 4.2 systems.
ELF: Executable and Linking Format that was developed by AT&T and is a
base for SVR4 UNIX.
While the shared library content is identical on AIX - one is located as a filepath name
(svr4 style) and the other is located as a member of an archive (and the archive
is located as a filepath name).
The key difference arises when supporting multiple abi formats (i.e., 32 and 64 bit).
For svr4 either only one ABI is | supported, or there are two directories, or there
are different file names. The most common solution for multiple ABI is multiple
directories.
For the XCOFF (aka AIX) style - one directory (one archive file) is sufficient
as multiple shared libraries ca | n be in the archive - even sharing the same name.
In documentation the archive is also referred to as the "base" and the shared
library object is referred to as the "member".
For dlopen() on AIX (read initAndLoad()) the calls are similar.
Default activity occurs when no path information is provided. When path
information is provided dlopen() does not search any other directories.
For SVR4 - the shared library name is the name of the file expected: libFOO.so
For AIX - the shared library is expressed as base(member). The search is for the
base (e.g., libFOO.a) and once the base is found the shared library - identified by
member (e.g., libFOO.so, or shr.o) is located and loaded.
The mode bit RTLD_MEMBER tells initAndLoad() that it needs to use the AIX (SVR3)
naming style.
"""
__author__ = "Michael Felt <aixtools@felt.demon.nl>"
import re
from os import environ, path
from sys import executable
from ctypes import c_void_p, sizeof
from subprocess import Popen, PIPE, DEVNULL
# Executable bit size - 32 or 64
# Used to filter the search in an archive by size, e.g., -X64
AIX_ABI = sizeof(c_void_p) * 8
from sys import maxsize
def _last_version(libnames, sep):
def _num_version(libname):
# "libxyz.so.MAJOR.MINOR" => [MAJOR, MINOR]
parts = libname.split(sep)
nums = []
try:
while parts:
nums.insert(0, int(parts.pop()))
except ValueError:
pass
return nums or [maxsize]
return max(reversed(libnames), key=_num_version)
def get_ld_header(p):
    """Scan p.stdout for the next loader-section header path line.

    A header line starts with '/', './' or '../'; it is returned (minus
    the trailing newline) once a following "INDEX" line confirms it.
    Returns None when the stream ends without a confirmed header.
    """
    current = None
    for line in p.stdout:
        if line.startswith(('/', './', '../')):
            current = line
        elif "INDEX" in line:
            return current.rstrip('\n')
    return None
def get_ld_header_info(p):
    """Collect the info lines that follow a loader header.

    Info lines begin with a digit; reading stops at the first line that
    does not (the blank separator line, which is consumed).
    """
    collected = []
    for line in p.stdout:
        if not re.match("[0-9]", line):
            # separator (blank) line: consumed, ends this header's info
            break
        collected.append(line)
    return collected
def get_ld_headers(file):
    """
    Parse the header of the loader section of executable and archives.

    Runs ``/usr/bin/dump -X<ABI> -H file`` as a subprocess and returns a
    list of (ld_header, ld_header_info) tuples, one per loader header.
    """
    # get_ld_headers parsing:
    # 1. Find a line that starts with /, ./, or ../ - set as ld_header
    # 2. If "INDEX" occurs in a following line - return ld_header
    # 3. get info (lines starting with [0-9])
    ldr_headers = []
    p = Popen(["/usr/bin/dump", f"-X{AIX_ABI}", "-H", file],
              universal_newlines=True, stdout=PIPE, stderr=DEVNULL)
    # be sure to read to the end-of-file - getting all entries
    while True:
        ld_header = get_ld_header(p)
        if ld_header:
            ldr_headers.append((ld_header, get_ld_header_info(p)))
        else:
            break
    p.stdout.close()
    # BUG FIX: was "p.wait" (a bare attribute access) - the child process
    # was never actually reaped.  Call it.
    p.wait()
    return ldr_headers
def get_shared(ld_headers):
    """
    Return the shareable-object entries found in *ld_headers*.

    A "[" in the header line introduces the member name; everything from
    that "[" up to (but not including) the trailing colon is kept, so the
    "[" and "]" emitted by ``dump -H`` remain in the result.
    """
    members = []
    for line, _info in ld_headers:
        # only lines containing "[" describe archive members
        bracket = line.find("[")
        if bracket >= 0:
            # strip the leading path and the trailing colon
            members.append(line[bracket:-1])
    return members
def get_one_match(expr, lines):
    """
    Return the member name matching *expr*, or None unless there is
    exactly one match.  The surrounding "[" and "]" are stripped.
    """
    # member names in the ld_headers output sit between square brackets
    bracketed = rf'\[({expr})\]'
    hits = [m for m in (re.search(bracketed, line) for line in lines) if m]
    if len(hits) != 1:
        return None
    return hits[0].group(1)
# additional processing to deal with AIX legacy names for 64-bit members
def get_legacy(members):
    """
    Resolve historical (AIX4-era) member names for archive members,
    e.g. shr.o for 32-bit and shr_64.o for 64-bit binaries in
    /usr/lib/libc.a.  Returns the matching member name or None.
    """
    if AIX_ABI == 64:
        # 64-bit legacy members are one of shr64.o, shr_64.o or shr4_64.o
        return get_one_match(r'shr4?_?64\.o', members)
    # 32-bit legacy names - both shr.o and shr4.o exist.
    # shr.o is the preferred name, so it is tried first;
    # shr4.o is returned only when shr.o does not exist.
    for legacy_name in ('shr.o', 'shr4.o'):
        found = get_one_match(re.escape(legacy_name), members)
        if found:
            return found
    return None
def get_version(name, members):
    """
    Return the highest-versioned member matching lib<name>, if any.

    Called when an unversioned libFOO.a(libFOO.so) member has not been
    found.  Member versioning is expected to follow GNU libtool
    conventions (highest of libFOO.so.X, libFOO.so.X.Y, libFOO.so.X.Y.Z).
    Before that convention became the standard regardless of binary
    size, AIX packagers inserted "64" or "_64" between libFOO and .so
    for 64-bit members - generally libFOO_64.so, occasionally
    libFOO64.so - so that spelling is tried as well.
    """
    # A versioned member must end in '.so.' plus at least one digit;
    # any combination of additional dot-digit groups is accepted after
    # that.  Anything beyond libFOO.so.digits[.digits...] is treated as
    # a member name outside normal expectations and ignored.
    patterns = (rf'lib{name}\.so\.[0-9]+[0-9.]*',
                rf'lib{name}_?64\.so\.[0-9]+[0-9.]*')
    for pattern in patterns:
        hits = (re.search(pattern, line) for line in members)
        found = [hit.group(0) for hit in hits if hit]
        if found:
            return _last_version(found, '.')
    return None
def get_member(name, members):
"""
Return an archive member m |
Gustry/inasafe | safe/metadata/property/list_property.py | Python | gpl-3.0 | 1,695 | 0 | # -*- coding: utf-8 -*-
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**metadata module.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'ismail@kartoza.com'
__revision__ = '$Format:%H$'
__date__ = '10/12/15'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import json
from types import NoneType
from safe.common.exceptions import MetadataCastError
from safe.metadata.property import BaseProperty
class ListProperty(BaseProperty):
    """Metadata property whose value is a JSON-serialisable list."""

    # if you edit this you need to adapt accordingly xml_value and is_valid
    _allowed_python_types = [list, NoneType]

    def __init__(self, name, value, xml_path):
        super(ListProperty, self).__init__(
            name, value, xml_path, self._allowed_python_types)

    @classmethod
    def is_valid(cls, value):
        # every value is accepted here; type enforcement happens upstream
        return True

    def cast_from_str(self, value):
        """Parse *value* as JSON, wrapping parse failures."""
        try:
            return json.loads(value)
        except ValueError as error:
            raise MetadataCastError(error)

    @property
    def xml_value(self):
        """JSON text for list values, empty string for None."""
        kind = self.python_type
        if kind is list:
            return json.dumps(self.value)
        if kind is NoneType:
            return ''
        raise RuntimeError('self._allowed_python_types and self.xml_value'
                           'are out of sync. This should never happen')
|
YosaiProject/yosai_dpcache | tests/cache/test_utils.py | Python | apache-2.0 | 905 | 0 | from unittest import TestCase
from dogpile.cache import util
class UtilsTest(TestCase):
    """ Test the relevant utils functionality.
    """
    # NOTE: the original source text was garbled by embedded " | "
    # artifacts that broke the syntax; reconstructed below.
    def test_coerce_string_conf(self):
        """coerce_string_conf converts numeric strings to int/float."""
        settings = {'expiration_time': '-1'}
        coerced = util.coerce_string_conf(settings)
        self.assertEqual(coerced['expiration_time'], -1)
        settings = {'expiration_time': '+1'}
        coerced = util.coerce_string_conf(settings)
        self.assertEqual(coerced['expiration_time'], 1)
        self.assertEqual(type(coerced['expiration_time']), int)
        settings = {'arguments.lock_sleep': '0.1'}
        coerced = util.coerce_string_conf(settings)
        self.assertEqual(coerced['arguments.lock_sleep'], 0.1)
        settings = {'arguments.lock_sleep': '-3.14e-10'}
        coerced = util.coerce_string_conf(settings)
        self.assertEqual(coerced['arguments.lock_sleep'], -3.14e-10)
|
class BiomartAttributePage(object):
    """A named page grouping biomart attributes, with optional defaults."""

    def __init__(self, name, display_name=None, attributes=None, default_attributes=None, is_default=False):
        self.name = name
        # Fall back to the internal name when no display name was given.
        # (Original text was garbled here: "self.display | _name".)
        self.display_name = display_name or name
        self.attributes = attributes if attributes else {}
        self.default_attributes = default_attributes if default_attributes else []
        self.is_default = is_default

    def add(self, attribute):
        """Register *attribute*, flagging it as default when listed so."""
        attribute.is_default = attribute.name in self.default_attributes
        self.attributes[attribute.name] = attribute

    def __repr__(self):
        # (Original text was garbled here: "return | ...".)
        return "'%s': (attributes: %s, defaults: %s)" % (
            self.display_name, self.attributes, repr(self.default_attributes))
|
benkoo/nand2tetris | 06/Python/FileManipulation/FileError.py | Python | cc0-1.0 | 635 | 0 | # Copyright (C) 2011 Mark Armbrust. Permission granted for educational use.
"""
hasmError.py -- Error handling for Hack Assembler
See "The Elements of Computing Systems", by Noam Nisan and Shimon Schocken
"""
import sys
def Error(message, lineNumber=0, line=''):
    """
    Print an error message and continue.

    When lineNumber is non-zero the offending source line is echoed
    first and the message is printed indented beneath it; a blank line
    always follows.
    """
    if lineNumber != 0:
        print('Line %d: %s' % (lineNumber, line))
        print('    '+message)
    else:
        print(message)
    # Reconstructed: the original text read print(' | '), a dataset-join
    # artifact of print('').
    print('')
def FatalError(message, lineNumber=0, line=''):
    """
    Print an error message via Error(), then abort with exit status -1.
    """
    Error(message, lineNumber, line)
    sys.exit(-1)
|
neale/CS-program | 533-ReinforcementLearning/assignment1/mdp2.py | Python | unlicense | 6,848 | 0.004965 | import sys
import operator
import numpy as np
import matplotlib.pyplot as plt
import itertools, functools
import re
import argparse
""" Grid Layout
grid[0][0] = num_states
grid[0][1] = num_actions
"""
def load_args():
parser = argparse.ArgumentParser(description='Description of your program')
parser.add_argument('-t', '--timesteps', default=0, help='horizon length, discarded if discount provided', required=False)
parser.add_argument('-g', '--gamma', default=0, help='discount factor', required=False)
parser.add_argument('-i', '--input_file', default='MDP1.txt', help='input file with MDP description', required=True)
parser.add_argument('-e', '--epsilon', default=None, help='epsilon, or early stopping conditions', required=False)
args = parser.parse_args()
return args
def load_data(path):
with open(path, 'rb') as f:
train = f.readlines()
train = [line.strip('\n') for line in train]
train = [re.sub(r'[ | ^\x00-\x7f]',r'', line) for line in train]
train[0] = [int(a) for a in train[0].split(' ')]
num_states, num_actions = train[0]
lines = num_actions * num_states + num_actions
grid = []
for i in range(1, lines+(num_actions-1)):
if (i-1) % (num_states+1) is not 0:
grid.append([float(n) for n in train[i].split(' ')[::4]])
| train[i] = [float(n) for n in train[i].split(' ')[::4]]
actions = []
for i in range(num_actions):
actions.append(grid[(i*num_states):((1+i)*num_states)])
train = np.array(train)
return train, actions
class MDP(object):
def __init__(self, args, grid, actions):
self.args = args
self.grid = grid
self.gamma = float(args.gamma)
self.num_states, self.num_actions = grid[0]
self.actions = actions
self.rewards = grid[-1]
self.Value = [x for x in self.rewards]
self.print_attrs()
if args.epsilon is None:
self.epsilon = ((1*10**-10)*((1-self.gamma)**2))/(2*(self.gamma**2))
else:
self.epsilon = float(args.epsilon)
def print_attrs(self):
print "number of states: {}\n".format(self.num_states)
print "number of possible actions: {}\n".format(self.num_actions)
print "rewards per state: {}\n".format(self.rewards)
# Reward of being in a given state, given by value iteration
def Reward(self, state):
return self.rewards[state]
# returns probability of going to state X from state Y
def T(self, state, action, next_state):
return self.actions[action][state][next_state]
"""
Value Iteration algorithm:
U1(state) = Reward(state)
Ui+1(state) = Reward(state) = gamma*max(for all next states (T(state, action, next_state)(U(i))))
computes the utility of each state when considering all next states
"""
def V(self, state):
p_actions = []
max_p, sum_p = 0, 0
for action in range(self.num_actions):
sum_p = 0
p_actions = []
for next_state in range(self.num_states):
p_actions.append((self.T(state, action, next_state), action, next_state))
for p in p_actions:
sum_p += p[0] * self.Value[p[2]]
if (sum_p > max_p) or (max_p is 0):
max_p = sum_p
return self.gamma*max_p + self.Reward(state)
"""
Q iterates through the algorithm until the utility update is less than delta
as the utility of each state is updated, the difference between the old and the
new utility functions can be taken, this is compared against the delta equation
"""
def Q(self) :
# fill in Utility for each state
max_state = 1
if self.epsilon > 0:
while max_state > self.epsilon:
max_state = 0
new_value = [0]*self.num_states
next_prob = []
for state in range(self.num_states):
state_value = self.V(state)
if state_value is not None:
max_state = max(max_state, abs(self.Value[state] - state_value))
new_value[state] = state_value
self.Value = new_value
else:
# for finite horizon, collect intermediate V and Pi
values, policies = [], []
for it in range(int(self.args.timesteps)):
for s in range(it):
print it
new_value = [0]*self.num_states
next_prob = []
for state in range(self.num_states):
state_value = self.V(state)
if state_value is not None:
max_state = max(max_state, abs(self.Value[state] - state_value))
new_value[state] = state_value
self.Value = new_value
values.append(self.Value)
policies.append(self.policy())
return values, policies
return self.Value
""" finds the best policy based on the current utility function
simply returns the best next state: next state with the highest utility
"""
def policy(self):
proto_policy = []
def argmax(state):
res = {}
for action in range(self.num_actions):
res[action] = 0
self.p_states = []
for next_state in range(self.num_states):
self.p_states.append((self.T(state, action, next_state), action, next_state))
for p in self.p_states:
res[action] += p[0] * self.Value[p[2]]
return (max(res.items(), key=operator.itemgetter(1))[0] if res else None)
for state in range(self.num_states):
proto_policy.append(argmax(state))
return proto_policy
if __name__ == '__main__':
args = load_args()
grid, actions = load_data(args.input_file)
mdp = MDP(args, grid, actions)
if int(args.timesteps) > 0: finite = True
else: finite = False
if finite is False:
Utility = mdp.Q()
Policy = mdp.policy()
U = ["%.5f" % v for v in Utility]
P = ["%.5f" % v for v in Policy]
print "**************************************\nEnd Policy: {}\nEnd Value function: {}\n**************************************".format(P, U)
else:
Utility, Policy = mdp.Q()
for i in range(10):
U = ["%.5f" % v for v in Utility[i]]
P = ["%.5f" % v for v in Policy[i]]
print "***********************************"
print "Utility for state {} : {}".format(i, U)
print "Policy for state {} : {}\n**************************************".format(i, P)
|
iamsteadman/bambu-cron | bambu_cron/middleware.py | Python | apache-2.0 | 152 | 0.013158 | import bambu_cron
bambu_cron.autodiscover()
class CronMiddleware(object):
def process_reques | t(self, *args, **kwargs): |
bambu_cron.site.run() |
retr0h/maquina | test/unit/test_scenario.py | Python | mit | 2,054 | 0 | # Copyright (c) 2015-2017 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import pytest
from molecule import scenario
@pytest.fixture
def scenario_instance(config_instance):
    """Scenario built from the shared config_instance fixture.

    (Original text was garbled: "return scen | ario.Scenario(...)".)
    """
    return scenario.Scenario(config_instance)
def test_name_property(scenario_instance):
assert 'default' == scenario_instance.name
def test_directory_property(molecule_scenario_directory, scenario_instance):
assert molecule_scenario_directory == scenario_instance.directory
def test_check_sequence_property(scenario_instance):
x = ['destroy', 'create', 'converge', 'check', 'destroy']
assert x == scenario_instance.check_sequence
def test_converge_sequence_property(scenario_instance):
x = ['create', 'converge']
assert x == scenario_instance.converge_sequence
def test_test_sequence_property(scenario_instance):
x = [
'destroy', 'dependency', 'syntax', 'create', 'converge', 'idempotence',
'lint', 'verify', 'destroy'
]
assert x == scenario_instance.test_sequence
|
c-amr/camr | model.py | Python | gpl-2.0 | 18,664 | 0.015216 | #!/usr/bin/python
from __future__ import absolute_import
import bz2,contextlib
import numpy as np
import sys
import json
import cPickle as pickle
#import simplejson as json
from constants import *
from common.util import Alphabet,ETag,ConstTag
import importlib
from collections import defaultdict
_FEATURE_TEMPLATES_FILE = './feature/basic_abt_feats.templates'
class Model():
"""weights and templates"""
#weight = None
#n_class = None
#n_rel = None
#n_tag = None
indent = " "*4
#feature_codebook = None
#class_codebook = None
#feats_generator = None
def __init__(self,elog=sys.stdout):
self.elog = elog
self.weight = None
self.aux_weight = None
self.avg_weight = None # for store the averaged weights
#self.n_class = n_class
#self.n_rel = n_rel
#self.n_tag = n_tag
self._feats_templates_file = _FEATURE_TEMPLATES_FILE
self._feature_templates_list = []
self._feats_gen_filename = None
self.feats_generator = None
self.token_to_concept_table = defaultdict(set)
self.pp_count_dict = defaultdict(int)
self.total_num_words = 0
self.token_label_set = defaultdict(set)
self.class_codebook = None
self.feature_codebook = None
self.rel_codebook = Alphabet()
self.tag_codebook = {
'Concept':Alphabet(),
'ETag':Alphabet(),
'ConstTag':Alphabet(),
'ABTTag':Alphabet()
}
self.abttag_count = defaultdict(int)
def setup(self,action_type,instances,parser,feature_templates_file=None):
if feature_templates_file:
self._feats_templates_file = feature_templates_file
self.class_codebook = Alphabet.from_dict(dict((i,k) for i,(k,v) in enumerate(ACTION_TYPE_TABLE[action_type])),True)
self.feature_codebook = dict([(i,Alphabet()) for i in self.class_codebook._index_to_label.keys()])
self.read_templates()
#n_rel,n_tag = self._set_rel_tag_codebooks(instances,parser)
n_subclass = self._set_rel_tag_codebooks(instances,parser)
self._set_class_weight(self.class_codebook.size(),n_subclass)
self._set_statistics(instances)
self.output_feature_generator()
def _set_statistics(self,instances):
#pp_count_dict = defaultdict(int)
for inst in instances:
sent = inst.tokens
self.total_num_words += len(sent)
for token in sent:
if token['pos'] == 'IN' and token['rel'] == 'prep':
self.pp_count_dict[token['form'].lower()] += 1
def _set_rel_tag_codebooks(self,instances,parser):
#TODO
self.rel_codebook.add(NULL_EDGE)
self.rel_codebook.add(START_EDGE)
#self.tag_codebook['Concept'].add(NULL_TAG)
for inst in instances:
gold_graph = inst.gold_graph
gold_nodes = gold_graph.nodes
#gold_edges = gold_graph.edges
sent_tokens = inst.tokens
#state = parser.testOracleGuide(inst)
for g,d in gold_graph.tuples():
if isinstance(g,int):
gnode = gold_nodes[g]
g_span_wds = [tok['lemma'] for tok in sent_tokens if tok['id'] in range(gnode.start,gnode.end)]
g_span_ne = sent_tokens[g]['ne']
g_entity_tag = gold_graph.get_node_tag(g)
#if len(g_span_wds) > 1:
# for gwd in g_span_wds:
# self.token_to_concept_table[gwd].add(g_entity_tag)
if g_span_ne not in ['O','NUMBER']: # is name entity
self.token_to_concept_table[g_span_ne].add(g_entity_tag)
self.token_to_concept_table[','.join(g_span_wds)].add(g_entity_tag)
if isinstance(g_entity_tag,ETag):
self.tag_codebook['ETag'].add(g_entity_tag)
elif isinstance(g_entity_tag,ConstTag):
self.tag_codebook['ConstTag'].add(g_entity_tag)
else:
self.tag_codebook['Concept'].add(g_entity_tag)
else:
g_entity_tag = gold_graph.get | _node_tag(g)
self.tag_codebook['ABTTag'].add(g_entity_tag)
self.abttag_count[g_entity_tag] += 1
'''
elif g in state.gold_graph.abt_node_table and isinstance(state.gold_graph.abt_node_table[g],int): # post aligned
| gnode = state.A.nodes[state.gold_graph.abt_node_table[g]]
g_span_wds = [tok['lemma'] for tok in sent_tokens if tok['id'] in range(gnode.start,gnode.end)]
g_span_ne = sent_tokens[state.gold_graph.abt_node_table[g]]['ne']
g_entity_tag = gold_graph.get_node_tag(g)
if g_span_ne not in ['O','NUMBER']: # is name entity
self.token_to_concept_table[g_span_ne].add(g_entity_tag)
self.token_to_concept_table[','.join(g_span_wds)].add(g_entity_tag)
if isinstance(g_entity_tag,ETag):
self.tag_codebook['ETag'].add(g_entity_tag)
elif isinstance(g_entity_tag,ConstTag):
self.tag_codebook['ConstTag'].add(g_entity_tag)
else:
self.tag_codebook['Concept'].add(g_entity_tag)
'''
if isinstance(d,int):
dnode = gold_nodes[d]
d_span_wds = [tok['lemma'] for tok in sent_tokens if tok['id'] in range(dnode.start,dnode.end)]
d_span_ne = sent_tokens[d]['ne']
d_entity_tag = gold_graph.get_node_tag(d)
#if len(d_span_wds) > 1:
# for dwd in d_span_wds:
# self.token_to_concept_table[dwd].add(d_entity_tag)
if d_span_ne not in ['O','NUMBER']:
self.token_to_concept_table[d_span_ne].add(d_entity_tag)
self.token_to_concept_table[','.join(d_span_wds)].add(d_entity_tag)
if isinstance(d_entity_tag,ETag):
self.tag_codebook['ETag'].add(d_entity_tag)
elif isinstance(d_entity_tag,ConstTag):
self.tag_codebook['ConstTag'].add(d_entity_tag)
else:
self.tag_codebook['Concept'].add(d_entity_tag)
#self.tag_codebook.add(d_entity_tag)
else:
d_entity_tag = gold_graph.get_node_tag(d)
self.tag_codebook['ABTTag'].add(d_entity_tag)
self.abttag_count[d_entity_tag] += 1
'''
elif d in state.gold_graph.abt_node_table and isinstance(state.gold_graph.abt_node_table[d],int): # post aligned
dnode = state.A.nodes[state.gold_graph.abt_node_table[d]]
d_span_wds = [tok['lemma'] for tok in sent_tokens if tok['id'] in range(dnode.start,dnode.end)]
d_span_ne = sent_tokens[state.gold_graph.abt_node_table[d]]['ne']
d_entity_tag = gold_graph.get_node_tag(d)
if d_span_ne not in ['O','NUMBER']: # is name entity
self.token_to_concept_table[d_span_ne].add(d_entity_tag)
self.token_to_concept_table[','.join(d_span_wds)].add(d_entity_tag)
if isinstance(d_entity_tag,ETag):
self.tag_codebook['ETag'].add(d_entity_tag)
elif isinstance(d_entity_tag,ConstTag):
self.tag_codebook['ConstTag'].add(d_entity_tag)
else:
self.tag_codebook['Concept'].add(d_entity_tag)
'''
g_edge_label = gold_graph.get_edge_label(g,d)
#if g_span_ne not in ['O','NUMBER']:
# sel |
GbalsaC/bitnamiP | venv/src/django-oauth2-provider/tests/settings.py | Python | agpl-3.0 | 1,878 | 0.003195 | # Django settings for example project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Tester', 'test@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '%s/db.sqlite' % os.path.dirname(__file__), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
SITE_ID = 1
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media serve | d from MEDIA_ROOT. Make sure to use a
| # trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'secret'
ROOT_URLCONF = 'tests.urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'provider',
'provider.oauth2',
)
|
unlessbamboo/grocery-shop | language/gcc/compile.py | Python | gpl-3.0 | 4,158 | 0.00051 | #!/usr/bin/env python
# coding:utf-8
"""
设置基础环境
"""
import os
import shutil
import re
import traceback
import sys
import subprocess
def exception_catch(func, *args, **kw):
    """Decorator that logs any exception raised by *func*.

    Note: the decorator's own *args/**kw are never used; only the
    wrapped call's arguments matter.  (Python 2 source.)
    """
    def inner_func(*args, **kw):
        """Run *func*, printing one line per traceback frame on failure."""
        try:
            return func(*args, **kw)
        except BaseException, msg:
            # Print every frame of the traceback together with the
            # failing call's arguments.  The exception is swallowed, so
            # inner_func returns None on failure.
            exc_type, _, exc_tb = sys.exc_info()
            trace_list = traceback.extract_tb(exc_tb)
            for (filename, lineno, funcname, _) in trace_list:
                print "Error, type:%s, file:%s, func:%s" \
                    ",lineno:%s, msg:%s, args:%s, kwargs:%s" % (
                        exc_type, filename, funcname,
                        lineno, msg, args, kw)
    return inner_func
def create_build_directory():
    """create build directory

    Create the build directory tree (build/, build/conf, build/bin,
    build/libs, build/include) used to hold the compiled executables,
    configuration files, libraries and headers.
    """
    build_dir = ['build', 'build/conf', 'build/bin', 'build/libs',
                 'build/include']
    for path in build_dir:
        if not os.path.isdir(path):
            os.makedirs(path)
def update_run_ldconf(install_dir):
"""update and run ldconfig
添加/apps/gcc/libs/lib到ldconfig中
"""
bold_conf = "/etc/ld.so.conf.d/gcc-x86_64.conf"
bo_value = install_dir + "/libs/lib"
if not os.path.isfile(bold_conf):
open(bold_conf, "w").close()
with open(bold_conf, "w") as fobj:
| fobj.write(bo_value)
# run ldconf
rst = subprocess.check_call(['ldconfig'])
if rst:
print "Call ldconfig failed:{0}.".format(
subprocess.CalledProcessError)
def update_run_conf(log_dir, install_dir):
    """Update configure

    Runtime configuration: copy the zlog and gcc configs into
    build/conf, then rewrite the log path to *log_dir* and the
    conf_dir= entry to <install_dir>/conf.
    (Original text was garbled: src path read "c | onf/gcc-zlog.ini".)
    """
    src_log_conf = "conf/gcc-zlog.ini"
    new_log_conf = "build/conf/gcc-zlog.ini"
    src_run_conf = "conf/gcc.ini"
    new_run_conf = "build/conf/gcc.ini"
    conf_dir = install_dir + "/conf"
    # mkdir
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    if not os.path.exists(conf_dir):
        os.makedirs(conf_dir)
    # copy
    shutil.copy(src_log_conf, new_log_conf)
    shutil.copy(src_run_conf, new_run_conf)
    # sub gcc-zlog.conf: point the hard-coded log path at log_dir
    with open(new_log_conf, "r") as fobj:
        old_str = fobj.read()
    with open(new_log_conf, "w") as fobj:
        fobj.write(re.sub("/data/log/gcc/", log_dir + "/", old_str))
    # ini: rewrite the conf_dir= entry of gcc.ini
    with open(new_run_conf, "r") as fobj:
        old_str = fobj.read()
    with open(new_run_conf, "w") as fobj:
        fobj.write(re.sub("\nconf_dir=.*?\n",
                          "\nconf_dir={0}\n".format(conf_dir), old_str))
def update_compile_include(install_dir):
    """update common.h

    Rewrite the macro in ./include/common.h that records the location
    of gcc.ini, e.g.:
        #define LOG_CONF_PATH "/apps/gcc/conf/gcc.ini"
    """
    # Quote the new path so it stays a C string literal.
    new_conf = '"' + install_dir + '/conf/gcc.ini' + '"'
    comm = './include/common.h'
    with open(comm, 'r') as fobj:
        old_str = fobj.read()
    with open(comm, 'w') as fobj:
        # {0:>52} right-aligns the quoted path to keep the header tidy.
        fobj.write(re.sub("\n#define LOG_CONF_PATH .*?\n",
                          "\n#define LOG_CONF_PATH{0:>52}\n".format(
                              new_conf), old_str))
def update_compile_makefile(install_dir):
    """update makefile

    Compile-time configuration: rewrite the install_dir= line of the
    Makefile so compilation and installation target *install_dir*.
    """
    mk1 = './Makefile'
    with open(mk1, "r") as fobj:
        old_str = fobj.read()
    with open(mk1, "w") as fobj:
        fobj.write(re.sub("\ninstall_dir=.*\n",
                          "\ninstall_dir={0}\n".format(install_dir),
                          old_str))
@exception_catch
def main(log_dir, install_dir):
    """Prepare the build environment: create the build tree, patch the
    Makefile and common.h, register the runtime libraries with
    ldconfig and install the runtime configs.
    """
    create_build_directory()
    update_compile_makefile(install_dir)
    update_compile_include(install_dir)
    update_run_ldconf(install_dir)
    update_run_conf(log_dir, install_dir)
if __name__ == '__main__':
main('/data/log/gcc', '/apps/gcc')
print "Compile prepare complete!"
|
andrewyoung1991/supriya | supriya/tools/requesttools/ErrorRequest.py | Python | mit | 632 | 0.009494 | # -*- encoding: utf-8 -*-
from supriya.tools.requesttools.Request import Request
class ErrorRequest(Request):
    """Placeholder request type for server error responses.

    Construction is deliberately unimplemented; only request_id is
    meaningful.  (Original text was garbled: "NotImplementedEr | ror"
    and "### | PUBLIC METHODS ###".)
    """

    ### CLASS VARIABLES ###

    __slots__ = (
        )

    ### INITIALIZER ###

    def __init__(
        self,
        ):
        Request.__init__(self)
        raise NotImplementedError

    ### PUBLIC METHODS ###

    def to_osc_message(self):
        raise NotImplementedError

    ### PUBLIC PROPERTIES ###

    @property
    def response_specification(self):
        # No response is ever expected for this request type.
        return None

    @property
    def request_id(self):
        from supriya.tools import requesttools
        return requesttools.RequestId.ERROR
berrange/nova | nova/tests/integrated/test_api_samples.py | Python | apache-2.0 | 173,795 | 0.000685 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import copy
import datetime
import inspect
import os
import re
import urllib
import uuid as uuid_lib
from lxml import etree
import mock
from oslo.config import cfg
from nova.api.metadata import password
from nova.api.openstack.compute.contrib import fping
from nova.api.openstack.compute import extensions
# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.cells import rpcapi as cells_rpcapi
from nova.cells import state
from nova.cloudpipe import pipelib
from nova.compute import api as compute_api
from nova.compute import cells_api as cells_api
from nova.compute import manager as compute_manager
from nova.compute import rpcapi as compute_rpcapi
from nova.conductor import manager as conductor_manager
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.network import api as network_api
from nova import objects
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
import nova.quota
from nova.servicegroup import api as service_group_api
from nova import test
from nova.tests.api.openstack.compute.contrib import test_fping
from nova.tests.api.openstack.compute.contrib import test_networks
from nova.tests.api.openstack.compute.contrib import test_services
from nova.tests.api.openstack import fakes
from nova.tests import fake_block_device
from nova.tests import fake_instance
from nova.tests import fake_network
from nova.tests import fake_network_cache_model
from nova.tests import fake_server_actions
from nova.tests import fake_utils
from nova.tests.image import fake
from nova.tests.integrated import api_samples_test_base
from nova.tests.integrated import integrated_helpers
from nova.tests.objects import test_network
from nova.tests import utils as test_utils
from nova.tests.virt.baremetal.db import base as bm_db_base
from nova import utils
from nova.volume import cinder
CONF = cfg.CONF
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
CONF.import_opt('enable_network_quota',
'nova.api.openstack.compute.contrib.os_tenant_networks')
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
CONF.import_opt('osapi_compute_link_prefix', 'nova.api.openstack.common')
CONF.import_opt('osapi_glance_link_prefix', 'nova.api.openstack.common')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('cell_type', 'nova.cells.opts', group='cells')
CONF.import_opt('db_check_interval', 'nova.cells.state', group='cells')
LOG = logging.getLogger(__name__)
class ApiSampleTestBaseV2(api_samples_test_base.ApiSampleTestBase):
    # Base class for v2 API sample tests: pins the API version and wires
    # up the common fixtures (sample networks, fake spawn) per test.
    _api_version = 'v2'
    def setUp(self):
        """Configure extension flags and stub out networking helpers."""
        extends = []
        self.flags(use_ipv6=False,
                   osapi_compute_link_prefix=self._get_host(),
                   osapi_glance_link_prefix=self._get_glance_host())
        if not self.all_extensions:
            # A subclass may exercise a single extension
            # (extension_name), optionally layered on top of another
            # one (extends_name).
            if hasattr(self, 'extends_name'):
                extends = [self.extends_name]
            ext = [self.extension_name] if self.extension_name else []
            self.flags(osapi_compute_extension=ext + extends)
        super(ApiSampleTestBaseV2, self).setUp()
        self.useFixture(test.SampleNetworks(host=self.network.host))
        fake_network.stub_compute_with_ips(self.stubs)
        fake_utils.stub_out_utils_spawn_n(self.stubs)
        # GENERATE_SAMPLES in the environment switches the run into
        # sample-regeneration mode.
        self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None
class ApiSamplesTrap(ApiSampleTestBaseV2):
    """Make sure extensions don't get added without tests."""
    all_extensions = True

    def _get_extensions_tested(self):
        # Collect the alias of every extension exercised by a test class
        # defined in this module.
        tests = []
        for attr in globals().values():
            if not inspect.isclass(attr):
                continue  # Skip non-class objects
            if not issubclass(attr, integrated_helpers._IntegratedTestBase):
                continue  # Skip non-test classes
            if attr.extension_name is None:
                continue  # Skip base tests
            cls = importutils.import_class(attr.extension_name)
            tests.append(cls.alias)
        return tests

    def _get_extensions(self):
        # Ask the running API which extensions are actually loaded.
        # (Original text was garbled: "for | extension in ...".)
        extensions = []
        response = self._do_get('extensions')
        for extension in jsonutils.loads(response.content)['extensions']:
            extensions.append(str(extension['alias']))
        return extensions

    def test_all_extensions_have_samples(self):
        # NOTE(danms): This is a list of extensions which are currently
        # in the tree but that don't (yet) have tests. This list should
        # NOT be allowed to grow, and should shrink to zero (and be
        # removed) soon.
        do_not_approve_additions = []
        do_not_approve_additions.append('os-create-server-ext')
        tests = self._get_extensions_tested()
        extensions = self._get_extensions()
        missing_tests = []
        for extension in extensions:
            # NOTE(danms): if you add tests, remove it from the
            # exclusions list
            self.assertFalse(extension in do_not_approve_additions and
                             extension in tests)
            # NOTE(danms): if you add an extension, it must come with
            # api_samples tests!
            if (extension not in tests and
                    extension not in do_not_approve_additions):
                missing_tests.append(extension)
        if missing_tests:
            LOG.error("Extensions are missing tests: %s" % missing_tests)
        self.assertEqual(missing_tests, [])
class VersionsSampleJsonTest(ApiSampleTestBaseV2):
def test_versions_get(self):
response = self._do_get('', strip_version=True)
subs = self._get_regexes()
self._verify_response('versions-get-resp', subs, response, 200)
class VersionsSampleXmlTest(VersionsSampleJsonTest):
ctype = 'xml'
class ServersSampleBase(ApiSampleTestBaseV2):
def _post_server(self):
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
}
response = self._do_post('servers', 'server-post-req', subs)
subs = self._get_regexes()
return self._verify_response('server-post-resp', subs, response, 202)
class ServersSampleJsonTest(ServersSampleBase):
def test_servers_post(self):
return self._post_server()
def test_servers_get(self):
uuid = self.test_servers_post()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['hypervisor_hostname'] = r'[\w\.\-]+'
subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
self._verify_response('server-get-resp', subs, response, 200)
def test_servers_list(self):
uuid = self._post_server()
response = self._do_get('servers')
subs = self._get_regexes()
subs['id'] = uuid
self._verify_response('servers-list-resp', subs, response, 200)
def test_servers_details(self):
uuid = self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['hypervisor_hostname'] = r'[\w\.\-]+'
subs['mac_addr'] = '(?:[a |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/WebKit/Source/bindings/scripts/v8_attributes.py | Python | gpl-3.0 | 29,128 | 0.00206 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pylint: disable=relative-import
"""Generate template values for attributes.
Extends IdlType with property |constructor_type_name|.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import idl_types
from idl_types import inherits_interface
from v8_globals import includes
import v8_types
import v8_utilities
from v8_utilities import (cpp_name_or_partial, capitalize, cpp_name, has_extended_attribute,
has_extended_attribute_value, scoped_name, strip_suffix,
uncapitalize, extended_attribute_value_as_list, is_unforgeable,
is_legacy_interface_type_checking)
def attribute_context(interface, attribute, interfaces):
"""Creates a Jinja template context for an attribute of an interface.
Args:
interface: An interface which |attribute| belongs to
attribute: An attribute to create the context for
interfaces: A dict which maps an interface name to the definition
which can be referred if needed
Returns:
A Jinja template context for |attribute|
"""
idl_type = attribute.idl_type
base_idl_type = idl_type.base_type
extended_attributes = attribute.extended_attributes
idl_type.add_includes_for_type(extended_attributes)
if idl_type.enum_values:
includes.add('core/inspector/ConsoleMessage.h')
# [CheckSecurity]
is_do_not_check_security = 'DoNotCheckSecurity' in extended_attributes
is_check_security_for_receiver = (
has_extended_attribute_value(interface, 'CheckSecurity', 'Receiver') and
not is_do_not_check_security)
is_check_security_for_return_value = (
has_extended_attribute_value(attribute, 'CheckSecurity', 'ReturnValue'))
if is_check_security_for_receiver or is_check_security_for_return_value:
includes.add('bindings/core/v | 8/BindingSecurity.h')
# [Constructor]
# TODO(yukishiino): Constructors are much like methods although constructors
# are not methods. Constructors must be data-type properties, and we can
# support them as a kind of methods.
constructor_type = idl_type.constructor_t | ype_name if is_constructor_attribute(attribute) else None
# [CEReactions]
is_ce_reactions = 'CEReactions' in extended_attributes
if is_ce_reactions:
includes.add('core/dom/custom/CEReactionsScope.h')
# [CustomElementCallbacks], [Reflect]
is_custom_element_callbacks = 'CustomElementCallbacks' in extended_attributes
is_reflect = 'Reflect' in extended_attributes
if is_custom_element_callbacks or is_reflect:
includes.add('core/dom/custom/V0CustomElementProcessingStack.h')
# [ImplementedInPrivateScript]
is_implemented_in_private_script = 'ImplementedInPrivateScript' in extended_attributes
if is_implemented_in_private_script:
includes.add('bindings/core/v8/PrivateScriptRunner.h')
includes.add('core/frame/LocalFrame.h')
includes.add('platform/ScriptForbiddenScope.h')
# [OnlyExposedToPrivateScript]
is_only_exposed_to_private_script = 'OnlyExposedToPrivateScript' in extended_attributes
# [PerWorldBindings]
if 'PerWorldBindings' in extended_attributes:
assert idl_type.is_wrapper_type or 'LogActivity' in extended_attributes, '[PerWorldBindings] should only be used with wrapper types: %s.%s' % (interface.name, attribute.name)
# [SaveSameObject]
is_save_same_object = (
'SameObject' in attribute.extended_attributes and
'SaveSameObject' in attribute.extended_attributes)
if is_save_same_object:
includes.add('bindings/core/v8/V8PrivateProperty.h')
if (base_idl_type == 'EventHandler' and
interface.name in ['Window', 'WorkerGlobalScope'] and
attribute.name == 'onerror'):
includes.add('bindings/core/v8/V8ErrorHandler.h')
cached_attribute_validation_method = extended_attributes.get('CachedAttribute')
keep_alive_for_gc = is_keep_alive_for_gc(interface, attribute)
if cached_attribute_validation_method or keep_alive_for_gc:
includes.add('bindings/core/v8/V8HiddenValue.h')
# [CachedAccessor]
is_cached_accessor = 'CachedAccessor' in extended_attributes
if is_cached_accessor:
includes.add('bindings/core/v8/V8PrivateProperty.h')
context = {
'access_control_list': access_control_list(interface, attribute),
'activity_logging_world_list_for_getter': v8_utilities.activity_logging_world_list(attribute, 'Getter'), # [ActivityLogging]
'activity_logging_world_list_for_setter': v8_utilities.activity_logging_world_list(attribute, 'Setter'), # [ActivityLogging]
'activity_logging_world_check': v8_utilities.activity_logging_world_check(attribute), # [ActivityLogging]
'argument_cpp_type': idl_type.cpp_type_args(used_as_rvalue_type=True),
'cached_attribute_validation_method': cached_attribute_validation_method,
'constructor_type': constructor_type,
'cpp_name': cpp_name(attribute),
'cpp_type': idl_type.cpp_type,
'cpp_type_initializer': idl_type.cpp_type_initializer,
'deprecate_as': v8_utilities.deprecate_as(attribute), # [DeprecateAs]
'enum_type': idl_type.enum_type,
'enum_values': idl_type.enum_values,
'exposed_test': v8_utilities.exposed(attribute, interface), # [Exposed]
'has_custom_getter': has_custom_getter(attribute),
'has_custom_setter': has_custom_setter(attribute),
'has_setter': has_setter(interface, attribute),
'idl_type': str(idl_type), # need trailing [] on array for Dictionary::ConversionContext::setConversionType
'is_cached_accessor': is_cached_accessor,
'is_call_with_execution_context': has_extended_attribute_value(attribute, 'CallWith', 'ExecutionContext'),
'is_call_with_script_state': has_extended_attribute_value(attribute, 'CallWith', 'ScriptState'),
'is_ce_reactions': is_ce_reactions,
'is_check_security_for_receiver': is_check_security_for_receiver,
'is_check_security_for_return_value': is_check_security_for_return_value,
'is_custom_element_callbacks': is_custom_element_callbacks,
# TODO(yukishiino): Make all DOM attributes accessor-type properties.
'is_data_type_property': not ('CachedAccessor' in extended_attributes) and is_data_type_property(interface, attribute),
'is_getter_raises_exception': # [RaisesException]
'RaisesException' in extended_attributes and
extended_attributes['RaisesException'] in (None, 'Getter'),
'is_implemented_in_ |
openearth/delft3d-gt-server | delft3dworker/serializers.py | Python | gpl-3.0 | 5,793 | 0.000518 | from django.contrib.auth.models import Group, User
from rest_framework import serializers
from delft3dworker.models import Scenario, Scene, SearchForm, Template, Version_Docker
class VersionSerializer(serializers.ModelSerializer):
"""
A default REST Framework ModelSerializer for the Version_Docker model
source: http://www.django-rest-framework.org/api-guide/serializers/
"""
# here we will write custom serialization and validation methods
class Meta:
model = Version_Docker
fields = "__all__"
class UserSerializer(serializers.ModelSerializer):
"""
A default REST Framework ModelSerializer for the User model
source: http://www.django-rest-framework.org/api-guide/serializers/
"""
# here we will write custom serialization and validation methods
class Meta:
model = User
fields = (
"id",
"username",
"first_name",
"last_name",
"email",
"groups",
)
class GroupSerializer(serializers.ModelSerializer):
"""
A default REST Framework ModelSerializer for the Group model
source: http://www.django-rest-framework.org/api-guide/serializers/
"""
# here we will write custom serialization and validation methods
class Meta:
model = Group
fields = (
"id",
"name",
)
class SceneFullSerializer(serializers.ModelSerializer):
"""
A default REST Framework ModelSerializer for the Scene model, which
is used for detail views of scenes, providing all valuable data of
a single model to the frontend.
source: http://www.django-rest-framework.org/api-guide/serializers/
"""
owner = UserSerializer(read_only=True)
state = serializers.CharField(source="get_phase_display", read_only=True)
template = serializers.SerializerMethodField()
outdated = serializers.BooleanField(source="workflow.is_outdated", read_only=True)
entrypoints = serializers.SerializerMethodField(read_only=True)
outdated_changelog = serializers.CharField(
source="workflow.outdated_changelog", read_only=True
)
class Meta:
model = Scene
fields = (
"date_created",
"date_started",
"fileurl",
"id",
"info",
"name",
"owner",
"parameters",
"phase",
"progress",
"scenario",
"shared",
"state",
"suid",
"task_id",
"workingdir",
"template",
"outdated",
"entrypoints",
"outdated_changelog",
)
def get_entrypoints(self, obj):
if hasattr(obj, "workflow"):
return obj.workflow.outdated_entrypoints()
else:
return None
def get_template(self, obj):
scenario = obj.scenario.first()
# Only retrieve template in case of a connected scenario
if scenario is not None and scenario.template is not None:
return scenario.template.name
else:
return None
class SceneSparseSerializer(serializers.ModelSerializer):
"""
A default REST Framework ModelSerializer for the Scene model, which
is used for list views of scenes, providing only essential data in
a list of many models to the frontend.
source: http://www.django-rest-framework.org/api-guide/serializers/
"""
state = serializers.CharField(source="get_phase_display", read_only=True)
template_name = serializers.SerializerMethodField()
class Meta:
model = Scene
fields = (
"suid",
"id",
"name",
"owner",
"progress",
"shared",
"state",
"template_name",
)
def get_template_name(self, obj):
scenario = obj.scenario.first()
# Only retrieve template in case of a connected scenario
if scenario is not None and scenario.template is not None:
return scenario.template.name
else:
return None
class ScenarioSerializer(serializers.ModelSerializer):
"""
A default REST Framework ModelSerializer for the Scenario model
source: http://www.django-rest-framework.org/api-guide/serializers/
"""
# here we will write custom serialization and validation methods
state = serializers.CharField(source="_update_state_and_save", read_only=True)
owner_url = serializers.HyperlinkedRelatedField(
read_only=True, view_name="user-detail", source="owner"
)
class Meta:
model = Scenario
fields = (
"id",
"name",
"owner_url",
"template",
"parameters",
"state",
"progress",
"scene_set",
)
class SearchFormSerializer(serializers.ModelSerializer):
"""
A default REST Framework ModelSerializer for the Template model
source: http://www.django-rest-framework.org/api-guide/serializers/
"""
# here we will write custom serialization and validation methods
class Meta:
model = SearchForm
fields = (
"id",
"name",
" | sections",
"templates",
)
class TemplateSerializer(serializers.ModelSerializer):
"""
A default REST Framework ModelSerializer for the Template model
source: http://www.django-rest-framework.org/api-guide/serializers/
"""
# here we will write custom serialization and validation methods
clas | s Meta:
model = Template
fields = (
"id",
"name",
"meta",
"sections",
)
|
lordmos/blink | Tools/Scripts/webkitpy/layout_tests/servers/http_server_unittest.py | Python | mit | 4,790 | 0.002505 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA | , OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import sys
import webkitpy.thirdparty.unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests.port import test
from webkitpy.layout_tests.servers.http_server import Li | ghttpd
from webkitpy.layout_tests.servers.http_server_base import ServerError
class TestHttpServer(unittest.TestCase):
def test_start_cmd(self):
# Fails on win - see https://bugs.webkit.org/show_bug.cgi?id=84726
if sys.platform in ('cygwin', 'win32'):
return
host = MockHost()
test_port = test.TestPort(host)
host.filesystem.write_text_file(
"/mock-checkout/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/servers/lighttpd.conf", "Mock Config\n")
host.filesystem.write_text_file(
"/usr/lib/lighttpd/liblightcomp.dylib", "Mock dylib")
server = Lighttpd(test_port, "/mock/output_dir",
additional_dirs={
"/mock/one-additional-dir": "/mock-checkout/one-additional-dir",
"/mock/another-additional-dir": "/mock-checkout/one-additional-dir"})
self.assertRaises(ServerError, server.start)
config_file = host.filesystem.read_text_file("/mock/output_dir/lighttpd.conf")
self.assertEqual(re.findall(r"alias.url.+", config_file), [
'alias.url = ( "/js-test-resources" => "/test.checkout/LayoutTests/resources" )',
'alias.url += ( "/mock/one-additional-dir" => "/mock-checkout/one-additional-dir" )',
'alias.url += ( "/mock/another-additional-dir" => "/mock-checkout/one-additional-dir" )',
'alias.url += ( "/media-resources" => "/test.checkout/LayoutTests/media" )',
])
def test_win32_start_and_stop(self):
host = MockHost()
test_port = test.TestPort(host)
host.filesystem.write_text_file(
"/mock-checkout/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/servers/lighttpd.conf", "Mock Config\n")
host.filesystem.write_text_file(
"/usr/lib/lighttpd/liblightcomp.dylib", "Mock dylib")
host.platform.is_win = lambda: True
host.platform.is_cygwin = lambda: False
server = Lighttpd(test_port, "/mock/output_dir",
additional_dirs={
"/mock/one-additional-dir": "/mock-checkout/one-additional-dir",
"/mock/another-additional-dir": "/mock-checkout/one-additional-dir"})
server._check_that_all_ports_are_available = lambda: True
server._is_server_running_on_all_ports = lambda: True
server.start()
self.assertNotEquals(host.executive.calls, [])
def wait_for_action(action):
if action():
return True
return action()
def mock_returns(return_values):
def return_value_thunk(*args, **kwargs):
return return_values.pop(0)
return return_value_thunk
host.executive.check_running_pid = mock_returns([True, False])
server._wait_for_action = wait_for_action
server.stop()
self.assertEqual(['taskkill.exe', '/f', '/t', '/pid', 42], host.executive.calls[1])
|
wez/watchman | tests/integration/test_invalid_watchmanconfig.py | Python | apache-2.0 | 680 | 0 | # vim:ts=4:sw=4:et:
# Copyright | 2012-present Facebook, Inc.
# Licensed under the Apache License, Version 2.0
# no unicode literals
from __future__ import absolute_import, division, print_function
import os
import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestWatchmanConfigValid(WatchmanTestCase.WatchmanTestCase):
def test_trailing_comma(self):
root = self.mkdtemp()
with open(os.path.join(root, ".watchmanconfig"), "w") as f:
f.write('{"ignore_dirs":["foo",],}')
with self.assert | Raises(Exception) as ctx:
self.watchmanCommand("watch", root)
self.assertIn("failed to parse json", str(ctx.exception))
|
muchnoi/HPGe | bepc/ourweb.py | Python | gpl-3.0 | 5,626 | 0.026681 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ROOT, time
toff = 0 # -8*3600
def Create_html(le,lp):
htmlhead = '''<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta name="author" content="Nickolai Muchnoi">
<meta http-equiv="refresh" content="30">
<title>BEPC-II Beam Energy Measurement System Status</title>
</head>
<style type="text/css">
<!--
BODY {font-family: Arial, Helvetica, sans-serif; font-size: 24px; font-weight: normal; color: #ffffff}
TABLE {font-family: Arial, Helvetica, sans-serif; font-size: 24px; font-weight: normal; color: #ffffff}
A {color: #ffffff; background-color: transparent; text-decoration: none;}
A:visited {color: #ffffff; background-color: transparent; text-decoration: none;}
A:hover {color: #ffffff; background-color: transparent; text-decoration: underline;}
-->
</style>
<SCRIPT type=text/javascript>
function zoom(n,x,y)
{
window.open(n, '_blank', 'width='+x+',height='+y+',status=no,menubar=no,toolbar=no,location=no,scrollbars=no,resizable=yes');
}
</SCRIPT>
<body bgcolor="#404040">
<div align='center'>
<H4><font color=#ffff40>Beam Energy Measurement System Status </font>
<font color=#40ffff>(%s)</font></H4>
''' % (time.ctime())
E_bt = time.ctime(le['t']-toff-le['dt']); E_et = time.ctime(le['t']-toff+le['dt'])
E_EB = le['E']; E_dE = le['dE']; E_BS = le['S']; E_dS = le['dS']
P_bt = time.ctime(lp['t']-toff-lp['dt']); P_et = time.ctime(lp['t']-toff+lp['dt'])
P_EB = lp['E']; P_dE = lp['dE']; P_BS = lp['S']; P_dS = lp['dS']
htmltable = '''
<table width=1000 border=1 bordercolor=#ffffff cellpadding=4 cellspacing=0>
<tr>
<td> </td>
<td nosave align=center> <font color=#40ff40><a href='images/E.png' target='_blank'>Electrons</a></font> </td>
<td nosave align=center> <font color=#ff4040><a href='images/P.png' target='_blank'>Positrons</a></font> </td>
</tr><tr>
<td>Energy, MeV:</td>
<td nosave align=center> <font color=#40ff40>%8.3f ± %8.3f</font> </td>
<td nosave align=center> <font color=#ff4040>%8.3f ± %8.3f</font> </td>
</tr><tr>
<td>Energy spread, keV:</td>
<td nosave align=center> <font color=#40ff40>%5.0f ± %5.0f</font> </td>
<td nosave align=center><font color=#ff4040> %5.0f ± %5.0f</font> </td>
</tr><tr>
<td>Measured from:</td>
<td nosave align=center> <font color=#40ff40>%s</font> </td>
<td nosave align=center> <font color=#ff4040>%s</font> </td>
</tr><tr>
<td>Measured until:</td>
<td nosave align=center> <font color=#40ff40>%s</font> </td>
<td nosave align=center> <font color=#ff4040>%s</font> </td>
</tr>
</table>
''' % (E_EB, E_dE, P_EB, P_dE, E_BS, E_dS, P_BS, P_dS, E_bt, P_bt, E_et, P_et)
# ''' % (E_EB, E_dE, P_EB, P_dE, E_BS, E_dS, P_BS, P_dS, E_bt, P_bt, E_et, P_et)
htmlfoot = '''
<img src='images/in-time.png' align='center', border=0, hspace=10, vspace=10>
</BODY></html>
'''
with open('index.html','w+') as f:
f.write(htmlhead)
f.write(htmltable)
f.write(htmlfoot)
def Get_Graph(filename):
t, dt = int(time.time()), 12*3600
L, G1, G2, N = [], ROOT.TGraphErrors(), ROOT.TGraphErrors(), 0
with open(filename,'r') as f: lines = f.readlines()
# with open(filename,'w+') as f:
for line in lines[1:]:
OK, fields = False, line.strip().split()
if int(fields[0]) > t-dt or line==lines[-1]:
L.append({'t':int(fields[0])+toff,'dt':int(fields[1]),
'E':float(fields[2]),'dE':float(fields[3]), # BEMS Energy
'S':float(fields[4]),'dS':float(fields[5]), # BEMS Spread
'B':float(fields[6]),'dB':float(fields[7])}) # BEPC Energy
t_last, e_last = int(fields[0]), float(fields[2])
# f.write(line)
for R in L:
if R['t'] > t-dt:
G1.SetPoint(N, R['t'], R['E']); G1.SetPointError(N, R['dt'], R['dE'])
G2.SetPoint(N, R['t'], R['B']); G2.SetPointError(N, R['dt'], R['dB']); N+=1
if N: return (G1, G2, t_last, e_last, L[-1])
else: return (0,0,0,0,L[-1])
def Energy_Plot():
EG1, EG2, te, ee, le | = Get_Graph('E.results')
PG1, PG2, tp, ep, lp = Get_Graph('P.results')
MG = ROOT.TMultiGraph()
if EG2: EG2.SetMarkerStyle(20); EG2.SetMarkerColor(ROOT.kGray+1); EG2.SetLineColor(ROOT.kGray+1); EG2.SetLineWidth(1); MG.Add(EG2)
if PG2: PG2.SetMarkerStyle(20); PG2.SetMarkerColor(ROOT.kGray+1); PG2.SetLineColor(ROOT.kGray+1); PG2.SetLineWidth(1); MG.Add(PG2)
if EG1: EG1.SetMarkerStyle(20); EG1.SetMarkerColor(ROOT.kGreen+2); EG1.SetLineColor(ROOT.kGreen+2); EG1.SetLineWidth(2); MG.Add(EG1 | )
if PG1: PG1.SetMarkerStyle(20); PG1.SetMarkerColor(ROOT.kRed+2); PG1.SetLineColor(ROOT.kRed+2); PG1.SetLineWidth(2); MG.Add(PG1)
cv = ROOT.TCanvas('cvt','Compton Beam Energy Monitor',0,0,1000,800); cv.cd(); cv.SetGrid()
MG.Draw('AP'); MG.GetYaxis().SetDecimals(); MG.GetYaxis().SetTitle('beam energy, MeV'); MG.GetYaxis().SetTitleOffset(1.2)
MG.GetXaxis().SetTitle('Beijing time'); MG.GetXaxis().SetTimeDisplay(1); MG.GetXaxis().SetTimeOffset(0); MG.GetXaxis().SetTimeFormat("%H:%M")
lg = ROOT.TLegend(0.8, 0.8, 0.98, 0.98, '', 'brNDC'); lg.SetFillColor(ROOT.kGreen-9)
lg.AddEntry(PG2, 'BEPC energy', 'lpe')
lg.AddEntry(EG1, 'BEMS: e^{-}', 'lpe')
lg.AddEntry(PG1, 'BEMS: e^{+}', 'lpe')
lg.Draw('SAME')
cv.SetFillColor(ROOT.kGray); cv.GetFrame().SetFillColor(ROOT.kGreen-10)
cv.Modified(); cv.Update(); cv.SaveAs('in-time.png')
return le, lp
print 'Creating plot'
le,lp = Energy_Plot()
print 'Creating html'
Create_html(le,lp)
|
mogproject/calendar-cli | src/calendar_cli/operation/__init__.py | Python | apache-2.0 | 180 | 0 | from .help_operation import HelpOperation
from .setup_operation import Se | tupOperation
from .summary_operation import Su | mmaryOperation
from .create_operation import CreateOperation
|
gwq5210/python_learn | ctypes_test.py | Python | gpl-2.0 | 250 | 0.032 | #!/usr/bin/env python
# cod | ing=utf-8
from ctypes import *;
import os;
#libtest = CDLL(os.getcwd() + '/multiply.so')
#print libtest.multiply(2, 3)
lib = CDLL(". | /lib.so");
s = "nihao";
lib.display_sz(s);
print "done!"
lib.display(s);
print "done!!"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.